xref: /dragonfly/sys/dev/raid/arcmsr/arcmsr.c (revision ef3ac1d1)
1 /*
2 ********************************************************************************
3 **        OS    : FreeBSD
4 **   FILE NAME  : arcmsr.c
5 **        BY    : Erich Chen, Ching Huang
6 **   Description: SCSI RAID Device Driver for
7 **                ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x)
8 **                SATA/SAS RAID HOST Adapter
9 ********************************************************************************
10 ********************************************************************************
11 **
12 ** Copyright (C) 2002 - 2012, Areca Technology Corporation All rights reserved.
13 **
14 ** Redistribution and use in source and binary forms, with or without
15 ** modification, are permitted provided that the following conditions
16 ** are met:
17 ** 1. Redistributions of source code must retain the above copyright
18 **    notice, this list of conditions and the following disclaimer.
19 ** 2. Redistributions in binary form must reproduce the above copyright
20 **    notice, this list of conditions and the following disclaimer in the
21 **    documentation and/or other materials provided with the distribution.
22 ** 3. The name of the author may not be used to endorse or promote products
23 **    derived from this software without specific prior written permission.
24 **
25 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
26 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
27 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
28 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
29 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
30 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
31 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
32 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
33 ** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
34 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 ********************************************************************************
36 ** History
37 **
38 **    REV#         DATE         NAME        DESCRIPTION
39 ** 1.00.00.00   03/31/2004  Erich Chen      First release
40 ** 1.20.00.02   11/29/2004  Erich Chen      bug fix with arcmsr_bus_reset when PHY error
41 ** 1.20.00.03   04/19/2005  Erich Chen      add SATA 24 Ports adapter type support
42 **                                          clean unused function
43 ** 1.20.00.12   09/12/2005  Erich Chen      bug fix with abort command handling,
44 **                                          firmware version check
45 **                                          and firmware update notify for hardware bug fix
46 **                                          handling when the high part of the srb
47 **                                          resource physical address is non-zero
48 ** 1.20.00.13   08/18/2006  Erich Chen      remove pending srb and report busy
49 **                                          add iop message xfer
50 **                                          with scsi pass-through command
51 **                                          add new device id of sas raid adapters
52 **                                          code fit for SPARC64 & PPC
53 ** 1.20.00.14   02/05/2007  Erich Chen      bug fix for incorrect ccb_h.status report
54 **                                          that caused g_vfs_done() read/write errors
55 ** 1.20.00.15   10/10/2007  Erich Chen      support new RAID adapter type ARC120x
56 ** 1.20.00.16   10/10/2009  Erich Chen      Bug fix for RAID adapter type ARC120x
57 **                                          bus_dmamem_alloc() with BUS_DMA_ZERO
58 ** 1.20.00.17   07/15/2010  Ching Huang     Added support ARC1880
59 **                                          report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed,
60 **                                          to prevent cam_periph_error from removing all LUN devices of one Target id
61 **                                          when any one LUN device failed
62 ** 1.20.00.18   10/14/2010  Ching Huang     Fixed "inquiry data fails comparison at DV1 step"
63 **              10/25/2010  Ching Huang     Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B
64 ** 1.20.00.19   11/11/2010  Ching Huang     Fixed arcmsr driver preventing arcsas support for Areca SAS HBA ARC13x0
65 ** 1.20.00.20   12/08/2010  Ching Huang     Avoid calling atomic_set_int function
66 ** 1.20.00.21   02/08/2011  Ching Huang     Implement I/O request timeout
67 **              02/14/2011  Ching Huang     Modified pktRequestCount
68 ** 1.20.00.21   03/03/2011  Ching Huang     if a command times out, wait for its ccb to come back before freeing it
69 ** 1.20.00.22   07/04/2011  Ching Huang     Fixed multiple MTX panic
70 ** 1.20.00.23   10/28/2011  Ching Huang     Added TIMEOUT_DELAY in case of too many HDDs need to start
71 ** 1.20.00.23   11/08/2011  Ching Huang     Added report device transfer speed
72 ** 1.20.00.23   01/30/2012  Ching Huang     Fixed Request requeued and Retrying command
73 ** 1.20.00.24   06/11/2012  Ching Huang     Fixed return sense data condition
74 ** 1.20.00.25   08/17/2012  Ching Huang     Fixed hotplug device no function on type A adapter
75 ** 1.20.00.26   12/14/2012  Ching Huang     Added support ARC1214,1224,1264,1284
76 ** 1.20.00.27   05/06/2013  Ching Huang     Fixed outstanding cmd full on ARC-12x4
77 ** 1.20.00.28   09/13/2013  Ching Huang     Removed recursive mutex in arcmsr_abort_dr_ccbs
78 ** 1.20.00.29   12/18/2013  Ching Huang     Change simq allocation number, support ARC1883
79 ******************************************************************************************
80 * $FreeBSD: head/sys/dev/arcmsr/arcmsr.c 259565 2013-12-18 19:25:40Z delphij $
81 */
82 #if 0
83 #define ARCMSR_DEBUG1			1
84 #endif
85 #include <sys/param.h>
86 #include <sys/systm.h>
87 #include <sys/malloc.h>
88 #include <sys/kernel.h>
89 #include <sys/bus.h>
90 #include <sys/queue.h>
91 #include <sys/stat.h>
92 #include <sys/kthread.h>
93 #include <sys/module.h>
94 #include <sys/proc.h>
95 #include <sys/lock.h>
96 #include <sys/sysctl.h>
97 #include <sys/thread2.h>
98 #include <sys/poll.h>
99 #include <sys/device.h>
100 #include <vm/vm.h>
101 #include <vm/vm_param.h>
102 #include <vm/pmap.h>
103 
104 #include <machine/atomic.h>
105 #include <sys/conf.h>
106 #include <sys/rman.h>
107 
108 #include <bus/cam/cam.h>
109 #include <bus/cam/cam_ccb.h>
110 #include <bus/cam/cam_sim.h>
111 #include <bus/cam/cam_periph.h>
112 #include <bus/cam/cam_xpt_periph.h>
113 #include <bus/cam/cam_xpt_sim.h>
114 #include <bus/cam/cam_debug.h>
115 #include <bus/cam/scsi/scsi_all.h>
116 #include <bus/cam/scsi/scsi_message.h>
117 /*
118 **************************************************************************
119 **************************************************************************
120 */
121 #include <sys/endian.h>
122 #include <bus/pci/pcivar.h>
123 #include <bus/pci/pcireg.h>
124 
125 #define arcmsr_callout_init(a)	callout_init_mp(a);
126 
127 #define ARCMSR_DRIVER_VERSION	"arcmsr version 1.20.00.29 2013-12-18"
128 #include <dev/raid/arcmsr/arcmsr.h>
129 /*
130 **************************************************************************
131 **************************************************************************
132 */
133 static void arcmsr_free_srb(struct CommandControlBlock *srb);
134 static struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb);
135 static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb);
136 static int arcmsr_probe(device_t dev);
137 static int arcmsr_attach(device_t dev);
138 static int arcmsr_detach(device_t dev);
139 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
140 static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
141 static int arcmsr_shutdown(device_t dev);
142 static void arcmsr_interrupt(struct AdapterControlBlock *acb);
143 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
144 static void arcmsr_free_resource(struct AdapterControlBlock *acb);
145 static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
146 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
147 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
148 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
149 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
150 static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, struct QBUFFER *prbuffer);
151 static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb);
152 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
153 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
154 static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
155 static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
156 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg);
157 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb);
158 static int arcmsr_resume(device_t dev);
159 static int arcmsr_suspend(device_t dev);
160 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
161 static void	arcmsr_polling_devmap(void *arg);
162 static void	arcmsr_srb_timeout(void *arg);
163 static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb);
164 #ifdef ARCMSR_DEBUG1
165 static void arcmsr_dump_data(struct AdapterControlBlock *acb);
166 #endif
167 /*
168 **************************************************************************
169 **************************************************************************
170 */
171 static void UDELAY(u_int32_t us) { DELAY(us); }
172 /*
173 **************************************************************************
174 **************************************************************************
175 */
176 static bus_dmamap_callback_t arcmsr_map_free_srb;
177 static bus_dmamap_callback_t arcmsr_execute_srb;
178 /*
179 **************************************************************************
180 **************************************************************************
181 */
182 static d_open_t	arcmsr_open;
183 static d_close_t arcmsr_close;
184 static d_ioctl_t arcmsr_ioctl;
185 
186 static device_method_t arcmsr_methods[]={
187 	DEVMETHOD(device_probe,		arcmsr_probe),
188 	DEVMETHOD(device_attach,	arcmsr_attach),
189 	DEVMETHOD(device_detach,	arcmsr_detach),
190 	DEVMETHOD(device_shutdown,	arcmsr_shutdown),
191 	DEVMETHOD(device_suspend,	arcmsr_suspend),
192 	DEVMETHOD(device_resume,	arcmsr_resume),
193 	DEVMETHOD_END
194 };
195 
196 static driver_t arcmsr_driver={
197 	"arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
198 };
199 
200 static devclass_t arcmsr_devclass;
201 DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, NULL, NULL);
202 MODULE_VERSION(arcmsr, 1);
203 MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
204 MODULE_DEPEND(arcmsr, cam, 1, 1, 1);
205 #ifndef BUS_DMA_COHERENT
206 	#define	BUS_DMA_COHERENT	0x04	/* hint: map memory in a coherent way */
207 #endif
208 
209 static struct dev_ops arcmsr_ops = {
210 	{ "arcmsr", 0, D_MPSAFE },
211 	.d_open =	arcmsr_open,		        /* open     */
212 	.d_close =	arcmsr_close,		        /* close    */
213 	.d_ioctl =	arcmsr_ioctl,		        /* ioctl    */
214 };
215 
216 static int	arcmsr_msi_enable = 1;
217 TUNABLE_INT("hw.arcmsr.msi.enable", &arcmsr_msi_enable);
218 
219 
220 /*
221 **************************************************************************
222 **************************************************************************
223 */
224 
225 static int
226 arcmsr_open(struct dev_open_args *ap)
227 {
228 	cdev_t dev = ap->a_head.a_dev;
229 	struct AdapterControlBlock *acb = dev->si_drv1;
230 
231 	if(acb == NULL) {
232 		return ENXIO;
233 	}
234 	return (0);
235 }
236 
237 /*
238 **************************************************************************
239 **************************************************************************
240 */
241 
242 static int
243 arcmsr_close(struct dev_close_args *ap)
244 {
245 	cdev_t dev = ap->a_head.a_dev;
246 	struct AdapterControlBlock *acb = dev->si_drv1;
247 
248 	if(acb == NULL) {
249 		return ENXIO;
250 	}
251 	return 0;
252 }
253 
254 /*
255 **************************************************************************
256 **************************************************************************
257 */
258 
259 static int
260 arcmsr_ioctl(struct dev_ioctl_args *ap)
261 {
262 	cdev_t dev = ap->a_head.a_dev;
263 	u_long ioctl_cmd = ap->a_cmd;
264 	caddr_t arg = ap->a_data;
265 	struct AdapterControlBlock *acb = dev->si_drv1;
266 
267 	if(acb == NULL) {
268 		return ENXIO;
269 	}
270 	return (arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
271 }
272 
273 /*
274 **********************************************************************
275 **********************************************************************
276 */
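/*
** Mask every outbound interrupt source for the adapter type in use and
** return the previous interrupt mask so the caller can restore it later.
*/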
277 static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb)
278 {
279 	u_int32_t intmask_org = 0;
280 
281 	switch (acb->adapter_type) {
282 	case ACB_ADAPTER_TYPE_A: {
283 			/* disable all outbound interrupt */
284 			intmask_org = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */
285 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
286 		}
287 		break;
288 	case ACB_ADAPTER_TYPE_B: {
289 			/* disable all outbound interrupt */
290 			intmask_org = CHIP_REG_READ32(HBB_DOORBELL,
291 			0, iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */
292 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, 0); /* disable all interrupt */
293 		}
294 		break;
295 	case ACB_ADAPTER_TYPE_C: {
296 			/* disable all outbound interrupt */
297 			intmask_org = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask); /* disable outbound message0 int */
298 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
299 		}
300 		break;
301 	case ACB_ADAPTER_TYPE_D: {
302 			/* disable all outbound interrupt */
303 			intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable); /* disable outbound message0 int */
304 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE);
305 		}
306 		break;
307 	}
308 	return (intmask_org);
309 }
310 /*
311 **********************************************************************
312 **********************************************************************
313 */
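/*
** Re-enable the outbound post queue / doorbell / message interrupts by
** restoring the mask saved by arcmsr_disable_allintr(), and record the
** enabled bits in acb->outbound_int_enable.
*/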
314 static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org)
315 {
316 	u_int32_t mask;
317 
318 	switch (acb->adapter_type) {
319 	case ACB_ADAPTER_TYPE_A: {
320 			/* enable outbound Post Queue, outbound doorbell Interrupt */
321 			mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
322 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask);
323 			acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
324 		}
325 		break;
326 	case ACB_ADAPTER_TYPE_B: {
327 			/* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
328 			mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
329 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/
330 			acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
331 		}
332 		break;
333 	case ACB_ADAPTER_TYPE_C: {
334 			/* enable outbound Post Queue, outbound doorbell Interrupt */
335 			mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
336 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask);
337 			acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
338 		}
339 		break;
340 	case ACB_ADAPTER_TYPE_D: {
341 			/* enable outbound Post Queue, outbound doorbell Interrupt */
342 			mask = ARCMSR_HBDMU_ALL_INT_ENABLE;
343 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | mask);
344 			CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable);
345 			acb->outbound_int_enable = mask;
346 		}
347 		break;
348 	}
349 }
350 /*
351 **********************************************************************
352 **********************************************************************
353 */
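/*
** Poll the type A outbound interrupt status for the message0 interrupt and
** acknowledge it when seen; gives up after roughly 20 seconds of polling.
** The type B/C/D variants below do the same against their own doorbell
** registers.
*/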
354 static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
355 {
356 	u_int32_t Index;
357 	u_int8_t Retries = 0x00;
358 
359 	do {
360 		for(Index=0; Index < 100; Index++) {
361 			if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
362 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/
363 				return TRUE;
364 			}
365 			UDELAY(10000);
366 		}/*max 1 seconds*/
367 	}while(Retries++ < 20);/*max 20 sec*/
368 	return (FALSE);
369 }
370 /*
371 **********************************************************************
372 **********************************************************************
373 */
374 static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
375 {
376 	u_int32_t Index;
377 	u_int8_t Retries = 0x00;
378 
379 	do {
380 		for(Index=0; Index < 100; Index++) {
381 			if(CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
382 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/
383 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
384 				return TRUE;
385 			}
386 			UDELAY(10000);
387 		}/*max 1 seconds*/
388 	}while(Retries++ < 20);/*max 20 sec*/
389 	return (FALSE);
390 }
391 /*
392 **********************************************************************
393 **********************************************************************
394 */
395 static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb)
396 {
397 	u_int32_t Index;
398 	u_int8_t Retries = 0x00;
399 
400 	do {
401 		for(Index=0; Index < 100; Index++) {
402 			if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
403 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/
404 				return TRUE;
405 			}
406 			UDELAY(10000);
407 		}/*max 1 seconds*/
408 	}while(Retries++ < 20);/*max 20 sec*/
409 	return (FALSE);
410 }
411 /*
412 **********************************************************************
413 **********************************************************************
414 */
415 static u_int8_t arcmsr_hbd_wait_msgint_ready(struct AdapterControlBlock *acb)
416 {
417 	u_int32_t Index;
418 	u_int8_t Retries = 0x00;
419 
420 	do {
421 		for(Index=0; Index < 100; Index++) {
422 			if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) {
423 				CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);/*clear interrupt*/
424 				return TRUE;
425 			}
426 			UDELAY(10000);
427 		}/*max 1 seconds*/
428 	}while(Retries++ < 20);/*max 20 sec*/
429 	return (FALSE);
430 }
431 /*
432 ************************************************************************
433 ************************************************************************
434 */
435 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
436 {
437 	int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
438 
439 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
440 	do {
441 		if(arcmsr_hba_wait_msgint_ready(acb)) {
442 			break;
443 		} else {
444 			retry_count--;
445 		}
446 	}while(retry_count != 0);
447 }
448 /*
449 ************************************************************************
450 ************************************************************************
451 */
452 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
453 {
454 	int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
455 
456 	CHIP_REG_WRITE32(HBB_DOORBELL,
457 	0, drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
458 	do {
459 		if(arcmsr_hbb_wait_msgint_ready(acb)) {
460 			break;
461 		} else {
462 			retry_count--;
463 		}
464 	}while(retry_count != 0);
465 }
466 /*
467 ************************************************************************
468 ************************************************************************
469 */
470 static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
471 {
472 	int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
473 
474 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
475 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
476 	do {
477 		if(arcmsr_hbc_wait_msgint_ready(acb)) {
478 			break;
479 		} else {
480 			retry_count--;
481 		}
482 	}while(retry_count != 0);
483 }
484 /*
485 ************************************************************************
486 ************************************************************************
487 */
488 static void arcmsr_flush_hbd_cache(struct AdapterControlBlock *acb)
489 {
490 	int retry_count = 30; /* enlarge wait flush adapter cache time: 10 minute */
491 
492 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
493 	do {
494 		if(arcmsr_hbd_wait_msgint_ready(acb)) {
495 			break;
496 		} else {
497 			retry_count--;
498 		}
499 	}while(retry_count != 0);
500 }
501 /*
502 ************************************************************************
503 ************************************************************************
504 */
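/*
** Ask the firmware to flush the adapter's write cache, using the
** per-adapter-type message helper above.
*/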
505 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
506 {
507 	switch (acb->adapter_type) {
508 	case ACB_ADAPTER_TYPE_A: {
509 			arcmsr_flush_hba_cache(acb);
510 		}
511 		break;
512 	case ACB_ADAPTER_TYPE_B: {
513 			arcmsr_flush_hbb_cache(acb);
514 		}
515 		break;
516 	case ACB_ADAPTER_TYPE_C: {
517 			arcmsr_flush_hbc_cache(acb);
518 		}
519 		break;
520 	case ACB_ADAPTER_TYPE_D: {
521 			arcmsr_flush_hbd_cache(acb);
522 		}
523 		break;
524 	}
525 }
526 /*
527 *******************************************************************************
528 *******************************************************************************
529 */
530 static int arcmsr_suspend(device_t dev)
531 {
532 	struct AdapterControlBlock	*acb = device_get_softc(dev);
533 
534 	/* flush controller */
535 	arcmsr_iop_parking(acb);
536 	/* disable all outbound interrupt */
537 	arcmsr_disable_allintr(acb);
538 	return(0);
539 }
540 /*
541 *******************************************************************************
542 *******************************************************************************
543 */
544 static int arcmsr_resume(device_t dev)
545 {
546 	struct AdapterControlBlock	*acb = device_get_softc(dev);
547 
548 	arcmsr_iop_init(acb);
549 	return(0);
550 }
551 /*
552 *********************************************************************************
553 *********************************************************************************
554 */
555 static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg)
556 {
557 	struct AdapterControlBlock *acb;
558 	u_int8_t target_id, target_lun;
559 	struct cam_sim *sim;
560 
561 	sim = (struct cam_sim *) cb_arg;
562 	acb =(struct AdapterControlBlock *) cam_sim_softc(sim);
563 	switch (code) {
564 	case AC_LOST_DEVICE:
565 		target_id = xpt_path_target_id(path);
566 		target_lun = xpt_path_lun_id(path);
567 		if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) {
568 			break;
569 		}
570 	//	printf("%s:scsi id=%d lun=%d device lost \n", device_get_name(acb->pci_dev), target_id, target_lun);
571 		break;
572 	default:
573 		break;
574 	}
575 }
576 /*
577 **********************************************************************
578 **********************************************************************
579 */
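/*
** Report a CHECK CONDITION to CAM: set CAM_SCSI_STATUS_ERROR on the ccb and
** copy the sense data returned in the arcmsr_cdb into the csio, marking the
** autosense data valid.
*/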
580 static void arcmsr_report_sense_info(struct CommandControlBlock *srb)
581 {
582 	union ccb *pccb = srb->pccb;
583 
584 	pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
585 	pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
586 	if(pccb->csio.sense_len) {
587 		memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data));
588 		memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData,
589 		get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data)));
590 		((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */
591 		pccb->ccb_h.status |= CAM_AUTOSNS_VALID;
592 	}
593 }
594 /*
595 *********************************************************************
596 *********************************************************************
597 */
598 static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
599 {
600 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
601 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
602 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
603 	}
604 }
605 /*
606 *********************************************************************
607 *********************************************************************
608 */
609 static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
610 {
611 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
612 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
613 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
614 	}
615 }
616 /*
617 *********************************************************************
618 *********************************************************************
619 */
620 static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb)
621 {
622 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
623 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
624 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
625 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
626 	}
627 }
628 /*
629 *********************************************************************
630 *********************************************************************
631 */
632 static void arcmsr_abort_hbd_allcmd(struct AdapterControlBlock *acb)
633 {
634 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
635 	if(!arcmsr_hbd_wait_msgint_ready(acb)) {
636 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
637 	}
638 }
639 /*
640 *********************************************************************
641 *********************************************************************
642 */
643 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
644 {
645 	switch (acb->adapter_type) {
646 	case ACB_ADAPTER_TYPE_A: {
647 			arcmsr_abort_hba_allcmd(acb);
648 		}
649 		break;
650 	case ACB_ADAPTER_TYPE_B: {
651 			arcmsr_abort_hbb_allcmd(acb);
652 		}
653 		break;
654 	case ACB_ADAPTER_TYPE_C: {
655 			arcmsr_abort_hbc_allcmd(acb);
656 		}
657 		break;
658 	case ACB_ADAPTER_TYPE_D: {
659 			arcmsr_abort_hbd_allcmd(acb);
660 		}
661 		break;
662 	}
663 }
664 /*
665 **********************************************************************
666 **********************************************************************
667 */
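/*
** Complete an srb back to CAM: stop its timeout callout, sync and unload the
** data DMA map, drop the outstanding command count (releasing the frozen SIM
** queue once it falls low enough), free the srb unless it already timed out,
** and return the ccb with xpt_done().
*/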
668 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
669 {
670 	struct AdapterControlBlock *acb = srb->acb;
671 	union ccb *pccb = srb->pccb;
672 
673 	if(srb->srb_flags & SRB_FLAG_TIMER_START)
674 		callout_stop(&srb->ccb_callout);
675 	if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
676 		bus_dmasync_op_t op;
677 
678 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
679 			op = BUS_DMASYNC_POSTREAD;
680 		} else {
681 			op = BUS_DMASYNC_POSTWRITE;
682 		}
683 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
684 		bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
685 	}
686 	if(stand_flag == 1) {
687 		atomic_subtract_int(&acb->srboutstandingcount, 1);
688 		if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && (
689 		acb->srboutstandingcount < (acb->maxOutstanding -10))) {
690 			acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN;
691 			pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
692 		}
693 	}
694 	if(srb->srb_state != ARCMSR_SRB_TIMEOUT)
695 		arcmsr_free_srb(srb);
696 	acb->pktReturnCount++;
697 	xpt_done(pccb);
698 }
699 /*
700 **************************************************************************
701 **************************************************************************
702 */
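/*
** Translate the firmware's per-command DeviceStatus into a CAM status,
** update the per target/lun devstate table, and complete the srb.
*/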
703 static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error)
704 {
705 	int target, lun;
706 
707 	target = srb->pccb->ccb_h.target_id;
708 	lun = srb->pccb->ccb_h.target_lun;
709 	if(error == FALSE) {
710 		if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
711 			acb->devstate[target][lun] = ARECA_RAID_GOOD;
712 		}
713 		srb->pccb->ccb_h.status |= CAM_REQ_CMP;
714 		arcmsr_srb_complete(srb, 1);
715 	} else {
716 		switch(srb->arcmsr_cdb.DeviceStatus) {
717 		case ARCMSR_DEV_SELECT_TIMEOUT: {
718 				if(acb->devstate[target][lun] == ARECA_RAID_GOOD) {
719 					kprintf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun);
720 				}
721 				acb->devstate[target][lun] = ARECA_RAID_GONE;
722 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
723 				arcmsr_srb_complete(srb, 1);
724 			}
725 			break;
726 		case ARCMSR_DEV_ABORTED:
727 		case ARCMSR_DEV_INIT_FAIL: {
728 				acb->devstate[target][lun] = ARECA_RAID_GONE;
729 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
730 				arcmsr_srb_complete(srb, 1);
731 			}
732 			break;
733 		case SCSISTAT_CHECK_CONDITION: {
734 				acb->devstate[target][lun] = ARECA_RAID_GOOD;
735 				arcmsr_report_sense_info(srb);
736 				arcmsr_srb_complete(srb, 1);
737 			}
738 			break;
739 		default:
740 			kprintf("arcmsr%d: scsi id=%d lun=%d isr got command error done, but got unknown DeviceStatus=0x%x \n"
741 					, acb->pci_unit, target, lun, srb->arcmsr_cdb.DeviceStatus);
742 			acb->devstate[target][lun] = ARECA_RAID_GONE;
743 			srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
744 			/*unknown error or crc error just for retry*/
745 			arcmsr_srb_complete(srb, 1);
746 			break;
747 		}
748 	}
749 }
750 /*
751 **************************************************************************
752 **************************************************************************
753 */
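/*
** Convert one reply-queue entry back into its srb (the flag value encodes
** the srb's physical address) and complete it, warning if the srb was not
** outstanding or had already timed out.
*/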
754 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error)
755 {
756 	struct CommandControlBlock *srb;
757 
758 	/* check if command done with no error*/
759 	switch (acb->adapter_type) {
760 	case ACB_ADAPTER_TYPE_C:
761 	case ACB_ADAPTER_TYPE_D:
762 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0)); /*frame must be 32 bytes aligned*/
763 		break;
764 	case ACB_ADAPTER_TYPE_A:
765 	case ACB_ADAPTER_TYPE_B:
766 	default:
767 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
768 		break;
769 	}
770 	if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
771 		if(srb->srb_state == ARCMSR_SRB_TIMEOUT) {
772 			arcmsr_free_srb(srb);
773 			kprintf("arcmsr%d: srb='%p' returned srb has already timed out\n", acb->pci_unit, srb);
774 			return;
775 		}
776 		kprintf("arcmsr%d: returned srb has already been completed\n"
777 			"srb='%p' srb_state=0x%x outstanding srb count=%d \n",
778 			acb->pci_unit, srb, srb->srb_state, acb->srboutstandingcount);
779 		return;
780 	}
781 	arcmsr_report_srb_state(acb, srb, error);
782 }
783 /*
784 **************************************************************************
785 **************************************************************************
786 */
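/*
** Callout handler for an I/O request that exceeded its timeout: if the srb
** is still outstanding, mark it ARCMSR_SRB_TIMEOUT and complete it back to
** CAM with CAM_CMD_TIMEOUT.
*/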
787 static void	arcmsr_srb_timeout(void *arg)
788 {
789 	struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
790 	struct AdapterControlBlock *acb;
791 	int target, lun;
792 	u_int8_t cmd;
793 
794 	target = srb->pccb->ccb_h.target_id;
795 	lun = srb->pccb->ccb_h.target_lun;
796 	acb = srb->acb;
797 	ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
798 	if(srb->srb_state == ARCMSR_SRB_START)
799 	{
800 		cmd = srb->pccb->csio.cdb_io.cdb_bytes[0];
801 		srb->srb_state = ARCMSR_SRB_TIMEOUT;
802 		srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT;
803 		arcmsr_srb_complete(srb, 1);
804 		kprintf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command time out!\n",
805 				 acb->pci_unit, target, lun, cmd, srb);
806 	}
807 	ARCMSR_LOCK_RELEASE(&acb->isr_lock);
808 #ifdef ARCMSR_DEBUG1
809 	arcmsr_dump_data(acb);
810 #endif
811 }
812 
813 /*
814 **********************************************************************
815 **********************************************************************
816 */
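/*
** Drain whatever completions are still sitting in the outbound post/done
** queue of the adapter type in use, so an abort or reset does not leave
** stale replies behind.
*/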
817 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
818 {
819 	int i=0;
820 	u_int32_t flag_srb;
821 	u_int16_t error;
822 
823 	switch (acb->adapter_type) {
824 	case ACB_ADAPTER_TYPE_A: {
825 			u_int32_t outbound_intstatus;
826 
827 			/*clear and abort all outbound posted Q*/
828 			outbound_intstatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
829 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
830 			while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
831                 error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
832 				arcmsr_drain_donequeue(acb, flag_srb, error);
833 			}
834 		}
835 		break;
836 	case ACB_ADAPTER_TYPE_B: {
837 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
838 
839 			/*clear all outbound posted Q*/
840 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
841 			for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
842 				if((flag_srb = phbbmu->done_qbuffer[i]) != 0) {
843 					phbbmu->done_qbuffer[i] = 0;
844                 	error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
845 					arcmsr_drain_donequeue(acb, flag_srb, error);
846 				}
847 				phbbmu->post_qbuffer[i] = 0;
848 			}/*drain reply FIFO*/
849 			phbbmu->doneq_index = 0;
850 			phbbmu->postq_index = 0;
851 		}
852 		break;
853 	case ACB_ADAPTER_TYPE_C: {
854 
855 			while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
856 				flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
857                 error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
858 				arcmsr_drain_donequeue(acb, flag_srb, error);
859 			}
860 		}
861 		break;
862 	case ACB_ADAPTER_TYPE_D: {
863 			arcmsr_hbd_postqueue_isr(acb);
864 		}
865 		break;
866 	}
867 }
868 /*
869 ****************************************************************************
870 ****************************************************************************
871 */
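/*
** Reset the I/O processor state: with interrupts masked, drain the reply
** queue, tell the firmware to abort all outstanding commands, complete every
** srb still marked as started with CAM_REQ_ABORTED, and clear the srb
** bookkeeping counters.
*/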
872 static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
873 {
874 	struct CommandControlBlock *srb;
875 	u_int32_t intmask_org;
876 	u_int32_t i=0;
877 
878 	if(acb->srboutstandingcount>0) {
879 		/* disable all outbound interrupt */
880 		intmask_org = arcmsr_disable_allintr(acb);
881 		/*clear and abort all outbound posted Q*/
882 		arcmsr_done4abort_postqueue(acb);
883 		/* tell the iop (331) that its outstanding commands are being aborted */
884 		arcmsr_abort_allcmd(acb);
885 		for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
886 			srb = acb->psrb_pool[i];
887 			if(srb->srb_state == ARCMSR_SRB_START) {
888 				srb->srb_state = ARCMSR_SRB_ABORTED;
889 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
890 				arcmsr_srb_complete(srb, 1);
891 				kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p' aborted\n"
892 						, acb->pci_unit, srb->pccb->ccb_h.target_id
893 						, (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
894 			}
895 		}
896 		/* enable all outbound interrupt */
897 		arcmsr_enable_allintr(acb, intmask_org);
898 	}
899 	acb->srboutstandingcount = 0;
900 	acb->workingsrb_doneindex = 0;
901 	acb->workingsrb_startindex = 0;
902 	acb->pktRequestCount = 0;
903 	acb->pktReturnCount = 0;
904 }
905 /*
906 **********************************************************************
907 **********************************************************************
908 */
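/*
** Build the ARCMSR_CDB for a CAM SCSI I/O request: copy the CDB bytes and
** construct the scatter/gather list, using 32-bit SG entries for segments
** below 4G and 64-bit entries (split at 4G boundaries) otherwise.
*/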
909 static void arcmsr_build_srb(struct CommandControlBlock *srb,
910 		bus_dma_segment_t *dm_segs, u_int32_t nseg)
911 {
912 	struct ARCMSR_CDB *arcmsr_cdb = &srb->arcmsr_cdb;
913 	u_int8_t *psge = (u_int8_t *)&arcmsr_cdb->u;
914 	u_int32_t address_lo, address_hi;
915 	union ccb *pccb = srb->pccb;
916 	struct ccb_scsiio *pcsio = &pccb->csio;
917 	u_int32_t arccdbsize = 0x30;
918 
919 	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
920 	arcmsr_cdb->Bus = 0;
921 	arcmsr_cdb->TargetID = pccb->ccb_h.target_id;
922 	arcmsr_cdb->LUN = pccb->ccb_h.target_lun;
923 	arcmsr_cdb->Function = 1;
924 	arcmsr_cdb->CdbLength = (u_int8_t)pcsio->cdb_len;
925 	bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
926 	if(nseg != 0) {
927 		struct AdapterControlBlock *acb = srb->acb;
928 		bus_dmasync_op_t op;
929 		u_int32_t length, i, cdb_sgcount = 0;
930 
931 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
932 			op = BUS_DMASYNC_PREREAD;
933 		} else {
934 			op = BUS_DMASYNC_PREWRITE;
935 			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
936 			srb->srb_flags |= SRB_FLAG_WRITE;
937 		}
938 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
939 		for(i=0; i < nseg; i++) {
940 			/* Get the physical address of the current data pointer */
941 			length = arcmsr_htole32(dm_segs[i].ds_len);
942 			address_lo = arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
943 			address_hi = arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
944 			if(address_hi == 0) {
945 				struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
946 				pdma_sg->address = address_lo;
947 				pdma_sg->length = length;
948 				psge += sizeof(struct SG32ENTRY);
949 				arccdbsize += sizeof(struct SG32ENTRY);
950 			} else {
951 				u_int32_t sg64s_size = 0, tmplength = length;
952 
953 				while(1) {
954 					u_int64_t span4G, length0;
955 					struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
956 
957 					span4G = (u_int64_t)address_lo + tmplength;
958 					pdma_sg->addresshigh = address_hi;
959 					pdma_sg->address = address_lo;
960 					if(span4G > 0x100000000) {
961 						/*see if cross 4G boundary*/
962 						length0 = 0x100000000-address_lo;
963 						pdma_sg->length = (u_int32_t)length0 | IS_SG64_ADDR;
964 						address_hi = address_hi+1;
965 						address_lo = 0;
966 						tmplength = tmplength - (u_int32_t)length0;
967 						sg64s_size += sizeof(struct SG64ENTRY);
968 						psge += sizeof(struct SG64ENTRY);
969 						cdb_sgcount++;
970 					} else {
971 						pdma_sg->length = tmplength | IS_SG64_ADDR;
972 						sg64s_size += sizeof(struct SG64ENTRY);
973 						psge += sizeof(struct SG64ENTRY);
974 						break;
975 					}
976 				}
977 				arccdbsize += sg64s_size;
978 			}
979 			cdb_sgcount++;
980 		}
981 		arcmsr_cdb->sgcount = (u_int8_t)cdb_sgcount;
982 		arcmsr_cdb->DataLength = pcsio->dxfer_len;
983 		if( arccdbsize > 256) {
984 			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
985 		}
986 	} else {
987 		arcmsr_cdb->DataLength = 0;
988 	}
989     srb->arc_cdb_size = arccdbsize;
990     arcmsr_cdb->msgPages = (arccdbsize/256) + ((arccdbsize % 256) ? 1 : 0);
991 }
992 /*
993 **************************************************************************
994 **************************************************************************
995 */
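/*
** Post a built srb to the adapter.  Each adapter type has its own inbound
** mechanism: a queue port register (type A/C), a shared post buffer plus
** doorbell (type B), or an in-memory inbound list with a write pointer
** register (type D).
*/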
996 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
997 {
998 	u_int32_t cdb_phyaddr_low = (u_int32_t) srb->cdb_phyaddr_low;
999 	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&srb->arcmsr_cdb;
1000 
1001 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD);
1002 	atomic_add_int(&acb->srboutstandingcount, 1);
1003 	srb->srb_state = ARCMSR_SRB_START;
1004 
1005 	switch (acb->adapter_type) {
1006 	case ACB_ADAPTER_TYPE_A: {
1007 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1008 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low|ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
1009 			} else {
1010 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low);
1011 			}
1012 		}
1013 		break;
1014 	case ACB_ADAPTER_TYPE_B: {
1015 			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
1016 			int ending_index, index;
1017 
1018 			index = phbbmu->postq_index;
1019 			ending_index = ((index+1) % ARCMSR_MAX_HBB_POSTQUEUE);
1020 			phbbmu->post_qbuffer[ending_index] = 0;
1021 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1022 				phbbmu->post_qbuffer[index] = cdb_phyaddr_low | ARCMSR_SRBPOST_FLAG_SGL_BSIZE;
1023 			} else {
1024 				phbbmu->post_qbuffer[index] = cdb_phyaddr_low;
1025 			}
1026 			index++;
1027 			index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
1028 			phbbmu->postq_index = index;
1029 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
1030 		}
1031 		break;
1032     case ACB_ADAPTER_TYPE_C: {
1033             u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;
1034 
1035             arc_cdb_size = (srb->arc_cdb_size > 0x300) ? 0x300 : srb->arc_cdb_size;
1036             ccb_post_stamp = (cdb_phyaddr_low | ((arc_cdb_size-1) >> 6) | 1);
1037 			cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
1038             if(cdb_phyaddr_hi32)
1039             {
1040 			    CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32);
1041 			    CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
1042             }
1043             else
1044             {
1045 			    CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
1046             }
1047         }
1048         break;
1049 	case ACB_ADAPTER_TYPE_D: {
1050 			struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
1051 			u_int16_t index_stripped;
1052 			u_int16_t postq_index;
1053 			struct InBound_SRB *pinbound_srb;
1054 
1055 			ARCMSR_LOCK_ACQUIRE(&acb->postDone_lock);
1056 			postq_index = phbdmu->postq_index;
1057 			pinbound_srb = (struct InBound_SRB *)&phbdmu->post_qbuffer[postq_index & 0xFF];
1058 			pinbound_srb->addressHigh = srb->cdb_phyaddr_high;
1059 			pinbound_srb->addressLow = srb->cdb_phyaddr_low;
1060 			pinbound_srb->length = srb->arc_cdb_size >> 2;
1061 			arcmsr_cdb->Context = srb->cdb_phyaddr_low;
1062 			if (postq_index & 0x4000) {
1063 				index_stripped = postq_index & 0xFF;
1064 				index_stripped += 1;
1065 				index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
1066 				phbdmu->postq_index = index_stripped ? (index_stripped | 0x4000) : index_stripped;
1067 			} else {
1068 				index_stripped = postq_index;
1069 				index_stripped += 1;
1070 				index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
1071 				phbdmu->postq_index = index_stripped ? index_stripped : (index_stripped | 0x4000);
1072 			}
1073 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inboundlist_write_pointer, postq_index);
1074 			ARCMSR_LOCK_RELEASE(&acb->postDone_lock);
1075 		}
1076 		break;
1077 	}
1078 }
1079 /*
1080 ************************************************************************
1081 ************************************************************************
1082 */
1083 static struct QBUFFER *arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb)
1084 {
1085 	struct QBUFFER *qbuffer=NULL;
1086 
1087 	switch (acb->adapter_type) {
1088 	case ACB_ADAPTER_TYPE_A: {
1089 			struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;
1090 
1091 			qbuffer = (struct QBUFFER *)&phbamu->message_rbuffer;
1092 		}
1093 		break;
1094 	case ACB_ADAPTER_TYPE_B: {
1095 			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
1096 
1097 			qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
1098 		}
1099 		break;
1100 	case ACB_ADAPTER_TYPE_C: {
1101 			struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;
1102 
1103 			qbuffer = (struct QBUFFER *)&phbcmu->message_rbuffer;
1104 		}
1105 		break;
1106 	case ACB_ADAPTER_TYPE_D: {
1107 			struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
1108 
1109 			qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_rbuffer;
1110 		}
1111 		break;
1112 	}
1113 	return(qbuffer);
1114 }
1115 /*
1116 ************************************************************************
1117 ************************************************************************
1118 */
1119 static struct QBUFFER *arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb)
1120 {
1121 	struct QBUFFER *qbuffer = NULL;
1122 
1123 	switch (acb->adapter_type) {
1124 	case ACB_ADAPTER_TYPE_A: {
1125 			struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;
1126 
1127 			qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
1128 		}
1129 		break;
1130 	case ACB_ADAPTER_TYPE_B: {
1131 			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
1132 
1133 			qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
1134 		}
1135 		break;
1136 	case ACB_ADAPTER_TYPE_C: {
1137 			struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;
1138 
1139 			qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
1140 		}
1141 		break;
1142 	case ACB_ADAPTER_TYPE_D: {
1143 			struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
1144 
1145 			qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_wbuffer;
1146 		}
1147 		break;
1148 	}
1149 	return(qbuffer);
1150 }
1151 /*
1152 **************************************************************************
1153 **************************************************************************
1154 */
1155 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1156 {
1157 	switch (acb->adapter_type) {
1158 	case ACB_ADAPTER_TYPE_A: {
1159 			/* let IOP know data has been read */
1160 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
1161 		}
1162 		break;
1163 	case ACB_ADAPTER_TYPE_B: {
1164 			/* let IOP know data has been read */
1165 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
1166 		}
1167 		break;
1168 	case ACB_ADAPTER_TYPE_C: {
1169 			/* let IOP know data has been read */
1170 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
1171 		}
1172 		break;
1173 	case ACB_ADAPTER_TYPE_D: {
1174 			/* let IOP know data has been read */
1175 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ);
1176 		}
1177 		break;
1178 	}
1179 }
1180 /*
1181 **************************************************************************
1182 **************************************************************************
1183 */
1184 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1185 {
1186 	switch (acb->adapter_type) {
1187 	case ACB_ADAPTER_TYPE_A: {
1188 			/*
1189 			** ring the inbound doorbell to tell the IOP the driver data write is done,
1190 			** then wait for the reply on the next hwinterrupt before posting the next Qbuffer
1191 			*/
1192 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
1193 		}
1194 		break;
1195 	case ACB_ADAPTER_TYPE_B: {
1196 			/*
1197 			** ring the inbound doorbell to tell the IOP the driver data write is done,
1198 			** then wait for the reply on the next hwinterrupt before posting the next Qbuffer
1199 			*/
1200 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
1201 		}
1202 		break;
1203 	case ACB_ADAPTER_TYPE_C: {
1204 			/*
1205 			** ring the inbound doorbell to tell the IOP the driver data write is done,
1206 			** then wait for the reply on the next hwinterrupt before posting the next Qbuffer
1207 			*/
1208 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
1209 		}
1210 		break;
1211 	case ACB_ADAPTER_TYPE_D: {
1212 			/*
1213 			** ring the inbound doorbell to tell the IOP the driver data write is done,
1214 			** then wait for the reply on the next hwinterrupt before posting the next Qbuffer
1215 			*/
1216 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_IN_READY);
1217 		}
1218 		break;
1219 	}
1220 }
1221 /*
1222 ************************************************************************
1223 ************************************************************************
1224 */
1225 static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
1226 {
1227 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1228 	CHIP_REG_WRITE32(HBA_MessageUnit,
1229 	0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1230 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
1231 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1232 			, acb->pci_unit);
1233 	}
1234 }
1235 /*
1236 ************************************************************************
1237 ************************************************************************
1238 */
1239 static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
1240 {
1241 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1242 	CHIP_REG_WRITE32(HBB_DOORBELL,
1243 	0, drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
1244 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
1245 		kprintf( "arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1246 			, acb->pci_unit);
1247 	}
1248 }
1249 /*
1250 ************************************************************************
1251 ************************************************************************
1252 */
1253 static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb)
1254 {
1255 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1256 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1257 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1258 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
1259 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
1260 	}
1261 }
1262 /*
1263 ************************************************************************
1264 ************************************************************************
1265 */
1266 static void arcmsr_stop_hbd_bgrb(struct AdapterControlBlock *acb)
1267 {
1268 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1269 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1270 	if(!arcmsr_hbd_wait_msgint_ready(acb)) {
1271 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
1272 	}
1273 }
1274 /*
1275 ************************************************************************
1276 ************************************************************************
1277 */
1278 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1279 {
1280 	switch (acb->adapter_type) {
1281 	case ACB_ADAPTER_TYPE_A: {
1282 			arcmsr_stop_hba_bgrb(acb);
1283 		}
1284 		break;
1285 	case ACB_ADAPTER_TYPE_B: {
1286 			arcmsr_stop_hbb_bgrb(acb);
1287 		}
1288 		break;
1289 	case ACB_ADAPTER_TYPE_C: {
1290 			arcmsr_stop_hbc_bgrb(acb);
1291 		}
1292 		break;
1293 	case ACB_ADAPTER_TYPE_D: {
1294 			arcmsr_stop_hbd_bgrb(acb);
1295 		}
1296 		break;
1297 	}
1298 }
1299 /*
1300 ************************************************************************
1301 ************************************************************************
1302 */
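/*
** CAM polling entry point: run the interrupt handler by hand, taking the
** isr lock only if the current thread does not already hold it.
*/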
1303 static void arcmsr_poll(struct cam_sim *psim)
1304 {
1305 	struct AdapterControlBlock *acb;
1306 	int	mutex;
1307 
1308 	acb = (struct AdapterControlBlock *)cam_sim_softc(psim);
1309 	mutex = lockstatus(&acb->isr_lock, curthread);
1310 	if( mutex == 0 )
1311 		ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
1312 	arcmsr_interrupt(acb);
1313 	if( mutex == 0 )
1314 		ARCMSR_LOCK_RELEASE(&acb->isr_lock);
1315 }
1316 /*
1317 **************************************************************************
1318 **************************************************************************
1319 */
1320 static u_int32_t arcmsr_Read_iop_rqbuffer_data_D(struct AdapterControlBlock *acb,
1321     struct QBUFFER *prbuffer) {
1322 
1323 	u_int8_t *pQbuffer;
1324 	u_int8_t *buf1 = NULL;
1325 	u_int32_t *iop_data, *buf2 = NULL;
1326 	u_int32_t iop_len, data_len;
1327 
1328 	iop_data = (u_int32_t *)prbuffer->data;
1329 	iop_len = (u_int32_t)prbuffer->data_len;
1330 	if ( iop_len > 0 )
1331 	{
1332 		buf1 = kmalloc(128, M_DEVBUF, M_NOWAIT | M_ZERO);
1333 		buf2 = (u_int32_t *)buf1;
1334 		if( buf1 == NULL)
1335 			return (0);
1336 		data_len = iop_len;
1337 		while(data_len >= 4)
1338 		{
1339 			*buf2++ = *iop_data++;
1340 			data_len -= 4;
1341 		}
1342 		if(data_len)
1343 			*buf2 = *iop_data;
1344 		buf2 = (u_int32_t *)buf1;
1345 	}
1346 	while (iop_len > 0) {
1347 		pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex];
1348 		*pQbuffer = *buf1;
1349 		acb->rqbuf_lastindex++;
1350 		/* if last, index number set it to 0 */
1351 		acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1352 		buf1++;
1353 		iop_len--;
1354 	}
1355 	if(buf2)
1356 		kfree( (u_int8_t *)buf2, M_DEVBUF);
1357 	/* let IOP know data has been read */
1358 	arcmsr_iop_message_read(acb);
1359 	return (1);
1360 }
1361 /*
1362 **************************************************************************
1363 **************************************************************************
1364 */
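/*
** Copy the IOP's message read buffer into the driver's rqbuffer ring and
** acknowledge the read; type C/D adapters go through the 32-bit copy
** variant above.
*/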
1365 static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
1366     struct QBUFFER *prbuffer) {
1367 
1368 	u_int8_t *pQbuffer;
1369 	u_int8_t *iop_data;
1370 	u_int32_t iop_len;
1371 
1372 	if(acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
1373 		return(arcmsr_Read_iop_rqbuffer_data_D(acb, prbuffer));
1374 	}
1375 	iop_data = (u_int8_t *)prbuffer->data;
1376 	iop_len = (u_int32_t)prbuffer->data_len;
1377 	while (iop_len > 0) {
1378 		pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex];
1379 		*pQbuffer = *iop_data;
1380 		acb->rqbuf_lastindex++;
1381 		/* if last, index number set it to 0 */
1382 		acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1383 		iop_data++;
1384 		iop_len--;
1385 	}
1386 	/* let IOP know data has been read */
1387 	arcmsr_iop_message_read(acb);
1388 	return (1);
1389 }
1390 /*
1391 **************************************************************************
1392 **************************************************************************
1393 */
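/*
** Doorbell handler for "IOP wrote data": if the driver's rqbuffer ring has
** room for the incoming bytes, copy them in now, otherwise flag an overflow
** so they are picked up later.
*/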
1394 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1395 {
1396 	struct QBUFFER *prbuffer;
1397 	int my_empty_len;
1398 
1399 	/* check whether this iop data would overflow my rqbuffer */
1400 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1401 	prbuffer = arcmsr_get_iop_rqbuffer(acb);
1402 	my_empty_len = (acb->rqbuf_lastindex - acb->rqbuf_firstindex - 1) &
1403 	    (ARCMSR_MAX_QBUFFER-1);
1404 	if(my_empty_len >= prbuffer->data_len) {
1405 		if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
1406 			acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
1407 	} else {
1408 		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
1409 	}
1410 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1411 }
1412 /*
1413 **********************************************************************
1414 **********************************************************************
1415 */
1416 static void arcmsr_Write_data_2iop_wqbuffer_D(struct AdapterControlBlock *acb)
1417 {
1418 	u_int8_t *pQbuffer;
1419 	struct QBUFFER *pwbuffer;
1420 	u_int8_t *buf1 = NULL;
1421 	u_int32_t *iop_data, *buf2 = NULL;
1422 	u_int32_t allxfer_len = 0, data_len;
1423 
1424 	if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
1425 		buf1 = kmalloc(128, M_DEVBUF, M_NOWAIT | M_ZERO);
1426 		buf2 = (u_int32_t *)buf1;
1427 		if( buf1 == NULL)
1428 			return;
1429 
1430 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1431 		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1432 		iop_data = (u_int32_t *)pwbuffer->data;
1433 		while((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
1434 			&& (allxfer_len < 124)) {
1435 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
1436 			*buf1 = *pQbuffer;
1437 			acb->wqbuf_firstindex++;
1438 			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1439 			buf1++;
1440 			allxfer_len++;
1441 		}
1442 		pwbuffer->data_len = allxfer_len;
1443 		data_len = allxfer_len;
1444 		buf1 = (u_int8_t *)buf2;
1445 		while(data_len >= 4)
1446 		{
1447 			*iop_data++ = *buf2++;
1448 			data_len -= 4;
1449 		}
1450 		if(data_len)
1451 			*iop_data = *buf2;
1452 		kfree( buf1, M_DEVBUF);
1453 		arcmsr_iop_message_wrote(acb);
1454 	}
1455 }
1456 /*
1457 **********************************************************************
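** arcmsr_Write_data_2iop_wqbuffer: move queued user data from the driver's
** wqbuffer to the IOP's write Qbuffer (at most 124 bytes per doorbell) and
** notify the IOP that a message was written.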
1458 **********************************************************************
1459 */
1460 static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb)
1461 {
1462 	u_int8_t *pQbuffer;
1463 	struct QBUFFER *pwbuffer;
1464 	u_int8_t *iop_data;
1465 	int32_t allxfer_len=0;
1466 
1467 	if(acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
1468 		arcmsr_Write_data_2iop_wqbuffer_D(acb);
1469 		return;
1470 	}
1471 	if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
1472 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1473 		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1474 		iop_data = (u_int8_t *)pwbuffer->data;
1475 		while((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
1476 			&& (allxfer_len < 124)) {
1477 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
1478 			*iop_data = *pQbuffer;
1479 			acb->wqbuf_firstindex++;
1480 			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1481 			iop_data++;
1482 			allxfer_len++;
1483 		}
1484 		pwbuffer->data_len = allxfer_len;
1485 		arcmsr_iop_message_wrote(acb);
1486 	}
1487 }
1488 /*
1489 **************************************************************************
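** arcmsr_iop2drv_data_read_handle: doorbell handler for "IOP read data";
** the IOP has consumed the previous message, so push any remaining wqbuffer
** data and mark the write Qbuffer cleared once it is empty.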
1490 **************************************************************************
1491 */
1492 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
1493 {
1494 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1495 	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
1496 	/*
1497 	*****************************************************************
1498 	**   check if any mail from the user space program is still queued
1499 	**   in my post bag; if so, now is the time to send it to Areca's firmware
1500 	*****************************************************************
1501 	*/
1502 	if(acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
1503 		arcmsr_Write_data_2iop_wqbuffer(acb);
1504 	}
1505 	if(acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
1506 		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
1507 	}
1508 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1509 }
1510 /*
1511 **************************************************************************
1512 **************************************************************************
1513 */
1514 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb)
1515 {
1516 /*
1517 	if (ccb->ccb_h.status != CAM_REQ_CMP)
1518 		kprintf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x,"
1519 		    "failure status=%x\n", ccb->ccb_h.target_id,
1520 		    ccb->ccb_h.target_lun, ccb->ccb_h.status);
1521 	else
1522 		kprintf("arcmsr_rescanLun_cb: Rescan lun successfully!\n");
1523 */
1524 	xpt_free_path(ccb->ccb_h.path);
1525 	xpt_free_ccb(ccb);
1526 }
1527 
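/*
** arcmsr_rescan_lun: ask CAM to rescan a single target/lun after the
** firmware device map reports a hot-plug or removal event.
*/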
1528 static void	arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun)
1529 {
1530 	struct cam_path     *path;
1531 	union ccb           *ccb;
1532 
1533 	if ((ccb = (union ccb *)xpt_alloc_ccb()) == NULL)
1534  		return;
1535 	if (xpt_create_path(&path, NULL, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP)
1536 	{
1537 		xpt_free_ccb(ccb);
1538 		return;
1539 	}
1540 /*	kprintf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */
1541 	bzero(ccb, sizeof(union ccb));
1542 	xpt_setup_ccb(&ccb->ccb_h, path, 5);
1543 	ccb->ccb_h.func_code = XPT_SCAN_LUN;
1544 	ccb->ccb_h.cbfcnp = arcmsr_rescanLun_cb;
1545 	ccb->crcn.flags = CAM_FLAG_NONE;
1546 	xpt_action(ccb);
1547 }
1548 
1549 
1550 static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun)
1551 {
1552 	struct CommandControlBlock *srb;
1553 	u_int32_t intmask_org;
1554 	int i;
1555 
1556 	/* disable all outbound interrupts */
1557 	intmask_org = arcmsr_disable_allintr(acb);
1558 	for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++)
1559 	{
1560 		srb = acb->psrb_pool[i];
1561 		if (srb->srb_state == ARCMSR_SRB_START)
1562 		{
1563 			if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun))
1564 			{
1565 				srb->srb_state = ARCMSR_SRB_ABORTED;
1566 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
1567 				arcmsr_srb_complete(srb, 1);
1568 				kprintf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb);
1569 			}
1570 		}
1571 	}
1572 	/* enable outbound Post Queue, outbound doorbell Interrupt */
1573 	arcmsr_enable_allintr(acb, intmask_org);
1574 }
1575 /*
1576 **************************************************************************
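** arcmsr_dr_handle: device-rescan handler.  Read the firmware device map
** from the adapter's message buffer, compare it with the cached copy, and
** rescan (or abort and rescan) every target/lun whose state changed.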
1577 **************************************************************************
1578 */
1579 static void arcmsr_dr_handle(struct AdapterControlBlock *acb) {
1580 	u_int32_t	devicemap;
1581 	u_int32_t	target, lun;
1582 	u_int32_t	deviceMapCurrent[4]={0};
1583 	u_int8_t	*pDevMap;
1584 
1585 	switch (acb->adapter_type) {
1586 	case ACB_ADAPTER_TYPE_A:
1587 		devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1588 		for (target = 0; target < 4; target++)
1589 		{
1590 			deviceMapCurrent[target] = bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1591 			devicemap += 4;
1592 		}
1593 		break;
1594 
1595 	case ACB_ADAPTER_TYPE_B:
1596 		devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1597 		for (target = 0; target < 4; target++)
1598 		{
1599 			deviceMapCurrent[target] = bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap);
1600 			devicemap += 4;
1601 		}
1602 		break;
1603 
1604 	case ACB_ADAPTER_TYPE_C:
1605 		devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1606 		for (target = 0; target < 4; target++)
1607 		{
1608 			deviceMapCurrent[target] = bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1609 			devicemap += 4;
1610 		}
1611 		break;
1612 	case ACB_ADAPTER_TYPE_D:
1613 		devicemap = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1614 		for (target = 0; target < 4; target++)
1615 		{
1616 			deviceMapCurrent[target] = bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1617 			devicemap += 4;
1618 		}
1619 		break;
1620 	}
1621 
1622 	if(acb->acb_flags & ACB_F_BUS_HANG_ON)
1623 	{
1624 		acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
1625 	}
1626 	/*
1627 	** adapter posted CONFIG message
1628 	** copy the new map, note if there are differences with the current map
1629 	*/
1630 	pDevMap = (u_int8_t *)&deviceMapCurrent[0];
1631 	for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++)
1632 	{
1633 		if (*pDevMap != acb->device_map[target])
1634 		{
1635 			u_int8_t difference, bit_check;
1636 
1637 			difference = *pDevMap ^ acb->device_map[target];
1638 			for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++)
1639 			{
1640 				bit_check = (1 << lun);		/*check bit from 0....31*/
1641 				if(difference & bit_check)
1642 				{
1643 					if(acb->device_map[target] & bit_check)
1644 					{	/* unit departed */
1645 						kprintf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n", target, lun);
1646 						arcmsr_abort_dr_ccbs(acb, target, lun);
1647 						arcmsr_rescan_lun(acb, target, lun);
1648 						acb->devstate[target][lun] = ARECA_RAID_GONE;
1649 					}
1650 					else
1651 					{	/* unit arrived */
1652 						kprintf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n", target, lun);
1653 						arcmsr_rescan_lun(acb, target, lun);
1654 						acb->devstate[target][lun] = ARECA_RAID_GOOD;
1655 					}
1656 				}
1657 			}
1658 /*			kprintf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n", target, acb->device_map[target], target, *pDevMap); */
1659 			acb->device_map[target] = *pDevMap;
1660 		}
1661 		pDevMap++;
1662 	}
1663 }
1664 /*
1665 **************************************************************************
1666 **************************************************************************
1667 */
1668 static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) {
1669 	u_int32_t outbound_message;
1670 
1671 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1672 	outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]);
1673 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1674 		arcmsr_dr_handle( acb );
1675 }
1676 /*
1677 **************************************************************************
1678 **************************************************************************
1679 */
1680 static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) {
1681 	u_int32_t outbound_message;
1682 
1683 	/* clear interrupts */
1684 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1685 	outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]);
1686 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1687 		arcmsr_dr_handle( acb );
1688 }
1689 /*
1690 **************************************************************************
1691 **************************************************************************
1692 */
1693 static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) {
1694 	u_int32_t outbound_message;
1695 
1696 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
1697 	outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]);
1698 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1699 		arcmsr_dr_handle( acb );
1700 }
1701 /*
1702 **************************************************************************
1703 **************************************************************************
1704 */
1705 static void arcmsr_hbd_message_isr(struct AdapterControlBlock *acb) {
1706 	u_int32_t outbound_message;
1707 
1708 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);
1709 	outbound_message = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[0]);
1710 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1711 		arcmsr_dr_handle( acb );
1712 }
1713 /*
1714 **************************************************************************
1715 **************************************************************************
1716 */
1717 static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
1718 {
1719 	u_int32_t doorbell_status;
1720 
1721 	/*
1722 	*******************************************************************
1723 	**  Maybe here we need to check whether wrqbuffer_lock is held or not
1724 	**  DOORBELL: ding! dong!
1725 	**  check if there is any mail that needs to be picked up from the firmware
1726 	*******************************************************************
1727 	*/
1728 	doorbell_status = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
1729 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
1730 	if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
1731 		arcmsr_iop2drv_data_wrote_handle(acb);
1732 	}
1733 	if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
1734 		arcmsr_iop2drv_data_read_handle(acb);
1735 	}
1736 }
1737 /*
1738 **************************************************************************
1739 **************************************************************************
1740 */
1741 static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
1742 {
1743 	u_int32_t doorbell_status;
1744 
1745 	/*
1746 	*******************************************************************
1747 	**  Maybe here we need to check whether wrqbuffer_lock is held or not
1748 	**  DOORBELL: ding! dong!
1749 	**  check if there is any mail that needs to be picked up from the firmware
1750 	*******************************************************************
1751 	*/
1752 	doorbell_status = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
1753 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, doorbell_status); /* clear doorbell interrupt */
1754 	if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
1755 		arcmsr_iop2drv_data_wrote_handle(acb);
1756 	}
1757 	if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
1758 		arcmsr_iop2drv_data_read_handle(acb);
1759 	}
1760 	if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
1761 		arcmsr_hbc_message_isr(acb);    /* messenger of "driver to iop commands" */
1762 	}
1763 }
1764 /*
1765 **************************************************************************
1766 **************************************************************************
1767 */
1768 static void arcmsr_hbd_doorbell_isr(struct AdapterControlBlock *acb)
1769 {
1770 	u_int32_t doorbell_status;
1771 
1772 	/*
1773 	*******************************************************************
1774 	**  Maybe here we need to check whether wrqbuffer_lock is held or not
1775 	**  DOORBELL: ding! dong!
1776 	**  check if there is any mail that needs to be picked up from the firmware
1777 	*******************************************************************
1778 	*/
1779 	doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE;
1780 	if(doorbell_status)
1781 		CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
1782 	while( doorbell_status & ARCMSR_HBDMU_F0_DOORBELL_CAUSE ) {
1783 		if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_WRITE_OK) {
1784 			arcmsr_iop2drv_data_wrote_handle(acb);
1785 		}
1786 		if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_READ_OK) {
1787 			arcmsr_iop2drv_data_read_handle(acb);
1788 		}
1789 		if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) {
1790 			arcmsr_hbd_message_isr(acb);    /* messenger of "driver to iop commands" */
1791 		}
1792 		doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE;
1793 		if(doorbell_status)
1794 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
1795 	}
1796 }
1797 /*
1798 **************************************************************************
1799 **************************************************************************
1800 */
1801 static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1802 {
1803 	u_int32_t flag_srb;
1804 	u_int16_t error;
1805 
1806 	/*
1807 	*****************************************************************************
1808 	**               areca cdb command done
1809 	*****************************************************************************
1810 	*/
1811 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1812 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1813 	while((flag_srb = CHIP_REG_READ32(HBA_MessageUnit,
1814 		0, outbound_queueport)) != 0xFFFFFFFF) {
1815 		/* check if command completed with no error */
1816 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
1817 		arcmsr_drain_donequeue(acb, flag_srb, error);
1818 	}	/*drain reply FIFO*/
1819 }
1820 /*
1821 **************************************************************************
1822 **************************************************************************
1823 */
1824 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1825 {
1826 	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
1827 	u_int32_t flag_srb;
1828 	int index;
1829 	u_int16_t error;
1830 
1831 	/*
1832 	*****************************************************************************
1833 	**               areca cdb command done
1834 	*****************************************************************************
1835 	*/
1836 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1837 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1838 	index = phbbmu->doneq_index;
1839 	while((flag_srb = phbbmu->done_qbuffer[index]) != 0) {
1840 		phbbmu->done_qbuffer[index] = 0;
1841 		index++;
1842 		index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
1843 		phbbmu->doneq_index = index;
1844 		/* check if command completed with no error */
1845 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
1846 		arcmsr_drain_donequeue(acb, flag_srb, error);
1847 	}	/*drain reply FIFO*/
1848 }
1849 /*
1850 **************************************************************************
1851 **************************************************************************
1852 */
1853 static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
1854 {
1855 	u_int32_t flag_srb,throttling = 0;
1856 	u_int16_t error;
1857 
1858 	/*
1859 	*****************************************************************************
1860 	**               areca cdb command done
1861 	*****************************************************************************
1862 	*/
1863 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1864 	do {
1865 		flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
1866 		/* check if command completed with no error */
1867 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
1868 		arcmsr_drain_donequeue(acb, flag_srb, error);
1869 		throttling++;
1870 		if(throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
1871 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
1872 			throttling = 0;
1873 		}
1874 	} while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR);
1875 }
1876 /*
1877 **********************************************************************
1878 **
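** arcmsr_get_doneq_index: advance the type D done-queue index.  Bit 0x4000
** appears to act as a wrap toggle so a full queue can be told apart from an
** empty one; the low byte is the actual ring index.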
1879 **********************************************************************
1880 */
1881 static uint16_t arcmsr_get_doneq_index(struct HBD_MessageUnit0 *phbdmu)
1882 {
1883 	uint16_t doneq_index, index_stripped;
1884 
1885 	doneq_index = phbdmu->doneq_index;
1886 	if (doneq_index & 0x4000) {
1887 		index_stripped = doneq_index & 0xFF;
1888 		index_stripped += 1;
1889 		index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
1890 		phbdmu->doneq_index = index_stripped ?
1891 		    (index_stripped | 0x4000) : index_stripped;
1892 	} else {
1893 		index_stripped = doneq_index;
1894 		index_stripped += 1;
1895 		index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
1896 		phbdmu->doneq_index = index_stripped ?
1897 		    index_stripped : (index_stripped | 0x4000);
1898 	}
1899 	return (phbdmu->doneq_index);
1900 }
1901 /*
1902 **************************************************************************
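** arcmsr_hbd_postqueue_isr: drain the type D outbound (done) list; entry 0
** of done_qbuffer holds the firmware's write pointer and the following
** entries carry the completed SRB addresses.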
1903 **************************************************************************
1904 */
1905 static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb)
1906 {
1907 	struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
1908 	u_int32_t outbound_write_pointer;
1909 	u_int32_t addressLow;
1910 	uint16_t doneq_index;
1911 	u_int16_t error;
1912 	/*
1913 	*****************************************************************************
1914 	**               areca cdb command done
1915 	*****************************************************************************
1916 	*/
1917 	if((CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause) &
1918 	    ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT) == 0)
1919 	    return;
1920 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1921 		BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1922 	outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow;
1923 	doneq_index = phbdmu->doneq_index;
1924 	while ((doneq_index & 0xFF) != (outbound_write_pointer & 0xFF)) {
1925 		doneq_index = arcmsr_get_doneq_index(phbdmu);
1926 		addressLow = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow;
1927 		error = (addressLow & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
1928 		arcmsr_drain_donequeue(acb, addressLow, error); /*Check if command done with no error */
1929 		CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index);
1930 		outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow;
1931 	}
1932 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_interrupt_cause, ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT_CLEAR);
1933 	CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause); /*Dummy ioread32 to force pci flush */
1934 }
1935 /*
1936 **********************************************************************
1937 **********************************************************************
1938 */
1939 static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb)
1940 {
1941 	u_int32_t outbound_intStatus;
1942 	/*
1943 	*********************************************
1944 	**   check outbound intstatus
1945 	*********************************************
1946 	*/
1947 	outbound_intStatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
1948 	if(!outbound_intStatus) {
1949 		/* it must be a shared irq */
1950 		return;
1951 	}
1952 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intStatus); /*clear interrupt*/
1953 	/* MU doorbell interrupts*/
1954 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
1955 		arcmsr_hba_doorbell_isr(acb);
1956 	}
1957 	/* MU post queue interrupts*/
1958 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
1959 		arcmsr_hba_postqueue_isr(acb);
1960 	}
1961 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1962 		arcmsr_hba_message_isr(acb);
1963 	}
1964 }
1965 /*
1966 **********************************************************************
1967 **********************************************************************
1968 */
1969 static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb)
1970 {
1971 	u_int32_t outbound_doorbell;
1972 	/*
1973 	*********************************************
1974 	**   check outbound intstatus
1975 	*********************************************
1976 	*/
1977 	outbound_doorbell = CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & acb->outbound_int_enable;
1978 	if(!outbound_doorbell) {
1979 		/* it must be a shared irq */
1980 		return;
1981 	}
1982 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */
1983 	CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell);
1984 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1985 	/* MU ioctl transfer doorbell interrupts*/
1986 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
1987 		arcmsr_iop2drv_data_wrote_handle(acb);
1988 	}
1989 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
1990 		arcmsr_iop2drv_data_read_handle(acb);
1991 	}
1992 	/* MU post queue interrupts*/
1993 	if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1994 		arcmsr_hbb_postqueue_isr(acb);
1995 	}
1996 	if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1997 		arcmsr_hbb_message_isr(acb);
1998 	}
1999 }
2000 /*
2001 **********************************************************************
2002 **********************************************************************
2003 */
2004 static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb)
2005 {
2006 	u_int32_t host_interrupt_status;
2007 	/*
2008 	*********************************************
2009 	**   check outbound intstatus
2010 	*********************************************
2011 	*/
2012 	host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) &
2013 		(ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
2014 		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
2015 	if(!host_interrupt_status) {
2016 		/* it must be a shared irq */
2017 		return;
2018 	}
2019 	do {
2020 		/* MU doorbell interrupts*/
2021 		if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
2022 			arcmsr_hbc_doorbell_isr(acb);
2023 		}
2024 		/* MU post queue interrupts*/
2025 		if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
2026 			arcmsr_hbc_postqueue_isr(acb);
2027 		}
2028 		host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status);
2029 	} while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
2030 }
2031 /*
2032 **********************************************************************
2033 **********************************************************************
2034 */
2035 static void arcmsr_handle_hbd_isr( struct AdapterControlBlock *acb)
2036 {
2037 	u_int32_t host_interrupt_status;
2038 	u_int32_t intmask_org;
2039 	/*
2040 	*********************************************
2041 	**   check outbound intstatus
2042 	*********************************************
2043 	*/
2044 	host_interrupt_status = CHIP_REG_READ32(HBD_MessageUnit, 0, host_int_status) & acb->outbound_int_enable;
2045 	if(!(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_INT)) {
2046 		/* it must be a shared irq */
2047 		return;
2048 	}
2049 	/* disable outbound interrupt */
2050 	intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable); /* save the current interrupt mask */
2051 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE);
2052 	/* MU doorbell interrupts*/
2053 	if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_DOORBELL_INT) {
2054 		arcmsr_hbd_doorbell_isr(acb);
2055 	}
2056 	/* MU post queue interrupts*/
2057 	if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_POSTQUEUE_INT) {
2058 		arcmsr_hbd_postqueue_isr(acb);
2059 	}
2060 	/* enable all outbound interrupt */
2061 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | ARCMSR_HBDMU_ALL_INT_ENABLE);
2062 //	CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable);
2063 }
2064 /*
2065 ******************************************************************************
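** arcmsr_interrupt: common interrupt entry; dispatch to the adapter
** type specific ISR (HBA/HBB/HBC/HBD).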
2066 ******************************************************************************
2067 */
2068 static void arcmsr_interrupt(struct AdapterControlBlock *acb)
2069 {
2070 	switch (acb->adapter_type) {
2071 	case ACB_ADAPTER_TYPE_A:
2072 		arcmsr_handle_hba_isr(acb);
2073 		break;
2074 	case ACB_ADAPTER_TYPE_B:
2075 		arcmsr_handle_hbb_isr(acb);
2076 		break;
2077 	case ACB_ADAPTER_TYPE_C:
2078 		arcmsr_handle_hbc_isr(acb);
2079 		break;
2080 	case ACB_ADAPTER_TYPE_D:
2081 		arcmsr_handle_hbd_isr(acb);
2082 		break;
2083 	default:
2084 		kprintf("arcmsr%d: interrupt service,"
2085 		" unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type);
2086 		break;
2087 	}
2088 }
2089 /*
2090 **********************************************************************
2091 **********************************************************************
2092 */
2093 static void arcmsr_intr_handler(void *arg)
2094 {
2095 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
2096 
2097 	ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
2098 	arcmsr_interrupt(acb);
2099 	ARCMSR_LOCK_RELEASE(&acb->isr_lock);
2100 }
2101 /*
2102 ******************************************************************************
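** arcmsr_polling_devmap: periodic callout that asks the firmware for its
** current device map (GET_CONFIG); the reply comes back through the
** message ISR and is processed by arcmsr_dr_handle().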
2103 ******************************************************************************
2104 */
2105 static void	arcmsr_polling_devmap(void *arg)
2106 {
2107 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
2108 	switch (acb->adapter_type) {
2109 	case ACB_ADAPTER_TYPE_A:
2110 		CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2111 		break;
2112 
2113 	case ACB_ADAPTER_TYPE_B:
2114 		CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
2115 		break;
2116 
2117 	case ACB_ADAPTER_TYPE_C:
2118 		CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2119 		CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2120 		break;
2121 
2122 	case ACB_ADAPTER_TYPE_D:
2123 		CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2124 		break;
2125 	}
2126 
2127 	if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)
2128 	{
2129 		callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb);	/* poll again in 5 seconds */
2130 	}
2131 }
2132 
2133 /*
2134 *******************************************************************************
2135 **
2136 *******************************************************************************
2137 */
2138 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
2139 {
2140 	u_int32_t intmask_org;
2141 
2142 	if(acb != NULL) {
2143 		/* stop adapter background rebuild */
2144 		if(acb->acb_flags & ACB_F_MSG_START_BGRB) {
2145 			intmask_org = arcmsr_disable_allintr(acb);
2146 			arcmsr_stop_adapter_bgrb(acb);
2147 			arcmsr_flush_adapter_cache(acb);
2148 			arcmsr_enable_allintr(acb, intmask_org);
2149 		}
2150 	}
2151 }
2152 /*
2153 ***********************************************************************
2154 **
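** arcmsr_iop_ioctlcmd: handle the ARCMSR_MESSAGE_* ioctls coming from user
** space; they share the rqbuffer/wqbuffer rings with the SCSI pass-through
** message path below.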
2155 ************************************************************************
2156 */
2157 u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
2158 {
2159 	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
2160 	u_int32_t retvalue = EINVAL;
2161 
2162 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) arg;
2163 	if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
2164 		return retvalue;
2165 	}
2166 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2167 	switch(ioctl_cmd) {
2168 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
2169 			u_int8_t *pQbuffer;
2170 			u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer;
2171 			u_int32_t allxfer_len=0;
2172 
2173 			while((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
2174 				&& (allxfer_len < 1031)) {
2175 				/*copy READ QBUFFER to srb*/
2176 				pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
2177 				*ptmpQbuffer = *pQbuffer;
2178 				acb->rqbuf_firstindex++;
2179 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
2180 				/*if last index number set it to 0 */
2181 				ptmpQbuffer++;
2182 				allxfer_len++;
2183 			}
2184 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2185 				struct QBUFFER *prbuffer;
2186 
2187 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2188 				prbuffer = arcmsr_get_iop_rqbuffer(acb);
2189 				if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
2190 					acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2191 			}
2192 			pcmdmessagefld->cmdmessage.Length = allxfer_len;
2193 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2194 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2195 		}
2196 		break;
2197 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2198 			u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
2199 			u_int8_t *pQbuffer;
2200 			u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer;
2201 
2202 			user_len = pcmdmessagefld->cmdmessage.Length;
2203 			/*check if data xfer length of this request will overflow my array qbuffer */
2204 			wqbuf_lastindex = acb->wqbuf_lastindex;
2205 			wqbuf_firstindex = acb->wqbuf_firstindex;
2206 			if(wqbuf_lastindex != wqbuf_firstindex) {
2207 				arcmsr_Write_data_2iop_wqbuffer(acb);
2208 				pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR;
2209 			} else {
2210 				my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1) &
2211 				    (ARCMSR_MAX_QBUFFER - 1);
2212 				if(my_empty_len >= user_len) {
2213 					while(user_len > 0) {
2214 						/*copy srb data to wqbuffer*/
2215 						pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
2216 						*pQbuffer = *ptmpuserbuffer;
2217 						acb->wqbuf_lastindex++;
2218 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2219 						/*if last index number set it to 0 */
2220 						ptmpuserbuffer++;
2221 						user_len--;
2222 					}
2223 					/* post first Qbuffer */
2224 					if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2225 						acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2226 						arcmsr_Write_data_2iop_wqbuffer(acb);
2227 					}
2228 					pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2229 				} else {
2230 					pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR;
2231 				}
2232 			}
2233 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2234 		}
2235 		break;
2236 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2237 			u_int8_t *pQbuffer = acb->rqbuffer;
2238 
2239 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2240 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2241 				arcmsr_iop_message_read(acb);
2242 				/*signature, let IOP know data has been readed */
2243 			}
2244 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2245 			acb->rqbuf_firstindex = 0;
2246 			acb->rqbuf_lastindex = 0;
2247 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2248 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2249 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2250 		}
2251 		break;
2252 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
2253 		{
2254 			u_int8_t *pQbuffer = acb->wqbuffer;
2255 
2256 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2257 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2258 				arcmsr_iop_message_read(acb);
2259 				/* signature, let IOP know data has been read */
2260 			}
2261 			acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
2262 			acb->wqbuf_firstindex = 0;
2263 			acb->wqbuf_lastindex = 0;
2264 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2265 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2266 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2267 		}
2268 		break;
2269 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
2270 			u_int8_t *pQbuffer;
2271 
2272 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2273 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2274 				arcmsr_iop_message_read(acb);
2275 				/* signature, let IOP know data has been read */
2276 			}
2277 			acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
2278 					|ACB_F_MESSAGE_RQBUFFER_CLEARED
2279 					|ACB_F_MESSAGE_WQBUFFER_READ);
2280 			acb->rqbuf_firstindex = 0;
2281 			acb->rqbuf_lastindex = 0;
2282 			acb->wqbuf_firstindex = 0;
2283 			acb->wqbuf_lastindex = 0;
2284 			pQbuffer = acb->rqbuffer;
2285 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
2286 			pQbuffer = acb->wqbuffer;
2287 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
2288 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2289 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2290 		}
2291 		break;
2292 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
2293 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
2294 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2295 		}
2296 		break;
2297 	case ARCMSR_MESSAGE_SAY_HELLO: {
2298 			const char *hello_string = "Hello! I am ARCMSR";
2299 			u_int8_t *puserbuffer = (u_int8_t *)pcmdmessagefld->messagedatabuffer;
2300 
2301 			/* memcpy() always returns its destination, so testing its return
2302 			** value can never report failure; copy unconditionally, as the
2303 			** pass-through handler below does. */
2304 			memcpy(puserbuffer, hello_string, strlen(hello_string));
2305 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2306 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2308 		}
2309 		break;
2310 	case ARCMSR_MESSAGE_SAY_GOODBYE: {
2311 			arcmsr_iop_parking(acb);
2312 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2313 		}
2314 		break;
2315 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
2316 			arcmsr_flush_adapter_cache(acb);
2317 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2318 		}
2319 		break;
2320 	}
2321 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2322 	return (retvalue);
2323 }
2324 /*
2325 **************************************************************************
2326 **************************************************************************
2327 */
2328 static void arcmsr_free_srb(struct CommandControlBlock *srb)
2329 {
2330 	struct AdapterControlBlock	*acb;
2331 
2332 	acb = srb->acb;
2333 	ARCMSR_LOCK_ACQUIRE(&acb->srb_lock);
2334 	srb->srb_state = ARCMSR_SRB_DONE;
2335 	srb->srb_flags = 0;
2336 	acb->srbworkingQ[acb->workingsrb_doneindex] = srb;
2337 	acb->workingsrb_doneindex++;
2338 	acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM;
2339 	ARCMSR_LOCK_RELEASE(&acb->srb_lock);
2340 }
2341 /*
2342 **************************************************************************
2343 **************************************************************************
2344 */
2345 struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb)
2346 {
2347 	struct CommandControlBlock *srb = NULL;
2348 	u_int32_t workingsrb_startindex, workingsrb_doneindex;
2349 
2350 	ARCMSR_LOCK_ACQUIRE(&acb->srb_lock);
2351 	workingsrb_doneindex = acb->workingsrb_doneindex;
2352 	workingsrb_startindex = acb->workingsrb_startindex;
2353 	srb = acb->srbworkingQ[workingsrb_startindex];
2354 	workingsrb_startindex++;
2355 	workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM;
2356 	if(workingsrb_doneindex != workingsrb_startindex) {
2357 		acb->workingsrb_startindex = workingsrb_startindex;
2358 	} else {
2359 		srb = NULL;
2360 	}
2361 	ARCMSR_LOCK_RELEASE(&acb->srb_lock);
2362 	return(srb);
2363 }
2364 /*
2365 **************************************************************************
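** arcmsr_iop_message_xfer: handle the ARCMSR_MESSAGE_* control codes that
** arrive embedded in WRITE_BUFFER/READ_BUFFER CDBs on the virtual target;
** mirrors arcmsr_iop_ioctlcmd() but reports errors through SCSI sense data.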
2366 **************************************************************************
2367 */
2368 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb)
2369 {
2370 	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
2371 	int retvalue = 0, transfer_len = 0;
2372 	char *buffer;
2373 	u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 |
2374 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 |
2375 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8  |
2376 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8];
2377 					/* 4 bytes: Areca io control code */
2378 	if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2379 		buffer = pccb->csio.data_ptr;
2380 		transfer_len = pccb->csio.dxfer_len;
2381 	} else {
2382 		retvalue = ARCMSR_MESSAGE_FAIL;
2383 		goto message_out;
2384 	}
2385 	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
2386 		retvalue = ARCMSR_MESSAGE_FAIL;
2387 		goto message_out;
2388 	}
2389 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
2390 	switch(controlcode) {
2391 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
2392 			u_int8_t *pQbuffer;
2393 			u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer;
2394 			int32_t allxfer_len = 0;
2395 
2396 			ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2397 			while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
2398 				&& (allxfer_len < 1031)) {
2399 				pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
2400 				*ptmpQbuffer = *pQbuffer;
2401 				acb->rqbuf_firstindex++;
2402 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
2403 				ptmpQbuffer++;
2404 				allxfer_len++;
2405 			}
2406 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2407 				struct QBUFFER  *prbuffer;
2408 
2409 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2410 				prbuffer = arcmsr_get_iop_rqbuffer(acb);
2411 				if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
2412 					acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2413 			}
2414 			pcmdmessagefld->cmdmessage.Length = allxfer_len;
2415 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2416 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2417 			ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2418 		}
2419 		break;
2420 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2421 			int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
2422 			u_int8_t *pQbuffer;
2423 			u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer;
2424 
2425 			user_len = pcmdmessagefld->cmdmessage.Length;
2426 			ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2427 			wqbuf_lastindex = acb->wqbuf_lastindex;
2428 			wqbuf_firstindex = acb->wqbuf_firstindex;
2429 			if (wqbuf_lastindex != wqbuf_firstindex) {
2430 				arcmsr_Write_data_2iop_wqbuffer(acb);
2431 				/* has error report sensedata */
2432 			    if(pccb->csio.sense_len) {
2433 				((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2434 				/* Valid,ErrorCode */
2435 				((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2436 				/* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2437 				((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2438 				/* AdditionalSenseLength */
2439 				((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2440 				/* AdditionalSenseCode */
2441 				}
2442 				retvalue = ARCMSR_MESSAGE_FAIL;
2443 			} else {
2444 				my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
2445 						&(ARCMSR_MAX_QBUFFER - 1);
2446 				if (my_empty_len >= user_len) {
2447 					while (user_len > 0) {
2448 						pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
2449 						*pQbuffer = *ptmpuserbuffer;
2450 						acb->wqbuf_lastindex++;
2451 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2452 						ptmpuserbuffer++;
2453 						user_len--;
2454 					}
2455 					if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2456 						acb->acb_flags &=
2457 						    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2458 						arcmsr_Write_data_2iop_wqbuffer(acb);
2459 					}
2460 				} else {
2461 					/* report the error via sense data */
2462 					if(pccb->csio.sense_len) {
2463 						((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);	/* Valid, ErrorCode */
2464 						((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;	/* FileMark, EndOfMedia, IncorrectLength, Reserved, SenseKey */
2465 						((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;	/* AdditionalSenseLength */
2466 						((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;	/* AdditionalSenseCode */
2467 					}
2472 					retvalue = ARCMSR_MESSAGE_FAIL;
2473 				}
2474 			}
2475 			ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2476 		}
2477 		break;
2478 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2479 			u_int8_t *pQbuffer = acb->rqbuffer;
2480 
2481 			ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2482 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2483 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2484 				arcmsr_iop_message_read(acb);
2485 			}
2486 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2487 			acb->rqbuf_firstindex = 0;
2488 			acb->rqbuf_lastindex = 0;
2489 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2490 			pcmdmessagefld->cmdmessage.ReturnCode =
2491 			    ARCMSR_MESSAGE_RETURNCODE_OK;
2492 			ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2493 		}
2494 		break;
2495 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
2496 			u_int8_t *pQbuffer = acb->wqbuffer;
2497 
2498 			ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2499 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2500 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2501 				arcmsr_iop_message_read(acb);
2502 			}
2503 			acb->acb_flags |=
2504 				(ACB_F_MESSAGE_WQBUFFER_CLEARED |
2505 					ACB_F_MESSAGE_WQBUFFER_READ);
2506 			acb->wqbuf_firstindex = 0;
2507 			acb->wqbuf_lastindex = 0;
2508 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2509 			pcmdmessagefld->cmdmessage.ReturnCode =
2510 				ARCMSR_MESSAGE_RETURNCODE_OK;
2511 			ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2512 		}
2513 		break;
2514 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
2515 			u_int8_t *pQbuffer;
2516 
2517 			ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2518 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2519 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2520 				arcmsr_iop_message_read(acb);
2521 			}
2522 			acb->acb_flags |=
2523 				(ACB_F_MESSAGE_WQBUFFER_CLEARED
2524 				| ACB_F_MESSAGE_RQBUFFER_CLEARED
2525 				| ACB_F_MESSAGE_WQBUFFER_READ);
2526 			acb->rqbuf_firstindex = 0;
2527 			acb->rqbuf_lastindex = 0;
2528 			acb->wqbuf_firstindex = 0;
2529 			acb->wqbuf_lastindex = 0;
2530 			pQbuffer = acb->rqbuffer;
2531 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2532 			pQbuffer = acb->wqbuffer;
2533 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2534 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2535 			ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2536 		}
2537 		break;
2538 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
2539 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
2540 		}
2541 		break;
2542 	case ARCMSR_MESSAGE_SAY_HELLO: {
2543 			int8_t *hello_string = "Hello! I am ARCMSR";
2544 
2545 			memcpy(pcmdmessagefld->messagedatabuffer, hello_string
2546 				, (int16_t)strlen(hello_string));
2547 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2548 		}
2549 		break;
2550 	case ARCMSR_MESSAGE_SAY_GOODBYE:
2551 		arcmsr_iop_parking(acb);
2552 		break;
2553 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
2554 		arcmsr_flush_adapter_cache(acb);
2555 		break;
2556 	default:
2557 		retvalue = ARCMSR_MESSAGE_FAIL;
2558 	}
2559 message_out:
2560 	return (retvalue);
2561 }
2562 /*
2563 *********************************************************************
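** arcmsr_execute_srb: bus_dmamap_load callback.  Validate the request
** (DMA error, segment count, bus reset, gone volume, queue depth), then
** build and post the SRB and start its timeout callout.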
2564 *********************************************************************
2565 */
2566 static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2567 {
2568 	struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
2569 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)srb->acb;
2570 	union ccb *pccb;
2571 	int target, lun;
2572 
2573 	pccb = srb->pccb;
2574 	target = pccb->ccb_h.target_id;
2575 	lun = pccb->ccb_h.target_lun;
2576 	acb->pktRequestCount++;
2577 	if(error != 0) {
2578 		if(error != EFBIG) {
2579 			kprintf("arcmsr%d: unexpected error %x"
2580 				" returned from 'bus_dmamap_load' \n"
2581 				, acb->pci_unit, error);
2582 		}
2583 		if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2584 			pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2585 		}
2586 		arcmsr_srb_complete(srb, 0);
2587 		return;
2588 	}
2589 	if(nseg > ARCMSR_MAX_SG_ENTRIES) {
2590 		pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2591 		arcmsr_srb_complete(srb, 0);
2592 		return;
2593 	}
2594 	if(acb->acb_flags & ACB_F_BUS_RESET) {
2595 		kprintf("arcmsr%d: bus reset and return busy \n", acb->pci_unit);
2596 		pccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2597 		arcmsr_srb_complete(srb, 0);
2598 		return;
2599 	}
2600 	if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
2601 		u_int8_t block_cmd, cmd;
2602 
2603 		cmd = pccb->csio.cdb_io.cdb_bytes[0];
2604 		block_cmd = cmd & 0x0f;
2605 		if(block_cmd == 0x08 || block_cmd == 0x0a) {
2606 			kprintf("arcmsr%d:block 'read/write' command "
2607 				"with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n"
2608 				, acb->pci_unit, cmd, target, lun);
2609 			pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
2610 			arcmsr_srb_complete(srb, 0);
2611 			return;
2612 		}
2613 	}
2614 	if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2615 		if(nseg != 0) {
2616 			bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
2617 		}
2618 		arcmsr_srb_complete(srb, 0);
2619 		return;
2620 	}
2621 	if(acb->srboutstandingcount >= acb->maxOutstanding) {
2622 		if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) == 0)
2623 		{
2624 			xpt_freeze_simq(acb->psim, 1);
2625 			acb->acb_flags |= ACB_F_CAM_DEV_QFRZN;
2626 		}
2627 		pccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2628 		pccb->ccb_h.status |= CAM_REQUEUE_REQ;
2629 		arcmsr_srb_complete(srb, 0);
2630 		return;
2631 	}
2632 	pccb->ccb_h.status |= CAM_SIM_QUEUED;
2633 	arcmsr_build_srb(srb, dm_segs, nseg);
2634 	arcmsr_post_srb(acb, srb);
2635 	if (pccb->ccb_h.timeout != CAM_TIME_INFINITY)
2636 	{
2637 		arcmsr_callout_init(&srb->ccb_callout);
2638 		callout_reset(&srb->ccb_callout, ((pccb->ccb_h.timeout + (ARCMSR_TIMEOUT_DELAY * 1000)) * hz) / 1000, arcmsr_srb_timeout, srb);
2639 		srb->srb_flags |= SRB_FLAG_TIMER_START;
2640 	}
2641 }
2642 /*
2643 *****************************************************************************************
2644 *****************************************************************************************
2645 */
2646 static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb)
2647 {
2648 	struct CommandControlBlock *srb;
2649 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr;
2650 	u_int32_t intmask_org;
2651 	int i = 0;
2652 
2653 	acb->num_aborts++;
2654 	/*
2655 	***************************************************************************
2656 	** The upper layer that issues the abort acquires this lock just prior to calling us.
2657 	** First determine if we currently own this command.
2658 	** Start by searching the device queue.  If it is not found
2659 	** at all, and the system wanted us to just abort the
2660 	** command, return success.
2661 	***************************************************************************
2662 	*/
2663 	if(acb->srboutstandingcount != 0) {
2664 		/* disable all outbound interrupt */
2665 		intmask_org = arcmsr_disable_allintr(acb);
2666 		for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
2667 			srb = acb->psrb_pool[i];
2668 			if(srb->srb_state == ARCMSR_SRB_START) {
2669 				if(srb->pccb == abortccb) {
2670 					srb->srb_state = ARCMSR_SRB_ABORTED;
2671 					kprintf("arcmsr%d:scsi id=%d lun=%jx abort srb '%p'"
2672 						"outstanding command \n"
2673 						, acb->pci_unit, abortccb->ccb_h.target_id
2674 						, (uintmax_t)abortccb->ccb_h.target_lun, srb);
2675 					arcmsr_polling_srbdone(acb, srb);
2676 					/* enable outbound Post Queue, outbound doorbell Interrupt */
2677 					arcmsr_enable_allintr(acb, intmask_org);
2678 					return (TRUE);
2679 				}
2680 			}
2681 		}
2682 		/* enable outbound Post Queue, outbound doorbell Interrupt */
2683 		arcmsr_enable_allintr(acb, intmask_org);
2684 	}
2685 	return(FALSE);
2686 }
2687 /*
2688 ****************************************************************************
2689 ****************************************************************************
2690 */
2691 static void arcmsr_bus_reset(struct AdapterControlBlock *acb)
2692 {
2693 	int retry = 0;
2694 
2695 	acb->num_resets++;
2696 	acb->acb_flags |= ACB_F_BUS_RESET;
2697 	while(acb->srboutstandingcount != 0 && retry < 400) {
2698 		arcmsr_interrupt(acb);
2699 		UDELAY(25000);
2700 		retry++;
2701 	}
2702 	arcmsr_iop_reset(acb);
2703 	acb->acb_flags &= ~ACB_F_BUS_RESET;
2704 }
2705 /*
2706 **************************************************************************
2707 **************************************************************************
2708 */
2709 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
2710 		union ccb *pccb)
2711 {
2712 	if (pccb->ccb_h.target_lun) {
2713 		pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
2714 		xpt_done(pccb);
2715 		return;
2716 	}
2717 	pccb->ccb_h.status |= CAM_REQ_CMP;
2718 	switch (pccb->csio.cdb_io.cdb_bytes[0]) {
2719 	case INQUIRY: {
2720 		unsigned char inqdata[36];
2721 		char *buffer = pccb->csio.data_ptr;
2722 
2723 		inqdata[0] = T_PROCESSOR;	/* Periph Qualifier & Periph Dev Type */
2724 		inqdata[1] = 0;				/* rem media bit & Dev Type Modifier */
2725 		inqdata[2] = 0;				/* ISO, ECMA, & ANSI versions */
2726 		inqdata[3] = 0;
2727 		inqdata[4] = 31;			/* length of additional data */
2728 		inqdata[5] = 0;
2729 		inqdata[6] = 0;
2730 		inqdata[7] = 0;
2731 		strncpy(&inqdata[8], "Areca   ", 8);	/* Vendor Identification */
2732 		strncpy(&inqdata[16], "RAID controller ", 16);	/* Product Identification */
2733 		strncpy(&inqdata[32], "R001", 4); /* Product Revision */
2734 		memcpy(buffer, inqdata, sizeof(inqdata));
2735 		xpt_done(pccb);
2736 	}
2737 	break;
2738 	case WRITE_BUFFER:
2739 	case READ_BUFFER: {
2740 		if (arcmsr_iop_message_xfer(acb, pccb)) {
2741 			pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2742 			pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
2743 		}
2744 		xpt_done(pccb);
2745 	}
2746 	break;
2747 	default:
2748 		xpt_done(pccb);
2749 	}
2750 }
2751 /*
2752 *********************************************************************
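** arcmsr_action: CAM SIM action entry point.  Target id 16 is a virtual
** device used for IOP message transfer; everything else is mapped onto
** SRBs and posted to the adapter.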
2753 *********************************************************************
2754 */
2755 static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
2756 {
2757 	struct AdapterControlBlock *acb;
2758 
2759 	acb = (struct AdapterControlBlock *) cam_sim_softc(psim);
2760 	if(acb == NULL) {
2761 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2762 		xpt_done(pccb);
2763 		return;
2764 	}
2765 	switch (pccb->ccb_h.func_code) {
2766 	case XPT_SCSI_IO: {
2767 			struct CommandControlBlock *srb;
2768 			int target = pccb->ccb_h.target_id;
2769 
2770 			if(target == 16) {
2771 				/* virtual device for iop message transfer */
2772 				arcmsr_handle_virtual_command(acb, pccb);
2773 				return;
2774 			}
2775 			if((srb = arcmsr_get_freesrb(acb)) == NULL) {
2776 				pccb->ccb_h.status |= CAM_RESRC_UNAVAIL;
2777 				xpt_done(pccb);
2778 				return;
2779 			}
2780 			pccb->ccb_h.arcmsr_ccbsrb_ptr=srb;
2781 			pccb->ccb_h.arcmsr_ccbacb_ptr=acb;
2782 			srb->pccb=pccb;
2783 			if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2784 				if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) {
2785 					/* Single buffer */
2786 					if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) {
2787 						/* Buffer is virtual */
2788 						u_int32_t error;
2789 
2790 						crit_enter();
2791 						error =	bus_dmamap_load(acb->dm_segs_dmat
2792 							, srb->dm_segs_dmamap
2793 							, pccb->csio.data_ptr
2794 							, pccb->csio.dxfer_len
2795 							, arcmsr_execute_srb, srb, /*flags*/0);
2796 						if(error == EINPROGRESS) {
2797 							xpt_freeze_simq(acb->psim, 1);
2798 							pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2799 						}
2800 						crit_exit();
2801 					}
2802 					else {		/* Buffer is physical */
2803 						struct bus_dma_segment seg;
2804 
2805 						seg.ds_addr = (bus_addr_t)pccb->csio.data_ptr;
2806 						seg.ds_len = pccb->csio.dxfer_len;
2807 						arcmsr_execute_srb(srb, &seg, 1, 0);
2808 					}
2809 				} else {
2810 					/* Scatter/gather list */
2811 					struct bus_dma_segment *segs;
2812 
2813 					if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
2814 					|| (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2815 						pccb->ccb_h.status |= CAM_PROVIDE_FAIL;
2816 						xpt_done(pccb);
2817 						kfree(srb, M_DEVBUF);
2818 						return;
2819 					}
2820 					segs=(struct bus_dma_segment *)pccb->csio.data_ptr;
2821 					arcmsr_execute_srb(srb, segs, pccb->csio.sglist_cnt, 0);
2822 				}
2823 			} else {
2824 				arcmsr_execute_srb(srb, NULL, 0, 0);
2825 			}
2826 			break;
2827 		}
2828 	case XPT_TARGET_IO: {
2829 			/* target mode does not yet support vendor specific commands. */
2830 			pccb->ccb_h.status |= CAM_REQ_CMP;
2831 			xpt_done(pccb);
2832 			break;
2833 		}
2834 	case XPT_PATH_INQ: {
2835 			struct ccb_pathinq *cpi = &pccb->cpi;
2836 
2837 			cpi->version_num = 1;
2838 			cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
2839 			cpi->target_sprt = 0;
2840 			cpi->hba_misc = 0;
2841 			cpi->hba_eng_cnt = 0;
2842 			cpi->max_target = ARCMSR_MAX_TARGETID;        /* 0-16 */
2843 			cpi->max_lun = ARCMSR_MAX_TARGETLUN;	    /* 0-7 */
2844 			cpi->initiator_id = ARCMSR_SCSI_INITIATOR_ID; /* 255 */
2845 			cpi->bus_id = cam_sim_bus(psim);
2846 			strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2847 			strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN);
2848 			strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
2849 			cpi->unit_number = cam_sim_unit(psim);
2850 			if(acb->adapter_bus_speed == ACB_BUS_SPEED_12G)
2851 				cpi->base_transfer_speed = 1200000;
2852 			else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
2853 				cpi->base_transfer_speed = 600000;
2854 			else
2855 				cpi->base_transfer_speed = 300000;
2856 			if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2857 			   (acb->vendor_device_id == PCIDevVenIDARC1680) ||
2858 			   (acb->vendor_device_id == PCIDevVenIDARC1214))
2859 			{
2860 				cpi->transport = XPORT_SAS;
2861 				cpi->transport_version = 0;
2862 				cpi->protocol_version = SCSI_REV_SPC2;
2863 			}
2864 			else
2865 			{
2866 				cpi->transport = XPORT_SPI;
2867 				cpi->transport_version = 2;
2868 				cpi->protocol_version = SCSI_REV_2;
2869 			}
2870 			cpi->protocol = PROTO_SCSI;
2871 			cpi->ccb_h.status |= CAM_REQ_CMP;
2872 			xpt_done(pccb);
2873 			break;
2874 		}
2875 	case XPT_ABORT: {
2876 			union ccb *pabort_ccb;
2877 
2878 			pabort_ccb = pccb->cab.abort_ccb;
2879 			switch (pabort_ccb->ccb_h.func_code) {
2880 			case XPT_ACCEPT_TARGET_IO:
2881 			case XPT_IMMED_NOTIFY:
2882 			case XPT_CONT_TARGET_IO:
2883 				if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) {
2884 					pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED;
2885 					xpt_done(pabort_ccb);
2886 					pccb->ccb_h.status |= CAM_REQ_CMP;
2887 				} else {
2888 					xpt_print_path(pabort_ccb->ccb_h.path);
2889 					kprintf("Not found\n");
2890 					pccb->ccb_h.status |= CAM_PATH_INVALID;
2891 				}
2892 				break;
2893 			case XPT_SCSI_IO:
2894 				pccb->ccb_h.status |= CAM_UA_ABORT;
2895 				break;
2896 			default:
2897 				pccb->ccb_h.status |= CAM_REQ_INVALID;
2898 				break;
2899 			}
2900 			xpt_done(pccb);
2901 			break;
2902 		}
2903 	case XPT_RESET_BUS:
2904 	case XPT_RESET_DEV: {
2905 			u_int32_t     i;
2906 
2907 			arcmsr_bus_reset(acb);
2908 			for (i=0; i < 500; i++) {
2909 				DELAY(1000);
2910 			}
2911 			pccb->ccb_h.status |= CAM_REQ_CMP;
2912 			xpt_done(pccb);
2913 			break;
2914 		}
2915 	case XPT_TERM_IO: {
2916 			pccb->ccb_h.status |= CAM_REQ_INVALID;
2917 			xpt_done(pccb);
2918 			break;
2919 		}
2920 	case XPT_GET_TRAN_SETTINGS: {
2921 			struct ccb_trans_settings *cts;
2922 
2923 			if(pccb->ccb_h.target_id == 16) {
2924 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2925 				xpt_done(pccb);
2926 				break;
2927 			}
2928 			cts = &pccb->cts;
2929 			{
2930 				struct ccb_trans_settings_scsi *scsi;
2931 				struct ccb_trans_settings_spi *spi;
2932 				struct ccb_trans_settings_sas *sas;
2933 
2934 				scsi = &cts->proto_specific.scsi;
2935 				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2936 				scsi->valid = CTS_SCSI_VALID_TQ;
2937 				cts->protocol = PROTO_SCSI;
2938 
2939 				if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2940 				   (acb->vendor_device_id == PCIDevVenIDARC1680) ||
2941 				   (acb->vendor_device_id == PCIDevVenIDARC1214))
2942 				{
2943 					cts->protocol_version = SCSI_REV_SPC2;
2944 					cts->transport_version = 0;
2945 					cts->transport = XPORT_SAS;
2946 					sas = &cts->xport_specific.sas;
2947 					sas->valid = CTS_SAS_VALID_SPEED;
2948 					if (acb->sub_device_id == ARECA_SUB_DEV_ID_1883)
2949 						sas->bitrate = 1200000;
2950 					else if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2951 					   (acb->vendor_device_id == PCIDevVenIDARC1214))
2952 						sas->bitrate = 600000;
2953 					else if(acb->vendor_device_id == PCIDevVenIDARC1680)
2954 						sas->bitrate = 300000;
2955 				}
2956 				else
2957 				{
2958 					cts->protocol_version = SCSI_REV_2;
2959 					cts->transport_version = 2;
2960 					cts->transport = XPORT_SPI;
2961 					spi = &cts->xport_specific.spi;
2962 					spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2963 					spi->sync_period = 2;
2964 					spi->sync_offset = 32;
2965 					spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2966 					spi->valid = CTS_SPI_VALID_DISC
2967 						| CTS_SPI_VALID_SYNC_RATE
2968 						| CTS_SPI_VALID_SYNC_OFFSET
2969 						| CTS_SPI_VALID_BUS_WIDTH;
2970 				}
2971 			}
2972 			pccb->ccb_h.status |= CAM_REQ_CMP;
2973 			xpt_done(pccb);
2974 			break;
2975 		}
2976 	case XPT_SET_TRAN_SETTINGS: {
2977 			pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2978 			xpt_done(pccb);
2979 			break;
2980 		}
2981 	case XPT_CALC_GEOMETRY:
2982 			if(pccb->ccb_h.target_id == 16) {
2983 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2984 				xpt_done(pccb);
2985 				break;
2986 			}
2987 			cam_calc_geometry(&pccb->ccg, 1);
2988 			xpt_done(pccb);
2989 			break;
2990 	default:
2991 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2992 		xpt_done(pccb);
2993 		break;
2994 	}
2995 }
2996 /*
2997 **********************************************************************
2998 **********************************************************************
2999 */
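/*
** The arcmsr_start_hbX_bgrb() helpers below ask the IOP to start its
** background rebuild: each sets ACB_F_MSG_START_BGRB, posts the
** adapter-specific START_BGRB message and waits for the message-unit
** acknowledge, complaining if the IOP does not answer in time.
*/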
3000 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
3001 {
3002 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
3003 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
3004 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
3005 		kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
3006 	}
3007 }
3008 /*
3009 **********************************************************************
3010 **********************************************************************
3011 */
3012 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
3013 {
3014 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
3015 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,  ARCMSR_MESSAGE_START_BGRB);
3016 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3017 		kprintf( "arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
3018 	}
3019 }
3020 /*
3021 **********************************************************************
3022 **********************************************************************
3023 */
3024 static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb)
3025 {
3026 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
3027 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
3028 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3029 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3030 		kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
3031 	}
3032 }
3033 /*
3034 **********************************************************************
3035 **********************************************************************
3036 */
3037 static void arcmsr_start_hbd_bgrb(struct AdapterControlBlock *acb)
3038 {
3039 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
3040 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
3041 	if(!arcmsr_hbd_wait_msgint_ready(acb)) {
3042 		kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
3043 	}
3044 }
3045 /*
3046 **********************************************************************
3047 **********************************************************************
3048 */
3049 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
3050 {
3051 	switch (acb->adapter_type) {
3052 	case ACB_ADAPTER_TYPE_A:
3053 		arcmsr_start_hba_bgrb(acb);
3054 		break;
3055 	case ACB_ADAPTER_TYPE_B:
3056 		arcmsr_start_hbb_bgrb(acb);
3057 		break;
3058 	case ACB_ADAPTER_TYPE_C:
3059 		arcmsr_start_hbc_bgrb(acb);
3060 		break;
3061 	case ACB_ADAPTER_TYPE_D:
3062 		arcmsr_start_hbd_bgrb(acb);
3063 		break;
3064 	}
3065 }
3066 /*
3067 **********************************************************************
3068 **
3069 **********************************************************************
3070 */
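/*
** Drain the type A reply FIFO by polling instead of taking interrupts.
** Every entry read from outbound_queueport (until 0xFFFFFFFF marks the
** queue empty) is turned back into an SRB pointer via
** vir2phy_offset + (flag_srb << 5) -- SRB frames are 32-byte aligned --
** and ARCMSR_SRBREPLY_FLAG_ERROR_MODE0 carries the error indication.
*/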
3071 static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
3072 {
3073 	struct CommandControlBlock *srb;
3074 	u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0;
3075 	u_int16_t	error;
3076 
3077 polling_ccb_retry:
3078 	poll_count++;
3079 	outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
3080 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);	/*clear interrupt*/
3081 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3082 	while(1) {
3083 		if((flag_srb = CHIP_REG_READ32(HBA_MessageUnit,
3084 			0, outbound_queueport)) == 0xFFFFFFFF) {
3085 			if(poll_srb_done) {
3086 				break;/*chip FIFO no ccb for completion already*/
3087 			} else {
3088 				UDELAY(25000);
3089 				if ((poll_count > 100) && (poll_srb != NULL)) {
3090 					break;
3091 				}
3092 				goto polling_ccb_retry;
3093 			}
3094 		}
3095 		/* check if command done with no error*/
3096 		srb = (struct CommandControlBlock *)
3097 			(acb->vir2phy_offset+(flag_srb << 5));	/* frame must be 32-byte aligned */
3098 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
3099 		poll_srb_done = (srb == poll_srb) ? 1:0;
3100 		if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
3101 			if(srb->srb_state == ARCMSR_SRB_ABORTED) {
3102 				kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p' "
3103 					"poll command aborted successfully\n"
3104 					, acb->pci_unit
3105 					, srb->pccb->ccb_h.target_id
3106 					, (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
3107 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3108 				arcmsr_srb_complete(srb, 1);
3109 				continue;
3110 			}
3111 			kprintf("arcmsr%d: polling got an illegal srb command done srb='%p' "
3112 				"srboutstandingcount=%d\n"
3113 				, acb->pci_unit
3114 				, srb, acb->srboutstandingcount);
3115 			continue;
3116 		}
3117 		arcmsr_report_srb_state(acb, srb, error);
3118 	}	/*drain reply FIFO*/
3119 }
3120 /*
3121 **********************************************************************
3122 **
3123 **********************************************************************
3124 */
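/*
** Type B variant of the polling completion path: completions are taken
** from the in-memory done_qbuffer ring and doneq_index wraps modulo
** ARCMSR_MAX_HBB_POSTQUEUE.
*/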
3125 static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
3126 {
3127 	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
3128 	struct CommandControlBlock *srb;
3129 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
3130 	int index;
3131 	u_int16_t	error;
3132 
3133 polling_ccb_retry:
3134 	poll_count++;
3135 	CHIP_REG_WRITE32(HBB_DOORBELL,
3136 	0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
3137 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3138 	while(1) {
3139 		index = phbbmu->doneq_index;
3140 		if((flag_srb = phbbmu->done_qbuffer[index]) == 0) {
3141 			if(poll_srb_done) {
3142 				break;/*chip FIFO no ccb for completion already*/
3143 			} else {
3144 				UDELAY(25000);
3145 				if ((poll_count > 100) && (poll_srb != NULL)) {
3146 					break;
3147 				}
3148 				goto polling_ccb_retry;
3149 			}
3150 		}
3151 		phbbmu->done_qbuffer[index] = 0;
3152 		index++;
3153 		index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
3154 		phbbmu->doneq_index = index;
3155 		/* check if command done with no error*/
3156 		srb = (struct CommandControlBlock *)
3157 			(acb->vir2phy_offset+(flag_srb << 5));	/* frame must be 32-byte aligned */
3158 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
3159 		poll_srb_done = (srb == poll_srb) ? 1:0;
3160 		if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
3161 			if(srb->srb_state == ARCMSR_SRB_ABORTED) {
3162 				kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p' "
3163 					"poll command aborted successfully\n"
3164 					, acb->pci_unit
3165 					, srb->pccb->ccb_h.target_id
3166 					, (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
3167 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3168 				arcmsr_srb_complete(srb, 1);
3169 				continue;
3170 			}
3171 			kprintf("arcmsr%d: polling got an illegal srb command done srb='%p' "
3172 				"srboutstandingcount=%d\n"
3173 				, acb->pci_unit
3174 				, srb, acb->srboutstandingcount);
3175 			continue;
3176 		}
3177 		arcmsr_report_srb_state(acb, srb, error);
3178 	}	/*drain reply FIFO*/
3179 }
3180 /*
3181 **********************************************************************
3182 **
3183 **********************************************************************
3184 */
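/*
** Type C variant: the outbound post-queue bit in host_int_status says
** whether anything is pending; each entry from outbound_queueport_low
** is masked with 0xFFFFFFE0 to recover the 32-byte aligned SRB frame,
** and ARCMSR_SRBREPLY_FLAG_ERROR_MODE1 carries the error indication.
*/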
3185 static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
3186 {
3187 	struct CommandControlBlock *srb;
3188 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
3189 	u_int16_t	error;
3190 
3191 polling_ccb_retry:
3192 	poll_count++;
3193 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3194 	while(1) {
3195 		if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
3196 			if(poll_srb_done) {
3197 				break;/*chip FIFO no ccb for completion already*/
3198 			} else {
3199 				UDELAY(25000);
3200 				if ((poll_count > 100) && (poll_srb != NULL)) {
3201 					break;
3202 				}
3203 				if (acb->srboutstandingcount == 0) {
3204 					break;
3205 				}
3206 				goto polling_ccb_retry;
3207 			}
3208 		}
3209 		flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
3210 		/* check if command done with no error*/
3211 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));	/* frame must be 32-byte aligned */
3212 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
3213 		if (poll_srb != NULL)
3214 			poll_srb_done = (srb == poll_srb) ? 1:0;
3215 		if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
3216 			if(srb->srb_state == ARCMSR_SRB_ABORTED) {
3217 				kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p' poll command aborted successfully\n"
3218 						, acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
3219 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3220 				arcmsr_srb_complete(srb, 1);
3221 				continue;
3222 			}
3223 			kprintf("arcmsr%d: polling got an illegal srb command done srb='%p' srboutstandingcount=%d\n"
3224 					, acb->pci_unit, srb, acb->srboutstandingcount);
3225 			continue;
3226 		}
3227 		arcmsr_report_srb_state(acb, srb, error);
3228 	}	/*drain reply FIFO*/
3229 }
3230 /*
3231 **********************************************************************
3232 **
3233 **********************************************************************
3234 */
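/*
** Type D variant: completions are picked up from the in-memory
** done_qbuffer descriptors; the consumed doneq_index is written back to
** outboundlist_read_pointer so the IOP can reuse the slot.
*/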
3235 static void arcmsr_polling_hbd_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
3236 {
3237 	struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
3238 	struct CommandControlBlock *srb;
3239 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
3240 	u_int32_t outbound_write_pointer;
3241 	u_int16_t	error, doneq_index;
3242 
3243 polling_ccb_retry:
3244 	poll_count++;
3245 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3246 	while(1) {
3247 		outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow;
3248 		doneq_index = phbdmu->doneq_index;
3249 		if ((outbound_write_pointer & 0xFF) == (doneq_index & 0xFF)) {
3250 			if(poll_srb_done) {
3251 				break;/*chip FIFO no ccb for completion already*/
3252 			} else {
3253 				UDELAY(25000);
3254 				if ((poll_count > 100) && (poll_srb != NULL)) {
3255 					break;
3256 				}
3257 				if (acb->srboutstandingcount == 0) {
3258 					break;
3259 				}
3260 				goto polling_ccb_retry;
3261 			}
3262 		}
3263 		doneq_index = arcmsr_get_doneq_index(phbdmu);
3264 		flag_srb = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow;
3265 		/* check if command done with no error*/
3266 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));	/* frame must be 32-byte aligned */
3267 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
3268 		CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index);
3269 		if (poll_srb != NULL)
3270 			poll_srb_done = (srb == poll_srb) ? 1:0;
3271 		if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
3272 			if(srb->srb_state == ARCMSR_SRB_ABORTED) {
3273 				kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p' poll command aborted successfully\n"
3274 						, acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
3275 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3276 				arcmsr_srb_complete(srb, 1);
3277 				continue;
3278 			}
3279 			kprintf("arcmsr%d: polling got an illegal srb command done srb='%p' srboutstandingcount=%d\n"
3280 					, acb->pci_unit, srb, acb->srboutstandingcount);
3281 			continue;
3282 		}
3283 		arcmsr_report_srb_state(acb, srb, error);
3284 	}	/*drain reply FIFO*/
3285 }
3286 /*
3287 **********************************************************************
3288 **********************************************************************
3289 */
3290 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
3291 {
3292 	switch (acb->adapter_type) {
3293 	case ACB_ADAPTER_TYPE_A: {
3294 			arcmsr_polling_hba_srbdone(acb, poll_srb);
3295 		}
3296 		break;
3297 	case ACB_ADAPTER_TYPE_B: {
3298 			arcmsr_polling_hbb_srbdone(acb, poll_srb);
3299 		}
3300 		break;
3301 	case ACB_ADAPTER_TYPE_C: {
3302 			arcmsr_polling_hbc_srbdone(acb, poll_srb);
3303 		}
3304 		break;
3305 	case ACB_ADAPTER_TYPE_D: {
3306 			arcmsr_polling_hbd_srbdone(acb, poll_srb);
3307 		}
3308 		break;
3309 	}
3310 }
3311 /*
3312 **********************************************************************
3313 **********************************************************************
3314 */
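/*
** The arcmsr_get_hbX_config() routines issue GET_CONFIG and then copy
** the firmware model (8 bytes), version (16 bytes) and device map
** (16 bytes) out of the message-unit rwbuffer, followed by the request
** length, queue depth, SDRAM size and channel count.  maxOutstanding is
** set to one less than the smaller of the reported queue depth and the
** per-type driver limit.
*/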
3315 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
3316 {
3317 	char *acb_firm_model = acb->firm_model;
3318 	char *acb_firm_version = acb->firm_version;
3319 	char *acb_device_map = acb->device_map;
3320 	size_t iop_firm_model = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
3321 	size_t iop_firm_version = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
3322 	size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
3323 	int i;
3324 
3325 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
3326 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
3327 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
3328 	}
3329 	i = 0;
3330 	while(i < 8) {
3331 		*acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
3332 		/* 8 bytes firm_model, 15, 60-67*/
3333 		acb_firm_model++;
3334 		i++;
3335 	}
3336 	i=0;
3337 	while(i < 16) {
3338 		*acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
3339 		/* 16 bytes firm_version, 17, 68-83*/
3340 		acb_firm_version++;
3341 		i++;
3342 	}
3343 	i=0;
3344 	while(i < 16) {
3345 		*acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
3346 		acb_device_map++;
3347 		i++;
3348 	}
3349 	kprintf("Areca RAID adapter%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
3350 	kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
3351 	acb->firm_request_len = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
3352 	acb->firm_numbers_queue = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
3353 	acb->firm_sdram_size = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
3354 	acb->firm_ide_channels = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
3355 	acb->firm_cfg_version = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25, 	  */
3356 	if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
3357 		acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1;
3358 	else
3359 		acb->maxOutstanding = acb->firm_numbers_queue - 1;
3360 }
3361 /*
3362 **********************************************************************
3363 **********************************************************************
3364 */
3365 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
3366 {
3367 	char *acb_firm_model = acb->firm_model;
3368 	char *acb_firm_version = acb->firm_version;
3369 	char *acb_device_map = acb->device_map;
3370 	size_t iop_firm_model = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
3371 	size_t iop_firm_version = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
3372 	size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
3373 	int i;
3374 
3375 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
3376 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3377 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
3378 	}
3379 	i = 0;
3380 	while(i < 8) {
3381 		*acb_firm_model = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i);
3382 		/* 8 bytes firm_model, 15, 60-67*/
3383 		acb_firm_model++;
3384 		i++;
3385 	}
3386 	i = 0;
3387 	while(i < 16) {
3388 		*acb_firm_version = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i);
3389 		/* 16 bytes firm_version, 17, 68-83*/
3390 		acb_firm_version++;
3391 		i++;
3392 	}
3393 	i = 0;
3394 	while(i < 16) {
3395 		*acb_device_map = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i);
3396 		acb_device_map++;
3397 		i++;
3398 	}
3399 	kprintf("Areca RAID adapter%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
3400 	kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
3401 	acb->firm_request_len = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
3402 	acb->firm_numbers_queue = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
3403 	acb->firm_sdram_size = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
3404 	acb->firm_ide_channels = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
3405 	acb->firm_cfg_version = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25, 	  */
3406 	if(acb->firm_numbers_queue > ARCMSR_MAX_HBB_POSTQUEUE)
3407 		acb->maxOutstanding = ARCMSR_MAX_HBB_POSTQUEUE - 1;
3408 	else
3409 		acb->maxOutstanding = acb->firm_numbers_queue - 1;
3410 }
3411 /*
3412 **********************************************************************
3413 **********************************************************************
3414 */
3415 static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb)
3416 {
3417 	char *acb_firm_model = acb->firm_model;
3418 	char *acb_firm_version = acb->firm_version;
3419 	char *acb_device_map = acb->device_map;
3420 	size_t iop_firm_model = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);   /*firm_model,15,60-67*/
3421 	size_t iop_firm_version = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
3422 	size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
3423 	int i;
3424 
3425 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
3426 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3427 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3428 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
3429 	}
3430 	i = 0;
3431 	while(i < 8) {
3432 		*acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
3433 		/* 8 bytes firm_model, 15, 60-67*/
3434 		acb_firm_model++;
3435 		i++;
3436 	}
3437 	i = 0;
3438 	while(i < 16) {
3439 		*acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
3440 		/* 16 bytes firm_version, 17, 68-83*/
3441 		acb_firm_version++;
3442 		i++;
3443 	}
3444 	i = 0;
3445 	while(i < 16) {
3446 		*acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
3447 		acb_device_map++;
3448 		i++;
3449 	}
3450 	kprintf("Areca RAID adapter%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
3451 	kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
3452 	acb->firm_request_len	= CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]);	/*firm_request_len,   1, 04-07*/
3453 	acb->firm_numbers_queue	= CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]);	/*firm_numbers_queue, 2, 08-11*/
3454 	acb->firm_sdram_size	= CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]);	/*firm_sdram_size,    3, 12-15*/
3455 	acb->firm_ide_channels	= CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]);	/*firm_ide_channels,  4, 16-19*/
3456 	acb->firm_cfg_version	= CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25, 	  */
3457 	if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
3458 		acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1;
3459 	else
3460 		acb->maxOutstanding = acb->firm_numbers_queue - 1;
3461 }
3462 /*
3463 **********************************************************************
3464 **********************************************************************
3465 */
3466 static void arcmsr_get_hbd_config(struct AdapterControlBlock *acb)
3467 {
3468 	char *acb_firm_model = acb->firm_model;
3469 	char *acb_firm_version = acb->firm_version;
3470 	char *acb_device_map = acb->device_map;
3471 	size_t iop_firm_model = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);   /*firm_model,15,60-67*/
3472 	size_t iop_firm_version = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
3473 	size_t iop_device_map = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
3474 	int i;
3475 
3476 	if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE)
3477 		CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);
3478 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
3479 	if(!arcmsr_hbd_wait_msgint_ready(acb)) {
3480 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
3481 	}
3482 	i = 0;
3483 	while(i < 8) {
3484 		*acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
3485 		/* 8 bytes firm_model, 15, 60-67*/
3486 		acb_firm_model++;
3487 		i++;
3488 	}
3489 	i = 0;
3490 	while(i < 16) {
3491 		*acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
3492 		/* 16 bytes firm_version, 17, 68-83*/
3493 		acb_firm_version++;
3494 		i++;
3495 	}
3496 	i = 0;
3497 	while(i < 16) {
3498 		*acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
3499 		acb_device_map++;
3500 		i++;
3501 	}
3502 	kprintf("Areca RAID adapter%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
3503 	kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
3504 	acb->firm_request_len	= CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[2]);	/*firm_request_len,   1, 04-07*/
3505 	acb->firm_numbers_queue	= CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[3]);	/*firm_numbers_queue, 2, 08-11*/
3506 	acb->firm_sdram_size	= CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[4]);	/*firm_sdram_size,    3, 12-15*/
3507 	acb->firm_ide_channels	= CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[5]);	/*firm_ide_channels,  4, 16-19*/
3508 	acb->firm_cfg_version	= CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25, 	  */
3509 	if(acb->firm_numbers_queue > ARCMSR_MAX_HBD_POSTQUEUE)
3510 		acb->maxOutstanding = ARCMSR_MAX_HBD_POSTQUEUE - 1;
3511 	else
3512 		acb->maxOutstanding = acb->firm_numbers_queue - 1;
3513 }
3514 /*
3515 **********************************************************************
3516 **********************************************************************
3517 */
3518 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
3519 {
3520 	switch (acb->adapter_type) {
3521 	case ACB_ADAPTER_TYPE_A: {
3522 			arcmsr_get_hba_config(acb);
3523 		}
3524 		break;
3525 	case ACB_ADAPTER_TYPE_B: {
3526 			arcmsr_get_hbb_config(acb);
3527 		}
3528 		break;
3529 	case ACB_ADAPTER_TYPE_C: {
3530 			arcmsr_get_hbc_config(acb);
3531 		}
3532 		break;
3533 	case ACB_ADAPTER_TYPE_D: {
3534 			arcmsr_get_hbd_config(acb);
3535 		}
3536 		break;
3537 	}
3538 }
3539 /*
3540 **********************************************************************
3541 **********************************************************************
3542 */
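/*
** Spin until the firmware-ready bit shows up in the adapter's outbound
** message register, giving up after roughly 30 seconds (2000 polls of
** 15 milliseconds each).
*/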
3543 static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb)
3544 {
3545 	int	timeout=0;
3546 
3547 	switch (acb->adapter_type) {
3548 	case ACB_ADAPTER_TYPE_A: {
3549 			while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0)
3550 			{
3551 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3552 				{
3553 					kprintf("arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3554 					return;
3555 				}
3556 				UDELAY(15000); /* wait 15 milli-seconds */
3557 			}
3558 		}
3559 		break;
3560 	case ACB_ADAPTER_TYPE_B: {
3561 			while ((CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0)
3562 			{
3563 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3564 				{
3565 					kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3566 					return;
3567 				}
3568 				UDELAY(15000); /* wait 15 milli-seconds */
3569 			}
3570 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3571 		}
3572 		break;
3573 	case ACB_ADAPTER_TYPE_C: {
3574 			while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0)
3575 			{
3576 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3577 				{
3578 					kprintf("arcmsr%d: timed out waiting for firmware ready\n", acb->pci_unit);
3579 					return;
3580 				}
3581 				UDELAY(15000); /* wait 15 milli-seconds */
3582 			}
3583 		}
3584 		break;
3585 	case ACB_ADAPTER_TYPE_D: {
3586 			while ((CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBDMU_MESSAGE_FIRMWARE_OK) == 0)
3587 			{
3588 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3589 				{
3590 					kprintf("arcmsr%d: timed out waiting for firmware ready\n", acb->pci_unit);
3591 					return;
3592 				}
3593 				UDELAY(15000); /* wait 15 milli-seconds */
3594 			}
3595 		}
3596 		break;
3597 	}
3598 }
3599 /*
3600 **********************************************************************
3601 **********************************************************************
3602 */
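/*
** Acknowledge any pending outbound doorbell bits and tell the IOP that
** the outbound data buffer has been read, so both sides start with an
** empty doorbell Qbuffer.
*/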
3603 static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb)
3604 {
3605 	u_int32_t outbound_doorbell;
3606 
3607 	switch (acb->adapter_type) {
3608 	case ACB_ADAPTER_TYPE_A: {
3609 			/* empty doorbell Qbuffer if the doorbell has been rung */
3610 			outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
3611 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell);	/*clear doorbell interrupt */
3612 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
3613 
3614 		}
3615 		break;
3616 	case ACB_ADAPTER_TYPE_B: {
3617 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/
3618 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
3619 			/* let IOP know data has been read */
3620 		}
3621 		break;
3622 	case ACB_ADAPTER_TYPE_C: {
3623 			/* empty doorbell Qbuffer if the doorbell has been rung */
3624 			outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
3625 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell);	/*clear doorbell interrupt */
3626 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
3627 			CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell_clear); /* Dummy read to force pci flush */
3628 			CHIP_REG_READ32(HBC_MessageUnit, 0, inbound_doorbell); /* Dummy read to force pci flush */
3629 		}
3630 		break;
3631 	case ACB_ADAPTER_TYPE_D: {
3632 			/* empty doorbell Qbuffer if the doorbell has been rung */
3633 			outbound_doorbell = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell);
3634 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, outbound_doorbell);	/*clear doorbell interrupt */
3635 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ);
3636 
3637 		}
3638 		break;
3639 	}
3640 }
3641 /*
3642 ************************************************************************
3643 ************************************************************************
3644 */
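/*
** Hand the IOP the layout of the SRB pool.  Types A and C only need the
** upper 32 bits of the pool's physical base (and only when non-zero);
** types B and D additionally publish the post/done queue buffers that
** sit right after the SRB pool in the coherent allocation.
*/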
3645 static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3646 {
3647 	unsigned long srb_phyaddr;
3648 	u_int32_t srb_phyaddr_hi32;
3649 	u_int32_t srb_phyaddr_lo32;
3650 
3651 	/*
3652 	********************************************************************
3653 	** Here we need to tell the IOP 331 the high part (upper 32 bits) of our
3654 	** free SRB pool's physical address, if that high part is not zero.
3655 	********************************************************************
3656 	*/
3657 	srb_phyaddr = (unsigned long) acb->srb_phyaddr.phyaddr;
3658 	srb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
3659 	srb_phyaddr_lo32 = acb->srb_phyaddr.B.phyadd_low;
3660 	switch (acb->adapter_type) {
3661 	case ACB_ADAPTER_TYPE_A: {
3662 			if(srb_phyaddr_hi32 != 0) {
3663 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3664 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3665 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3666 				if(!arcmsr_hba_wait_msgint_ready(acb)) {
3667 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3668 					return FALSE;
3669 				}
3670 			}
3671 		}
3672 		break;
3673 		/*
3674 		***********************************************************************
3675 		**    For adapter type B, set the window of the "post command Q"
3676 		***********************************************************************
3677 		*/
3678 	case ACB_ADAPTER_TYPE_B: {
3679 			u_int32_t post_queue_phyaddr;
3680 			struct HBB_MessageUnit *phbbmu;
3681 
3682 			phbbmu = (struct HBB_MessageUnit *)acb->pmu;
3683 			phbbmu->postq_index = 0;
3684 			phbbmu->doneq_index = 0;
3685 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW);
3686 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3687 				kprintf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit);
3688 				return FALSE;
3689 			}
3690 			post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE
3691 			+ offsetof(struct HBB_MessageUnit, post_qbuffer);
3692 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */
3693 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normal should be zero */
3694 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ size (256+8)*4 */
3695 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ size (256+8)*4 */
3696 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */
3697 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG);
3698 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3699 				kprintf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit);
3700 				return FALSE;
3701 			}
3702 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE);
3703 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3704 				kprintf("arcmsr%d: 'start driver mode' timeout \n", acb->pci_unit);
3705 				return FALSE;
3706 			}
3707 		}
3708 		break;
3709 	case ACB_ADAPTER_TYPE_C: {
3710 			if(srb_phyaddr_hi32 != 0) {
3711 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3712 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3713 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3714 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3715 				if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3716 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3717 					return FALSE;
3718 				}
3719 			}
3720 		}
3721 		break;
3722 	case ACB_ADAPTER_TYPE_D: {
3723 			u_int32_t post_queue_phyaddr, done_queue_phyaddr;
3724 			struct HBD_MessageUnit0 *phbdmu;
3725 
3726 			phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
3727 			phbdmu->postq_index = 0;
3728 			phbdmu->doneq_index = 0x40FF;
3729 			post_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE
3730 								+ offsetof(struct HBD_MessageUnit0, post_qbuffer);
3731 			done_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE
3732 								+ offsetof(struct HBD_MessageUnit0, done_qbuffer);
3733 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */
3734 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3735 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ base */
3736 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[3], done_queue_phyaddr); /* doneQ base */
3737 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[4], 0x100);
3738 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3739 			if(!arcmsr_hbd_wait_msgint_ready(acb)) {
3740 				kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3741 				return FALSE;
3742 			}
3743 		}
3744 		break;
3745 	}
3746 	return (TRUE);
3747 }
3748 /*
3749 ************************************************************************
3750 ************************************************************************
3751 */
3752 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
3753 {
3754 	switch (acb->adapter_type)
3755 	{
3756 	case ACB_ADAPTER_TYPE_A:
3757 	case ACB_ADAPTER_TYPE_C:
3758 	case ACB_ADAPTER_TYPE_D:
3759 		break;
3760 	case ACB_ADAPTER_TYPE_B: {
3761 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
3762 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3763 				kprintf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit);
3764 				return;
3765 			}
3766 		}
3767 		break;
3768 	}
3769 }
3770 /*
3771 **********************************************************************
3772 **********************************************************************
3773 */
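/*
** Adapter bring-up sequence: mask all outbound interrupts, wait for the
** firmware, confirm the SRB/queue layout, read the firmware
** configuration, start background rebuild, drain stale doorbells,
** enable EOI mode (type B only) and finally re-enable interrupts.
*/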
3774 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
3775 {
3776 	u_int32_t intmask_org;
3777 
3778 	/* disable all outbound interrupt */
3779 	intmask_org = arcmsr_disable_allintr(acb);
3780 	arcmsr_wait_firmware_ready(acb);
3781 	arcmsr_iop_confirm(acb);
3782 	arcmsr_get_firmware_spec(acb);
3783 	/*start background rebuild*/
3784 	arcmsr_start_adapter_bgrb(acb);
3785 	/* empty doorbell Qbuffer if the doorbell has been rung */
3786 	arcmsr_clear_doorbell_queue_buffer(acb);
3787 	arcmsr_enable_eoi_mode(acb);
3788 	/* enable outbound Post Queue, outbound doorbell Interrupt */
3789 	arcmsr_enable_allintr(acb, intmask_org);
3790 	acb->acb_flags |= ACB_F_IOP_INITED;
3791 }
3792 /*
3793 **********************************************************************
3794 **********************************************************************
3795 */
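/*
** bus_dmamap_load() callback for the SRB pool: walk the coherent region
** in SRB_SIZE steps, create a per-SRB S/G dmamap, record each SRB's
** physical address (split into low/high halves for types C/D,
** address >> 5 otherwise, since frames are 32-byte aligned) and remember
** the virtual-to-physical offset used by the completion paths.
*/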
3796 static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3797 {
3798 	struct AdapterControlBlock *acb = arg;
3799 	struct CommandControlBlock *srb_tmp;
3800 	u_int32_t i;
3801 	unsigned long srb_phyaddr = (unsigned long)segs->ds_addr;
3802 
3803 	acb->srb_phyaddr.phyaddr = srb_phyaddr;
3804 	srb_tmp = (struct CommandControlBlock *)acb->uncacheptr;
3805 	for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
3806 		if(bus_dmamap_create(acb->dm_segs_dmat,
3807 			 /*flags*/0, &srb_tmp->dm_segs_dmamap) != 0) {
3808 			acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
3809 			kprintf("arcmsr%d:"
3810 			" srb dmamap bus_dmamap_create error\n", acb->pci_unit);
3811 			return;
3812 		}
3813 		if((acb->adapter_type == ACB_ADAPTER_TYPE_C) || (acb->adapter_type == ACB_ADAPTER_TYPE_D))
3814 		{
3815 			srb_tmp->cdb_phyaddr_low = srb_phyaddr;
3816 			srb_tmp->cdb_phyaddr_high = (u_int32_t)((srb_phyaddr >> 16) >> 16);
3817 		}
3818 		else
3819 			srb_tmp->cdb_phyaddr_low = srb_phyaddr >> 5;
3820 		srb_tmp->acb = acb;
3821 		acb->srbworkingQ[i] = acb->psrb_pool[i] = srb_tmp;
3822 		srb_phyaddr = srb_phyaddr + SRB_SIZE;
3823 		srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp + SRB_SIZE);
3824 	}
3825 	acb->vir2phy_offset = (unsigned long)srb_tmp - (unsigned long)srb_phyaddr;
3826 }
3827 /*
3828 ************************************************************************
3829 ************************************************************************
3830 */
3831 static void arcmsr_free_resource(struct AdapterControlBlock *acb)
3832 {
3833 	/* remove the control device */
3834 	if(acb->ioctl_dev != NULL) {
3835 		destroy_dev(acb->ioctl_dev);
3836 	}
3837 	bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap);
3838 	bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap);
3839 	bus_dma_tag_destroy(acb->srb_dmat);
3840 	bus_dma_tag_destroy(acb->dm_segs_dmat);
3841 	bus_dma_tag_destroy(acb->parent_dmat);
3842 }
3843 /*
3844 ************************************************************************
3845 ************************************************************************
3846 */
3847 static void arcmsr_mutex_init(struct AdapterControlBlock *acb)
3848 {
3849 	ARCMSR_LOCK_INIT(&acb->isr_lock, "arcmsr isr lock");
3850 	ARCMSR_LOCK_INIT(&acb->srb_lock, "arcmsr srb lock");
3851 	ARCMSR_LOCK_INIT(&acb->postDone_lock, "arcmsr postQ lock");
3852 	ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr RW buffer lock");
3853 }
3854 /*
3855 ************************************************************************
3856 ************************************************************************
3857 */
3858 static void arcmsr_mutex_destroy(struct AdapterControlBlock *acb)
3859 {
3860 	ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3861 	ARCMSR_LOCK_DESTROY(&acb->postDone_lock);
3862 	ARCMSR_LOCK_DESTROY(&acb->srb_lock);
3863 	ARCMSR_LOCK_DESTROY(&acb->isr_lock);
3864 }
3865 /*
3866 ************************************************************************
3867 ************************************************************************
3868 */
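/*
** Identify the adapter type from the PCI device ID, build the DMA tag
** hierarchy (parent tag, S/G segment tag, coherent SRB-pool tag),
** allocate and load the SRB pool, enable bus mastering, map the
** register BAR(s) for the adapter type and finish with arcmsr_iop_init().
*/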
3869 static u_int32_t arcmsr_initialize(device_t dev)
3870 {
3871 	struct AdapterControlBlock *acb = device_get_softc(dev);
3872 	u_int16_t pci_command;
3873 	int i, j,max_coherent_size;
3874 	u_int32_t vendor_dev_id;
3875 
3876 	vendor_dev_id = pci_get_devid(dev);
3877 	acb->vendor_device_id = vendor_dev_id;
3878 	acb->sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
3879 	switch (vendor_dev_id) {
3880 	case PCIDevVenIDARC1880:
3881 	case PCIDevVenIDARC1882:
3882 	case PCIDevVenIDARC1213:
3883 	case PCIDevVenIDARC1223: {
3884 			acb->adapter_type = ACB_ADAPTER_TYPE_C;
3885 			if (acb->sub_device_id == ARECA_SUB_DEV_ID_1883)
3886 				acb->adapter_bus_speed = ACB_BUS_SPEED_12G;
3887 			else
3888 				acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
3889 			max_coherent_size = ARCMSR_SRBS_POOL_SIZE;
3890 		}
3891 		break;
3892 	case PCIDevVenIDARC1214: {
3893 			acb->adapter_type = ACB_ADAPTER_TYPE_D;
3894 			acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
3895 			max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBD_MessageUnit0));
3896 		}
3897 		break;
3898 	case PCIDevVenIDARC1200:
3899 	case PCIDevVenIDARC1201: {
3900 			acb->adapter_type = ACB_ADAPTER_TYPE_B;
3901 			acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
3902 			max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit));
3903 		}
3904 		break;
3905 	case PCIDevVenIDARC1110:
3906 	case PCIDevVenIDARC1120:
3907 	case PCIDevVenIDARC1130:
3908 	case PCIDevVenIDARC1160:
3909 	case PCIDevVenIDARC1170:
3910 	case PCIDevVenIDARC1210:
3911 	case PCIDevVenIDARC1220:
3912 	case PCIDevVenIDARC1230:
3913 	case PCIDevVenIDARC1231:
3914 	case PCIDevVenIDARC1260:
3915 	case PCIDevVenIDARC1261:
3916 	case PCIDevVenIDARC1270:
3917 	case PCIDevVenIDARC1280:
3918 	case PCIDevVenIDARC1212:
3919 	case PCIDevVenIDARC1222:
3920 	case PCIDevVenIDARC1380:
3921 	case PCIDevVenIDARC1381:
3922 	case PCIDevVenIDARC1680:
3923 	case PCIDevVenIDARC1681: {
3924 			acb->adapter_type = ACB_ADAPTER_TYPE_A;
3925 			acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
3926 			max_coherent_size = ARCMSR_SRBS_POOL_SIZE;
3927 		}
3928 		break;
3929 	default: {
3930 			kprintf("arcmsr%d:"
3931 			" unknown RAID adapter type \n", device_get_unit(dev));
3932 			return ENOMEM;
3933 		}
3934 	}
3935 	if(bus_dma_tag_create(  /*PCI parent*/		bus_get_dma_tag(dev),
3936 				/*alignment*/	1,
3937 				/*boundary*/	0,
3938 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3939 				/*highaddr*/	BUS_SPACE_MAXADDR,
3940 				/*filter*/	NULL,
3941 				/*filterarg*/	NULL,
3942 				/*maxsize*/	BUS_SPACE_MAXSIZE_32BIT,
3943 				/*nsegments*/	BUS_SPACE_UNRESTRICTED,
3944 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3945 				/*flags*/	0,
3946 						&acb->parent_dmat) != 0)
3947 	{
3948 		kprintf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3949 		return ENOMEM;
3950 	}
3951 	/* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
3952 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3953 				/*alignment*/	1,
3954 				/*boundary*/	0,
3955 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3956 				/*highaddr*/	BUS_SPACE_MAXADDR,
3957 				/*filter*/	NULL,
3958 				/*filterarg*/	NULL,
3959 				/*maxsize*/	ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM,
3960 				/*nsegments*/	ARCMSR_MAX_SG_ENTRIES,
3961 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3962 				/*flags*/	0,
3963 						&acb->dm_segs_dmat) != 0)
3964 	{
3965 		bus_dma_tag_destroy(acb->parent_dmat);
3966 		kprintf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3967 		return ENOMEM;
3968 	}
3969 
3970 	/* DMA tag for our srb structures.... Allocate the freesrb memory */
3971 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3972 				/*alignment*/	0x20,
3973 				/*boundary*/	0,
3974 				/*lowaddr*/	BUS_SPACE_MAXADDR_32BIT,
3975 				/*highaddr*/	BUS_SPACE_MAXADDR,
3976 				/*filter*/	NULL,
3977 				/*filterarg*/	NULL,
3978 				/*maxsize*/	max_coherent_size,
3979 				/*nsegments*/	1,
3980 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3981 				/*flags*/	0,
3982 						&acb->srb_dmat) != 0)
3983 	{
3984 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3985 		bus_dma_tag_destroy(acb->parent_dmat);
3986 		kprintf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3987 		return ENXIO;
3988 	}
3989 	/* Allocation for our srbs */
3990 	if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) {
3991 		bus_dma_tag_destroy(acb->srb_dmat);
3992 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3993 		bus_dma_tag_destroy(acb->parent_dmat);
3994 		kprintf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev));
3995 		return ENXIO;
3996 	}
3997 	/* And permanently map them */
3998 	if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) {
3999 		bus_dma_tag_destroy(acb->srb_dmat);
4000 		bus_dma_tag_destroy(acb->dm_segs_dmat);
4001 		bus_dma_tag_destroy(acb->parent_dmat);
4002 		kprintf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev));
4003 		return ENXIO;
4004 	}
4005 	pci_command = pci_read_config(dev, PCIR_COMMAND, 2);
4006 	pci_command |= PCIM_CMD_BUSMASTEREN;
4007 	pci_command |= PCIM_CMD_PERRESPEN;
4008 	pci_command |= PCIM_CMD_MWRICEN;
4009 	/* Enable Busmaster */
4010 	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
4011 	switch(acb->adapter_type) {
4012 	case ACB_ADAPTER_TYPE_A: {
4013 			u_int32_t rid0 = PCIR_BAR(0);
4014 			vm_offset_t	mem_base0;
4015 
4016 			acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, 0x1000, RF_ACTIVE);
4017 			if(acb->sys_res_arcmsr[0] == NULL) {
4018 				arcmsr_free_resource(acb);
4019 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
4020 				return ENOMEM;
4021 			}
4022 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
4023 				arcmsr_free_resource(acb);
4024 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
4025 				return ENXIO;
4026 			}
4027 			mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
4028 			if(mem_base0 == 0) {
4029 				arcmsr_free_resource(acb);
4030 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
4031 				return ENXIO;
4032 			}
4033 			acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
4034 			acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
4035 			acb->pmu = (struct MessageUnit_UNION *)mem_base0;
4036 		}
4037 		break;
4038 	case ACB_ADAPTER_TYPE_B: {
4039 			struct HBB_MessageUnit *phbbmu;
4040 			struct CommandControlBlock *freesrb;
4041 			u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) };
4042 			vm_offset_t	mem_base[]={0,0};
4043 			for(i=0; i < 2; i++) {
4044 				if(i == 0) {
4045 					acb->sys_res_arcmsr[i] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i],
4046 											0ul, ~0ul, sizeof(struct HBB_DOORBELL), RF_ACTIVE);
4047 				} else {
4048 					acb->sys_res_arcmsr[i] = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i],
4049 											0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE);
4050 				}
4051 				if(acb->sys_res_arcmsr[i] == NULL) {
4052 					arcmsr_free_resource(acb);
4053 					kprintf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i);
4054 					return ENOMEM;
4055 				}
4056 				if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) {
4057 					arcmsr_free_resource(acb);
4058 					kprintf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i);
4059 					return ENXIO;
4060 				}
4061 				mem_base[i] = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]);
4062 				if(mem_base[i] == 0) {
4063 					arcmsr_free_resource(acb);
4064 					kprintf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i);
4065 					return ENXIO;
4066 				}
4067 				acb->btag[i] = rman_get_bustag(acb->sys_res_arcmsr[i]);
4068 				acb->bhandle[i] = rman_get_bushandle(acb->sys_res_arcmsr[i]);
4069 			}
4070 			freesrb = (struct CommandControlBlock *)acb->uncacheptr;
4071 			acb->pmu = (struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE);
4072 			phbbmu = (struct HBB_MessageUnit *)acb->pmu;
4073 			phbbmu->hbb_doorbell = (struct HBB_DOORBELL *)mem_base[0];
4074 			phbbmu->hbb_rwbuffer = (struct HBB_RWBUFFER *)mem_base[1];
4075 		}
4076 		break;
4077 	case ACB_ADAPTER_TYPE_C: {
4078 			u_int32_t rid0 = PCIR_BAR(1);
4079 			vm_offset_t	mem_base0;
4080 
4081 			acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE);
4082 			if(acb->sys_res_arcmsr[0] == NULL) {
4083 				arcmsr_free_resource(acb);
4084 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
4085 				return ENOMEM;
4086 			}
4087 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
4088 				arcmsr_free_resource(acb);
4089 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
4090 				return ENXIO;
4091 			}
4092 			mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
4093 			if(mem_base0 == 0) {
4094 				arcmsr_free_resource(acb);
4095 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
4096 				return ENXIO;
4097 			}
4098 			acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
4099 			acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
4100 			acb->pmu = (struct MessageUnit_UNION *)mem_base0;
4101 		}
4102 		break;
4103 	case ACB_ADAPTER_TYPE_D: {
4104 			struct HBD_MessageUnit0 *phbdmu;
4105 			u_int32_t rid0 = PCIR_BAR(0);
4106 			vm_offset_t	mem_base0;
4107 
4108 			acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBD_MessageUnit), RF_ACTIVE);
4109 			if(acb->sys_res_arcmsr[0] == NULL) {
4110 				arcmsr_free_resource(acb);
4111 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
4112 				return ENOMEM;
4113 			}
4114 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
4115 				arcmsr_free_resource(acb);
4116 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
4117 				return ENXIO;
4118 			}
4119 			mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
4120 			if(mem_base0 == 0) {
4121 				arcmsr_free_resource(acb);
4122 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
4123 				return ENXIO;
4124 			}
4125 			acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
4126 			acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
4127 			acb->pmu = (struct MessageUnit_UNION *)((unsigned long)acb->uncacheptr+ARCMSR_SRBS_POOL_SIZE);
4128 			phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
4129 			phbdmu->phbdmu = (struct HBD_MessageUnit *)mem_base0;
4130 		}
4131 		break;
4132 	}
4133 	if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) {
4134 		arcmsr_free_resource(acb);
4135 		kprintf("arcmsr%d: map free srb failure!\n", device_get_unit(dev));
4136 		return ENXIO;
4137 	}
4138 	acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
4139 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
4140 	/*
4141 	********************************************************************
4142 	** init raid volume state
4143 	********************************************************************
4144 	*/
4145 	for(i=0; i < ARCMSR_MAX_TARGETID; i++) {
4146 		for(j=0; j < ARCMSR_MAX_TARGETLUN; j++) {
4147 			acb->devstate[i][j] = ARECA_RAID_GONE;
4148 		}
4149 	}
4150 	arcmsr_iop_init(acb);
4151 	return(0);
4152 }
4153 /*
4154 ************************************************************************
4155 ************************************************************************
4156 */
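/*
** Attach: initialize the hardware, hook up the MSI or legacy interrupt,
** register a CAM SIM and wildcard path, subscribe to async events and
** create the arcmsr%d control node (plus its arc%d alias).  A 60-second
** callout drives arcmsr_polling_devmap().
*/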
4157 static int arcmsr_attach(device_t dev)
4158 {
4159 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
4160 	u_int32_t unit=device_get_unit(dev);
4161 	struct ccb_setasync csa;
4162 	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
4163 	struct resource	*irqres;
4164 	int	rid;
4165 	u_int irq_flags;
4166 
4167 	if(acb == NULL) {
4168 		kprintf("arcmsr%d: cannot allocate softc\n", unit);
4169 		return (ENOMEM);
4170 	}
4171 	arcmsr_mutex_init(acb);
4172 	acb->pci_dev = dev;
4173 	acb->pci_unit = unit;
4174 	if(arcmsr_initialize(dev)) {
4175 		kprintf("arcmsr%d: initialize failure!\n", unit);
4176 		arcmsr_mutex_destroy(acb);
4177 		return ENXIO;
4178 	}
4179 	/* After setting up the adapter, map our interrupt */
4180 	rid=0;
4181 	acb->irq_type = pci_alloc_1intr(dev, arcmsr_msi_enable, &rid,
4182 	    &irq_flags);
4183 	irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1,
4184 	    irq_flags);
4185 	if(irqres == NULL ||
4186 		bus_setup_intr(dev, irqres, INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih, NULL)) {
4187 		arcmsr_free_resource(acb);
4188 		arcmsr_mutex_destroy(acb);
4189 		kprintf("arcmsr%d: unable to register interrupt handler!\n", unit);
4190 		return ENXIO;
4191 	}
4192 	acb->irqres = irqres;
4193 	/*
4194 	 * Now let the CAM generic SCSI layer find the SCSI devices on
4195 	 * the bus and start the queue running from the idle loop.
4196 	 * Create the device queue for our SIM; acb->maxOutstanding
4197 	 * bounds the number of simultaneous transactions.
4198 	*/
4199 	devq = cam_simq_alloc(acb->maxOutstanding);
4200 	if(devq == NULL) {
4201 	    arcmsr_free_resource(acb);
4202 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
4203 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
4204 			pci_release_msi(dev);
4205 		arcmsr_mutex_destroy(acb);
4206 		kprintf("arcmsr%d: cam_simq_alloc failure!\n", unit);
4207 		return ENXIO;
4208 	}
4209 	acb->psim = cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->isr_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
4210 	cam_simq_release(devq);
4211 	if(acb->psim == NULL) {
4212 		arcmsr_free_resource(acb);
4213 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
4214 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
4215 			pci_release_msi(dev);
4216 		arcmsr_mutex_destroy(acb);
4217 		kprintf("arcmsr%d: cam_sim_alloc failure!\n", unit);
4218 		return ENXIO;
4219 	}
4220 	ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
4221 	if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
4222 		arcmsr_free_resource(acb);
4223 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
4224 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
4225 			pci_release_msi(dev);
4226 		cam_sim_free(acb->psim);
4227 		arcmsr_mutex_destroy(acb);
4228 		kprintf("arcmsr%d: xpt_bus_register failure!\n", unit);
4229 		return ENXIO;
4230 	}
4231 	if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
4232 		arcmsr_free_resource(acb);
		bus_release_resource(dev, SYS_RES_IRQ, rid, acb->irqres);
4234 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
4235 			pci_release_msi(dev);
4236 		xpt_bus_deregister(cam_sim_path(acb->psim));
4237 		cam_sim_free(acb->psim);
4238 		arcmsr_mutex_destroy(acb);
4239 		kprintf("arcmsr%d: xpt_create_path failure!\n", unit);
4240 		return ENXIO;
4241 	}
4242 	/*
4243 	****************************************************
4244 	*/
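	/* ask CAM to invoke arcmsr_async() whenever devices are found or lost on this SIM */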
4245 	xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5);
4246 	csa.ccb_h.func_code = XPT_SASYNC_CB;
4247 	csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE;
4248 	csa.callback = arcmsr_async;
4249 	csa.callback_arg = acb->psim;
4250 	xpt_action((union ccb *)&csa);
4251 	ARCMSR_LOCK_RELEASE(&acb->isr_lock);
4252 	/* Create the control device.  */
4253 	acb->ioctl_dev = make_dev(&arcmsr_ops, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit);
4254 
4255 	acb->ioctl_dev->si_drv1 = acb;
4256 	(void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
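	/* start the 60 second periodic device map poll */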
4257 	arcmsr_callout_init(&acb->devmap_callout);
4258 	callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb);
4259 	return (0);
4260 }
4261 
4262 /*
4263 ************************************************************************
4264 ************************************************************************
4265 */
4266 static int arcmsr_probe(device_t dev)
4267 {
4268 	u_int32_t id;
4269 	u_int16_t sub_device_id;
4270 	static char buf[256];
4271 	char x_type[]={"unknown"};
4272 	char *type;
4273 	int raid6 = 1;
4274 
4275 	if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
4276 		return (ENXIO);
4277 	}
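	/* classify the adapter by PCI device ID to compose the probe description */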
4278 	sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
4279 	switch(id = pci_get_devid(dev)) {
4280 	case PCIDevVenIDARC1110:
4281 	case PCIDevVenIDARC1200:
4282 	case PCIDevVenIDARC1201:
4283 	case PCIDevVenIDARC1210:
4284 		raid6 = 0;
4285 		/*FALLTHRU*/
4286 	case PCIDevVenIDARC1120:
4287 	case PCIDevVenIDARC1130:
4288 	case PCIDevVenIDARC1160:
4289 	case PCIDevVenIDARC1170:
4290 	case PCIDevVenIDARC1220:
4291 	case PCIDevVenIDARC1230:
4292 	case PCIDevVenIDARC1231:
4293 	case PCIDevVenIDARC1260:
4294 	case PCIDevVenIDARC1261:
4295 	case PCIDevVenIDARC1270:
4296 	case PCIDevVenIDARC1280:
4297 		type = "SATA 3G";
4298 		break;
4299 	case PCIDevVenIDARC1212:
4300 	case PCIDevVenIDARC1222:
4301 	case PCIDevVenIDARC1380:
4302 	case PCIDevVenIDARC1381:
4303 	case PCIDevVenIDARC1680:
4304 	case PCIDevVenIDARC1681:
4305 		type = "SAS 3G";
4306 		break;
4307 	case PCIDevVenIDARC1880:
4308 	case PCIDevVenIDARC1882:
4309 	case PCIDevVenIDARC1213:
4310 	case PCIDevVenIDARC1223:
4311 		if (sub_device_id == ARECA_SUB_DEV_ID_1883)
4312 			type = "SAS 12G";
4313 		else
4314 			type = "SAS 6G";
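		/* disable MSI for these adapters; they attach with legacy INTx interrupts */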
4315 		arcmsr_msi_enable = 0;
4316 		break;
4317 	case PCIDevVenIDARC1214:
4318 		type = "SATA 6G";
4319 		break;
4320 	default:
4321 		type = x_type;
4322 		raid6 = 0;
4323 		break;
4324 	}
4325 	if(type == x_type)
4326 		return(ENXIO);
4327 	ksprintf(buf, "Areca %s Host Adapter RAID Controller%s",
4328 		type, raid6 ? " (RAID6 capable)" : "");
4329 	device_set_desc_copy(dev, buf);
4330 	return (BUS_PROBE_DEFAULT);
4331 }
4332 /*
4333 ************************************************************************
4334 ************************************************************************
4335 */
4336 static int arcmsr_shutdown(device_t dev)
4337 {
4338 	u_int32_t  i;
4339 	struct CommandControlBlock *srb;
4340 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
4341 
4342 	/* stop adapter background rebuild */
4343 	ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
4344 	/* disable all outbound interrupt */
4345 	arcmsr_disable_allintr(acb);
4346 	arcmsr_stop_adapter_bgrb(acb);
4347 	arcmsr_flush_adapter_cache(acb);
4348 	/* abort all outstanding command */
4349 	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
4350 	acb->acb_flags &= ~ACB_F_IOP_INITED;
4351 	if(acb->srboutstandingcount != 0) {
4352 		/*clear and abort all outbound posted Q*/
4353 		arcmsr_done4abort_postqueue(acb);
4354 		/* talk to iop 331 outstanding command aborted*/
4355 		arcmsr_abort_allcmd(acb);
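		/* hand any SRBs still marked in-flight back to CAM with an aborted status */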
4356 		for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
4357 			srb = acb->psrb_pool[i];
4358 			if(srb->srb_state == ARCMSR_SRB_START) {
4359 				srb->srb_state = ARCMSR_SRB_ABORTED;
4360 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
4361 				arcmsr_srb_complete(srb, 1);
4362 			}
4363 		}
4364 	}
4365 	acb->srboutstandingcount = 0;
4366 	acb->workingsrb_doneindex = 0;
4367 	acb->workingsrb_startindex = 0;
4368 	acb->pktRequestCount = 0;
4369 	acb->pktReturnCount = 0;
4370 	ARCMSR_LOCK_RELEASE(&acb->isr_lock);
4371 	return (0);
4372 }
4373 /*
4374 ************************************************************************
4375 ************************************************************************
4376 */
4377 static int arcmsr_detach(device_t dev)
4378 {
4379 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
4380 	int i;
4381 
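	/* stop the devmap poll and unhook the interrupt handler before shutting the adapter down */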
4382 	callout_stop(&acb->devmap_callout);
4383 	bus_teardown_intr(dev, acb->irqres, acb->ih);
4384 	arcmsr_shutdown(dev);
4385 	arcmsr_free_resource(acb);
	for(i=0; (i < 2) && (acb->sys_res_arcmsr[i] != NULL); i++) {
4387 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]);
4388 	}
	/* pci_alloc_1intr() is expected to use rid 1 for MSI and rid 0 for legacy INTx */
	bus_release_resource(dev, SYS_RES_IRQ, (acb->irq_type == PCI_INTR_TYPE_MSI) ? 1 : 0, acb->irqres);
4390 	if (acb->irq_type == PCI_INTR_TYPE_MSI)
4391 		pci_release_msi(dev);
4392 	ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
4393 	xpt_async(AC_LOST_DEVICE, acb->ppath, NULL);
4394 	xpt_free_path(acb->ppath);
4395 	xpt_bus_deregister(cam_sim_path(acb->psim));
4396 	cam_sim_free(acb->psim);
4397 	ARCMSR_LOCK_RELEASE(&acb->isr_lock);
4398 	arcmsr_mutex_destroy(acb);
4399 	return (0);
4400 }
4401 
4402 #ifdef ARCMSR_DEBUG1
4403 static void arcmsr_dump_data(struct AdapterControlBlock *acb)
4404 {
4405 	if((acb->pktRequestCount - acb->pktReturnCount) == 0)
4406 		return;
4407 	kprintf("Command Request Count   =0x%x\n",acb->pktRequestCount);
4408 	kprintf("Command Return Count    =0x%x\n",acb->pktReturnCount);
4409 	kprintf("Command (Req-Rtn) Count =0x%x\n",(acb->pktRequestCount - acb->pktReturnCount));
4410 	kprintf("Queued Command Count    =0x%x\n",acb->srboutstandingcount);
4411 }
4412 #endif
4413