1 /*
2 ********************************************************************************
3 **        OS    : FreeBSD
4 **   FILE NAME  : arcmsr.c
5 **        BY    : Erich Chen, Ching Huang
6 **   Description: SCSI RAID Device Driver for
7 **                ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x)
8 **                SATA/SAS RAID HOST Adapter
9 ********************************************************************************
10 ********************************************************************************
11 **
12 ** Copyright (C) 2002 - 2012, Areca Technology Corporation All rights reserved.
13 **
14 ** Redistribution and use in source and binary forms, with or without
15 ** modification, are permitted provided that the following conditions
16 ** are met:
17 ** 1. Redistributions of source code must retain the above copyright
18 **    notice, this list of conditions and the following disclaimer.
19 ** 2. Redistributions in binary form must reproduce the above copyright
20 **    notice, this list of conditions and the following disclaimer in the
21 **    documentation and/or other materials provided with the distribution.
22 ** 3. The name of the author may not be used to endorse or promote products
23 **    derived from this software without specific prior written permission.
24 **
25 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
26 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
27 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
28 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
29 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT
30 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
31 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
32 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
33 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
34 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 ********************************************************************************
36 ** History
37 **
38 **    REV#         DATE         NAME        DESCRIPTION
39 ** 1.00.00.00   03/31/2004  Erich Chen      First release
40 ** 1.20.00.02   11/29/2004  Erich Chen      bug fix with arcmsr_bus_reset when PHY error
41 ** 1.20.00.03   04/19/2005  Erich Chen      add SATA 24 Ports adapter type support
42 **                                          clean unused function
43 ** 1.20.00.12   09/12/2005  Erich Chen      bug fix with abort command handling,
44 **                                          firmware version check
45 **                                          and firmware update notify for hardware bug fix
46 **                                          handling of a non-zero high part in the
47 **                                          physical address of the srb resource
48 ** 1.20.00.13   08/18/2006  Erich Chen      remove pending srb and report busy
49 **                                          add iop message xfer
50 **                                          with scsi pass-through command
51 **                                          add new device id of sas raid adapters
52 **                                          code fit for SPARC64 & PPC
53 ** 1.20.00.14   02/05/2007  Erich Chen      bug fix for incorrect ccb_h.status report
54 **                                          which caused g_vfs_done() read/write errors
55 ** 1.20.00.15   10/10/2007  Erich Chen      support new RAID adapter type ARC120x
56 ** 1.20.00.16   10/10/2009  Erich Chen      Bug fix for RAID adapter type ARC120x
57 **                                          bus_dmamem_alloc() with BUS_DMA_ZERO
58 ** 1.20.00.17   07/15/2010  Ching Huang     Added support ARC1880
59 **                                          report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed,
60 **                                          prevent cam_periph_error removing all LUN devices of one Target id
61 **                                          when any single LUN device fails
62 ** 1.20.00.18   10/14/2010  Ching Huang     Fixed "inquiry data fails comparison at DV1 step"
63 **              10/25/2010  Ching Huang     Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B
64 ** 1.20.00.19   11/11/2010  Ching Huang     Fixed arcmsr driver preventing arcsas support for Areca SAS HBA ARC13x0
65 ** 1.20.00.20   12/08/2010  Ching Huang     Avoid calling atomic_set_int function
66 ** 1.20.00.21   02/08/2011  Ching Huang     Implement I/O request timeout
67 **              02/14/2011  Ching Huang     Modified pktRequestCount
68 ** 1.20.00.21   03/03/2011  Ching Huang     if a command timeout, then wait its ccb back before free it
69 ** 1.20.00.22   07/04/2011  Ching Huang     Fixed multiple MTX panic
70 ** 1.20.00.23   10/28/2011  Ching Huang     Added TIMEOUT_DELAY in case too many HDDs need to start
71 ** 1.20.00.23   11/08/2011  Ching Huang     Added report device transfer speed
72 ** 1.20.00.23   01/30/2012  Ching Huang     Fixed Request requeued and Retrying command
73 ** 1.20.00.24   06/11/2012  Ching Huang     Fixed return sense data condition
74 ** 1.20.00.25   08/17/2012  Ching Huang     Fixed hotplug device no function on type A adapter
75 ** 1.20.00.26   12/14/2012  Ching Huang     Added support ARC1214,1224,1264,1284
76 ** 1.20.00.27   05/06/2013  Ching Huang     Fixed outstanding cmd full on ARC-12x4
77 ** 1.20.00.28   09/13/2013  Ching Huang     Removed recursive mutex in arcmsr_abort_dr_ccbs
78 ** 1.20.00.29   12/18/2013  Ching Huang     Change simq allocation number, support ARC1883
79 ******************************************************************************************
80 * $FreeBSD: head/sys/dev/arcmsr/arcmsr.c 259565 2013-12-18 19:25:40Z delphij $
81 */
82 #if 0
83 #define ARCMSR_DEBUG1			1
84 #endif
85 #include <sys/param.h>
86 #include <sys/systm.h>
87 #include <sys/malloc.h>
88 #include <sys/kernel.h>
89 #include <sys/bus.h>
90 #include <sys/queue.h>
91 #include <sys/stat.h>
92 #include <sys/kthread.h>
93 #include <sys/module.h>
94 #include <sys/proc.h>
95 #include <sys/lock.h>
96 #include <sys/sysctl.h>
97 #include <sys/thread2.h>
98 #include <sys/poll.h>
99 #include <sys/device.h>
100 #include <vm/vm.h>
101 #include <vm/vm_param.h>
102 #include <vm/pmap.h>
103 
104 #include <machine/atomic.h>
105 #include <sys/conf.h>
106 #include <sys/rman.h>
107 
108 #include <bus/cam/cam.h>
109 #include <bus/cam/cam_ccb.h>
110 #include <bus/cam/cam_sim.h>
111 #include <bus/cam/cam_periph.h>
112 #include <bus/cam/cam_xpt_periph.h>
113 #include <bus/cam/cam_xpt_sim.h>
114 #include <bus/cam/cam_debug.h>
115 #include <bus/cam/scsi/scsi_all.h>
116 #include <bus/cam/scsi/scsi_message.h>
117 /*
118 **************************************************************************
119 **************************************************************************
120 */
121 #include <sys/endian.h>
122 #include <bus/pci/pcivar.h>
123 #include <bus/pci/pcireg.h>
124 
125 #define arcmsr_callout_init(a)	callout_init_mp(a);
126 
127 #define ARCMSR_DRIVER_VERSION	"arcmsr version 1.20.00.29 2013-12-18"
128 #include <dev/raid/arcmsr/arcmsr.h>
129 /*
130 **************************************************************************
131 **************************************************************************
132 */
133 static void arcmsr_free_srb(struct CommandControlBlock *srb);
134 static struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb);
135 static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb);
136 static int arcmsr_probe(device_t dev);
137 static int arcmsr_attach(device_t dev);
138 static int arcmsr_detach(device_t dev);
139 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
140 static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
141 static int arcmsr_shutdown(device_t dev);
142 static void arcmsr_interrupt(struct AdapterControlBlock *acb);
143 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
144 static void arcmsr_free_resource(struct AdapterControlBlock *acb);
145 static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
146 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
147 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
148 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
149 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
150 static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, struct QBUFFER *prbuffer);
151 static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb);
152 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
153 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
154 static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
155 static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
156 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg);
157 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb);
158 static int arcmsr_resume(device_t dev);
159 static int arcmsr_suspend(device_t dev);
160 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
161 static void	arcmsr_polling_devmap(void *arg);
162 static void	arcmsr_srb_timeout(void *arg);
163 static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb);
164 #ifdef ARCMSR_DEBUG1
165 static void arcmsr_dump_data(struct AdapterControlBlock *acb);
166 #endif
167 /*
168 **************************************************************************
169 **************************************************************************
170 */
171 static void UDELAY(u_int32_t us) { DELAY(us); }
172 /*
173 **************************************************************************
174 **************************************************************************
175 */
176 static bus_dmamap_callback_t arcmsr_map_free_srb;
177 static bus_dmamap_callback_t arcmsr_execute_srb;
178 /*
179 **************************************************************************
180 **************************************************************************
181 */
182 static d_open_t	arcmsr_open;
183 static d_close_t arcmsr_close;
184 static d_ioctl_t arcmsr_ioctl;
185 
186 static device_method_t arcmsr_methods[]={
187 	DEVMETHOD(device_probe,		arcmsr_probe),
188 	DEVMETHOD(device_attach,	arcmsr_attach),
189 	DEVMETHOD(device_detach,	arcmsr_detach),
190 	DEVMETHOD(device_shutdown,	arcmsr_shutdown),
191 	DEVMETHOD(device_suspend,	arcmsr_suspend),
192 	DEVMETHOD(device_resume,	arcmsr_resume),
193 	DEVMETHOD_END
194 };
195 
196 static driver_t arcmsr_driver={
197 	"arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
198 };
199 
200 static devclass_t arcmsr_devclass;
201 DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, NULL, NULL);
202 MODULE_VERSION(arcmsr, 1);
203 MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
204 MODULE_DEPEND(arcmsr, cam, 1, 1, 1);
205 #ifndef BUS_DMA_COHERENT
206 	#define	BUS_DMA_COHERENT	0x04	/* hint: map memory in a coherent way */
207 #endif
208 
209 static struct dev_ops arcmsr_ops = {
210 	{ "arcmsr", 0, D_MPSAFE },
211 	.d_open =	arcmsr_open,		        /* open     */
212 	.d_close =	arcmsr_close,		        /* close    */
213 	.d_ioctl =	arcmsr_ioctl,		        /* ioctl    */
214 };
215 
216 static int	arcmsr_msi_enable = 1;
217 TUNABLE_INT("hw.arcmsr.msi.enable", &arcmsr_msi_enable);
218 
219 
220 /*
221 **************************************************************************
222 **************************************************************************
223 */
224 
225 static int
226 arcmsr_open(struct dev_open_args *ap)
227 {
228 	cdev_t dev = ap->a_head.a_dev;
229 	struct AdapterControlBlock *acb = dev->si_drv1;
230 
231 	if(acb == NULL) {
232 		return ENXIO;
233 	}
234 	return (0);
235 }
236 
237 /*
238 **************************************************************************
239 **************************************************************************
240 */
241 
242 static int
243 arcmsr_close(struct dev_close_args *ap)
244 {
245 	cdev_t dev = ap->a_head.a_dev;
246 	struct AdapterControlBlock *acb = dev->si_drv1;
247 
248 	if(acb == NULL) {
249 		return ENXIO;
250 	}
251 	return 0;
252 }
253 
254 /*
255 **************************************************************************
256 **************************************************************************
257 */
258 
259 static int
260 arcmsr_ioctl(struct dev_ioctl_args *ap)
261 {
262 	cdev_t dev = ap->a_head.a_dev;
263 	u_long ioctl_cmd = ap->a_cmd;
264 	caddr_t arg = ap->a_data;
265 	struct AdapterControlBlock *acb = dev->si_drv1;
266 
267 	if(acb == NULL) {
268 		return ENXIO;
269 	}
270 	return (arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
271 }
272 
273 /*
274 **********************************************************************
275 **********************************************************************
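** arcmsr_disable_allintr: mask all outbound interrupts on the adapter and
** return the previous mask so arcmsr_enable_allintr() can restore it later.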
276 */
277 static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb)
278 {
279 	u_int32_t intmask_org = 0;
280 
281 	switch (acb->adapter_type) {
282 	case ACB_ADAPTER_TYPE_A: {
283 			/* disable all outbound interrupt */
284 			intmask_org = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */
285 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
286 		}
287 		break;
288 	case ACB_ADAPTER_TYPE_B: {
289 			/* disable all outbound interrupt */
290 			intmask_org = CHIP_REG_READ32(HBB_DOORBELL,
291 			0, iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */
292 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, 0); /* disable all interrupt */
293 		}
294 		break;
295 	case ACB_ADAPTER_TYPE_C: {
296 			/* disable all outbound interrupt */
297 			intmask_org = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask); /* disable outbound message0 int */
298 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
299 		}
300 		break;
301 	case ACB_ADAPTER_TYPE_D: {
302 			/* disable all outbound interrupt */
303 			intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable); /* disable outbound message0 int */
304 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE);
305 		}
306 		break;
307 	}
308 	return (intmask_org);
309 }
310 /*
311 **********************************************************************
312 **********************************************************************
313 */
314 static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org)
315 {
316 	u_int32_t mask;
317 
318 	switch (acb->adapter_type) {
319 	case ACB_ADAPTER_TYPE_A: {
320 			/* enable outbound Post Queue, outbound doorbell Interrupt */
321 			mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
322 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask);
323 			acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
324 		}
325 		break;
326 	case ACB_ADAPTER_TYPE_B: {
327 			/* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
328 			mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
329 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/
330 			acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
331 		}
332 		break;
333 	case ACB_ADAPTER_TYPE_C: {
334 			/* enable outbound Post Queue, outbound doorbell Interrupt */
335 			mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
336 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask);
337 			acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
338 		}
339 		break;
340 	case ACB_ADAPTER_TYPE_D: {
341 			/* enable outbound Post Queue, outbound doorbell Interrupt */
342 			mask = ARCMSR_HBDMU_ALL_INT_ENABLE;
343 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | mask);
344 			CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable);
345 			acb->outbound_int_enable = mask;
346 		}
347 		break;
348 	}
349 }
350 /*
351 **********************************************************************
352 **********************************************************************
353 */
354 static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
355 {
356 	u_int32_t Index;
357 	u_int8_t Retries = 0x00;
358 
359 	do {
360 		for(Index=0; Index < 100; Index++) {
361 			if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
362 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/
363 				return TRUE;
364 			}
365 			UDELAY(10000);
366 		}/* max 1 second */
367 	}while(Retries++ < 20);/*max 20 sec*/
368 	return (FALSE);
369 }
370 /*
371 **********************************************************************
372 **********************************************************************
373 */
374 static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
375 {
376 	u_int32_t Index;
377 	u_int8_t Retries = 0x00;
378 
379 	do {
380 		for(Index=0; Index < 100; Index++) {
381 			if(CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
382 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/
383 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
384 				return TRUE;
385 			}
386 			UDELAY(10000);
387 		}/* max 1 second */
388 	}while(Retries++ < 20);/*max 20 sec*/
389 	return (FALSE);
390 }
391 /*
392 **********************************************************************
393 **********************************************************************
394 */
395 static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb)
396 {
397 	u_int32_t Index;
398 	u_int8_t Retries = 0x00;
399 
400 	do {
401 		for(Index=0; Index < 100; Index++) {
402 			if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
403 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/
404 				return TRUE;
405 			}
406 			UDELAY(10000);
407 		}/* max 1 second */
408 	}while(Retries++ < 20);/*max 20 sec*/
409 	return (FALSE);
410 }
411 /*
412 **********************************************************************
413 **********************************************************************
414 */
415 static u_int8_t arcmsr_hbd_wait_msgint_ready(struct AdapterControlBlock *acb)
416 {
417 	u_int32_t Index;
418 	u_int8_t Retries = 0x00;
419 
420 	do {
421 		for(Index=0; Index < 100; Index++) {
422 			if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) {
423 				CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);/*clear interrupt*/
424 				return TRUE;
425 			}
426 			UDELAY(10000);
427 		}/* max 1 second */
428 	}while(Retries++ < 20);/*max 20 sec*/
429 	return (FALSE);
430 }
431 /*
432 ************************************************************************
433 ************************************************************************
434 */
435 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
436 {
437 	int retry_count = 30;	/* allow up to 10 minutes (30 retries x ~20 s each) for the adapter cache flush */
438 
439 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
440 	do {
441 		if(arcmsr_hba_wait_msgint_ready(acb)) {
442 			break;
443 		} else {
444 			retry_count--;
445 		}
446 	}while(retry_count != 0);
447 }
448 /*
449 ************************************************************************
450 ************************************************************************
451 */
452 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
453 {
454 	int retry_count = 30;	/* allow up to 10 minutes (30 retries x ~20 s each) for the adapter cache flush */
455 
456 	CHIP_REG_WRITE32(HBB_DOORBELL,
457 	0, drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
458 	do {
459 		if(arcmsr_hbb_wait_msgint_ready(acb)) {
460 			break;
461 		} else {
462 			retry_count--;
463 		}
464 	}while(retry_count != 0);
465 }
466 /*
467 ************************************************************************
468 ************************************************************************
469 */
470 static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
471 {
472 	int retry_count = 30;	/* allow up to 10 minutes (30 retries x ~20 s each) for the adapter cache flush */
473 
474 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
475 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
476 	do {
477 		if(arcmsr_hbc_wait_msgint_ready(acb)) {
478 			break;
479 		} else {
480 			retry_count--;
481 		}
482 	}while(retry_count != 0);
483 }
484 /*
485 ************************************************************************
486 ************************************************************************
487 */
488 static void arcmsr_flush_hbd_cache(struct AdapterControlBlock *acb)
489 {
490 	int retry_count = 30;	/* allow up to 10 minutes (30 retries x ~20 s each) for the adapter cache flush */
491 
492 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
493 	do {
494 		if(arcmsr_hbd_wait_msgint_ready(acb)) {
495 			break;
496 		} else {
497 			retry_count--;
498 		}
499 	}while(retry_count != 0);
500 }
501 /*
502 ************************************************************************
503 ************************************************************************
504 */
505 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
506 {
507 	switch (acb->adapter_type) {
508 	case ACB_ADAPTER_TYPE_A: {
509 			arcmsr_flush_hba_cache(acb);
510 		}
511 		break;
512 	case ACB_ADAPTER_TYPE_B: {
513 			arcmsr_flush_hbb_cache(acb);
514 		}
515 		break;
516 	case ACB_ADAPTER_TYPE_C: {
517 			arcmsr_flush_hbc_cache(acb);
518 		}
519 		break;
520 	case ACB_ADAPTER_TYPE_D: {
521 			arcmsr_flush_hbd_cache(acb);
522 		}
523 		break;
524 	}
525 }
526 /*
527 *******************************************************************************
528 *******************************************************************************
529 */
530 static int arcmsr_suspend(device_t dev)
531 {
532 	struct AdapterControlBlock	*acb = device_get_softc(dev);
533 
534 	/* flush controller */
535 	arcmsr_iop_parking(acb);
536 	/* disable all outbound interrupt */
537 	arcmsr_disable_allintr(acb);
538 	return(0);
539 }
540 /*
541 *******************************************************************************
542 *******************************************************************************
543 */
544 static int arcmsr_resume(device_t dev)
545 {
546 	struct AdapterControlBlock	*acb = device_get_softc(dev);
547 
548 	arcmsr_iop_init(acb);
549 	return(0);
550 }
551 /*
552 *********************************************************************************
553 *********************************************************************************
554 */
555 static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg)
556 {
557 	struct AdapterControlBlock *acb;
558 	u_int8_t target_id, target_lun;
559 	struct cam_sim *sim;
560 
561 	sim = (struct cam_sim *) cb_arg;
562 	acb =(struct AdapterControlBlock *) cam_sim_softc(sim);
563 	switch (code) {
564 	case AC_LOST_DEVICE:
565 		target_id = xpt_path_target_id(path);
566 		target_lun = xpt_path_lun_id(path);
567 		if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) {
568 			break;
569 		}
570 	//	printf("%s:scsi id=%d lun=%d device lost \n", device_get_name(acb->pci_dev), target_id, target_lun);
571 		break;
572 	default:
573 		break;
574 	}
575 }
576 /*
577 **********************************************************************
578 **********************************************************************
579 */
580 static void arcmsr_report_sense_info(struct CommandControlBlock *srb)
581 {
582 	union ccb *pccb = srb->pccb;
583 
584 	pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
585 	pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
586 	if(pccb->csio.sense_len) {
587 		memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data));
588 		memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData,
589 		get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data)));
590 		((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */
591 		pccb->ccb_h.status |= CAM_AUTOSNS_VALID;
592 	}
593 }
594 /*
595 *********************************************************************
596 *********************************************************************
597 */
598 static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
599 {
600 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
601 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
602 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
603 	}
604 }
605 /*
606 *********************************************************************
607 *********************************************************************
608 */
609 static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
610 {
611 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
612 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
613 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
614 	}
615 }
616 /*
617 *********************************************************************
618 *********************************************************************
619 */
620 static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb)
621 {
622 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
623 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
624 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
625 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
626 	}
627 }
628 /*
629 *********************************************************************
630 *********************************************************************
631 */
632 static void arcmsr_abort_hbd_allcmd(struct AdapterControlBlock *acb)
633 {
634 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
635 	if(!arcmsr_hbd_wait_msgint_ready(acb)) {
636 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
637 	}
638 }
639 /*
640 *********************************************************************
641 *********************************************************************
642 */
643 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
644 {
645 	switch (acb->adapter_type) {
646 	case ACB_ADAPTER_TYPE_A: {
647 			arcmsr_abort_hba_allcmd(acb);
648 		}
649 		break;
650 	case ACB_ADAPTER_TYPE_B: {
651 			arcmsr_abort_hbb_allcmd(acb);
652 		}
653 		break;
654 	case ACB_ADAPTER_TYPE_C: {
655 			arcmsr_abort_hbc_allcmd(acb);
656 		}
657 		break;
658 	case ACB_ADAPTER_TYPE_D: {
659 			arcmsr_abort_hbd_allcmd(acb);
660 		}
661 		break;
662 	}
663 }
664 /*
665 **********************************************************************
666 **********************************************************************
667 */
668 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
669 {
670 	struct AdapterControlBlock *acb = srb->acb;
671 	union ccb *pccb = srb->pccb;
672 
673 	if(srb->srb_flags & SRB_FLAG_TIMER_START)
674 		callout_stop(&srb->ccb_callout);
675 	if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
676 		bus_dmasync_op_t op;
677 
678 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
679 			op = BUS_DMASYNC_POSTREAD;
680 		} else {
681 			op = BUS_DMASYNC_POSTWRITE;
682 		}
683 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
684 		bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
685 	}
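	/* stand_flag == 1: the SRB was posted to the IOP, so drop the outstanding
	 * count and release the frozen SIMQ once enough slots are available again */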
686 	if(stand_flag == 1) {
687 		atomic_subtract_int(&acb->srboutstandingcount, 1);
688 		if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && (
689 		acb->srboutstandingcount < (acb->maxOutstanding -10))) {
690 			acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN;
691 			pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
692 		}
693 	}
694 	if(srb->srb_state != ARCMSR_SRB_TIMEOUT)
695 		arcmsr_free_srb(srb);
696 	acb->pktReturnCount++;
697 	xpt_done(pccb);
698 }
699 /*
700 **************************************************************************
701 **************************************************************************
702 */
703 static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error)
704 {
705 	int target, lun;
706 
707 	target = srb->pccb->ccb_h.target_id;
708 	lun = srb->pccb->ccb_h.target_lun;
709 	if(error == FALSE) {
710 		if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
711 			acb->devstate[target][lun] = ARECA_RAID_GOOD;
712 		}
713 		srb->pccb->ccb_h.status |= CAM_REQ_CMP;
714 		arcmsr_srb_complete(srb, 1);
715 	} else {
716 		switch(srb->arcmsr_cdb.DeviceStatus) {
717 		case ARCMSR_DEV_SELECT_TIMEOUT: {
718 				if(acb->devstate[target][lun] == ARECA_RAID_GOOD) {
719 					kprintf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun);
720 				}
721 				acb->devstate[target][lun] = ARECA_RAID_GONE;
722 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
723 				arcmsr_srb_complete(srb, 1);
724 			}
725 			break;
726 		case ARCMSR_DEV_ABORTED:
727 		case ARCMSR_DEV_INIT_FAIL: {
728 				acb->devstate[target][lun] = ARECA_RAID_GONE;
729 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
730 				arcmsr_srb_complete(srb, 1);
731 			}
732 			break;
733 		case SCSISTAT_CHECK_CONDITION: {
734 				acb->devstate[target][lun] = ARECA_RAID_GOOD;
735 				arcmsr_report_sense_info(srb);
736 				arcmsr_srb_complete(srb, 1);
737 			}
738 			break;
739 		default:
740 			kprintf("arcmsr%d: scsi id=%d lun=%d isr got command error done, but got unknown DeviceStatus=0x%x \n"
741 					, acb->pci_unit, target, lun, srb->arcmsr_cdb.DeviceStatus);
742 			acb->devstate[target][lun] = ARECA_RAID_GONE;
743 			srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
744 			/* unknown error or CRC error; mark it so the command gets retried */
745 			arcmsr_srb_complete(srb, 1);
746 			break;
747 		}
748 	}
749 }
750 /*
751 **************************************************************************
752 **************************************************************************
753 */
754 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error)
755 {
756 	struct CommandControlBlock *srb;
757 
758 	/* check if command done with no error*/
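	/* flag_srb carries the completed SRB's bus address; the low 5 bits are reused
	 * as flags since frames are 32-byte aligned, and adding vir2phy_offset converts
	 * the address back to the driver's virtual SRB pointer */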
759 	switch (acb->adapter_type) {
760 	case ACB_ADAPTER_TYPE_C:
761 	case ACB_ADAPTER_TYPE_D:
762 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0)); /*frame must be 32 bytes aligned*/
763 		break;
764 	case ACB_ADAPTER_TYPE_A:
765 	case ACB_ADAPTER_TYPE_B:
766 	default:
767 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
768 		break;
769 	}
770 	if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
771 		if(srb->srb_state == ARCMSR_SRB_TIMEOUT) {
772 			arcmsr_free_srb(srb);
773 			kprintf("arcmsr%d: srb='%p' returned srb had already timed out\n", acb->pci_unit, srb);
774 			return;
775 		}
776 		kprintf("arcmsr%d: returned srb has already been completed\n"
777 			"srb='%p' srb_state=0x%x outstanding srb count=%d \n",
778 			acb->pci_unit, srb, srb->srb_state, acb->srboutstandingcount);
779 		return;
780 	}
781 	arcmsr_report_srb_state(acb, srb, error);
782 }
783 /*
784 **************************************************************************
785 **************************************************************************
786 */
787 static void	arcmsr_srb_timeout(void *arg)
788 {
789 	struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
790 	struct AdapterControlBlock *acb;
791 	int target, lun;
792 	u_int8_t cmd;
793 
794 	target = srb->pccb->ccb_h.target_id;
795 	lun = srb->pccb->ccb_h.target_lun;
796 	acb = srb->acb;
797 	if(srb->srb_state == ARCMSR_SRB_START)
798 	{
799 		cmd = srb->pccb->csio.cdb_io.cdb_bytes[0];
800 		srb->srb_state = ARCMSR_SRB_TIMEOUT;
801 		srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT;
802 		arcmsr_srb_complete(srb, 1);
803 		kprintf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command time out!\n",
804 				 acb->pci_unit, target, lun, cmd, srb);
805 	}
806 #ifdef ARCMSR_DEBUG1
807 	arcmsr_dump_data(acb);
808 #endif
809 }
810 
811 /*
812 **********************************************************************
813 **********************************************************************
814 */
815 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
816 {
817 	int i=0;
818 	u_int32_t flag_srb;
819 	u_int16_t error;
820 
821 	switch (acb->adapter_type) {
822 	case ACB_ADAPTER_TYPE_A: {
823 			u_int32_t outbound_intstatus;
824 
825 			/*clear and abort all outbound posted Q*/
826 			outbound_intstatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
827 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
828 			while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
829                 error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
830 				arcmsr_drain_donequeue(acb, flag_srb, error);
831 			}
832 		}
833 		break;
834 	case ACB_ADAPTER_TYPE_B: {
835 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
836 
837 			/*clear all outbound posted Q*/
838 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
839 			for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
840 				if((flag_srb = phbbmu->done_qbuffer[i]) != 0) {
841 					phbbmu->done_qbuffer[i] = 0;
842                 	error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
843 					arcmsr_drain_donequeue(acb, flag_srb, error);
844 				}
845 				phbbmu->post_qbuffer[i] = 0;
846 			}/*drain reply FIFO*/
847 			phbbmu->doneq_index = 0;
848 			phbbmu->postq_index = 0;
849 		}
850 		break;
851 	case ACB_ADAPTER_TYPE_C: {
852 
853 			while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
854 				flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
855                 error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
856 				arcmsr_drain_donequeue(acb, flag_srb, error);
857 			}
858 		}
859 		break;
860 	case ACB_ADAPTER_TYPE_D: {
861 			arcmsr_hbd_postqueue_isr(acb);
862 		}
863 		break;
864 	}
865 }
866 /*
867 ****************************************************************************
868 ****************************************************************************
869 */
870 static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
871 {
872 	struct CommandControlBlock *srb;
873 	u_int32_t intmask_org;
874 	u_int32_t i=0;
875 
876 	if(acb->srboutstandingcount>0) {
877 		/* disable all outbound interrupt */
878 		intmask_org = arcmsr_disable_allintr(acb);
879 		/*clear and abort all outbound posted Q*/
880 		arcmsr_done4abort_postqueue(acb);
881 		/* tell the IOP to abort all outstanding commands */
882 		arcmsr_abort_allcmd(acb);
883 		for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
884 			srb = acb->psrb_pool[i];
885 			if(srb->srb_state == ARCMSR_SRB_START) {
886 				srb->srb_state = ARCMSR_SRB_ABORTED;
887 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
888 				arcmsr_srb_complete(srb, 1);
889 				kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p' aborted\n"
890 						, acb->pci_unit, srb->pccb->ccb_h.target_id
891 						, (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
892 			}
893 		}
894 		/* enable all outbound interrupt */
895 		arcmsr_enable_allintr(acb, intmask_org);
896 	}
897 	acb->srboutstandingcount = 0;
898 	acb->workingsrb_doneindex = 0;
899 	acb->workingsrb_startindex = 0;
900 	acb->pktRequestCount = 0;
901 	acb->pktReturnCount = 0;
902 }
903 /*
904 **********************************************************************
905 **********************************************************************
906 */
907 static void arcmsr_build_srb(struct CommandControlBlock *srb,
908 		bus_dma_segment_t *dm_segs, u_int32_t nseg)
909 {
910 	struct ARCMSR_CDB *arcmsr_cdb = &srb->arcmsr_cdb;
911 	u_int8_t *psge = (u_int8_t *)&arcmsr_cdb->u;
912 	u_int32_t address_lo, address_hi;
913 	union ccb *pccb = srb->pccb;
914 	struct ccb_scsiio *pcsio = &pccb->csio;
915 	u_int32_t arccdbsize = 0x30;
916 
917 	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
918 	arcmsr_cdb->Bus = 0;
919 	arcmsr_cdb->TargetID = pccb->ccb_h.target_id;
920 	arcmsr_cdb->LUN = pccb->ccb_h.target_lun;
921 	arcmsr_cdb->Function = 1;
922 	arcmsr_cdb->CdbLength = (u_int8_t)pcsio->cdb_len;
923 	bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
924 	if(nseg != 0) {
925 		struct AdapterControlBlock *acb = srb->acb;
926 		bus_dmasync_op_t op;
927 		u_int32_t length, i, cdb_sgcount = 0;
928 
929 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
930 			op = BUS_DMASYNC_PREREAD;
931 		} else {
932 			op = BUS_DMASYNC_PREWRITE;
933 			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
934 			srb->srb_flags |= SRB_FLAG_WRITE;
935 		}
936 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
937 		for(i=0; i < nseg; i++) {
938 			/* Get the physical address of the current data pointer */
939 			length = arcmsr_htole32(dm_segs[i].ds_len);
940 			address_lo = arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
941 			address_hi = arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
942 			if(address_hi == 0) {
943 				struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
944 				pdma_sg->address = address_lo;
945 				pdma_sg->length = length;
946 				psge += sizeof(struct SG32ENTRY);
947 				arccdbsize += sizeof(struct SG32ENTRY);
948 			} else {
949 				u_int32_t sg64s_size = 0, tmplength = length;
950 
951 				while(1) {
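				/* an SG64 entry apparently must not cross a 4 GiB boundary, so split
				 * the segment at the boundary when the span would cross it */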
952 					u_int64_t span4G, length0;
953 					struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
954 
955 					span4G = (u_int64_t)address_lo + tmplength;
956 					pdma_sg->addresshigh = address_hi;
957 					pdma_sg->address = address_lo;
958 					if(span4G > 0x100000000) {
959 						/*see if cross 4G boundary*/
960 						length0 = 0x100000000-address_lo;
961 						pdma_sg->length = (u_int32_t)length0 | IS_SG64_ADDR;
962 						address_hi = address_hi+1;
963 						address_lo = 0;
964 						tmplength = tmplength - (u_int32_t)length0;
965 						sg64s_size += sizeof(struct SG64ENTRY);
966 						psge += sizeof(struct SG64ENTRY);
967 						cdb_sgcount++;
968 					} else {
969 						pdma_sg->length = tmplength | IS_SG64_ADDR;
970 						sg64s_size += sizeof(struct SG64ENTRY);
971 						psge += sizeof(struct SG64ENTRY);
972 						break;
973 					}
974 				}
975 				arccdbsize += sg64s_size;
976 			}
977 			cdb_sgcount++;
978 		}
979 		arcmsr_cdb->sgcount = (u_int8_t)cdb_sgcount;
980 		arcmsr_cdb->DataLength = pcsio->dxfer_len;
981 		if( arccdbsize > 256) {
982 			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
983 		}
984 	} else {
985 		arcmsr_cdb->DataLength = 0;
986 	}
987     srb->arc_cdb_size = arccdbsize;
988     arcmsr_cdb->msgPages = (arccdbsize/256) + ((arccdbsize % 256) ? 1 : 0);
989 }
990 /*
991 **************************************************************************
992 **************************************************************************
993 */
994 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
995 {
996 	u_int32_t cdb_phyaddr_low = (u_int32_t) srb->cdb_phyaddr_low;
997 	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&srb->arcmsr_cdb;
998 
999 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD);
1000 	atomic_add_int(&acb->srboutstandingcount, 1);
1001 	srb->srb_state = ARCMSR_SRB_START;
1002 
1003 	switch (acb->adapter_type) {
1004 	case ACB_ADAPTER_TYPE_A: {
1005 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1006 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low|ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
1007 			} else {
1008 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low);
1009 			}
1010 		}
1011 		break;
1012 	case ACB_ADAPTER_TYPE_B: {
1013 			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
1014 			int ending_index, index;
1015 
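			/* post into the next slot of the host-resident post queue ring; the
			 * following slot is zeroed first, apparently as an end-of-queue marker,
			 * and the doorbell below tells the IOP that a CDB was posted */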
1016 			index = phbbmu->postq_index;
1017 			ending_index = ((index+1) % ARCMSR_MAX_HBB_POSTQUEUE);
1018 			phbbmu->post_qbuffer[ending_index] = 0;
1019 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1020 				phbbmu->post_qbuffer[index] = cdb_phyaddr_low | ARCMSR_SRBPOST_FLAG_SGL_BSIZE;
1021 			} else {
1022 				phbbmu->post_qbuffer[index] = cdb_phyaddr_low;
1023 			}
1024 			index++;
1025 			index %= ARCMSR_MAX_HBB_POSTQUEUE;	/* wrap to 0 past the last slot */
1026 			phbbmu->postq_index = index;
1027 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
1028 		}
1029 		break;
1030     case ACB_ADAPTER_TYPE_C: {
1031             u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;
1032 
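            /* the frame is 32-byte aligned, so the low bits of the posted address are
             * free; they encode the frame size in 64-byte units (bit 0 always set),
             * presumably so the IOP knows how much of the CDB frame to fetch */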
1033             arc_cdb_size = (srb->arc_cdb_size > 0x300) ? 0x300 : srb->arc_cdb_size;
1034             ccb_post_stamp = (cdb_phyaddr_low | ((arc_cdb_size-1) >> 6) | 1);
1035 			cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
1036             if(cdb_phyaddr_hi32)
1037             {
1038 			    CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32);
1039 			    CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
1040             }
1041             else
1042             {
1043 			    CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
1044             }
1045         }
1046         break;
1047 	case ACB_ADAPTER_TYPE_D: {
1048 			struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
1049 			u_int16_t index_stripped;
1050 			u_int16_t postq_index;
1051 			struct InBound_SRB *pinbound_srb;
1052 
1053 			ARCMSR_LOCK_ACQUIRE(&acb->postDone_lock);
1054 			postq_index = phbdmu->postq_index;
1055 			pinbound_srb = (struct InBound_SRB *)&phbdmu->post_qbuffer[postq_index & 0xFF];
1056 			pinbound_srb->addressHigh = srb->cdb_phyaddr_high;
1057 			pinbound_srb->addressLow = srb->cdb_phyaddr_low;
1058 			pinbound_srb->length = srb->arc_cdb_size >> 2;
1059 			arcmsr_cdb->Context = srb->cdb_phyaddr_low;
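			/* bit 0x4000 of postq_index is a toggle that flips each time the low
			 * index wraps to 0, presumably so the IOP can tell a full list from an
			 * empty one when the read and write pointers are equal */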
1060 			if (postq_index & 0x4000) {
1061 				index_stripped = postq_index & 0xFF;
1062 				index_stripped += 1;
1063 				index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
1064 				phbdmu->postq_index = index_stripped ? (index_stripped | 0x4000) : index_stripped;
1065 			} else {
1066 				index_stripped = postq_index;
1067 				index_stripped += 1;
1068 				index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
1069 				phbdmu->postq_index = index_stripped ? index_stripped : (index_stripped | 0x4000);
1070 			}
1071 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inboundlist_write_pointer, postq_index);
1072 			ARCMSR_LOCK_RELEASE(&acb->postDone_lock);
1073 		}
1074 		break;
1075 	}
1076 }
1077 /*
1078 ************************************************************************
1079 ************************************************************************
1080 */
1081 static struct QBUFFER *arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb)
1082 {
1083 	struct QBUFFER *qbuffer=NULL;
1084 
1085 	switch (acb->adapter_type) {
1086 	case ACB_ADAPTER_TYPE_A: {
1087 			struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;
1088 
1089 			qbuffer = (struct QBUFFER *)&phbamu->message_rbuffer;
1090 		}
1091 		break;
1092 	case ACB_ADAPTER_TYPE_B: {
1093 			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
1094 
1095 			qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
1096 		}
1097 		break;
1098 	case ACB_ADAPTER_TYPE_C: {
1099 			struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;
1100 
1101 			qbuffer = (struct QBUFFER *)&phbcmu->message_rbuffer;
1102 		}
1103 		break;
1104 	case ACB_ADAPTER_TYPE_D: {
1105 			struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
1106 
1107 			qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_rbuffer;
1108 		}
1109 		break;
1110 	}
1111 	return(qbuffer);
1112 }
1113 /*
1114 ************************************************************************
1115 ************************************************************************
1116 */
1117 static struct QBUFFER *arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb)
1118 {
1119 	struct QBUFFER *qbuffer = NULL;
1120 
1121 	switch (acb->adapter_type) {
1122 	case ACB_ADAPTER_TYPE_A: {
1123 			struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;
1124 
1125 			qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
1126 		}
1127 		break;
1128 	case ACB_ADAPTER_TYPE_B: {
1129 			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
1130 
1131 			qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
1132 		}
1133 		break;
1134 	case ACB_ADAPTER_TYPE_C: {
1135 			struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;
1136 
1137 			qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
1138 		}
1139 		break;
1140 	case ACB_ADAPTER_TYPE_D: {
1141 			struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
1142 
1143 			qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_wbuffer;
1144 		}
1145 		break;
1146 	}
1147 	return(qbuffer);
1148 }
1149 /*
1150 **************************************************************************
1151 **************************************************************************
1152 */
1153 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1154 {
1155 	switch (acb->adapter_type) {
1156 	case ACB_ADAPTER_TYPE_A: {
1157 			/* let IOP know data has been read */
1158 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
1159 		}
1160 		break;
1161 	case ACB_ADAPTER_TYPE_B: {
1162 			/* let IOP know data has been read */
1163 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
1164 		}
1165 		break;
1166 	case ACB_ADAPTER_TYPE_C: {
1167 			/* let IOP know data has been read */
1168 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
1169 		}
1170 		break;
1171 	case ACB_ADAPTER_TYPE_D: {
1172 			/* let IOP know data has been read */
1173 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ);
1174 		}
1175 		break;
1176 	}
1177 }
1178 /*
1179 **************************************************************************
1180 **************************************************************************
1181 */
1182 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1183 {
1184 	switch (acb->adapter_type) {
1185 	case ACB_ADAPTER_TYPE_A: {
1186 			/*
1187 			** ring the inbound doorbell to tell the IOP that the driver data write is done,
1188 			** then wait for the reply on the next hardware interrupt before posting the next Qbuffer
1189 			*/
1190 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
1191 		}
1192 		break;
1193 	case ACB_ADAPTER_TYPE_B: {
1194 			/*
1195 			** ring the inbound doorbell to tell the IOP that the driver data write is done,
1196 			** then wait for the reply on the next hardware interrupt before posting the next Qbuffer
1197 			*/
1198 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
1199 		}
1200 		break;
1201 	case ACB_ADAPTER_TYPE_C: {
1202 			/*
1203 			** ring the inbound doorbell to tell the IOP that the driver data write is done,
1204 			** then wait for the reply on the next hardware interrupt before posting the next Qbuffer
1205 			*/
1206 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
1207 		}
1208 		break;
1209 	case ACB_ADAPTER_TYPE_D: {
1210 			/*
1211 			** ring the inbound doorbell to tell the IOP that the driver data write is done,
1212 			** then wait for the reply on the next hardware interrupt before posting the next Qbuffer
1213 			*/
1214 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_IN_READY);
1215 		}
1216 		break;
1217 	}
1218 }
1219 /*
1220 ************************************************************************
1221 ************************************************************************
1222 */
1223 static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
1224 {
1225 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1226 	CHIP_REG_WRITE32(HBA_MessageUnit,
1227 	0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1228 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
1229 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1230 			, acb->pci_unit);
1231 	}
1232 }
1233 /*
1234 ************************************************************************
1235 ************************************************************************
1236 */
1237 static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
1238 {
1239 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1240 	CHIP_REG_WRITE32(HBB_DOORBELL,
1241 	0, drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
1242 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
1243 		kprintf( "arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1244 			, acb->pci_unit);
1245 	}
1246 }
1247 /*
1248 ************************************************************************
1249 ************************************************************************
1250 */
1251 static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb)
1252 {
1253 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1254 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1255 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1256 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
1257 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
1258 	}
1259 }
1260 /*
1261 ************************************************************************
1262 ************************************************************************
1263 */
1264 static void arcmsr_stop_hbd_bgrb(struct AdapterControlBlock *acb)
1265 {
1266 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1267 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1268 	if(!arcmsr_hbd_wait_msgint_ready(acb)) {
1269 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
1270 	}
1271 }
1272 /*
1273 ************************************************************************
1274 ************************************************************************
1275 */
1276 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1277 {
1278 	switch (acb->adapter_type) {
1279 	case ACB_ADAPTER_TYPE_A: {
1280 			arcmsr_stop_hba_bgrb(acb);
1281 		}
1282 		break;
1283 	case ACB_ADAPTER_TYPE_B: {
1284 			arcmsr_stop_hbb_bgrb(acb);
1285 		}
1286 		break;
1287 	case ACB_ADAPTER_TYPE_C: {
1288 			arcmsr_stop_hbc_bgrb(acb);
1289 		}
1290 		break;
1291 	case ACB_ADAPTER_TYPE_D: {
1292 			arcmsr_stop_hbd_bgrb(acb);
1293 		}
1294 		break;
1295 	}
1296 }
1297 /*
1298 ************************************************************************
1299 ************************************************************************
1300 */
1301 static void arcmsr_poll(struct cam_sim *psim)
1302 {
1303 	struct AdapterControlBlock *acb;
1304 	int	mutex;
1305 
1306 	acb = (struct AdapterControlBlock *)cam_sim_softc(psim);
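	/* acquire the ISR lock only if it is not already held */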
1307 	mutex = lockstatus(&acb->isr_lock, curthread);
1308 	if( mutex == 0 )
1309 		ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
1310 	arcmsr_interrupt(acb);
1311 	if( mutex == 0 )
1312 		ARCMSR_LOCK_RELEASE(&acb->isr_lock);
1313 }
1314 /*
1315 **************************************************************************
1316 **************************************************************************
1317 */
1318 static u_int32_t arcmsr_Read_iop_rqbuffer_data_D(struct AdapterControlBlock *acb,
1319     struct QBUFFER *prbuffer) {
1320 
1321 	u_int8_t *pQbuffer;
1322 	u_int8_t *buf1 = NULL;
1323 	u_int32_t *iop_data, *buf2 = NULL;
1324 	u_int32_t iop_len, data_len;
1325 
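	/* read the IOP buffer in 32-bit words into a temporary buffer first (the
	 * type C/D message RAM is presumably word-accessible only), then feed the
	 * bytes into the driver's rqbuffer ring below */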
1326 	iop_data = (u_int32_t *)prbuffer->data;
1327 	iop_len = (u_int32_t)prbuffer->data_len;
1328 	if ( iop_len > 0 )
1329 	{
1330 		buf1 = kmalloc(128, M_DEVBUF, M_NOWAIT | M_ZERO);
1331 		buf2 = (u_int32_t *)buf1;
1332 		if( buf1 == NULL)
1333 			return (0);
1334 		data_len = iop_len;
1335 		while(data_len >= 4)
1336 		{
1337 			*buf2++ = *iop_data++;
1338 			data_len -= 4;
1339 		}
1340 		if(data_len)
1341 			*buf2 = *iop_data;
1342 		buf2 = (u_int32_t *)buf1;
1343 	}
1344 	while (iop_len > 0) {
1345 		pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex];
1346 		*pQbuffer = *buf1;
1347 		acb->rqbuf_lastindex++;
1348 		/* wrap index to 0 at the end of the ring */
1349 		acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1350 		buf1++;
1351 		iop_len--;
1352 	}
1353 	if(buf2)
1354 		kfree( (u_int8_t *)buf2, M_DEVBUF);
1355 	/* let IOP know data has been read */
1356 	arcmsr_iop_message_read(acb);
1357 	return (1);
1358 }
1359 /*
1360 **************************************************************************
1361 **************************************************************************
1362 */
1363 static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
1364     struct QBUFFER *prbuffer) {
1365 
1366 	u_int8_t *pQbuffer;
1367 	u_int8_t *iop_data;
1368 	u_int32_t iop_len;
1369 
1370 	if(acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
1371 		return(arcmsr_Read_iop_rqbuffer_data_D(acb, prbuffer));
1372 	}
1373 	iop_data = (u_int8_t *)prbuffer->data;
1374 	iop_len = (u_int32_t)prbuffer->data_len;
1375 	while (iop_len > 0) {
1376 		pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex];
1377 		*pQbuffer = *iop_data;
1378 		acb->rqbuf_lastindex++;
1379 		/* if last, index number set it to 0 */
1380 		acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1381 		iop_data++;
1382 		iop_len--;
1383 	}
1384 	/* let IOP know data has been read */
1385 	arcmsr_iop_message_read(acb);
1386 	return (1);
1387 }
1388 /*
1389 **************************************************************************
1390 **************************************************************************
1391 */
1392 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1393 {
1394 	struct QBUFFER *prbuffer;
1395 	int my_empty_len;
1396 
1397 	/* check whether this IOP data would overflow the driver's rqbuffer */
1398 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1399 	prbuffer = arcmsr_get_iop_rqbuffer(acb);
1400 	my_empty_len = (acb->rqbuf_lastindex - acb->rqbuf_firstindex - 1) &
1401 	    (ARCMSR_MAX_QBUFFER-1);
1402 	if(my_empty_len >= prbuffer->data_len) {
1403 		if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
1404 			acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
1405 	} else {
1406 		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
1407 	}
1408 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1409 }
1410 /*
1411 **********************************************************************
1412 **********************************************************************
1413 */
1414 static void arcmsr_Write_data_2iop_wqbuffer_D(struct AdapterControlBlock *acb)
1415 {
1416 	u_int8_t *pQbuffer;
1417 	struct QBUFFER *pwbuffer;
1418 	u_int8_t *buf1 = NULL;
1419 	u_int32_t *iop_data, *buf2 = NULL;
1420 	u_int32_t allxfer_len = 0, data_len;
1421 
1422 	if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
1423 		buf1 = kmalloc(128, M_DEVBUF, M_NOWAIT | M_ZERO);
1424 		buf2 = (u_int32_t *)buf1;
1425 		if( buf1 == NULL)
1426 			return;
1427 
1428 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1429 		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1430 		iop_data = (u_int32_t *)pwbuffer->data;
1431 		while((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
1432 			&& (allxfer_len < 124)) {
1433 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
1434 			*buf1 = *pQbuffer;
1435 			acb->wqbuf_firstindex++;
1436 			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1437 			buf1++;
1438 			allxfer_len++;
1439 		}
1440 		pwbuffer->data_len = allxfer_len;
1441 		data_len = allxfer_len;
1442 		buf1 = (u_int8_t *)buf2;
1443 		while(data_len >= 4)
1444 		{
1445 			*iop_data++ = *buf2++;
1446 			data_len -= 4;
1447 		}
1448 		if(data_len)
1449 			*iop_data = *buf2;
1450 		kfree( buf1, M_DEVBUF);
1451 		arcmsr_iop_message_wrote(acb);
1452 	}
1453 }
1454 /*
1455 **********************************************************************
1456 **********************************************************************
1457 */
1458 static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb)
1459 {
1460 	u_int8_t *pQbuffer;
1461 	struct QBUFFER *pwbuffer;
1462 	u_int8_t *iop_data;
1463 	int32_t allxfer_len=0;
1464 
1465 	if(acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
1466 		arcmsr_Write_data_2iop_wqbuffer_D(acb);
1467 		return;
1468 	}
1469 	if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
1470 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1471 		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1472 		iop_data = (u_int8_t *)pwbuffer->data;
1473 		while((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
1474 			&& (allxfer_len < 124)) {
1475 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
1476 			*iop_data = *pQbuffer;
1477 			acb->wqbuf_firstindex++;
1478 			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1479 			iop_data++;
1480 			allxfer_len++;
1481 		}
1482 		pwbuffer->data_len = allxfer_len;
1483 		arcmsr_iop_message_wrote(acb);
1484 	}
1485 }
1486 /*
1487 **************************************************************************
1488 **************************************************************************
1489 */
1490 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
1491 {
1492 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1493 	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
1494 	/*
1495 	*****************************************************************
1496 	**   check if there are any mail packages from the user space program
1497 	**   in my post bag; now is the time to send them to Areca's firmware
1498 	*****************************************************************
1499 	*/
1500 	if(acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
1501 		arcmsr_Write_data_2iop_wqbuffer(acb);
1502 	}
1503 	if(acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
1504 		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
1505 	}
1506 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1507 }
1508 /*
1509 **************************************************************************
1510 **************************************************************************
1511 */
1512 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb)
1513 {
1514 /*
1515 	if (ccb->ccb_h.status != CAM_REQ_CMP)
1516 		kprintf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x,"
1517 		    "failure status=%x\n", ccb->ccb_h.target_id,
1518 		    ccb->ccb_h.target_lun, ccb->ccb_h.status);
1519 	else
1520 		kprintf("arcmsr_rescanLun_cb: Rescan lun successfully!\n");
1521 */
1522 	xpt_free_path(ccb->ccb_h.path);
1523 	xpt_free_ccb(ccb);
1524 }
1525 
1526 static void	arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun)
1527 {
1528 	struct cam_path     *path;
1529 	union ccb           *ccb;
1530 
1531 	if ((ccb = (union ccb *)xpt_alloc_ccb()) == NULL)
1532 		return;
1533 	if (xpt_create_path(&path, NULL, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP)
1534 	{
1535 		xpt_free_ccb(ccb);
1536 		return;
1537 	}
1538 /*	kprintf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */
1539 	bzero(ccb, sizeof(union ccb));
1540 	xpt_setup_ccb(&ccb->ccb_h, path, 5);
1541 	ccb->ccb_h.func_code = XPT_SCAN_LUN;
1542 	ccb->ccb_h.cbfcnp = arcmsr_rescanLun_cb;
1543 	ccb->crcn.flags = CAM_FLAG_NONE;
1544 	xpt_action(ccb);
1545 }
1546 
1547 
1548 static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun)
1549 {
1550 	struct CommandControlBlock *srb;
1551 	u_int32_t intmask_org;
1552 	int i;
1553 
1554 	/* disable all outbound interrupts */
1555 	intmask_org = arcmsr_disable_allintr(acb);
1556 	for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++)
1557 	{
1558 		srb = acb->psrb_pool[i];
1559 		if (srb->srb_state == ARCMSR_SRB_START)
1560 		{
1561 			if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun))
1562 			{
1563 				srb->srb_state = ARCMSR_SRB_ABORTED;
1564 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
1565 				arcmsr_srb_complete(srb, 1);
1566 				kprintf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb);
1567 			}
1568 		}
1569 	}
1570 	/* enable outbound Post Queue, outbound doorbell Interrupt */
1571 	arcmsr_enable_allintr(acb, intmask_org);
1572 }
1573 /*
1574 **************************************************************************
1575 **************************************************************************
1576 */
1577 static void arcmsr_dr_handle(struct AdapterControlBlock *acb) {
1578 	u_int32_t	devicemap;
1579 	u_int32_t	target, lun;
1580 	u_int32_t	deviceMapCurrent[4]={0};
1581 	u_int8_t	*pDevMap;
1582 
1583 	switch (acb->adapter_type) {
1584 	case ACB_ADAPTER_TYPE_A:
1585 		devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1586 		for (target = 0; target < 4; target++)
1587 		{
1588 			deviceMapCurrent[target] = bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1589 			devicemap += 4;
1590 		}
1591 		break;
1592 
1593 	case ACB_ADAPTER_TYPE_B:
1594 		devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1595 		for (target = 0; target < 4; target++)
1596 		{
1597 			deviceMapCurrent[target] = bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap);
1598 			devicemap += 4;
1599 		}
1600 		break;
1601 
1602 	case ACB_ADAPTER_TYPE_C:
1603 		devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1604 		for (target = 0; target < 4; target++)
1605 		{
1606 			deviceMapCurrent[target] = bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1607 			devicemap += 4;
1608 		}
1609 		break;
1610 	case ACB_ADAPTER_TYPE_D:
1611 		devicemap = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1612 		for (target = 0; target < 4; target++)
1613 		{
1614 			deviceMapCurrent[target] = bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1615 			devicemap += 4;
1616 		}
1617 		break;
1618 	}
1619 
1620 	if(acb->acb_flags & ACB_F_BUS_HANG_ON)
1621 	{
1622 		acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
1623 	}
1624 	/*
1625 	** adapter posted CONFIG message
1626 	** copy the new map, note if there are differences with the current map
1627 	*/
1628 	pDevMap = (u_int8_t *)&deviceMapCurrent[0];
1629 	for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++)
1630 	{
1631 		if (*pDevMap != acb->device_map[target])
1632 		{
1633 			u_int8_t difference, bit_check;
1634 
1635 			difference = *pDevMap ^ acb->device_map[target];
1636 			for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++)
1637 			{
1638 				bit_check = (1 << lun);		/* check bit from 0....31 */
1639 				if(difference & bit_check)
1640 				{
1641 					if(acb->device_map[target] & bit_check)
1642 					{	/* unit departed */
1643 						kprintf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n", target, lun);
1644 						arcmsr_abort_dr_ccbs(acb, target, lun);
1645 						arcmsr_rescan_lun(acb, target, lun);
1646 						acb->devstate[target][lun] = ARECA_RAID_GONE;
1647 					}
1648 					else
1649 					{	/* unit arrived */
1650 						kprintf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n", target, lun);
1651 						arcmsr_rescan_lun(acb, target, lun);
1652 						acb->devstate[target][lun] = ARECA_RAID_GOOD;
1653 					}
1654 				}
1655 			}
1656 /*			kprintf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n", target, acb->device_map[target], target, *pDevMap); */
1657 			acb->device_map[target] = *pDevMap;
1658 		}
1659 		pDevMap++;
1660 	}
1661 }
1662 /*
1663 **************************************************************************
1664 **************************************************************************
1665 */
1666 static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) {
1667 	u_int32_t outbound_message;
1668 
1669 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1670 	outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]);
1671 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1672 		arcmsr_dr_handle( acb );
1673 }
1674 /*
1675 **************************************************************************
1676 **************************************************************************
1677 */
1678 static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) {
1679 	u_int32_t outbound_message;
1680 
1681 	/* clear interrupts */
1682 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1683 	outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]);
1684 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1685 		arcmsr_dr_handle( acb );
1686 }
1687 /*
1688 **************************************************************************
1689 **************************************************************************
1690 */
1691 static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) {
1692 	u_int32_t outbound_message;
1693 
1694 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
1695 	outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]);
1696 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1697 		arcmsr_dr_handle( acb );
1698 }
1699 /*
1700 **************************************************************************
1701 **************************************************************************
1702 */
1703 static void arcmsr_hbd_message_isr(struct AdapterControlBlock *acb) {
1704 	u_int32_t outbound_message;
1705 
1706 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);
1707 	outbound_message = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[0]);
1708 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1709 		arcmsr_dr_handle( acb );
1710 }
1711 /*
1712 **************************************************************************
1713 **************************************************************************
1714 */
1715 static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
1716 {
1717 	u_int32_t doorbell_status;
1718 
1719 	/*
1720 	*******************************************************************
1721 	**  Maybe we should check here whether wrqbuffer_lock is held
1722 	**  DOORBELL: din! don!
1723 	**  check if there is any mail from the firmware that needs to be picked up
1724 	*******************************************************************
1725 	*/
1726 	doorbell_status = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
1727 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
1728 	if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
1729 		arcmsr_iop2drv_data_wrote_handle(acb);
1730 	}
1731 	if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
1732 		arcmsr_iop2drv_data_read_handle(acb);
1733 	}
1734 }
1735 /*
1736 **************************************************************************
1737 **************************************************************************
1738 */
1739 static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
1740 {
1741 	u_int32_t doorbell_status;
1742 
1743 	/*
1744 	*******************************************************************
1745 	**  Maybe we should check here whether wrqbuffer_lock is held
1746 	**  DOORBELL: din! don!
1747 	**  check if there is any mail from the firmware that needs to be picked up
1748 	*******************************************************************
1749 	*/
1750 	doorbell_status = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
1751 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, doorbell_status); /* clear doorbell interrupt */
1752 	if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
1753 		arcmsr_iop2drv_data_wrote_handle(acb);
1754 	}
1755 	if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
1756 		arcmsr_iop2drv_data_read_handle(acb);
1757 	}
1758 	if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
1759 		arcmsr_hbc_message_isr(acb);    /* messenger of "driver to iop commands" */
1760 	}
1761 }
1762 /*
1763 **************************************************************************
1764 **************************************************************************
1765 */
1766 static void arcmsr_hbd_doorbell_isr(struct AdapterControlBlock *acb)
1767 {
1768 	u_int32_t doorbell_status;
1769 
1770 	/*
1771 	*******************************************************************
1772 	**  Maybe we should check here whether wrqbuffer_lock is held
1773 	**  DOORBELL: din! don!
1774 	**  check if there is any mail from the firmware that needs to be picked up
1775 	*******************************************************************
1776 	*/
1777 	doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE;
1778 	if(doorbell_status)
1779 		CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
1780 	while( doorbell_status & ARCMSR_HBDMU_F0_DOORBELL_CAUSE ) {
1781 		if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_WRITE_OK) {
1782 			arcmsr_iop2drv_data_wrote_handle(acb);
1783 		}
1784 		if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_READ_OK) {
1785 			arcmsr_iop2drv_data_read_handle(acb);
1786 		}
1787 		if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) {
1788 			arcmsr_hbd_message_isr(acb);    /* messenger of "driver to iop commands" */
1789 		}
1790 		doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE;
1791 		if(doorbell_status)
1792 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
1793 	}
1794 }
1795 /*
1796 **************************************************************************
1797 **************************************************************************
1798 */
1799 static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1800 {
1801 	u_int32_t flag_srb;
1802 	u_int16_t error;
1803 
1804 	/*
1805 	*****************************************************************************
1806 	**               areca cdb command done
1807 	*****************************************************************************
1808 	*/
1809 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1810 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1811 	while((flag_srb = CHIP_REG_READ32(HBA_MessageUnit,
1812 		0, outbound_queueport)) != 0xFFFFFFFF) {
1813 		/* check if command done with no error*/
1814 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
1815 		arcmsr_drain_donequeue(acb, flag_srb, error);
1816 	}	/*drain reply FIFO*/
1817 }
1818 /*
1819 **************************************************************************
1820 **************************************************************************
1821 */
1822 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1823 {
1824 	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
1825 	u_int32_t flag_srb;
1826 	int index;
1827 	u_int16_t error;
1828 
1829 	/*
1830 	*****************************************************************************
1831 	**               areca cdb command done
1832 	*****************************************************************************
1833 	*/
1834 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1835 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1836 	index = phbbmu->doneq_index;
1837 	while((flag_srb = phbbmu->done_qbuffer[index]) != 0) {
1838 		phbbmu->done_qbuffer[index] = 0;
1839 		index++;
1840 		index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
1841 		phbbmu->doneq_index = index;
1842 		/* check if command done with no error*/
1843 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
1844 		arcmsr_drain_donequeue(acb, flag_srb, error);
1845 	}	/*drain reply FIFO*/
1846 }
1847 /*
1848 **************************************************************************
1849 **************************************************************************
1850 */
1851 static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
1852 {
1853 	u_int32_t flag_srb,throttling = 0;
1854 	u_int16_t error;
1855 
1856 	/*
1857 	*****************************************************************************
1858 	**               areca cdb command done
1859 	*****************************************************************************
1860 	*/
1861 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
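	/*
	** after every ARCMSR_HBC_ISR_THROTTLING_LEVEL completions, ring the
	** inbound doorbell (POSTQUEUE_THROTTLING), presumably so the IOP
	** paces its post queue while we drain it
	*/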
1862 	do {
1863 		flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
1864 		/* check if command done with no error*/
1865 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
1866 		arcmsr_drain_donequeue(acb, flag_srb, error);
1867 		throttling++;
1868 		if(throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
1869 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
1870 			throttling = 0;
1871 		}
1872 	} while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR);
1873 }
1874 /*
1875 **********************************************************************
1876 **
1877 **********************************************************************
1878 */
1879 static uint16_t arcmsr_get_doneq_index(struct HBD_MessageUnit0 *phbdmu)
1880 {
1881 	uint16_t doneq_index, index_stripped;
1882 
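	/*
	** the low byte of doneq_index is the ring position; bit 0x4000 is a
	** wrap flag that toggles each time the position wraps past
	** ARCMSR_MAX_HBD_POSTQUEUE, presumably so the IOP can distinguish a
	** freshly wrapped read pointer from a stale one
	*/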
1883 	doneq_index = phbdmu->doneq_index;
1884 	if (doneq_index & 0x4000) {
1885 		index_stripped = doneq_index & 0xFF;
1886 		index_stripped += 1;
1887 		index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
1888 		phbdmu->doneq_index = index_stripped ?
1889 		    (index_stripped | 0x4000) : index_stripped;
1890 	} else {
1891 		index_stripped = doneq_index;
1892 		index_stripped += 1;
1893 		index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
1894 		phbdmu->doneq_index = index_stripped ?
1895 		    index_stripped : (index_stripped | 0x4000);
1896 	}
1897 	return (phbdmu->doneq_index);
1898 }
1899 /*
1900 **************************************************************************
1901 **************************************************************************
1902 */
1903 static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb)
1904 {
1905 	struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
1906 	u_int32_t outbound_write_pointer;
1907 	u_int32_t addressLow;
1908 	uint16_t doneq_index;
1909 	u_int16_t error;
1910 	/*
1911 	*****************************************************************************
1912 	**               areca cdb command done
1913 	*****************************************************************************
1914 	*/
1915 	if((CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause) &
1916 	    ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT) == 0)
1917 	    return;
1918 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1919 		BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
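	/*
	** done_qbuffer[0].addressLow holds the IOP's current write pointer;
	** completed SRB addresses are read from done_qbuffer[1] onward
	*/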
1920 	outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow;
1921 	doneq_index = phbdmu->doneq_index;
1922 	while ((doneq_index & 0xFF) != (outbound_write_pointer & 0xFF)) {
1923 		doneq_index = arcmsr_get_doneq_index(phbdmu);
1924 		addressLow = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow;
1925 		error = (addressLow & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
1926 		arcmsr_drain_donequeue(acb, addressLow, error); /*Check if command done with no error */
1927 		CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index);
1928 		outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow;
1929 	}
1930 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_interrupt_cause, ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT_CLEAR);
1931 	CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause); /*Dummy ioread32 to force pci flush */
1932 }
1933 /*
1934 **********************************************************************
1935 **********************************************************************
1936 */
1937 static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb)
1938 {
1939 	u_int32_t outbound_intStatus;
1940 	/*
1941 	*********************************************
1942 	**   check outbound intstatus
1943 	*********************************************
1944 	*/
1945 	outbound_intStatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
1946 	if(!outbound_intStatus) {
		/*it must be a shared irq*/
1948 		return;
1949 	}
1950 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intStatus); /*clear interrupt*/
1951 	/* MU doorbell interrupts*/
1952 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
1953 		arcmsr_hba_doorbell_isr(acb);
1954 	}
1955 	/* MU post queue interrupts*/
1956 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
1957 		arcmsr_hba_postqueue_isr(acb);
1958 	}
1959 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1960 		arcmsr_hba_message_isr(acb);
1961 	}
1962 }
1963 /*
1964 **********************************************************************
1965 **********************************************************************
1966 */
1967 static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb)
1968 {
1969 	u_int32_t outbound_doorbell;
1970 	/*
1971 	*********************************************
1972 	**   check outbound intstatus
1973 	*********************************************
1974 	*/
1975 	outbound_doorbell = CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & acb->outbound_int_enable;
1976 	if(!outbound_doorbell) {
		/*it must be a shared irq*/
1978 		return;
1979 	}
1980 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */
1981 	CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell);
1982 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1983 	/* MU ioctl transfer doorbell interrupts*/
1984 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
1985 		arcmsr_iop2drv_data_wrote_handle(acb);
1986 	}
1987 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
1988 		arcmsr_iop2drv_data_read_handle(acb);
1989 	}
1990 	/* MU post queue interrupts*/
1991 	if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1992 		arcmsr_hbb_postqueue_isr(acb);
1993 	}
1994 	if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1995 		arcmsr_hbb_message_isr(acb);
1996 	}
1997 }
1998 /*
1999 **********************************************************************
2000 **********************************************************************
2001 */
2002 static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb)
2003 {
2004 	u_int32_t host_interrupt_status;
2005 	/*
2006 	*********************************************
2007 	**   check outbound intstatus
2008 	*********************************************
2009 	*/
2010 	host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) &
2011 		(ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
2012 		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
2013 	if(!host_interrupt_status) {
		/*it must be a shared irq*/
2015 		return;
2016 	}
2017 	do {
2018 		/* MU doorbell interrupts*/
2019 		if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
2020 			arcmsr_hbc_doorbell_isr(acb);
2021 		}
2022 		/* MU post queue interrupts*/
2023 		if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
2024 			arcmsr_hbc_postqueue_isr(acb);
2025 		}
2026 		host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status);
2027 	} while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
2028 }
2029 /*
2030 **********************************************************************
2031 **********************************************************************
2032 */
2033 static void arcmsr_handle_hbd_isr( struct AdapterControlBlock *acb)
2034 {
2035 	u_int32_t host_interrupt_status;
2036 	u_int32_t intmask_org;
2037 	/*
2038 	*********************************************
2039 	**   check outbound intstatus
2040 	*********************************************
2041 	*/
2042 	host_interrupt_status = CHIP_REG_READ32(HBD_MessageUnit, 0, host_int_status) & acb->outbound_int_enable;
2043 	if(!(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_INT)) {
		/*it must be a shared irq*/
2045 		return;
2046 	}
2047 	/* disable outbound interrupt */
2048 	intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable);	/* save the current interrupt enable mask */
2049 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE);
2050 	/* MU doorbell interrupts*/
2051 	if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_DOORBELL_INT) {
2052 		arcmsr_hbd_doorbell_isr(acb);
2053 	}
2054 	/* MU post queue interrupts*/
2055 	if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_POSTQUEUE_INT) {
2056 		arcmsr_hbd_postqueue_isr(acb);
2057 	}
2058 	/* enable all outbound interrupt */
2059 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | ARCMSR_HBDMU_ALL_INT_ENABLE);
2060 //	CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable);
2061 }
2062 /*
2063 ******************************************************************************
2064 ******************************************************************************
2065 */
2066 static void arcmsr_interrupt(struct AdapterControlBlock *acb)
2067 {
2068 	switch (acb->adapter_type) {
2069 	case ACB_ADAPTER_TYPE_A:
2070 		arcmsr_handle_hba_isr(acb);
2071 		break;
2072 	case ACB_ADAPTER_TYPE_B:
2073 		arcmsr_handle_hbb_isr(acb);
2074 		break;
2075 	case ACB_ADAPTER_TYPE_C:
2076 		arcmsr_handle_hbc_isr(acb);
2077 		break;
2078 	case ACB_ADAPTER_TYPE_D:
2079 		arcmsr_handle_hbd_isr(acb);
2080 		break;
2081 	default:
2082 		kprintf("arcmsr%d: interrupt service,"
2083 		" unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type);
2084 		break;
2085 	}
2086 }
2087 /*
2088 **********************************************************************
2089 **********************************************************************
2090 */
2091 static void arcmsr_intr_handler(void *arg)
2092 {
2093 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
2094 
2095 	ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
2096 	arcmsr_interrupt(acb);
2097 	ARCMSR_LOCK_RELEASE(&acb->isr_lock);
2098 }
2099 /*
2100 ******************************************************************************
2101 ******************************************************************************
2102 */
2103 static void	arcmsr_polling_devmap(void *arg)
2104 {
2105 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
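	/*
	** ask the firmware for its current device map; the reply arrives via
	** the message ISRs, which call arcmsr_dr_handle()
	*/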
2106 	switch (acb->adapter_type) {
2107 	case ACB_ADAPTER_TYPE_A:
2108 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2109 		break;
2110 
2111 	case ACB_ADAPTER_TYPE_B:
2112 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
2113 		break;
2114 
2115 	case ACB_ADAPTER_TYPE_C:
2116 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2117 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2118 		break;
2119 
2120 	case ACB_ADAPTER_TYPE_D:
2121 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2122 		break;
2123 	}
2124 
2125 	if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)
2126 	{
2127 		callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb);	/* poll again in 5 seconds */
2128 	}
2129 }
2130 
2131 /*
2132 *******************************************************************************
2133 **
2134 *******************************************************************************
2135 */
2136 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
2137 {
2138 	u_int32_t intmask_org;
2139 
2140 	if(acb != NULL) {
2141 		/* stop adapter background rebuild */
2142 		if(acb->acb_flags & ACB_F_MSG_START_BGRB) {
2143 			intmask_org = arcmsr_disable_allintr(acb);
2144 			arcmsr_stop_adapter_bgrb(acb);
2145 			arcmsr_flush_adapter_cache(acb);
2146 			arcmsr_enable_allintr(acb, intmask_org);
2147 		}
2148 	}
2149 }
2150 /*
2151 ***********************************************************************
2152 **
2153 ************************************************************************
2154 */
2155 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
2156 {
2157 	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
2158 	u_int32_t retvalue = EINVAL;
2159 
2160 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) arg;
2161 	if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
2162 		return retvalue;
2163 	}
2164 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2165 	switch(ioctl_cmd) {
2166 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
2167 			u_int8_t *pQbuffer;
2168 			u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer;
2169 			u_int32_t allxfer_len=0;
2170 
2171 			while((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
2172 				&& (allxfer_len < 1031)) {
2173 				/*copy READ QBUFFER to srb*/
2174 				pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
2175 				*ptmpQbuffer = *pQbuffer;
2176 				acb->rqbuf_firstindex++;
2177 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
2178 				/*if last index number set it to 0 */
2179 				ptmpQbuffer++;
2180 				allxfer_len++;
2181 			}
2182 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2183 				struct QBUFFER *prbuffer;
2184 
2185 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2186 				prbuffer = arcmsr_get_iop_rqbuffer(acb);
2187 				if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
2188 					acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2189 			}
2190 			pcmdmessagefld->cmdmessage.Length = allxfer_len;
2191 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2192 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2193 		}
2194 		break;
2195 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2196 			u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
2197 			u_int8_t *pQbuffer;
2198 			u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer;
2199 
2200 			user_len = pcmdmessagefld->cmdmessage.Length;
2201 			/*check if data xfer length of this request will overflow my array qbuffer */
2202 			wqbuf_lastindex = acb->wqbuf_lastindex;
2203 			wqbuf_firstindex = acb->wqbuf_firstindex;
2204 			if(wqbuf_lastindex != wqbuf_firstindex) {
2205 				arcmsr_Write_data_2iop_wqbuffer(acb);
2206 				pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR;
2207 			} else {
2208 				my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1) &
2209 				    (ARCMSR_MAX_QBUFFER - 1);
2210 				if(my_empty_len >= user_len) {
2211 					while(user_len > 0) {
2212 						/*copy srb data to wqbuffer*/
2213 						pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
2214 						*pQbuffer = *ptmpuserbuffer;
2215 						acb->wqbuf_lastindex++;
2216 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2217 						/*if last index number set it to 0 */
2218 						ptmpuserbuffer++;
2219 						user_len--;
2220 					}
					/* post first Qbuffer */
2222 					if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2223 						acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2224 						arcmsr_Write_data_2iop_wqbuffer(acb);
2225 					}
2226 					pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2227 				} else {
2228 					pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR;
2229 				}
2230 			}
2231 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2232 		}
2233 		break;
2234 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2235 			u_int8_t *pQbuffer = acb->rqbuffer;
2236 
2237 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2238 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2239 				arcmsr_iop_message_read(acb);
				/* signature, let IOP know the data has been read */
2241 			}
2242 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2243 			acb->rqbuf_firstindex = 0;
2244 			acb->rqbuf_lastindex = 0;
2245 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2246 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2247 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2248 		}
2249 		break;
2250 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
2251 		{
2252 			u_int8_t *pQbuffer = acb->wqbuffer;
2253 
2254 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2255 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2256 				arcmsr_iop_message_read(acb);
2257 				/* signature, let IOP know the data has been read */
2258 			}
2259 			acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
2260 			acb->wqbuf_firstindex = 0;
2261 			acb->wqbuf_lastindex = 0;
2262 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2263 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2264 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2265 		}
2266 		break;
2267 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
2268 			u_int8_t *pQbuffer;
2269 
2270 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2271 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2272 				arcmsr_iop_message_read(acb);
2273 				/* signature, let IOP know the data has been read */
2274 			}
2275 			acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
2276 					|ACB_F_MESSAGE_RQBUFFER_CLEARED
2277 					|ACB_F_MESSAGE_WQBUFFER_READ);
2278 			acb->rqbuf_firstindex = 0;
2279 			acb->rqbuf_lastindex = 0;
2280 			acb->wqbuf_firstindex = 0;
2281 			acb->wqbuf_lastindex = 0;
2282 			pQbuffer = acb->rqbuffer;
2283 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
2284 			pQbuffer = acb->wqbuffer;
2285 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
2286 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2287 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2288 		}
2289 		break;
2290 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
2291 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
2292 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2293 		}
2294 		break;
2295 	case ARCMSR_MESSAGE_SAY_HELLO: {
2296 			char *hello_string = "Hello! I am ARCMSR";
2297 			u_int8_t *puserbuffer = (u_int8_t *)pcmdmessagefld->messagedatabuffer;
2298 
2299 			memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string));
2304 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2305 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2306 		}
2307 		break;
2308 	case ARCMSR_MESSAGE_SAY_GOODBYE: {
2309 			arcmsr_iop_parking(acb);
2310 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2311 		}
2312 		break;
2313 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
2314 			arcmsr_flush_adapter_cache(acb);
2315 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2316 		}
2317 		break;
2318 	}
2319 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2320 	return (retvalue);
2321 }
2322 /*
2323 **************************************************************************
2324 **************************************************************************
2325 */
2326 static void arcmsr_free_srb(struct CommandControlBlock *srb)
2327 {
2328 	struct AdapterControlBlock	*acb;
2329 
2330 	acb = srb->acb;
2331 	ARCMSR_LOCK_ACQUIRE(&acb->srb_lock);
2332 	srb->srb_state = ARCMSR_SRB_DONE;
2333 	srb->srb_flags = 0;
2334 	acb->srbworkingQ[acb->workingsrb_doneindex] = srb;
2335 	acb->workingsrb_doneindex++;
2336 	acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM;
2337 	ARCMSR_LOCK_RELEASE(&acb->srb_lock);
2338 }
2339 /*
2340 **************************************************************************
2341 **************************************************************************
2342 */
2343 static struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb)
2344 {
2345 	struct CommandControlBlock *srb = NULL;
2346 	u_int32_t workingsrb_startindex, workingsrb_doneindex;
2347 
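	/*
	** srbworkingQ is a ring of free SRBs: arcmsr_free_srb() returns SRBs
	** at workingsrb_doneindex and this routine hands them out from
	** workingsrb_startindex; if advancing the start index would catch the
	** done index, the pool is exhausted and NULL is returned
	*/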
2348 	ARCMSR_LOCK_ACQUIRE(&acb->srb_lock);
2349 	workingsrb_doneindex = acb->workingsrb_doneindex;
2350 	workingsrb_startindex = acb->workingsrb_startindex;
2351 	srb = acb->srbworkingQ[workingsrb_startindex];
2352 	workingsrb_startindex++;
2353 	workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM;
2354 	if(workingsrb_doneindex != workingsrb_startindex) {
2355 		acb->workingsrb_startindex = workingsrb_startindex;
2356 	} else {
2357 		srb = NULL;
2358 	}
2359 	ARCMSR_LOCK_RELEASE(&acb->srb_lock);
2360 	return(srb);
2361 }
2362 /*
2363 **************************************************************************
2364 **************************************************************************
2365 */
2366 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb)
2367 {
2368 	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
2369 	int retvalue = 0, transfer_len = 0;
2370 	char *buffer;
2371 	u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 |
2372 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 |
2373 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8  |
2374 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8];
2375 					/* 4 bytes: Areca io control code */
2376 	if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2377 		buffer = pccb->csio.data_ptr;
2378 		transfer_len = pccb->csio.dxfer_len;
2379 	} else {
2380 		retvalue = ARCMSR_MESSAGE_FAIL;
2381 		goto message_out;
2382 	}
2383 	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
2384 		retvalue = ARCMSR_MESSAGE_FAIL;
2385 		goto message_out;
2386 	}
2387 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
2388 	switch(controlcode) {
2389 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
2390 			u_int8_t *pQbuffer;
2391 			u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer;
2392 			int32_t allxfer_len = 0;
2393 
2394 			ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2395 			while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
2396 				&& (allxfer_len < 1031)) {
2397 				pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
2398 				*ptmpQbuffer = *pQbuffer;
2399 				acb->rqbuf_firstindex++;
2400 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
2401 				ptmpQbuffer++;
2402 				allxfer_len++;
2403 			}
2404 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2405 				struct QBUFFER  *prbuffer;
2406 
2407 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2408 				prbuffer = arcmsr_get_iop_rqbuffer(acb);
2409 				if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
2410 					acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2411 			}
2412 			pcmdmessagefld->cmdmessage.Length = allxfer_len;
2413 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2414 			retvalue = ARCMSR_MESSAGE_SUCCESS;
2415 			ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2416 		}
2417 		break;
2418 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2419 			int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
2420 			u_int8_t *pQbuffer;
2421 			u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer;
2422 
2423 			user_len = pcmdmessagefld->cmdmessage.Length;
2424 			ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2425 			wqbuf_lastindex = acb->wqbuf_lastindex;
2426 			wqbuf_firstindex = acb->wqbuf_firstindex;
2427 			if (wqbuf_lastindex != wqbuf_firstindex) {
2428 				arcmsr_Write_data_2iop_wqbuffer(acb);
2429 				/* has error report sensedata */
2430 				if(pccb->csio.sense_len) {
2431 					((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2432 					/* Valid,ErrorCode */
2433 					((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2434 					/* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2435 					((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2436 					/* AdditionalSenseLength */
2437 					((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2438 					/* AdditionalSenseCode */
2439 				}
2440 				retvalue = ARCMSR_MESSAGE_FAIL;
2441 			} else {
2442 				my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
2443 						&(ARCMSR_MAX_QBUFFER - 1);
2444 				if (my_empty_len >= user_len) {
2445 					while (user_len > 0) {
2446 						pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
2447 						*pQbuffer = *ptmpuserbuffer;
2448 						acb->wqbuf_lastindex++;
2449 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2450 						ptmpuserbuffer++;
2451 						user_len--;
2452 					}
2453 					if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2454 						acb->acb_flags &=
2455 						    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2456 						arcmsr_Write_data_2iop_wqbuffer(acb);
2457 					}
2458 				} else {
2459 					/* has error report sensedata */
2460 					if(pccb->csio.sense_len) {
2461 						((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2462 						/* Valid,ErrorCode */
2463 						((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2464 						/* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2465 						((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2466 						/* AdditionalSenseLength */
2467 						((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2468 						/* AdditionalSenseCode */
2469 					}
2470 					retvalue = ARCMSR_MESSAGE_FAIL;
2471 				}
2472 			}
2473 			ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2474 		}
2475 		break;
2476 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2477 			u_int8_t *pQbuffer = acb->rqbuffer;
2478 
2479 			ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2480 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2481 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2482 				arcmsr_iop_message_read(acb);
2483 			}
2484 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2485 			acb->rqbuf_firstindex = 0;
2486 			acb->rqbuf_lastindex = 0;
2487 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2488 			pcmdmessagefld->cmdmessage.ReturnCode =
2489 			    ARCMSR_MESSAGE_RETURNCODE_OK;
2490 			ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2491 		}
2492 		break;
2493 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
2494 			u_int8_t *pQbuffer = acb->wqbuffer;
2495 
2496 			ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2497 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2498 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2499 				arcmsr_iop_message_read(acb);
2500 			}
2501 			acb->acb_flags |=
2502 				(ACB_F_MESSAGE_WQBUFFER_CLEARED |
2503 					ACB_F_MESSAGE_WQBUFFER_READ);
2504 			acb->wqbuf_firstindex = 0;
2505 			acb->wqbuf_lastindex = 0;
2506 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2507 			pcmdmessagefld->cmdmessage.ReturnCode =
2508 				ARCMSR_MESSAGE_RETURNCODE_OK;
2509 			ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2510 		}
2511 		break;
2512 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
2513 			u_int8_t *pQbuffer;
2514 
2515 			ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2516 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2517 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2518 				arcmsr_iop_message_read(acb);
2519 			}
2520 			acb->acb_flags |=
2521 				(ACB_F_MESSAGE_WQBUFFER_CLEARED
2522 				| ACB_F_MESSAGE_RQBUFFER_CLEARED
2523 				| ACB_F_MESSAGE_WQBUFFER_READ);
2524 			acb->rqbuf_firstindex = 0;
2525 			acb->rqbuf_lastindex = 0;
2526 			acb->wqbuf_firstindex = 0;
2527 			acb->wqbuf_lastindex = 0;
2528 			pQbuffer = acb->rqbuffer;
2529 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2530 			pQbuffer = acb->wqbuffer;
2531 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2532 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2533 			ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2534 		}
2535 		break;
2536 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
2537 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
2538 		}
2539 		break;
2540 	case ARCMSR_MESSAGE_SAY_HELLO: {
2541 			char *hello_string = "Hello! I am ARCMSR";
2542 
2543 			memcpy(pcmdmessagefld->messagedatabuffer, hello_string
2544 				, (int16_t)strlen(hello_string));
2545 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2546 		}
2547 		break;
2548 	case ARCMSR_MESSAGE_SAY_GOODBYE:
2549 		arcmsr_iop_parking(acb);
2550 		break;
2551 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
2552 		arcmsr_flush_adapter_cache(acb);
2553 		break;
2554 	default:
2555 		retvalue = ARCMSR_MESSAGE_FAIL;
2556 	}
2557 message_out:
2558 	return (retvalue);
2559 }
2560 /*
2561 *********************************************************************
2562 *********************************************************************
2563 */
2564 static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2565 {
2566 	struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
2567 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)srb->acb;
2568 	union ccb *pccb;
2569 	int target, lun;
2570 
2571 	pccb = srb->pccb;
2572 	target = pccb->ccb_h.target_id;
2573 	lun = pccb->ccb_h.target_lun;
2574 	acb->pktRequestCount++;
2575 	if(error != 0) {
2576 		if(error != EFBIG) {
2577 			kprintf("arcmsr%d: unexpected error %x"
2578 				" returned from 'bus_dmamap_load' \n"
2579 				, acb->pci_unit, error);
2580 		}
2581 		if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2582 			pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2583 		}
2584 		arcmsr_srb_complete(srb, 0);
2585 		return;
2586 	}
2587 	if(nseg > ARCMSR_MAX_SG_ENTRIES) {
2588 		pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2589 		arcmsr_srb_complete(srb, 0);
2590 		return;
2591 	}
2592 	if(acb->acb_flags & ACB_F_BUS_RESET) {
2593 		kprintf("arcmsr%d: bus reset and return busy \n", acb->pci_unit);
2594 		pccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2595 		arcmsr_srb_complete(srb, 0);
2596 		return;
2597 	}
2598 	if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
2599 		u_int8_t block_cmd, cmd;
2600 
2601 		cmd = pccb->csio.cdb_io.cdb_bytes[0];
2602 		block_cmd = cmd & 0x0f;
2603 		if(block_cmd == 0x08 || block_cmd == 0x0a) {
2604 			kprintf("arcmsr%d:block 'read/write' command "
2605 				"with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n"
2606 				, acb->pci_unit, cmd, target, lun);
2607 			pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
2608 			arcmsr_srb_complete(srb, 0);
2609 			return;
2610 		}
2611 	}
2612 	if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2613 		if(nseg != 0) {
2614 			bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
2615 		}
2616 		arcmsr_srb_complete(srb, 0);
2617 		return;
2618 	}
2619 	if(acb->srboutstandingcount >= acb->maxOutstanding) {
2620 		if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) == 0)
2621 		{
2622 			xpt_freeze_simq(acb->psim, 1);
2623 			acb->acb_flags |= ACB_F_CAM_DEV_QFRZN;
2624 		}
2625 		pccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2626 		pccb->ccb_h.status |= CAM_REQUEUE_REQ;
2627 		arcmsr_srb_complete(srb, 0);
2628 		return;
2629 	}
2630 	pccb->ccb_h.status |= CAM_SIM_QUEUED;
2631 	arcmsr_build_srb(srb, dm_segs, nseg);
2632 	arcmsr_post_srb(acb, srb);
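	/*
	** arm a per-SRB timeout: the CAM timeout (milliseconds) plus
	** ARCMSR_TIMEOUT_DELAY seconds of slack, converted to ticks
	*/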
2633 	if (pccb->ccb_h.timeout != CAM_TIME_INFINITY)
2634 	{
2635 		callout_init_lk(&srb->ccb_callout, &srb->acb->isr_lock);
2636 		callout_reset(&srb->ccb_callout, ((pccb->ccb_h.timeout + (ARCMSR_TIMEOUT_DELAY * 1000)) * hz) / 1000, arcmsr_srb_timeout, srb);
2637 		srb->srb_flags |= SRB_FLAG_TIMER_START;
2638 	}
2639 }
2640 /*
2641 *****************************************************************************************
2642 *****************************************************************************************
2643 */
2644 static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb)
2645 {
2646 	struct CommandControlBlock *srb;
2647 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr;
2648 	u_int32_t intmask_org;
2649 	int i = 0;
2650 
2651 	acb->num_aborts++;
2652 	/*
2653 	***************************************************************************
2654 	** It is the upper layer do abort command this lock just prior to calling us.
2655 	** First determine if we currently own this command.
2656 	** Start by searching the device queue. If not found
2657 	** at all, and the system wanted us to just abort the
2658 	** command return success.
2659 	***************************************************************************
2660 	*/
2661 	if(acb->srboutstandingcount != 0) {
2662 		/* disable all outbound interrupt */
2663 		intmask_org = arcmsr_disable_allintr(acb);
2664 		for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
2665 			srb = acb->psrb_pool[i];
2666 			if(srb->srb_state == ARCMSR_SRB_START) {
2667 				if(srb->pccb == abortccb) {
2668 					srb->srb_state = ARCMSR_SRB_ABORTED;
2669 					kprintf("arcmsr%d:scsi id=%d lun=%jx abort srb '%p'"
2670 						"outstanding command \n"
2671 						, acb->pci_unit, abortccb->ccb_h.target_id
2672 						, (uintmax_t)abortccb->ccb_h.target_lun, srb);
2673 					arcmsr_polling_srbdone(acb, srb);
2674 					/* enable outbound Post Queue, outbound doorbell Interrupt */
2675 					arcmsr_enable_allintr(acb, intmask_org);
2676 					return (TRUE);
2677 				}
2678 			}
2679 		}
2680 		/* enable outbound Post Queue, outbound doorbell Interrupt */
2681 		arcmsr_enable_allintr(acb, intmask_org);
2682 	}
2683 	return(FALSE);
2684 }
2685 /*
2686 ****************************************************************************
2687 ****************************************************************************
2688 */
2689 static void arcmsr_bus_reset(struct AdapterControlBlock *acb)
2690 {
2691 	int retry = 0;
2692 
2693 	acb->num_resets++;
2694 	acb->acb_flags |= ACB_F_BUS_RESET;
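	/* give outstanding SRBs up to 400 * 25ms (about 10 seconds) to drain before resetting the IOP */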
2695 	while(acb->srboutstandingcount != 0 && retry < 400) {
2696 		arcmsr_interrupt(acb);
2697 		UDELAY(25000);
2698 		retry++;
2699 	}
2700 	arcmsr_iop_reset(acb);
2701 	acb->acb_flags &= ~ACB_F_BUS_RESET;
2702 }
2703 /*
2704 **************************************************************************
2705 **************************************************************************
2706 */
2707 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
2708 		union ccb *pccb)
2709 {
2710 	if (pccb->ccb_h.target_lun) {
2711 		pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
2712 		xpt_done(pccb);
2713 		return;
2714 	}
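	/*
	** target 16 is the driver's pseudo device for IOP message transfer:
	** INQUIRY is answered locally and WRITE/READ BUFFER is tunneled
	** through arcmsr_iop_message_xfer()
	*/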
2715 	pccb->ccb_h.status |= CAM_REQ_CMP;
2716 	switch (pccb->csio.cdb_io.cdb_bytes[0]) {
2717 	case INQUIRY: {
2718 		unsigned char inqdata[36];
2719 		char *buffer = pccb->csio.data_ptr;
2720 
2721 		inqdata[0] = T_PROCESSOR;	/* Periph Qualifier & Periph Dev Type */
2722 		inqdata[1] = 0;				/* rem media bit & Dev Type Modifier */
2723 		inqdata[2] = 0;				/* ISO, ECMA, & ANSI versions */
2724 		inqdata[3] = 0;
2725 		inqdata[4] = 31;			/* length of additional data */
2726 		inqdata[5] = 0;
2727 		inqdata[6] = 0;
2728 		inqdata[7] = 0;
2729 		strncpy(&inqdata[8], "Areca   ", 8);	/* Vendor Identification */
2730 		strncpy(&inqdata[16], "RAID controller ", 16);	/* Product Identification */
2731 		strncpy(&inqdata[32], "R001", 4); /* Product Revision */
2732 		memcpy(buffer, inqdata, sizeof(inqdata));
2733 		xpt_done(pccb);
2734 	}
2735 	break;
2736 	case WRITE_BUFFER:
2737 	case READ_BUFFER: {
2738 		if (arcmsr_iop_message_xfer(acb, pccb)) {
2739 			pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2740 			pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
2741 		}
2742 		xpt_done(pccb);
2743 	}
2744 	break;
2745 	default:
2746 		xpt_done(pccb);
2747 	}
2748 }
2749 /*
2750 *********************************************************************
2751 *********************************************************************
2752 */
2753 static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
2754 {
2755 	struct AdapterControlBlock *acb;
2756 
2757 	acb = (struct AdapterControlBlock *) cam_sim_softc(psim);
2758 	if(acb == NULL) {
2759 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2760 		xpt_done(pccb);
2761 		return;
2762 	}
2763 	switch (pccb->ccb_h.func_code) {
2764 	case XPT_SCSI_IO: {
2765 			struct CommandControlBlock *srb;
2766 			int target = pccb->ccb_h.target_id;
2767 
2768 			if(target == 16) {
2769 				/* virtual device for iop message transfer */
2770 				arcmsr_handle_virtual_command(acb, pccb);
2771 				return;
2772 			}
2773 			if((srb = arcmsr_get_freesrb(acb)) == NULL) {
2774 				pccb->ccb_h.status |= CAM_RESRC_UNAVAIL;
2775 				xpt_done(pccb);
2776 				return;
2777 			}
2778 			pccb->ccb_h.arcmsr_ccbsrb_ptr=srb;
2779 			pccb->ccb_h.arcmsr_ccbacb_ptr=acb;
2780 			srb->pccb=pccb;
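			/*
			** map the data buffer: a virtual single buffer goes through
			** bus_dmamap_load() with arcmsr_execute_srb() as the callback,
			** while a physical buffer or a physical S/G list is handed to
			** arcmsr_execute_srb() directly
			*/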
2781 			if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2782 				if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) {
2783 					/* Single buffer */
2784 					if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) {
2785 						/* Buffer is virtual */
2786 						u_int32_t error;
2787 
2788 						crit_enter();
2789 						error =	bus_dmamap_load(acb->dm_segs_dmat
2790 							, srb->dm_segs_dmamap
2791 							, pccb->csio.data_ptr
2792 							, pccb->csio.dxfer_len
2793 							, arcmsr_execute_srb, srb, /*flags*/0);
2794 						if(error == EINPROGRESS) {
2795 							xpt_freeze_simq(acb->psim, 1);
2796 							pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2797 						}
2798 						crit_exit();
2799 					}
2800 					else {		/* Buffer is physical */
2801 						struct bus_dma_segment seg;
2802 
2803 						seg.ds_addr = (bus_addr_t)pccb->csio.data_ptr;
2804 						seg.ds_len = pccb->csio.dxfer_len;
2805 						arcmsr_execute_srb(srb, &seg, 1, 0);
2806 					}
2807 				} else {
2808 					/* Scatter/gather list */
2809 					struct bus_dma_segment *segs;
2810 
2811 					if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
2812 					|| (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2813 						pccb->ccb_h.status |= CAM_PROVIDE_FAIL;
2814 						xpt_done(pccb);
2815 						kfree(srb, M_DEVBUF);
2816 						return;
2817 					}
2818 					segs=(struct bus_dma_segment *)pccb->csio.data_ptr;
2819 					arcmsr_execute_srb(srb, segs, pccb->csio.sglist_cnt, 0);
2820 				}
2821 			} else {
2822 				arcmsr_execute_srb(srb, NULL, 0, 0);
2823 			}
2824 			break;
2825 		}
2826 	case XPT_TARGET_IO: {
			/* target mode does not yet support vendor specific commands. */
2828 			pccb->ccb_h.status |= CAM_REQ_CMP;
2829 			xpt_done(pccb);
2830 			break;
2831 		}
2832 	case XPT_PATH_INQ: {
2833 			struct ccb_pathinq *cpi = &pccb->cpi;
2834 
2835 			cpi->version_num = 1;
2836 			cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
2837 			cpi->target_sprt = 0;
2838 			cpi->hba_misc = 0;
2839 			cpi->hba_eng_cnt = 0;
2840 			cpi->max_target = ARCMSR_MAX_TARGETID;        /* 0-16 */
2841 			cpi->max_lun = ARCMSR_MAX_TARGETLUN;	    /* 0-7 */
2842 			cpi->initiator_id = ARCMSR_SCSI_INITIATOR_ID; /* 255 */
2843 			cpi->bus_id = cam_sim_bus(psim);
2844 			strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2845 			strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN);
2846 			strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
2847 			cpi->unit_number = cam_sim_unit(psim);
2848 			if(acb->adapter_bus_speed == ACB_BUS_SPEED_12G)
2849 				cpi->base_transfer_speed = 1200000;
2850 			else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
2851 				cpi->base_transfer_speed = 600000;
2852 			else
2853 				cpi->base_transfer_speed = 300000;
2854 			if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2855 			   (acb->vendor_device_id == PCIDevVenIDARC1680) ||
2856 			   (acb->vendor_device_id == PCIDevVenIDARC1214))
2857 			{
2858 				cpi->transport = XPORT_SAS;
2859 				cpi->transport_version = 0;
2860 				cpi->protocol_version = SCSI_REV_SPC2;
2861 			}
2862 			else
2863 			{
2864 				cpi->transport = XPORT_SPI;
2865 				cpi->transport_version = 2;
2866 				cpi->protocol_version = SCSI_REV_2;
2867 			}
2868 			cpi->protocol = PROTO_SCSI;
2869 			cpi->ccb_h.status |= CAM_REQ_CMP;
2870 			xpt_done(pccb);
2871 			break;
2872 		}
2873 	case XPT_ABORT: {
2874 			union ccb *pabort_ccb;
2875 
2876 			pabort_ccb = pccb->cab.abort_ccb;
2877 			switch (pabort_ccb->ccb_h.func_code) {
2878 			case XPT_ACCEPT_TARGET_IO:
2879 			case XPT_IMMED_NOTIFY:
2880 			case XPT_CONT_TARGET_IO:
2881 				if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) {
2882 					pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED;
2883 					xpt_done(pabort_ccb);
2884 					pccb->ccb_h.status |= CAM_REQ_CMP;
2885 				} else {
2886 					xpt_print_path(pabort_ccb->ccb_h.path);
2887 					kprintf("Not found\n");
2888 					pccb->ccb_h.status |= CAM_PATH_INVALID;
2889 				}
2890 				break;
2891 			case XPT_SCSI_IO:
2892 				pccb->ccb_h.status |= CAM_UA_ABORT;
2893 				break;
2894 			default:
2895 				pccb->ccb_h.status |= CAM_REQ_INVALID;
2896 				break;
2897 			}
2898 			xpt_done(pccb);
2899 			break;
2900 		}
2901 	case XPT_RESET_BUS:
2902 	case XPT_RESET_DEV: {
2903 			u_int32_t     i;
2904 
2905 			arcmsr_bus_reset(acb);
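			/* let the adapter settle: 500 iterations x DELAY(1000) us = ~500 ms */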
2906 			for (i=0; i < 500; i++) {
2907 				DELAY(1000);
2908 			}
2909 			pccb->ccb_h.status |= CAM_REQ_CMP;
2910 			xpt_done(pccb);
2911 			break;
2912 		}
2913 	case XPT_TERM_IO: {
2914 			pccb->ccb_h.status |= CAM_REQ_INVALID;
2915 			xpt_done(pccb);
2916 			break;
2917 		}
2918 	case XPT_GET_TRAN_SETTINGS: {
2919 			struct ccb_trans_settings *cts;
2920 
2921 			if(pccb->ccb_h.target_id == 16) {
2922 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2923 				xpt_done(pccb);
2924 				break;
2925 			}
2926 			cts = &pccb->cts;
2927 			{
2928 				struct ccb_trans_settings_scsi *scsi;
2929 				struct ccb_trans_settings_spi *spi;
2930 				struct ccb_trans_settings_sas *sas;
2931 
2932 				scsi = &cts->proto_specific.scsi;
2933 				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2934 				scsi->valid = CTS_SCSI_VALID_TQ;
2935 				cts->protocol = PROTO_SCSI;
2936 
2937 				if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2938 				   (acb->vendor_device_id == PCIDevVenIDARC1680) ||
2939 				   (acb->vendor_device_id == PCIDevVenIDARC1214))
2940 				{
2941 					cts->protocol_version = SCSI_REV_SPC2;
2942 					cts->transport_version = 0;
2943 					cts->transport = XPORT_SAS;
2944 					sas = &cts->xport_specific.sas;
2945 					sas->valid = CTS_SAS_VALID_SPEED;
2946 					if (acb->sub_device_id == ARECA_SUB_DEV_ID_1883)
2947 						sas->bitrate = 1200000;
2948 					else if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2949 					   (acb->vendor_device_id == PCIDevVenIDARC1214))
2950 						sas->bitrate = 600000;
2951 					else if(acb->vendor_device_id == PCIDevVenIDARC1680)
2952 						sas->bitrate = 300000;
2953 				}
2954 				else
2955 				{
2956 					cts->protocol_version = SCSI_REV_2;
2957 					cts->transport_version = 2;
2958 					cts->transport = XPORT_SPI;
2959 					spi = &cts->xport_specific.spi;
2960 					spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2961 					spi->sync_period = 2;
2962 					spi->sync_offset = 32;
2963 					spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2964 					spi->valid = CTS_SPI_VALID_DISC
2965 						| CTS_SPI_VALID_SYNC_RATE
2966 						| CTS_SPI_VALID_SYNC_OFFSET
2967 						| CTS_SPI_VALID_BUS_WIDTH;
2968 				}
2969 			}
2970 			pccb->ccb_h.status |= CAM_REQ_CMP;
2971 			xpt_done(pccb);
2972 			break;
2973 		}
2974 	case XPT_SET_TRAN_SETTINGS: {
2975 			pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2976 			xpt_done(pccb);
2977 			break;
2978 		}
2979 	case XPT_CALC_GEOMETRY:
2980 			if(pccb->ccb_h.target_id == 16) {
2981 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2982 				xpt_done(pccb);
2983 				break;
2984 			}
2985 			cam_calc_geometry(&pccb->ccg, 1);
2986 			xpt_done(pccb);
2987 			break;
2988 	default:
2989 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2990 		xpt_done(pccb);
2991 		break;
2992 	}
2993 }
2994 /*
2995 **********************************************************************
2996 **********************************************************************
2997 */
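/*
** The arcmsr_start_hbX_bgrb() helpers below post the START_BGRB message to
** the adapter's message unit so the firmware starts/resumes background
** rebuild; a handshake timeout is only logged, not treated as fatal.
*/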
2998 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2999 {
3000 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
3001 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
3002 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
3003 		kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
3004 	}
3005 }
3006 /*
3007 **********************************************************************
3008 **********************************************************************
3009 */
3010 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
3011 {
3012 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
3013 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,  ARCMSR_MESSAGE_START_BGRB);
3014 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3015 		kprintf( "arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
3016 	}
3017 }
3018 /*
3019 **********************************************************************
3020 **********************************************************************
3021 */
3022 static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb)
3023 {
3024 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
3025 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
3026 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3027 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3028 		kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
3029 	}
3030 }
3031 /*
3032 **********************************************************************
3033 **********************************************************************
3034 */
3035 static void arcmsr_start_hbd_bgrb(struct AdapterControlBlock *acb)
3036 {
3037 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
3038 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
3039 	if(!arcmsr_hbd_wait_msgint_ready(acb)) {
3040 		kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
3041 	}
3042 }
3043 /*
3044 **********************************************************************
3045 **********************************************************************
3046 */
3047 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
3048 {
3049 	switch (acb->adapter_type) {
3050 	case ACB_ADAPTER_TYPE_A:
3051 		arcmsr_start_hba_bgrb(acb);
3052 		break;
3053 	case ACB_ADAPTER_TYPE_B:
3054 		arcmsr_start_hbb_bgrb(acb);
3055 		break;
3056 	case ACB_ADAPTER_TYPE_C:
3057 		arcmsr_start_hbc_bgrb(acb);
3058 		break;
3059 	case ACB_ADAPTER_TYPE_D:
3060 		arcmsr_start_hbd_bgrb(acb);
3061 		break;
3062 	}
3063 }
3064 /*
3065 **********************************************************************
3066 **
3067 **********************************************************************
3068 */
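/*
** Polled completion path for type A adapters, used while interrupts are
** masked (e.g. during abort or bus-reset handling) to drain the outbound
** reply FIFO by hand.  Each reply word carries the completed SRB's
** 32-byte-aligned frame address plus an error flag; acb->vir2phy_offset
** (computed in arcmsr_map_free_srb) converts that bus address back into the
** driver's virtual CommandControlBlock pointer.
*/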
3069 static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
3070 {
3071 	struct CommandControlBlock *srb;
3072 	u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0;
3073 	u_int16_t	error;
3074 
3075 polling_ccb_retry:
3076 	poll_count++;
3077 	outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
3078 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);	/*clear interrupt*/
3079 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3080 	while(1) {
3081 		if((flag_srb = CHIP_REG_READ32(HBA_MessageUnit,
3082 			0, outbound_queueport)) == 0xFFFFFFFF) {
3083 			if(poll_srb_done) {
3084 				break;	/* no more completed CCBs in the chip reply FIFO */
3085 			} else {
3086 				UDELAY(25000);
3087 				if ((poll_count > 100) && (poll_srb != NULL)) {
3088 					break;
3089 				}
3090 				goto polling_ccb_retry;
3091 			}
3092 		}
3093 		/* check if command done with no error*/
3094 		srb = (struct CommandControlBlock *)
3095 			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
3096 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
3097 		poll_srb_done = (srb == poll_srb) ? 1:0;
3098 		if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
3099 			if(srb->srb_state == ARCMSR_SRB_ABORTED) {
3100 				kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p'"
3101 					" poll command abort successfully \n"
3102 					, acb->pci_unit
3103 					, srb->pccb->ccb_h.target_id
3104 					, (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
3105 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3106 				arcmsr_srb_complete(srb, 1);
3107 				continue;
3108 			}
3109 			kprintf("arcmsr%d: polling got an illegal srb command done srb='%p'"
3110 				" srboutstandingcount=%d \n"
3111 				, acb->pci_unit
3112 				, srb, acb->srboutstandingcount);
3113 			continue;
3114 		}
3115 		arcmsr_report_srb_state(acb, srb, error);
3116 	}	/*drain reply FIFO*/
3117 }
3118 /*
3119 **********************************************************************
3120 **
3121 **********************************************************************
3122 */
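/*
** Same polled drain as above, but walking the type B done_qbuffer ring kept
** in host memory.
*/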
3123 static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
3124 {
3125 	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
3126 	struct CommandControlBlock *srb;
3127 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
3128 	int index;
3129 	u_int16_t	error;
3130 
3131 polling_ccb_retry:
3132 	poll_count++;
3133 	CHIP_REG_WRITE32(HBB_DOORBELL,
3134 	0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
3135 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3136 	while(1) {
3137 		index = phbbmu->doneq_index;
3138 		if((flag_srb = phbbmu->done_qbuffer[index]) == 0) {
3139 			if(poll_srb_done) {
3140 				break;	/* no more completed CCBs in the chip reply FIFO */
3141 			} else {
3142 				UDELAY(25000);
3143 				if ((poll_count > 100) && (poll_srb != NULL)) {
3144 					break;
3145 				}
3146 				goto polling_ccb_retry;
3147 			}
3148 		}
3149 		phbbmu->done_qbuffer[index] = 0;
3150 		index++;
3151 		index %= ARCMSR_MAX_HBB_POSTQUEUE;     /* wrap back to 0 past the last index */
3152 		phbbmu->doneq_index = index;
3153 		/* check if command done with no error*/
3154 		srb = (struct CommandControlBlock *)
3155 			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
3156 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
3157 		poll_srb_done = (srb == poll_srb) ? 1:0;
3158 		if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
3159 			if(srb->srb_state == ARCMSR_SRB_ABORTED) {
3160 				kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p'"
3161 					" poll command abort successfully \n"
3162 					, acb->pci_unit
3163 					, srb->pccb->ccb_h.target_id
3164 					, (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
3165 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3166 				arcmsr_srb_complete(srb, 1);
3167 				continue;
3168 			}
3169 			kprintf("arcmsr%d: polling got an illegal srb command done srb='%p'"
3170 				" srboutstandingcount=%d \n"
3171 				, acb->pci_unit
3172 				, srb, acb->srboutstandingcount);
3173 			continue;
3174 		}
3175 		arcmsr_report_srb_state(acb, srb, error);
3176 	}	/*drain reply FIFO*/
3177 }
3178 /*
3179 **********************************************************************
3180 **
3181 **********************************************************************
3182 */
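/*
** Polled drain for type C: replies are read from outbound_queueport_low;
** the low 5 bits of the reply word carry flag bits and are masked off to
** recover the 32-byte-aligned frame address.
*/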
3183 static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
3184 {
3185 	struct CommandControlBlock *srb;
3186 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
3187 	u_int16_t	error;
3188 
3189 polling_ccb_retry:
3190 	poll_count++;
3191 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3192 	while(1) {
3193 		if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
3194 			if(poll_srb_done) {
3195 				break;	/* no more completed CCBs in the chip reply FIFO */
3196 			} else {
3197 				UDELAY(25000);
3198 				if ((poll_count > 100) && (poll_srb != NULL)) {
3199 					break;
3200 				}
3201 				if (acb->srboutstandingcount == 0) {
3202 					break;
3203 				}
3204 				goto polling_ccb_retry;
3205 			}
3206 		}
3207 		flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
3208 		/* check if command done with no error*/
3209 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
3210 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
3211 		if (poll_srb != NULL)
3212 			poll_srb_done = (srb == poll_srb) ? 1:0;
3213 		if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
3214 			if(srb->srb_state == ARCMSR_SRB_ABORTED) {
3215 				kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p' poll command abort successfully \n"
3216 						, acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
3217 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3218 				arcmsr_srb_complete(srb, 1);
3219 				continue;
3220 			}
3221 			kprintf("arcmsr%d: polling got an illegal srb command done srb='%p' srboutstandingcount=%d \n"
3222 					, acb->pci_unit, srb, acb->srboutstandingcount);
3223 			continue;
3224 		}
3225 		arcmsr_report_srb_state(acb, srb, error);
3226 	}	/*drain reply FIFO*/
3227 }
3228 /*
3229 **********************************************************************
3230 **
3231 **********************************************************************
3232 */
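/*
** Polled drain for type D: completions are read from the done_qbuffer ring,
** indexed by the doneq_index/outbound write pointer pair.
*/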
3233 static void arcmsr_polling_hbd_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
3234 {
3235 	struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
3236 	struct CommandControlBlock *srb;
3237 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
3238 	u_int32_t outbound_write_pointer;
3239 	u_int16_t	error, doneq_index;
3240 
3241 polling_ccb_retry:
3242 	poll_count++;
3243 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3244 	while(1) {
3245 		outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow;
3246 		doneq_index = phbdmu->doneq_index;
3247 		if ((outbound_write_pointer & 0xFF) == (doneq_index & 0xFF)) {
3248 			if(poll_srb_done) {
3249 				break;	/* no more completed CCBs in the chip reply FIFO */
3250 			} else {
3251 				UDELAY(25000);
3252 				if ((poll_count > 100) && (poll_srb != NULL)) {
3253 					break;
3254 				}
3255 				if (acb->srboutstandingcount == 0) {
3256 					break;
3257 				}
3258 				goto polling_ccb_retry;
3259 			}
3260 		}
3261 		doneq_index = arcmsr_get_doneq_index(phbdmu);
3262 		flag_srb = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow;
3263 		/* check if command done with no error*/
3264 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
3265 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
3266 		CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index);
3267 		if (poll_srb != NULL)
3268 			poll_srb_done = (srb == poll_srb) ? 1:0;
3269 		if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
3270 			if(srb->srb_state == ARCMSR_SRB_ABORTED) {
3271 				kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p' poll command abort successfully \n"
3272 						, acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
3273 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3274 				arcmsr_srb_complete(srb, 1);
3275 				continue;
3276 			}
3277 			kprintf("arcmsr%d: polling got an illegal srb command done srb='%p' srboutstandingcount=%d \n"
3278 					, acb->pci_unit, srb, acb->srboutstandingcount);
3279 			continue;
3280 		}
3281 		arcmsr_report_srb_state(acb, srb, error);
3282 	}	/*drain reply FIFO*/
3283 }
3284 /*
3285 **********************************************************************
3286 **********************************************************************
3287 */
3288 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
3289 {
3290 	switch (acb->adapter_type) {
3291 	case ACB_ADAPTER_TYPE_A: {
3292 			arcmsr_polling_hba_srbdone(acb, poll_srb);
3293 		}
3294 		break;
3295 	case ACB_ADAPTER_TYPE_B: {
3296 			arcmsr_polling_hbb_srbdone(acb, poll_srb);
3297 		}
3298 		break;
3299 	case ACB_ADAPTER_TYPE_C: {
3300 			arcmsr_polling_hbc_srbdone(acb, poll_srb);
3301 		}
3302 		break;
3303 	case ACB_ADAPTER_TYPE_D: {
3304 			arcmsr_polling_hbd_srbdone(acb, poll_srb);
3305 		}
3306 		break;
3307 	}
3308 }
3309 /*
3310 **********************************************************************
3311 **********************************************************************
3312 */
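/*
** GET_CONFIG handshake, type A flavor: after ARCMSR_INBOUND_MESG0_GET_CONFIG
** is posted, msgcode_rwbuffer holds an 8-byte model string, a 16-byte
** firmware version, a 16-byte device map and the firmware's queue depth.
** The driver caps maxOutstanding at that queue depth minus one; the
** hbb/hbc/hbd variants below do the same through their own register windows.
*/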
3313 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
3314 {
3315 	char *acb_firm_model = acb->firm_model;
3316 	char *acb_firm_version = acb->firm_version;
3317 	char *acb_device_map = acb->device_map;
3318 	size_t iop_firm_model = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
3319 	size_t iop_firm_version = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
3320 	size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
3321 	int i;
3322 
3323 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
3324 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
3325 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
3326 	}
3327 	i = 0;
3328 	while(i < 8) {
3329 		*acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
3330 		/* 8 bytes firm_model, 15, 60-67*/
3331 		acb_firm_model++;
3332 		i++;
3333 	}
3334 	i=0;
3335 	while(i < 16) {
3336 		*acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
3337 		/* 16 bytes firm_version, 17, 68-83*/
3338 		acb_firm_version++;
3339 		i++;
3340 	}
3341 	i=0;
3342 	while(i < 16) {
3343 		*acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
3344 		acb_device_map++;
3345 		i++;
3346 	}
3347 	kprintf("Areca RAID adapter%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
3348 	kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
3349 	acb->firm_request_len = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
3350 	acb->firm_numbers_queue = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
3351 	acb->firm_sdram_size = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
3352 	acb->firm_ide_channels = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
3353 	acb->firm_cfg_version = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25, 	  */
3354 	if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
3355 		acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1;
3356 	else
3357 		acb->maxOutstanding = acb->firm_numbers_queue - 1;
3358 }
3359 /*
3360 **********************************************************************
3361 **********************************************************************
3362 */
3363 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
3364 {
3365 	char *acb_firm_model = acb->firm_model;
3366 	char *acb_firm_version = acb->firm_version;
3367 	char *acb_device_map = acb->device_map;
3368 	size_t iop_firm_model = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
3369 	size_t iop_firm_version = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
3370 	size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
3371 	int i;
3372 
3373 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
3374 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3375 		kprintf( "arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
3376 	}
3377 	i = 0;
3378 	while(i < 8) {
3379 		*acb_firm_model = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i);
3380 		/* 8 bytes firm_model, 15, 60-67*/
3381 		acb_firm_model++;
3382 		i++;
3383 	}
3384 	i = 0;
3385 	while(i < 16) {
3386 		*acb_firm_version = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i);
3387 		/* 16 bytes firm_version, 17, 68-83*/
3388 		acb_firm_version++;
3389 		i++;
3390 	}
3391 	i = 0;
3392 	while(i < 16) {
3393 		*acb_device_map = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i);
3394 		acb_device_map++;
3395 		i++;
3396 	}
3397 	kprintf("Areca RAID adapter%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
3398 	kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
3399 	acb->firm_request_len = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
3400 	acb->firm_numbers_queue = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
3401 	acb->firm_sdram_size = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
3402 	acb->firm_ide_channels = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
3403 	acb->firm_cfg_version = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25, 	  */
3404 	if(acb->firm_numbers_queue > ARCMSR_MAX_HBB_POSTQUEUE)
3405 		acb->maxOutstanding = ARCMSR_MAX_HBB_POSTQUEUE - 1;
3406 	else
3407 		acb->maxOutstanding = acb->firm_numbers_queue - 1;
3408 }
3409 /*
3410 **********************************************************************
3411 **********************************************************************
3412 */
3413 static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb)
3414 {
3415 	char *acb_firm_model = acb->firm_model;
3416 	char *acb_firm_version = acb->firm_version;
3417 	char *acb_device_map = acb->device_map;
3418 	size_t iop_firm_model = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);   /*firm_model,15,60-67*/
3419 	size_t iop_firm_version = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
3420 	size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
3421 	int i;
3422 
3423 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
3424 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3425 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3426 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
3427 	}
3428 	i = 0;
3429 	while(i < 8) {
3430 		*acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
3431 		/* 8 bytes firm_model, 15, 60-67*/
3432 		acb_firm_model++;
3433 		i++;
3434 	}
3435 	i = 0;
3436 	while(i < 16) {
3437 		*acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
3438 		/* 16 bytes firm_version, 17, 68-83*/
3439 		acb_firm_version++;
3440 		i++;
3441 	}
3442 	i = 0;
3443 	while(i < 16) {
3444 		*acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
3445 		acb_device_map++;
3446 		i++;
3447 	}
3448 	kprintf("Areca RAID adapter%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
3449 	kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
3450 	acb->firm_request_len	= CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]);	/*firm_request_len,   1, 04-07*/
3451 	acb->firm_numbers_queue	= CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]);	/*firm_numbers_queue, 2, 08-11*/
3452 	acb->firm_sdram_size	= CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]);	/*firm_sdram_size,    3, 12-15*/
3453 	acb->firm_ide_channels	= CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]);	/*firm_ide_channels,  4, 16-19*/
3454 	acb->firm_cfg_version	= CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25, 	  */
3455 	if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
3456 		acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1;
3457 	else
3458 		acb->maxOutstanding = acb->firm_numbers_queue - 1;
3459 }
3460 /*
3461 **********************************************************************
3462 **********************************************************************
3463 */
3464 static void arcmsr_get_hbd_config(struct AdapterControlBlock *acb)
3465 {
3466 	char *acb_firm_model = acb->firm_model;
3467 	char *acb_firm_version = acb->firm_version;
3468 	char *acb_device_map = acb->device_map;
3469 	size_t iop_firm_model = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);   /*firm_model,15,60-67*/
3470 	size_t iop_firm_version = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
3471 	size_t iop_device_map = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
3472 	int i;
3473 
3474 	if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE)
3475 		CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);
3476 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
3477 	if(!arcmsr_hbd_wait_msgint_ready(acb)) {
3478 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
3479 	}
3480 	i = 0;
3481 	while(i < 8) {
3482 		*acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
3483 		/* 8 bytes firm_model, 15, 60-67*/
3484 		acb_firm_model++;
3485 		i++;
3486 	}
3487 	i = 0;
3488 	while(i < 16) {
3489 		*acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
3490 		/* 16 bytes firm_version, 17, 68-83*/
3491 		acb_firm_version++;
3492 		i++;
3493 	}
3494 	i = 0;
3495 	while(i < 16) {
3496 		*acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
3497 		acb_device_map++;
3498 		i++;
3499 	}
3500 	kprintf("Areca RAID adapter%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
3501 	kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
3502 	acb->firm_request_len	= CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[2]);	/*firm_request_len,   1, 04-07*/
3503 	acb->firm_numbers_queue	= CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[3]);	/*firm_numbers_queue, 2, 08-11*/
3504 	acb->firm_sdram_size	= CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[4]);	/*firm_sdram_size,    3, 12-15*/
3505 	acb->firm_ide_channels	= CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[5]);	/*firm_ide_channels,  4, 16-19*/
3506 	acb->firm_cfg_version	= CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25, 	  */
3507 	if(acb->firm_numbers_queue > ARCMSR_MAX_HBD_POSTQUEUE)
3508 		acb->maxOutstanding = ARCMSR_MAX_HBD_POSTQUEUE - 1;
3509 	else
3510 		acb->maxOutstanding = acb->firm_numbers_queue - 1;
3511 }
3512 /*
3513 **********************************************************************
3514 **********************************************************************
3515 */
3516 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
3517 {
3518 	switch (acb->adapter_type) {
3519 	case ACB_ADAPTER_TYPE_A: {
3520 			arcmsr_get_hba_config(acb);
3521 		}
3522 		break;
3523 	case ACB_ADAPTER_TYPE_B: {
3524 			arcmsr_get_hbb_config(acb);
3525 		}
3526 		break;
3527 	case ACB_ADAPTER_TYPE_C: {
3528 			arcmsr_get_hbc_config(acb);
3529 		}
3530 		break;
3531 	case ACB_ADAPTER_TYPE_D: {
3532 			arcmsr_get_hbd_config(acb);
3533 		}
3534 		break;
3535 	}
3536 }
3537 /*
3538 **********************************************************************
3539 **********************************************************************
3540 */
3541 static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb)
3542 {
3543 	int	timeout=0;
3544 
3545 	switch (acb->adapter_type) {
3546 	case ACB_ADAPTER_TYPE_A: {
3547 			while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0)
3548 			{
3549 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3550 				{
3551 					kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3552 					return;
3553 				}
3554 				UDELAY(15000); /* wait 15 milli-seconds */
3555 			}
3556 		}
3557 		break;
3558 	case ACB_ADAPTER_TYPE_B: {
3559 			while ((CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0)
3560 			{
3561 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3562 				{
3563 					kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3564 					return;
3565 				}
3566 				UDELAY(15000); /* wait 15 milli-seconds */
3567 			}
3568 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3569 		}
3570 		break;
3571 	case ACB_ADAPTER_TYPE_C: {
3572 			while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0)
3573 			{
3574 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3575 				{
3576 					kprintf( "arcmsr%d: timed out waiting for firmware ready\n", acb->pci_unit);
3577 					return;
3578 				}
3579 				UDELAY(15000); /* wait 15 milli-seconds */
3580 			}
3581 		}
3582 		break;
3583 	case ACB_ADAPTER_TYPE_D: {
3584 			while ((CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBDMU_MESSAGE_FIRMWARE_OK) == 0)
3585 			{
3586 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3587 				{
3588 					kprintf( "arcmsr%d: timed out waiting for firmware ready\n", acb->pci_unit);
3589 					return;
3590 				}
3591 				UDELAY(15000); /* wait 15 milli-seconds */
3592 			}
3593 		}
3594 		break;
3595 	}
3596 }
3597 /*
3598 **********************************************************************
3599 **********************************************************************
3600 */
3601 static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb)
3602 {
3603 	u_int32_t outbound_doorbell;
3604 
3605 	switch (acb->adapter_type) {
3606 	case ACB_ADAPTER_TYPE_A: {
3607 			/* empty doorbell Qbuffer if door bell ringed */
3608 			outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
3609 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell);	/*clear doorbell interrupt */
3610 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
3611 
3612 		}
3613 		break;
3614 	case ACB_ADAPTER_TYPE_B: {
3615 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/
3616 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
3617 			/* let IOP know data has been read */
3618 		}
3619 		break;
3620 	case ACB_ADAPTER_TYPE_C: {
3621 			/* empty doorbell Qbuffer if door bell ringed */
3622 			outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
3623 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell);	/*clear doorbell interrupt */
3624 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
3625 			CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell_clear); /* Dummy read to force pci flush */
3626 			CHIP_REG_READ32(HBC_MessageUnit, 0, inbound_doorbell); /* Dummy read to force pci flush */
3627 		}
3628 		break;
3629 	case ACB_ADAPTER_TYPE_D: {
3630 			/* empty doorbell Qbuffer if door bell ringed */
3631 			outbound_doorbell = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell);
3632 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, outbound_doorbell);	/*clear doorbell interrupt */
3633 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ);
3634 
3635 		}
3636 		break;
3637 	}
3638 }
3639 /*
3640 ************************************************************************
3641 ************************************************************************
3642 */
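/*
** arcmsr_iop_confirm() tells the IOP where the coherent SRB pool lives.
** Type A/C only publish the high 32 bits of the pool address (and only when
** non-zero); type B and D additionally hand over the post/done command queue
** areas placed in host memory right behind the SRB pool.
*/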
3643 static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3644 {
3645 	unsigned long srb_phyaddr;
3646 	u_int32_t srb_phyaddr_hi32;
3647 	u_int32_t srb_phyaddr_lo32;
3648 
3649 	/*
3650 	********************************************************************
3651 	** here we need to tell iop 331 our freesrb.HighPart
3652 	** if freesrb.HighPart is not zero
3653 	********************************************************************
3654 	*/
3655 	srb_phyaddr = (unsigned long) acb->srb_phyaddr.phyaddr;
3656 	srb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
3657 	srb_phyaddr_lo32 = acb->srb_phyaddr.B.phyadd_low;
3658 	switch (acb->adapter_type) {
3659 	case ACB_ADAPTER_TYPE_A: {
3660 			if(srb_phyaddr_hi32 != 0) {
3661 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3662 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3663 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3664 				if(!arcmsr_hba_wait_msgint_ready(acb)) {
3665 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3666 					return FALSE;
3667 				}
3668 			}
3669 		}
3670 		break;
3671 		/*
3672 		***********************************************************************
3673 		**    if adapter type B, set window of "post command Q"
3674 		***********************************************************************
3675 		*/
3676 	case ACB_ADAPTER_TYPE_B: {
3677 			u_int32_t post_queue_phyaddr;
3678 			struct HBB_MessageUnit *phbbmu;
3679 
3680 			phbbmu = (struct HBB_MessageUnit *)acb->pmu;
3681 			phbbmu->postq_index = 0;
3682 			phbbmu->doneq_index = 0;
3683 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW);
3684 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3685 				kprintf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit);
3686 				return FALSE;
3687 			}
3688 			post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE
3689 			+ offsetof(struct HBB_MessageUnit, post_qbuffer);
3690 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */
3691 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normal should be zero */
3692 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ base address */
3693 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ base = postQ base + 1056 */
3694 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* post/done Q size = (256+8)*4 = 1056 bytes */
3695 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG);
3696 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3697 				kprintf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit);
3698 				return FALSE;
3699 			}
3700 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE);
3701 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3702 				kprintf( "arcmsr%d: 'start driver mode' timeout \n", acb->pci_unit);
3703 				return FALSE;
3704 			}
3705 		}
3706 		break;
3707 	case ACB_ADAPTER_TYPE_C: {
3708 			if(srb_phyaddr_hi32 != 0) {
3709 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3710 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3711 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3712 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3713 				if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3714 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3715 					return FALSE;
3716 				}
3717 			}
3718 		}
3719 		break;
3720 	case ACB_ADAPTER_TYPE_D: {
3721 			u_int32_t post_queue_phyaddr, done_queue_phyaddr;
3722 			struct HBD_MessageUnit0 *phbdmu;
3723 
3724 			phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
3725 			phbdmu->postq_index = 0;
3726 			phbdmu->doneq_index = 0x40FF;
3727 			post_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE
3728 								+ offsetof(struct HBD_MessageUnit0, post_qbuffer);
3729 			done_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE
3730 								+ offsetof(struct HBD_MessageUnit0, done_qbuffer);
3731 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */
3732 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3733 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ base */
3734 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[3], done_queue_phyaddr); /* doneQ base */
3735 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[4], 0x100);
3736 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3737 			if(!arcmsr_hbd_wait_msgint_ready(acb)) {
3738 				kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3739 				return FALSE;
3740 			}
3741 		}
3742 		break;
3743 	}
3744 	return (TRUE);
3745 }
3746 /*
3747 ************************************************************************
3748 ************************************************************************
3749 */
3750 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
3751 {
3752 	switch (acb->adapter_type)
3753 	{
3754 	case ACB_ADAPTER_TYPE_A:
3755 	case ACB_ADAPTER_TYPE_C:
3756 	case ACB_ADAPTER_TYPE_D:
3757 		break;
3758 	case ACB_ADAPTER_TYPE_B: {
3759 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
3760 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3761 				kprintf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit);
3762 				return;
3763 			}
3764 		}
3765 		break;
3766 	}
3767 }
3768 /*
3769 **********************************************************************
3770 **********************************************************************
3771 */
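/*
** Adapter bring-up: mask outbound interrupts, wait for the firmware-ready
** flag, publish the SRB pool/queues, read the firmware configuration, start
** background rebuild, flush stale doorbell data, enable EOI mode (type B
** only) and finally re-enable interrupts.
*/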
3772 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
3773 {
3774 	u_int32_t intmask_org;
3775 
3776 	/* disable all outbound interrupt */
3777 	intmask_org = arcmsr_disable_allintr(acb);
3778 	arcmsr_wait_firmware_ready(acb);
3779 	arcmsr_iop_confirm(acb);
3780 	arcmsr_get_firmware_spec(acb);
3781 	/*start background rebuild*/
3782 	arcmsr_start_adapter_bgrb(acb);
3783 	/* empty doorbell Qbuffer if door bell ringed */
3784 	arcmsr_clear_doorbell_queue_buffer(acb);
3785 	arcmsr_enable_eoi_mode(acb);
3786 	/* enable outbound Post Queue, outbound doorbell Interrupt */
3787 	arcmsr_enable_allintr(acb, intmask_org);
3788 	acb->acb_flags |= ACB_F_IOP_INITED;
3789 }
3790 /*
3791 **********************************************************************
3792 **********************************************************************
3793 */
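/*
** bus_dmamap_load() callback for the SRB pool: records the pool's bus
** address, creates a per-SRB data map, pre-computes each SRB's cdb_phyaddr
** (full 64-bit split for type C/D, address >> 5 for type A/B's 32-byte
** aligned frames) and saves vir2phy_offset = virtual - physical so reply
** handlers can translate bus addresses back into SRB pointers.
*/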
3794 static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3795 {
3796 	struct AdapterControlBlock *acb = arg;
3797 	struct CommandControlBlock *srb_tmp;
3798 	u_int32_t i;
3799 	unsigned long srb_phyaddr = (unsigned long)segs->ds_addr;
3800 
3801 	acb->srb_phyaddr.phyaddr = srb_phyaddr;
3802 	srb_tmp = (struct CommandControlBlock *)acb->uncacheptr;
3803 	for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
3804 		if(bus_dmamap_create(acb->dm_segs_dmat,
3805 			 /*flags*/0, &srb_tmp->dm_segs_dmamap) != 0) {
3806 			acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
3807 			kprintf("arcmsr%d:"
3808 			" srb dmamap bus_dmamap_create error\n", acb->pci_unit);
3809 			return;
3810 		}
3811 		if((acb->adapter_type == ACB_ADAPTER_TYPE_C) || (acb->adapter_type == ACB_ADAPTER_TYPE_D))
3812 		{
3813 			srb_tmp->cdb_phyaddr_low = srb_phyaddr;
3814 			srb_tmp->cdb_phyaddr_high = (u_int32_t)((srb_phyaddr >> 16) >> 16);
3815 		}
3816 		else
3817 			srb_tmp->cdb_phyaddr_low = srb_phyaddr >> 5;
3818 		srb_tmp->acb = acb;
3819 		acb->srbworkingQ[i] = acb->psrb_pool[i] = srb_tmp;
3820 		srb_phyaddr = srb_phyaddr + SRB_SIZE;
3821 		srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp + SRB_SIZE);
3822 	}
3823 	acb->vir2phy_offset = (unsigned long)srb_tmp - (unsigned long)srb_phyaddr;
3824 }
3825 /*
3826 ************************************************************************
3827 ************************************************************************
3828 */
3829 static void arcmsr_free_resource(struct AdapterControlBlock *acb)
3830 {
3831 	/* remove the control device */
3832 	if(acb->ioctl_dev != NULL) {
3833 		destroy_dev(acb->ioctl_dev);
3834 	}
3835 	bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap);
3836 	bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap);
3837 	bus_dma_tag_destroy(acb->srb_dmat);
3838 	bus_dma_tag_destroy(acb->dm_segs_dmat);
3839 	bus_dma_tag_destroy(acb->parent_dmat);
3840 }
3841 /*
3842 ************************************************************************
3843 ************************************************************************
3844 */
3845 static void arcmsr_mutex_init(struct AdapterControlBlock *acb)
3846 {
3847 	ARCMSR_LOCK_INIT(&acb->isr_lock, "arcmsr isr lock");
3848 	ARCMSR_LOCK_INIT(&acb->srb_lock, "arcmsr srb lock");
3849 	ARCMSR_LOCK_INIT(&acb->postDone_lock, "arcmsr postQ lock");
3850 	ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr RW buffer lock");
3851 }
3852 /*
3853 ************************************************************************
3854 ************************************************************************
3855 */
3856 static void arcmsr_mutex_destroy(struct AdapterControlBlock *acb)
3857 {
3858 	ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3859 	ARCMSR_LOCK_DESTROY(&acb->postDone_lock);
3860 	ARCMSR_LOCK_DESTROY(&acb->srb_lock);
3861 	ARCMSR_LOCK_DESTROY(&acb->isr_lock);
3862 }
3863 /*
3864 ************************************************************************
3865 ************************************************************************
3866 */
3867 static u_int32_t arcmsr_initialize(device_t dev)
3868 {
3869 	struct AdapterControlBlock *acb = device_get_softc(dev);
3870 	u_int16_t pci_command;
3871 	int i, j,max_coherent_size;
3872 	u_int32_t vendor_dev_id;
3873 
3874 	vendor_dev_id = pci_get_devid(dev);
3875 	acb->vendor_device_id = vendor_dev_id;
3876 	acb->sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
3877 	switch (vendor_dev_id) {
3878 	case PCIDevVenIDARC1880:
3879 	case PCIDevVenIDARC1882:
3880 	case PCIDevVenIDARC1213:
3881 	case PCIDevVenIDARC1223: {
3882 			acb->adapter_type = ACB_ADAPTER_TYPE_C;
3883 			if (acb->sub_device_id == ARECA_SUB_DEV_ID_1883)
3884 				acb->adapter_bus_speed = ACB_BUS_SPEED_12G;
3885 			else
3886 				acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
3887 			max_coherent_size = ARCMSR_SRBS_POOL_SIZE;
3888 		}
3889 		break;
3890 	case PCIDevVenIDARC1214: {
3891 			acb->adapter_type = ACB_ADAPTER_TYPE_D;
3892 			acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
3893 			max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBD_MessageUnit0));
3894 		}
3895 		break;
3896 	case PCIDevVenIDARC1200:
3897 	case PCIDevVenIDARC1201: {
3898 			acb->adapter_type = ACB_ADAPTER_TYPE_B;
3899 			acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
3900 			max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit));
3901 		}
3902 		break;
3903 	case PCIDevVenIDARC1110:
3904 	case PCIDevVenIDARC1120:
3905 	case PCIDevVenIDARC1130:
3906 	case PCIDevVenIDARC1160:
3907 	case PCIDevVenIDARC1170:
3908 	case PCIDevVenIDARC1210:
3909 	case PCIDevVenIDARC1220:
3910 	case PCIDevVenIDARC1230:
3911 	case PCIDevVenIDARC1231:
3912 	case PCIDevVenIDARC1260:
3913 	case PCIDevVenIDARC1261:
3914 	case PCIDevVenIDARC1270:
3915 	case PCIDevVenIDARC1280:
3916 	case PCIDevVenIDARC1212:
3917 	case PCIDevVenIDARC1222:
3918 	case PCIDevVenIDARC1380:
3919 	case PCIDevVenIDARC1381:
3920 	case PCIDevVenIDARC1680:
3921 	case PCIDevVenIDARC1681: {
3922 			acb->adapter_type = ACB_ADAPTER_TYPE_A;
3923 			acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
3924 			max_coherent_size = ARCMSR_SRBS_POOL_SIZE;
3925 		}
3926 		break;
3927 	default: {
3928 			kprintf("arcmsr%d:"
3929 			" unknown RAID adapter type \n", device_get_unit(dev));
3930 			return ENOMEM;
3931 		}
3932 	}
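	/*
	 * DMA tag hierarchy: a parent tag for the device, a scatter/gather tag
	 * (dm_segs_dmat) for mapping the data buffers of individual I/Os, and a
	 * single-segment, 32-byte-aligned, below-4G tag (srb_dmat) for the
	 * coherent SRB pool allocated and mapped just below.
	 */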
3933 	if(bus_dma_tag_create(  /*PCI parent*/		bus_get_dma_tag(dev),
3934 				/*alignment*/	1,
3935 				/*boundary*/	0,
3936 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3937 				/*highaddr*/	BUS_SPACE_MAXADDR,
3938 				/*filter*/	NULL,
3939 				/*filterarg*/	NULL,
3940 				/*maxsize*/	BUS_SPACE_MAXSIZE_32BIT,
3941 				/*nsegments*/	BUS_SPACE_UNRESTRICTED,
3942 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3943 				/*flags*/	0,
3944 						&acb->parent_dmat) != 0)
3945 	{
3946 		kprintf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3947 		return ENOMEM;
3948 	}
3949 	/* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
3950 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3951 				/*alignment*/	1,
3952 				/*boundary*/	0,
3953 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3954 				/*highaddr*/	BUS_SPACE_MAXADDR,
3955 				/*filter*/	NULL,
3956 				/*filterarg*/	NULL,
3957 				/*maxsize*/	ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM,
3958 				/*nsegments*/	ARCMSR_MAX_SG_ENTRIES,
3959 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3960 				/*flags*/	0,
3961 						&acb->dm_segs_dmat) != 0)
3962 	{
3963 		bus_dma_tag_destroy(acb->parent_dmat);
3964 		kprintf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3965 		return ENOMEM;
3966 	}
3967 
3968 	/* DMA tag for our srb structures.... Allocate the freesrb memory */
3969 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3970 				/*alignment*/	0x20,
3971 				/*boundary*/	0,
3972 				/*lowaddr*/	BUS_SPACE_MAXADDR_32BIT,
3973 				/*highaddr*/	BUS_SPACE_MAXADDR,
3974 				/*filter*/	NULL,
3975 				/*filterarg*/	NULL,
3976 				/*maxsize*/	max_coherent_size,
3977 				/*nsegments*/	1,
3978 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3979 				/*flags*/	0,
3980 						&acb->srb_dmat) != 0)
3981 	{
3982 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3983 		bus_dma_tag_destroy(acb->parent_dmat);
3984 		kprintf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3985 		return ENXIO;
3986 	}
3987 	/* Allocation for our srbs */
3988 	if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) {
3989 		bus_dma_tag_destroy(acb->srb_dmat);
3990 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3991 		bus_dma_tag_destroy(acb->parent_dmat);
3992 		kprintf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev));
3993 		return ENXIO;
3994 	}
3995 	/* And permanently map them */
3996 	if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) {
3997 		bus_dma_tag_destroy(acb->srb_dmat);
3998 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3999 		bus_dma_tag_destroy(acb->parent_dmat);
4000 		kprintf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev));
4001 		return ENXIO;
4002 	}
4003 	pci_command = pci_read_config(dev, PCIR_COMMAND, 2);
4004 	pci_command |= PCIM_CMD_BUSMASTEREN;
4005 	pci_command |= PCIM_CMD_PERRESPEN;
4006 	pci_command |= PCIM_CMD_MWRICEN;
4007 	/* Enable Busmaster */
4008 	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
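	/*
	 * Map the register BAR(s).  Type A/C/D expose the whole message unit
	 * through one memory BAR; type B splits the doorbell and RW-buffer
	 * regions across two BARs and keeps its post/done queues in the
	 * coherent host memory allocated above, right behind the SRB pool.
	 */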
4009 	switch(acb->adapter_type) {
4010 	case ACB_ADAPTER_TYPE_A: {
4011 			u_int32_t rid0 = PCIR_BAR(0);
4012 			vm_offset_t	mem_base0;
4013 
4014 			acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, 0x1000, RF_ACTIVE);
4015 			if(acb->sys_res_arcmsr[0] == NULL) {
4016 				arcmsr_free_resource(acb);
4017 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
4018 				return ENOMEM;
4019 			}
4020 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
4021 				arcmsr_free_resource(acb);
4022 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
4023 				return ENXIO;
4024 			}
4025 			mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
4026 			if(mem_base0 == 0) {
4027 				arcmsr_free_resource(acb);
4028 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
4029 				return ENXIO;
4030 			}
4031 			acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
4032 			acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
4033 			acb->pmu = (struct MessageUnit_UNION *)mem_base0;
4034 		}
4035 		break;
4036 	case ACB_ADAPTER_TYPE_B: {
4037 			struct HBB_MessageUnit *phbbmu;
4038 			struct CommandControlBlock *freesrb;
4039 			u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) };
4040 			vm_offset_t	mem_base[]={0,0};
4041 			for(i=0; i < 2; i++) {
4042 				if(i == 0) {
4043 					acb->sys_res_arcmsr[i] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i],
4044 											0ul, ~0ul, sizeof(struct HBB_DOORBELL), RF_ACTIVE);
4045 				} else {
4046 					acb->sys_res_arcmsr[i] = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i],
4047 											0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE);
4048 				}
4049 				if(acb->sys_res_arcmsr[i] == NULL) {
4050 					arcmsr_free_resource(acb);
4051 					kprintf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i);
4052 					return ENOMEM;
4053 				}
4054 				if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) {
4055 					arcmsr_free_resource(acb);
4056 					kprintf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i);
4057 					return ENXIO;
4058 				}
4059 				mem_base[i] = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]);
4060 				if(mem_base[i] == 0) {
4061 					arcmsr_free_resource(acb);
4062 					kprintf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i);
4063 					return ENXIO;
4064 				}
4065 				acb->btag[i] = rman_get_bustag(acb->sys_res_arcmsr[i]);
4066 				acb->bhandle[i] = rman_get_bushandle(acb->sys_res_arcmsr[i]);
4067 			}
4068 			freesrb = (struct CommandControlBlock *)acb->uncacheptr;
4069 			acb->pmu = (struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE);
4070 			phbbmu = (struct HBB_MessageUnit *)acb->pmu;
4071 			phbbmu->hbb_doorbell = (struct HBB_DOORBELL *)mem_base[0];
4072 			phbbmu->hbb_rwbuffer = (struct HBB_RWBUFFER *)mem_base[1];
4073 		}
4074 		break;
4075 	case ACB_ADAPTER_TYPE_C: {
4076 			u_int32_t rid0 = PCIR_BAR(1);
4077 			vm_offset_t	mem_base0;
4078 
4079 			acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE);
4080 			if(acb->sys_res_arcmsr[0] == NULL) {
4081 				arcmsr_free_resource(acb);
4082 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
4083 				return ENOMEM;
4084 			}
4085 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
4086 				arcmsr_free_resource(acb);
4087 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
4088 				return ENXIO;
4089 			}
4090 			mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
4091 			if(mem_base0 == 0) {
4092 				arcmsr_free_resource(acb);
4093 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
4094 				return ENXIO;
4095 			}
4096 			acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
4097 			acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
4098 			acb->pmu = (struct MessageUnit_UNION *)mem_base0;
4099 		}
4100 		break;
4101 	case ACB_ADAPTER_TYPE_D: {
4102 			struct HBD_MessageUnit0 *phbdmu;
4103 			u_int32_t rid0 = PCIR_BAR(0);
4104 			vm_offset_t	mem_base0;
4105 
4106 			acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBD_MessageUnit), RF_ACTIVE);
4107 			if(acb->sys_res_arcmsr[0] == NULL) {
4108 				arcmsr_free_resource(acb);
4109 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
4110 				return ENOMEM;
4111 			}
4112 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
4113 				arcmsr_free_resource(acb);
4114 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
4115 				return ENXIO;
4116 			}
4117 			mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
4118 			if(mem_base0 == 0) {
4119 				arcmsr_free_resource(acb);
4120 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
4121 				return ENXIO;
4122 			}
4123 			acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
4124 			acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
4125 			acb->pmu = (struct MessageUnit_UNION *)((unsigned long)acb->uncacheptr+ARCMSR_SRBS_POOL_SIZE);
4126 			phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
4127 			phbdmu->phbdmu = (struct HBD_MessageUnit *)mem_base0;
4128 		}
4129 		break;
4130 	}
4131 	if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) {
4132 		arcmsr_free_resource(acb);
4133 		kprintf("arcmsr%d: map free srb failure!\n", device_get_unit(dev));
4134 		return ENXIO;
4135 	}
4136 	acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
4137 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
4138 	/*
4139 	********************************************************************
4140 	** init raid volume state
4141 	********************************************************************
4142 	*/
4143 	for(i=0; i < ARCMSR_MAX_TARGETID; i++) {
4144 		for(j=0; j < ARCMSR_MAX_TARGETLUN; j++) {
4145 			acb->devstate[i][j] = ARECA_RAID_GONE;
4146 		}
4147 	}
4148 	arcmsr_iop_init(acb);
4149 	return(0);
4150 }
4151 /*
4152 ************************************************************************
4153 ************************************************************************
4154 */
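/*
** Device attach: set up locks and the hardware (arcmsr_initialize), hook up
** the MSI or legacy interrupt, register a CAM SIM sized to the firmware's
** reported queue depth, subscribe to async events, create the /dev/arcmsrN
** control node and kick off the periodic device-map poller.
*/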
4155 static int arcmsr_attach(device_t dev)
4156 {
4157 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
4158 	u_int32_t unit=device_get_unit(dev);
4159 	struct ccb_setasync csa;
4160 	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
4161 	struct resource	*irqres;
4162 	int	rid;
4163 	u_int irq_flags;
4164 
4165 	if(acb == NULL) {
4166 		kprintf("arcmsr%d: cannot allocate softc\n", unit);
4167 		return (ENOMEM);
4168 	}
4169 	arcmsr_mutex_init(acb);
4170 	acb->pci_dev = dev;
4171 	acb->pci_unit = unit;
4172 	if(arcmsr_initialize(dev)) {
4173 		kprintf("arcmsr%d: initialize failure!\n", unit);
4174 		arcmsr_mutex_destroy(acb);
4175 		return ENXIO;
4176 	}
4177 	/* After setting up the adapter, map our interrupt */
4178 	rid=0;
4179 	acb->irq_type = pci_alloc_1intr(dev, arcmsr_msi_enable, &rid,
4180 	    &irq_flags);
4181 	irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1,
4182 	    irq_flags);
4183 	if(irqres == NULL ||
4184 		bus_setup_intr(dev, irqres, INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih, NULL)) {
4185 		arcmsr_free_resource(acb);
4186 		arcmsr_mutex_destroy(acb);
4187 		kprintf("arcmsr%d: unable to register interrupt handler!\n", unit);
4188 		return ENXIO;
4189 	}
4190 	acb->irqres = irqres;
4191 	/*
4192 	 * Now let the CAM generic SCSI layer find the SCSI devices on
4193 	 * the bus and start the queue to reset to the idle loop.
4194 	 * Create the device queue of the SIM(s); (MAX_START_JOB - 1)
4195 	 * bounds max_sim_transactions.
4196 	 */
4197 	devq = cam_simq_alloc(acb->maxOutstanding);
4198 	if(devq == NULL) {
4199 	    arcmsr_free_resource(acb);
4200 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
4201 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
4202 			pci_release_msi(dev);
4203 		arcmsr_mutex_destroy(acb);
4204 		kprintf("arcmsr%d: cam_simq_alloc failure!\n", unit);
4205 		return ENXIO;
4206 	}
4207 	acb->psim = cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->isr_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
4208 	cam_simq_release(devq);
4209 	if(acb->psim == NULL) {
4210 		arcmsr_free_resource(acb);
4211 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
4212 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
4213 			pci_release_msi(dev);
4214 		arcmsr_mutex_destroy(acb);
4215 		kprintf("arcmsr%d: cam_sim_alloc failure!\n", unit);
4216 		return ENXIO;
4217 	}
4218 	ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
4219 	if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
4220 		arcmsr_free_resource(acb);
4221 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
4222 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
4223 			pci_release_msi(dev);
4224 		cam_sim_free(acb->psim);
4225 		arcmsr_mutex_destroy(acb);
4226 		kprintf("arcmsr%d: xpt_bus_register failure!\n", unit);
4227 		return ENXIO;
4228 	}
4229 	if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
4230 		arcmsr_free_resource(acb);
4231 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
4232 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
4233 			pci_release_msi(dev);
4234 		xpt_bus_deregister(cam_sim_path(acb->psim));
4235 		cam_sim_free(acb->psim);
4236 		arcmsr_mutex_destroy(acb);
4237 		kprintf("arcmsr%d: xpt_create_path failure!\n", unit);
4238 		return ENXIO;
4239 	}
4240 	/*
4241 	****************************************************
4242 	*/
4243 	xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5);
4244 	csa.ccb_h.func_code = XPT_SASYNC_CB;
4245 	csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE;
4246 	csa.callback = arcmsr_async;
4247 	csa.callback_arg = acb->psim;
4248 	xpt_action((union ccb *)&csa);
4249 	ARCMSR_LOCK_RELEASE(&acb->isr_lock);
4250 	/* Create the control device.  */
4251 	acb->ioctl_dev = make_dev(&arcmsr_ops, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit);
4252 
4253 	acb->ioctl_dev->si_drv1 = acb;
4254 	(void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
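	/* Re-scan the device map every 60 seconds via arcmsr_polling_devmap. */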
4255 	arcmsr_callout_init(&acb->devmap_callout);
4256 	callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb);
4257 	return (0);
4258 }
4259 
4260 /*
4261 ************************************************************************
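** arcmsr_probe: matches the PCI vendor/device ID against the list of
**               supported Areca controllers and sets the device
**               description string on a match.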
4262 ************************************************************************
4263 */
4264 static int arcmsr_probe(device_t dev)
4265 {
4266 	u_int32_t id;
4267 	u_int16_t sub_device_id;
4268 	static char buf[256];
4269 	char x_type[]={"unknown"};
4270 	char *type;
4271 	int raid6 = 1;
4272 
4273 	if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
4274 		return (ENXIO);
4275 	}
4276 	sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
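	/* Classify the controller family from the PCI device ID. */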
4277 	switch(id = pci_get_devid(dev)) {
4278 	case PCIDevVenIDARC1110:
4279 	case PCIDevVenIDARC1200:
4280 	case PCIDevVenIDARC1201:
4281 	case PCIDevVenIDARC1210:
4282 		raid6 = 0;
4283 		/*FALLTHRU*/
4284 	case PCIDevVenIDARC1120:
4285 	case PCIDevVenIDARC1130:
4286 	case PCIDevVenIDARC1160:
4287 	case PCIDevVenIDARC1170:
4288 	case PCIDevVenIDARC1220:
4289 	case PCIDevVenIDARC1230:
4290 	case PCIDevVenIDARC1231:
4291 	case PCIDevVenIDARC1260:
4292 	case PCIDevVenIDARC1261:
4293 	case PCIDevVenIDARC1270:
4294 	case PCIDevVenIDARC1280:
4295 		type = "SATA 3G";
4296 		break;
4297 	case PCIDevVenIDARC1212:
4298 	case PCIDevVenIDARC1222:
4299 	case PCIDevVenIDARC1380:
4300 	case PCIDevVenIDARC1381:
4301 	case PCIDevVenIDARC1680:
4302 	case PCIDevVenIDARC1681:
4303 		type = "SAS 3G";
4304 		break;
4305 	case PCIDevVenIDARC1880:
4306 	case PCIDevVenIDARC1882:
4307 	case PCIDevVenIDARC1213:
4308 	case PCIDevVenIDARC1223:
4309 		if (sub_device_id == ARECA_SUB_DEV_ID_1883)
4310 			type = "SAS 12G";
4311 		else
4312 			type = "SAS 6G";
4313 		arcmsr_msi_enable = 0;
4314 		break;
4315 	case PCIDevVenIDARC1214:
4316 		type = "SATA 6G";
4317 		break;
4318 	default:
4319 		type = x_type;
4320 		raid6 = 0;
4321 		break;
4322 	}
4323 	if(type == x_type)
4324 		return(ENXIO);
4325 	ksprintf(buf, "Areca %s Host Adapter RAID Controller%s",
4326 		type, raid6 ? " (RAID6 capable)" : "");
4327 	device_set_desc_copy(dev, buf);
4328 	return (BUS_PROBE_DEFAULT);
4329 }
4330 /*
4331 ************************************************************************
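** arcmsr_shutdown: disables adapter interrupts, stops the background
**                  rebuild, flushes the adapter cache and aborts any
**                  SRBs still outstanding.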
4332 ************************************************************************
4333 */
4334 static int arcmsr_shutdown(device_t dev)
4335 {
4336 	u_int32_t  i;
4337 	struct CommandControlBlock *srb;
4338 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
4339 
4340 	/* stop adapter background rebuild */
4341 	ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
4342 	/* disable all outbound interrupts */
4343 	arcmsr_disable_allintr(acb);
4344 	arcmsr_stop_adapter_bgrb(acb);
4345 	arcmsr_flush_adapter_cache(acb);
4346 	/* abort all outstanding commands */
4347 	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
4348 	acb->acb_flags &= ~ACB_F_IOP_INITED;
4349 	if(acb->srboutstandingcount != 0) {
4350 		/* clear and abort all outbound posted queue entries */
4351 		arcmsr_done4abort_postqueue(acb);
4352 		/* tell the IOP 331 that any outstanding commands were aborted */
4353 		arcmsr_abort_allcmd(acb);
4354 		for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
4355 			srb = acb->psrb_pool[i];
4356 			if(srb->srb_state == ARCMSR_SRB_START) {
4357 				srb->srb_state = ARCMSR_SRB_ABORTED;
4358 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
4359 				arcmsr_srb_complete(srb, 1);
4360 			}
4361 		}
4362 	}
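	/* Reset the SRB accounting counters. */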
4363 	acb->srboutstandingcount = 0;
4364 	acb->workingsrb_doneindex = 0;
4365 	acb->workingsrb_startindex = 0;
4366 	acb->pktRequestCount = 0;
4367 	acb->pktReturnCount = 0;
4368 	ARCMSR_LOCK_RELEASE(&acb->isr_lock);
4369 	return (0);
4370 }
4371 /*
4372 ************************************************************************
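** arcmsr_detach: undoes arcmsr_attach; stops the devmap callout,
**                tears down the interrupt, releases bus resources and
**                deregisters the CAM bus.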
4373 ************************************************************************
4374 */
4375 static int arcmsr_detach(device_t dev)
4376 {
4377 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
4378 	int i;
4379 
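	/* Quiesce the adapter before releasing its resources. */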
4380 	callout_stop(&acb->devmap_callout);
4381 	bus_teardown_intr(dev, acb->irqres, acb->ih);
4382 	arcmsr_shutdown(dev);
4383 	arcmsr_free_resource(acb);
4384 	for(i=0; i<2 && (acb->sys_res_arcmsr[i]!=NULL); i++) {
4385 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]);
4386 	}
4387 	bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
4388 	if (acb->irq_type == PCI_INTR_TYPE_MSI)
4389 		pci_release_msi(dev);
4390 	ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
4391 	xpt_async(AC_LOST_DEVICE, acb->ppath, NULL);
4392 	xpt_free_path(acb->ppath);
4393 	xpt_bus_deregister(cam_sim_path(acb->psim));
4394 	cam_sim_free(acb->psim);
4395 	ARCMSR_LOCK_RELEASE(&acb->isr_lock);
4396 	arcmsr_mutex_destroy(acb);
4397 	return (0);
4398 }
4399 
4400 #ifdef ARCMSR_DEBUG1
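/* Debug helper: dump the SRB request/return/outstanding counters. */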
4401 static void arcmsr_dump_data(struct AdapterControlBlock *acb)
4402 {
4403 	if((acb->pktRequestCount - acb->pktReturnCount) == 0)
4404 		return;
4405 	kprintf("Command Request Count   =0x%x\n", acb->pktRequestCount);
4406 	kprintf("Command Return Count    =0x%x\n", acb->pktReturnCount);
4407 	kprintf("Command (Req-Rtn) Count =0x%x\n", (acb->pktRequestCount - acb->pktReturnCount));
4408 	kprintf("Queued Command Count    =0x%x\n", acb->srboutstandingcount);
4409 }
4410 #endif
4411