xref: /dragonfly/sys/dev/raid/arcmsr/arcmsr.c (revision 532828a0)
1 /*
2 *****************************************************************************************
3 **        O.S   : FreeBSD
4 **   FILE NAME  : arcmsr.c
5 **        BY    : Erich Chen, Ching Huang
6 **   Description: SCSI RAID Device Driver for
7 **                ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x) SATA/SAS RAID HOST Adapter
8 **                ARCMSR RAID Host adapter
9 **                [RAID controller:INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set]
10 ******************************************************************************************
11 ************************************************************************
12 **
13 ** Copyright (c) 2004-2010 ARECA Co. Ltd.
14 **        Erich Chen, Taipei Taiwan All rights reserved.
15 **
16 ** Redistribution and use in source and binary forms, with or without
17 ** modification, are permitted provided that the following conditions
18 ** are met:
19 ** 1. Redistributions of source code must retain the above copyright
20 **    notice, this list of conditions and the following disclaimer.
21 ** 2. Redistributions in binary form must reproduce the above copyright
22 **    notice, this list of conditions and the following disclaimer in the
23 **    documentation and/or other materials provided with the distribution.
24 ** 3. The name of the author may not be used to endorse or promote products
25 **    derived from this software without specific prior written permission.
26 **
27 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT
32 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
34 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
36 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 **************************************************************************
38 ** History
39 **
40 **        REV#         DATE             NAME             DESCRIPTION
41 **     1.00.00.00   03/31/2004      Erich Chen           First release
42 **     1.20.00.02   11/29/2004      Erich Chen           bug fix with arcmsr_bus_reset when PHY error
43 **     1.20.00.03   04/19/2005      Erich Chen           add SATA 24 Ports adapter type support
44 **                                                       clean unused function
45 **     1.20.00.12   09/12/2005      Erich Chen           bug fix with abort command handling,
46 **                                                       firmware version check
47 **                                                       and firmware update notify for hardware bug fix
48 **                                                       handling of non-zero high-part physical addresses
49 **                                                       of srb resources
50 **     1.20.00.13   08/18/2006      Erich Chen           remove pending srb and report busy
51 **                                                       add iop message xfer
52 **                                                       with scsi pass-through command
53 **                                                       add new device id of sas raid adapters
54 **                                                       code fit for SPARC64 & PPC
55 **     1.20.00.14   02/05/2007      Erich Chen           bug fix for incorrect ccb_h.status report
56 **                                                       and cause g_vfs_done() read write error
57 **     1.20.00.15   10/10/2007      Erich Chen           support new RAID adapter type ARC120x
58 **     1.20.00.16   10/10/2009      Erich Chen           Bug fix for RAID adapter type ARC120x
59 **                                                       bus_dmamem_alloc() with BUS_DMA_ZERO
60 **     1.20.00.17   07/15/2010      Ching Huang          Added support ARC1880
61 **                                                       report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed,
62 **                                                       prevent cam_periph_error removing all LUN devices of one Target id
63 **                                                       when any one LUN device failed
64 **     1.20.00.18   10/14/2010      Ching Huang          Fixed "inquiry data fails comparison at DV1 step"
65 **                  10/25/2010      Ching Huang          Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B
66 **     1.20.00.19   11/11/2010      Ching Huang          Fixed arcmsr driver prevent arcsas support for Areca SAS HBA ARC13x0
67 **     1.20.00.20   12/08/2010      Ching Huang          Avoid calling atomic_set_int function
68 **     1.20.00.21   02/08/2011      Ching Huang          Implement I/O request timeout
69 **                  02/14/2011      Ching Huang          Modified pktRequestCount
70 **     1.20.00.21   03/03/2011      Ching Huang          if a command times out, wait for its ccb to come back before freeing it
71 **     1.20.00.22   07/04/2011      Ching Huang          Fixed multiple MTX panic
72 **     1.20.00.23   10/28/2011      Ching Huang          Added TIMEOUT_DELAY in case of too many HDDs need to start
73 **     1.20.00.23   11/08/2011      Ching Huang          Added report device transfer speed
74 **     1.20.00.23   01/30/2012      Ching Huang          Fixed Request requeued and Retrying command
75 **     1.20.00.24   06/11/2012      Ching Huang          Fixed return sense data condition
76 **     1.20.00.25   08/17/2012      Ching Huang          Fixed hotplug device no function on type A adapter
77 ******************************************************************************************
78 * $FreeBSD: src/sys/dev/arcmsr/arcmsr.c,v 1.43 2012/09/04 05:15:54 delphij Exp $
79 */
80 #if 0
81 #define ARCMSR_DEBUG1			1
82 #endif
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/malloc.h>
86 #include <sys/kernel.h>
87 #include <sys/bus.h>
88 #include <sys/queue.h>
89 #include <sys/stat.h>
90 #include <sys/kthread.h>
91 #include <sys/module.h>
92 #include <sys/proc.h>
93 #include <sys/lock.h>
94 #include <sys/sysctl.h>
95 #include <sys/thread2.h>
96 #include <sys/poll.h>
97 #include <sys/device.h>
98 #include <vm/vm.h>
99 #include <vm/vm_param.h>
100 #include <vm/pmap.h>
101 
102 #include <machine/atomic.h>
103 #include <sys/conf.h>
104 #include <sys/rman.h>
105 
106 #include <bus/cam/cam.h>
107 #include <bus/cam/cam_ccb.h>
108 #include <bus/cam/cam_sim.h>
109 #include <bus/cam/cam_periph.h>
110 #include <bus/cam/cam_xpt_periph.h>
111 #include <bus/cam/cam_xpt_sim.h>
112 #include <bus/cam/cam_debug.h>
113 #include <bus/cam/scsi/scsi_all.h>
114 #include <bus/cam/scsi/scsi_message.h>
115 /*
116 **************************************************************************
117 **************************************************************************
118 */
119 #include <sys/endian.h>
120 #include <bus/pci/pcivar.h>
121 #include <bus/pci/pcireg.h>
122 #define ARCMSR_LOCK_INIT(l, s)	lockinit(l, s, 0, LK_CANRECURSE)
123 #define ARCMSR_LOCK_DESTROY(l)	lockuninit(l)
124 #define ARCMSR_LOCK_ACQUIRE(l)	lockmgr(l, LK_EXCLUSIVE)
125 #define ARCMSR_LOCK_RELEASE(l)	lockmgr(l, LK_RELEASE)
126 #define ARCMSR_LOCK_TRY(l)	lockmgr(&l, LK_EXCLUSIVE|LK_NOWAIT);
127 #define arcmsr_htole32(x)	htole32(x)
128 typedef struct lock		arcmsr_lock_t;
129 
130 #define arcmsr_callout_init(a)	callout_init_mp(a);
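/*
** The wrappers above map the driver's locking and callout primitives onto
** DragonFly's lockmgr(9) locks and MP-safe callouts.
*/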
131 
132 #define ARCMSR_DRIVER_VERSION			"Driver Version 1.20.00.25 2012-08-17"
133 #include <dev/raid/arcmsr/arcmsr.h>
134 #define	SRB_SIZE						((sizeof(struct CommandControlBlock)+0x1f) & 0xffe0)
135 #define ARCMSR_SRBS_POOL_SIZE           (SRB_SIZE * ARCMSR_MAX_FREESRB_NUM)
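/*
** SRB_SIZE rounds a CommandControlBlock up to a multiple of 32 bytes, so every
** SRB carved out of the DMA pool starts on a 32-byte boundary; the reply queue
** reuses the low address bits for flags (see arcmsr_drain_donequeue).
** ARCMSR_SRBS_POOL_SIZE is the total size of that pool.
*/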
136 /*
137 **************************************************************************
138 **************************************************************************
139 */
140 #define CHIP_REG_READ32(s, b, r)	bus_space_read_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r))
141 #define CHIP_REG_WRITE32(s, b, r, d)	bus_space_write_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r), d)
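/*
** Note: both register accessors expand to references to a local variable named
** 'acb', so they can only be used in functions where such a pointer is in scope.
*/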
142 /*
143 **************************************************************************
144 **************************************************************************
145 */
146 static void arcmsr_free_srb(struct CommandControlBlock *srb);
147 static struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb);
148 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb);
149 static int arcmsr_probe(device_t dev);
150 static int arcmsr_attach(device_t dev);
151 static int arcmsr_detach(device_t dev);
152 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
153 static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
154 static int arcmsr_shutdown(device_t dev);
155 static void arcmsr_interrupt(struct AdapterControlBlock *acb);
156 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
157 static void arcmsr_free_resource(struct AdapterControlBlock *acb);
158 static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
159 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
160 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
161 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
162 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
163 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb);
164 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
165 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
166 static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
167 static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
168 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t * dm_segs, u_int32_t nseg);
169 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb);
170 static int arcmsr_resume(device_t dev);
171 static int arcmsr_suspend(device_t dev);
172 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
173 static void	arcmsr_polling_devmap(void* arg);
174 static void	arcmsr_srb_timeout(void* arg);
175 #ifdef ARCMSR_DEBUG1
176 static void arcmsr_dump_data(struct AdapterControlBlock *acb);
177 #endif
178 /*
179 **************************************************************************
180 **************************************************************************
181 */
182 static void UDELAY(u_int32_t us) { DELAY(us); }
183 /*
184 **************************************************************************
185 **************************************************************************
186 */
187 static bus_dmamap_callback_t arcmsr_map_free_srb;
188 static bus_dmamap_callback_t arcmsr_execute_srb;
189 /*
190 **************************************************************************
191 **************************************************************************
192 */
193 static d_open_t	arcmsr_open;
194 static d_close_t arcmsr_close;
195 static d_ioctl_t arcmsr_ioctl;
196 
197 static device_method_t arcmsr_methods[]={
198 	DEVMETHOD(device_probe,		arcmsr_probe),
199 	DEVMETHOD(device_attach,	arcmsr_attach),
200 	DEVMETHOD(device_detach,	arcmsr_detach),
201 	DEVMETHOD(device_shutdown,	arcmsr_shutdown),
202 	DEVMETHOD(device_suspend,	arcmsr_suspend),
203 	DEVMETHOD(device_resume,	arcmsr_resume),
204 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
205 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
206 	DEVMETHOD_END
207 };
208 
209 static driver_t arcmsr_driver={
210 	"arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
211 };
212 
213 static devclass_t arcmsr_devclass;
214 DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, NULL, NULL);
215 MODULE_VERSION(arcmsr, 1);
216 MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
217 MODULE_DEPEND(arcmsr, cam, 1, 1, 1);
218 #ifndef BUS_DMA_COHERENT
219 	#define	BUS_DMA_COHERENT	0x04	/* hint: map memory in a coherent way */
220 #endif
221 
222 static struct dev_ops arcmsr_ops = {
223 	{ "arcmsr", 0, D_MPSAFE },
224 	.d_open =	arcmsr_open,		        /* open     */
225 	.d_close =	arcmsr_close,		        /* close    */
226 	.d_ioctl =	arcmsr_ioctl,		        /* ioctl    */
227 };
228 
229 static int	arcmsr_msi_enable = 1;
230 TUNABLE_INT("hw.arcmsr.msi.enable", &arcmsr_msi_enable);
231 
232 
233 /*
234 **************************************************************************
235 **************************************************************************
236 */
237 
238 static int
239 arcmsr_open(struct dev_open_args *ap)
240 {
241 	cdev_t dev = ap->a_head.a_dev;
242 	struct AdapterControlBlock *acb=dev->si_drv1;
243 
244 	if(acb==NULL) {
245 		return ENXIO;
246 	}
247 	return (0);
248 }
249 
250 /*
251 **************************************************************************
252 **************************************************************************
253 */
254 
255 static int
256 arcmsr_close(struct dev_close_args *ap)
257 {
258 	cdev_t dev = ap->a_head.a_dev;
259 	struct AdapterControlBlock *acb=dev->si_drv1;
260 
261 	if(acb==NULL) {
262 		return ENXIO;
263 	}
264 	return 0;
265 }
266 
267 /*
268 **************************************************************************
269 **************************************************************************
270 */
271 
272 static int
273 arcmsr_ioctl(struct dev_ioctl_args *ap)
274 {
275 	cdev_t dev = ap->a_head.a_dev;
276 	u_long ioctl_cmd = ap->a_cmd;
277 	caddr_t arg = ap->a_data;
278 	struct AdapterControlBlock *acb=dev->si_drv1;
279 
280 	if(acb==NULL) {
281 		return ENXIO;
282 	}
283 	return (arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
284 }
285 
286 /*
287 **********************************************************************
288 **********************************************************************
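** arcmsr_disable_allintr: mask all outbound interrupts for this adapter type
** and return the previous mask so the caller can restore it later with
** arcmsr_enable_allintr().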
289 */
290 static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb)
291 {
292 	u_int32_t intmask_org=0;
293 
294 	switch (acb->adapter_type) {
295 	case ACB_ADAPTER_TYPE_A: {
296 			/* disable all outbound interrupt */
297 			intmask_org=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */
298 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
299 		}
300 		break;
301 	case ACB_ADAPTER_TYPE_B: {
302 			/* disable all outbound interrupt */
303 			intmask_org=CHIP_REG_READ32(HBB_DOORBELL,
304 			0, iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */
305 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, 0); /* disable all interrupt */
306 		}
307 		break;
308 	case ACB_ADAPTER_TYPE_C: {
309 			/* disable all outbound interrupt */
310 			intmask_org=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask)	; /* disable outbound message0 int */
311 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
312 		}
313 		break;
314 	}
315 	return (intmask_org);
316 }
317 /*
318 **********************************************************************
319 **********************************************************************
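** arcmsr_enable_allintr: restore the interrupt mask saved by
** arcmsr_disable_allintr(), re-enabling the post queue, doorbell and message
** interrupts.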
320 */
321 static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org)
322 {
323 	u_int32_t mask;
324 
325 	switch (acb->adapter_type) {
326 	case ACB_ADAPTER_TYPE_A: {
327 			/* enable outbound Post Queue, outbound doorbell Interrupt */
328 			mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
329 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask);
330 			acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
331 		}
332 		break;
333 	case ACB_ADAPTER_TYPE_B: {
334 			/* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
335 			mask=(ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
336 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/
337 			acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
338 		}
339 		break;
340 	case ACB_ADAPTER_TYPE_C: {
341 			/* enable outbound Post Queue, outbound doorbell Interrupt */
342 			mask=~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
343 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask);
344 			acb->outbound_int_enable= ~(intmask_org & mask) & 0x0000000f;
345 		}
346 		break;
347 	}
348 }
349 /*
350 **********************************************************************
351 **********************************************************************
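** arcmsr_hba_wait_msgint_ready: poll the type A outbound interrupt status for
** the message0 completion bit and clear it; polls in 10ms steps and gives up
** after roughly 20 seconds. The HBB/HBC variants below do the same against
** their own doorbell registers.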
352 */
353 static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
354 {
355 	u_int32_t Index;
356 	u_int8_t Retries=0x00;
357 
358 	do {
359 		for(Index=0; Index < 100; Index++) {
360 			if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
361 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/
362 				return TRUE;
363 			}
364 			UDELAY(10000);
365 		}/* max 1 second */
366 	}while(Retries++ < 20);/* max 20 seconds */
367 	return (FALSE);
368 }
369 /*
370 **********************************************************************
371 **********************************************************************
372 */
373 static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
374 {
375 	u_int32_t Index;
376 	u_int8_t Retries=0x00;
377 
378 	do {
379 		for(Index=0; Index < 100; Index++) {
380 			if(CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
381 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/
382 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
383 				return TRUE;
384 			}
385 			UDELAY(10000);
386 		}/* max 1 second */
387 	}while(Retries++ < 20);/* max 20 seconds */
388 	return (FALSE);
389 }
390 /*
391 **********************************************************************
392 **********************************************************************
393 */
394 static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb)
395 {
396 	u_int32_t Index;
397 	u_int8_t Retries=0x00;
398 
399 	do {
400 		for(Index=0; Index < 100; Index++) {
401 			if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
402 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/
403 				return TRUE;
404 			}
405 			UDELAY(10000);
406 		}/* max 1 second */
407 	}while(Retries++ < 20);/* max 20 seconds */
408 	return (FALSE);
409 }
410 /*
411 ************************************************************************
412 ************************************************************************
413 */
414 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
415 {
416 	int retry_count=30;/* allow up to 10 minutes for the adapter to flush its cache */
417 
418 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
419 	do {
420 		if(arcmsr_hba_wait_msgint_ready(acb)) {
421 			break;
422 		} else {
423 			retry_count--;
424 		}
425 	}while(retry_count!=0);
426 }
427 /*
428 ************************************************************************
429 ************************************************************************
430 */
431 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
432 {
433 	int retry_count=30;/* allow up to 10 minutes for the adapter to flush its cache */
434 
435 	CHIP_REG_WRITE32(HBB_DOORBELL,
436 	0, drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
437 	do {
438 		if(arcmsr_hbb_wait_msgint_ready(acb)) {
439 			break;
440 		} else {
441 			retry_count--;
442 		}
443 	}while(retry_count!=0);
444 }
445 /*
446 ************************************************************************
447 ************************************************************************
448 */
449 static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
450 {
451 	int retry_count=30;/* allow up to 10 minutes for the adapter to flush its cache */
452 
453 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
454 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
455 	do {
456 		if(arcmsr_hbc_wait_msgint_ready(acb)) {
457 			break;
458 		} else {
459 			retry_count--;
460 		}
461 	}while(retry_count!=0);
462 }
463 /*
464 ************************************************************************
465 ************************************************************************
466 */
467 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
468 {
469 	switch (acb->adapter_type) {
470 	case ACB_ADAPTER_TYPE_A: {
471 			arcmsr_flush_hba_cache(acb);
472 		}
473 		break;
474 	case ACB_ADAPTER_TYPE_B: {
475 			arcmsr_flush_hbb_cache(acb);
476 		}
477 		break;
478 	case ACB_ADAPTER_TYPE_C: {
479 			arcmsr_flush_hbc_cache(acb);
480 		}
481 		break;
482 	}
483 }
484 /*
485 *******************************************************************************
486 *******************************************************************************
487 */
488 static int arcmsr_suspend(device_t dev)
489 {
490 	struct AdapterControlBlock	*acb = device_get_softc(dev);
491 
492 	/* flush controller */
493 	arcmsr_iop_parking(acb);
494 	/* disable all outbound interrupt */
495 	arcmsr_disable_allintr(acb);
496 	return(0);
497 }
498 /*
499 *******************************************************************************
500 *******************************************************************************
501 */
502 static int arcmsr_resume(device_t dev)
503 {
504 	struct AdapterControlBlock	*acb = device_get_softc(dev);
505 
506 	arcmsr_iop_init(acb);
507 	return(0);
508 }
509 /*
510 *********************************************************************************
511 *********************************************************************************
512 */
513 static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg)
514 {
515 }
516 /*
517 **********************************************************************
518 **********************************************************************
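** arcmsr_srb_complete: sync and unload the data DMA map, drop the outstanding
** count (releasing the frozen SIM queue when it falls low enough), free the
** SRB unless it is still marked as timed out, and hand the CCB back to CAM.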
519 */
520 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
521 {
522 	struct AdapterControlBlock *acb=srb->acb;
523 	union ccb * pccb=srb->pccb;
524 
525 	if(srb->srb_flags & SRB_FLAG_TIMER_START)
526 		callout_stop(&srb->ccb_callout);
527 	if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
528 		bus_dmasync_op_t op;
529 
530 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
531 			op = BUS_DMASYNC_POSTREAD;
532 		} else {
533 			op = BUS_DMASYNC_POSTWRITE;
534 		}
535 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
536 		bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
537 	}
538 	if(stand_flag==1) {
539 		atomic_subtract_int(&acb->srboutstandingcount, 1);
540 		if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && (
541 		acb->srboutstandingcount < ARCMSR_RELEASE_SIMQ_LEVEL)) {
542 			acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN;
543 			pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
544 		}
545 	}
546 	if(srb->srb_state != ARCMSR_SRB_TIMEOUT)
547 		arcmsr_free_srb(srb);
548 #ifdef ARCMSR_DEBUG1
549 	acb->pktReturnCount++;
550 #endif
551 	xpt_done(pccb);
552 	return;
553 }
554 /*
555 **********************************************************************
556 **********************************************************************
557 */
558 static void arcmsr_report_sense_info(struct CommandControlBlock *srb)
559 {
560 	union ccb * pccb=srb->pccb;
561 
562 	pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
563 	pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
564 	if(pccb->csio.sense_len) {
565 		memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data));
566 		memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData,
567 		get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data)));
568 		((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */
569 		pccb->ccb_h.status |= CAM_AUTOSNS_VALID;
570 	}
571 }
572 /*
573 *********************************************************************
574 *********************************************************************
575 */
576 static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
577 {
578 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
579 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
580 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
581 	}
582 }
583 /*
584 *********************************************************************
585 *********************************************************************
586 */
587 static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
588 {
589 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
590 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
591 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
592 	}
593 }
594 /*
595 *********************************************************************
596 *********************************************************************
597 */
598 static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb)
599 {
600 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
601 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
602 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
603 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
604 	}
605 }
606 /*
607 *********************************************************************
608 *********************************************************************
609 */
610 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
611 {
612 	switch (acb->adapter_type) {
613 	case ACB_ADAPTER_TYPE_A: {
614 			arcmsr_abort_hba_allcmd(acb);
615 		}
616 		break;
617 	case ACB_ADAPTER_TYPE_B: {
618 			arcmsr_abort_hbb_allcmd(acb);
619 		}
620 		break;
621 	case ACB_ADAPTER_TYPE_C: {
622 			arcmsr_abort_hbc_allcmd(acb);
623 		}
624 		break;
625 	}
626 }
627 /*
628 **************************************************************************
629 **************************************************************************
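** arcmsr_report_srb_state: translate the firmware's DeviceStatus for a
** completed request into a CAM status, update the per-target/LUN device
** state, and complete the SRB.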
630 */
631 static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error)
632 {
633 	int target, lun;
634 
635 	target=srb->pccb->ccb_h.target_id;
636 	lun=srb->pccb->ccb_h.target_lun;
637 	if(error == FALSE) {
638 		if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
639 			acb->devstate[target][lun]=ARECA_RAID_GOOD;
640 		}
641 		srb->pccb->ccb_h.status |= CAM_REQ_CMP;
642 		arcmsr_srb_complete(srb, 1);
643 	} else {
644 		switch(srb->arcmsr_cdb.DeviceStatus) {
645 		case ARCMSR_DEV_SELECT_TIMEOUT: {
646 				if(acb->devstate[target][lun]==ARECA_RAID_GOOD) {
647 					kprintf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun);
648 				}
649 				acb->devstate[target][lun]=ARECA_RAID_GONE;
650 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
651 				arcmsr_srb_complete(srb, 1);
652 			}
653 			break;
654 		case ARCMSR_DEV_ABORTED:
655 		case ARCMSR_DEV_INIT_FAIL: {
656 				acb->devstate[target][lun]=ARECA_RAID_GONE;
657 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
658 				arcmsr_srb_complete(srb, 1);
659 			}
660 			break;
661 		case SCSISTAT_CHECK_CONDITION: {
662 				acb->devstate[target][lun]=ARECA_RAID_GOOD;
663 				arcmsr_report_sense_info(srb);
664 				arcmsr_srb_complete(srb, 1);
665 			}
666 			break;
667 		default:
668 			kprintf("arcmsr%d: scsi id=%d lun=%d isr got command error done, but got unknown DeviceStatus=0x%x\n"
669 					, acb->pci_unit, target, lun ,srb->arcmsr_cdb.DeviceStatus);
670 			acb->devstate[target][lun]=ARECA_RAID_GONE;
671 			srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
672 			/* unknown error or CRC error; complete it so the request can be retried */
673 			arcmsr_srb_complete(srb, 1);
674 			break;
675 		}
676 	}
677 }
678 /*
679 **************************************************************************
680 **************************************************************************
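** arcmsr_drain_donequeue: convert a reply-queue token back into the SRB
** virtual address (per-type decoding below) and report the completion status
** of that command.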
681 */
682 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error)
683 {
684 	struct CommandControlBlock *srb;
685 
686 	/* check if command done with no error*/
687 	switch (acb->adapter_type) {
688 	case ACB_ADAPTER_TYPE_C:
689 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/* type C replies carry the SRB physical address with status flags in the low 5 bits; frames are 32-byte aligned */
690 		break;
691 	case ACB_ADAPTER_TYPE_A:
692 	case ACB_ADAPTER_TYPE_B:
693 	default:
694 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/* type A/B replies carry the SRB physical address shifted right by 5; frames are 32-byte aligned */
695 		break;
696 	}
697 	if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
698 		if(srb->srb_state == ARCMSR_SRB_TIMEOUT) {
699 			arcmsr_free_srb(srb);
700 			kprintf("arcmsr%d: srb='%p' returned srb has already timed out\n", acb->pci_unit, srb);
701 			return;
702 		}
703 		kprintf("arcmsr%d: return srb has been completed\n"
704 			"srb='%p' srb_state=0x%x outstanding srb count=%d \n",
705 			acb->pci_unit, srb, srb->srb_state, acb->srboutstandingcount);
706 		return;
707 	}
708 	arcmsr_report_srb_state(acb, srb, error);
709 }
710 /*
711 **************************************************************************
712 **************************************************************************
713 */
714 static void	arcmsr_srb_timeout(void* arg)
715 {
716 	struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
717 	struct AdapterControlBlock *acb;
718 	int target, lun;
719 	u_int8_t cmd;
720 
721 	target=srb->pccb->ccb_h.target_id;
722 	lun=srb->pccb->ccb_h.target_lun;
723 	acb = srb->acb;
724 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
725 	if(srb->srb_state == ARCMSR_SRB_START)
726 	{
727 		cmd = srb->pccb->csio.cdb_io.cdb_bytes[0];
728 		srb->srb_state = ARCMSR_SRB_TIMEOUT;
729 		srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT;
730 		arcmsr_srb_complete(srb, 1);
731 		kprintf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command time out!\n",
732 				 acb->pci_unit, target, lun, cmd, srb);
733 	}
734 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
735 #ifdef ARCMSR_DEBUG1
736 	arcmsr_dump_data(acb);
737 #endif
738 }
739 
740 /*
741 **********************************************************************
742 **********************************************************************
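** arcmsr_done4abort_postqueue: while a reset/abort is in progress, drain
** whatever is still sitting in each adapter type's outbound (reply) queue and
** complete those commands.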
743 */
744 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
745 {
746 	int i=0;
747 	u_int32_t flag_srb;
748 	u_int16_t error;
749 
750 	switch (acb->adapter_type) {
751 	case ACB_ADAPTER_TYPE_A: {
752 			u_int32_t outbound_intstatus;
753 
754 			/*clear and abort all outbound posted Q*/
755 			outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
756 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
757 			while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
758 				error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
759 				arcmsr_drain_donequeue(acb, flag_srb, error);
760 			}
761 		}
762 		break;
763 	case ACB_ADAPTER_TYPE_B: {
764 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
765 
766 			/*clear all outbound posted Q*/
767 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
768 			for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
769 				if((flag_srb=phbbmu->done_qbuffer[i])!=0) {
770 					phbbmu->done_qbuffer[i]=0;
771 					error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
772 					arcmsr_drain_donequeue(acb, flag_srb, error);
773 				}
774 				phbbmu->post_qbuffer[i]=0;
775 			}/*drain reply FIFO*/
776 			phbbmu->doneq_index=0;
777 			phbbmu->postq_index=0;
778 		}
779 		break;
780 	case ACB_ADAPTER_TYPE_C: {
781 
782 			while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
783 				flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
784 				error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
785 				arcmsr_drain_donequeue(acb, flag_srb, error);
786 			}
787 		}
788 		break;
789 	}
790 }
791 /*
792 ****************************************************************************
793 ****************************************************************************
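** arcmsr_iop_reset: with interrupts masked, drain the reply queue, ask the
** IOP to abort its outstanding commands, complete every SRB still marked as
** started, then reset the SRB bookkeeping counters.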
794 */
795 static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
796 {
797 	struct CommandControlBlock *srb;
798 	u_int32_t intmask_org;
799 	u_int32_t i=0;
800 
801 	if(acb->srboutstandingcount>0) {
802 		/* disable all outbound interrupt */
803 		intmask_org=arcmsr_disable_allintr(acb);
804 		/*clear and abort all outbound posted Q*/
805 		arcmsr_done4abort_postqueue(acb);
806 		/* talk to iop 331 outstanding command aborted*/
807 		arcmsr_abort_allcmd(acb);
808 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
809 			srb=acb->psrb_pool[i];
810 			if(srb->srb_state==ARCMSR_SRB_START) {
811 				srb->srb_state=ARCMSR_SRB_ABORTED;
812 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
813 				arcmsr_srb_complete(srb, 1);
814 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p' aborted\n"
815 						, acb->pci_unit, srb->pccb->ccb_h.target_id
816 						, srb->pccb->ccb_h.target_lun, srb);
817 			}
818 		}
819 		/* enable all outbound interrupt */
820 		arcmsr_enable_allintr(acb, intmask_org);
821 	}
822 	acb->srboutstandingcount=0;
823 	acb->workingsrb_doneindex=0;
824 	acb->workingsrb_startindex=0;
825 #ifdef ARCMSR_DEBUG1
826 	acb->pktRequestCount = 0;
827 	acb->pktReturnCount = 0;
828 #endif
829 }
830 /*
831 **********************************************************************
832 **********************************************************************
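** arcmsr_build_srb: translate the CAM CCB into an Areca CDB frame, building a
** 32-bit or 64-bit scatter/gather list and splitting any segment that crosses
** a 4GB boundary.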
833 */
834 static void arcmsr_build_srb(struct CommandControlBlock *srb,
835 		bus_dma_segment_t *dm_segs, u_int32_t nseg)
836 {
837 	struct ARCMSR_CDB * arcmsr_cdb= &srb->arcmsr_cdb;
838 	u_int8_t * psge=(u_int8_t *)&arcmsr_cdb->u;
839 	u_int32_t address_lo, address_hi;
840 	union ccb * pccb=srb->pccb;
841 	struct ccb_scsiio * pcsio= &pccb->csio;
842 	u_int32_t arccdbsize=0x30;
843 
844 	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
845 	arcmsr_cdb->Bus=0;
846 	arcmsr_cdb->TargetID=pccb->ccb_h.target_id;
847 	arcmsr_cdb->LUN=pccb->ccb_h.target_lun;
848 	arcmsr_cdb->Function=1;
849 	arcmsr_cdb->CdbLength=(u_int8_t)pcsio->cdb_len;
850 	arcmsr_cdb->Context=0;
851 	bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
852 	if(nseg != 0) {
853 		struct AdapterControlBlock *acb=srb->acb;
854 		bus_dmasync_op_t op;
855 		u_int32_t length, i, cdb_sgcount=0;
856 
857 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
858 			op=BUS_DMASYNC_PREREAD;
859 		} else {
860 			op=BUS_DMASYNC_PREWRITE;
861 			arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_WRITE;
862 			srb->srb_flags|=SRB_FLAG_WRITE;
863 		}
864 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
865 		for(i=0;i<nseg;i++) {
866 			/* Get the physical address of the current data pointer */
867 			length=arcmsr_htole32(dm_segs[i].ds_len);
868 			address_lo=arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
869 			address_hi=arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
870 			if(address_hi==0) {
871 				struct SG32ENTRY * pdma_sg=(struct SG32ENTRY *)psge;
872 				pdma_sg->address=address_lo;
873 				pdma_sg->length=length;
874 				psge += sizeof(struct SG32ENTRY);
875 				arccdbsize += sizeof(struct SG32ENTRY);
876 			} else {
877 				u_int32_t sg64s_size=0, tmplength=length;
878 
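				/*
				** This segment has a non-zero high address, so SG64 entries
				** are used; if the segment also crosses a 4GB boundary it is
				** split so the first entry ends exactly at the boundary and
				** the remainder continues with the high address incremented.
				*/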
879 				while(1) {
880 					u_int64_t span4G, length0;
881 					struct SG64ENTRY * pdma_sg=(struct SG64ENTRY *)psge;
882 
883 					span4G=(u_int64_t)address_lo + tmplength;
884 					pdma_sg->addresshigh=address_hi;
885 					pdma_sg->address=address_lo;
886 					if(span4G > 0x100000000) {
887 						/*see if cross 4G boundary*/
888 						length0=0x100000000-address_lo;
889 						pdma_sg->length=(u_int32_t)length0|IS_SG64_ADDR;
890 						address_hi=address_hi+1;
891 						address_lo=0;
892 						tmplength=tmplength-(u_int32_t)length0;
893 						sg64s_size += sizeof(struct SG64ENTRY);
894 						psge += sizeof(struct SG64ENTRY);
895 						cdb_sgcount++;
896 					} else {
897 						pdma_sg->length=tmplength|IS_SG64_ADDR;
898 						sg64s_size += sizeof(struct SG64ENTRY);
899 						psge += sizeof(struct SG64ENTRY);
900 						break;
901 					}
902 				}
903 				arccdbsize += sg64s_size;
904 			}
905 			cdb_sgcount++;
906 		}
907 		arcmsr_cdb->sgcount=(u_int8_t)cdb_sgcount;
908 		arcmsr_cdb->DataLength=pcsio->dxfer_len;
909 		if( arccdbsize > 256) {
910 			arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_SGL_BSIZE;
911 		}
912 	} else {
913 		arcmsr_cdb->DataLength = 0;
914 	}
915 	srb->arc_cdb_size=arccdbsize;
916 }
917 /*
918 **************************************************************************
919 **************************************************************************
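** arcmsr_post_srb: hand a built SRB to the controller. Type A writes the
** shifted SRB address to its inbound queue port, type B appends it to the
** post_qbuffer ring and rings the doorbell, and type C writes the address
** (plus size bits) to the inbound queue port registers.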
920 */
921 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
922 {
923 	u_int32_t cdb_shifted_phyaddr=(u_int32_t) srb->cdb_shifted_phyaddr;
924 	struct ARCMSR_CDB * arcmsr_cdb=(struct ARCMSR_CDB *)&srb->arcmsr_cdb;
925 
926 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD);
927 	atomic_add_int(&acb->srboutstandingcount, 1);
928 	srb->srb_state=ARCMSR_SRB_START;
929 
930 	switch (acb->adapter_type) {
931 	case ACB_ADAPTER_TYPE_A: {
932 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
933 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
934 			} else {
935 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr);
936 			}
937 		}
938 		break;
939 	case ACB_ADAPTER_TYPE_B: {
940 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
941 			int ending_index, index;
942 
943 			index=phbbmu->postq_index;
944 			ending_index=((index+1)%ARCMSR_MAX_HBB_POSTQUEUE);
945 			phbbmu->post_qbuffer[ending_index]=0;
946 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
947 				phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE;
948 			} else {
949 				phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr;
950 			}
951 			index++;
952 			index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
953 			phbbmu->postq_index=index;
954 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
955 		}
956 		break;
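	/*
	** Type C posts the 32-byte-aligned SRB physical address with
	** ((arc_cdb_size-1) >> 6) and bit 0 packed into the low bits; the high
	** 32 bits of the address, when non-zero, go to inbound_queueport_high.
	*/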
957 	case ACB_ADAPTER_TYPE_C:
958 		{
959 			u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;
960 
961 			arc_cdb_size=(srb->arc_cdb_size>0x300)?0x300:srb->arc_cdb_size;
962 			ccb_post_stamp=(cdb_shifted_phyaddr | ((arc_cdb_size-1) >> 6) | 1);
963 			cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
964 			if(cdb_phyaddr_hi32)
965 			{
966 				CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32);
967 				CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
968 			}
969 			else
970 			{
971 				CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
972 			}
973 		}
974 		break;
975 	}
976 }
977 /*
978 ************************************************************************
979 ************************************************************************
980 */
981 static struct QBUFFER * arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb)
982 {
983 	struct QBUFFER *qbuffer=NULL;
984 
985 	switch (acb->adapter_type) {
986 	case ACB_ADAPTER_TYPE_A: {
987 			struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
988 
989 			qbuffer=(struct QBUFFER *)&phbamu->message_rbuffer;
990 		}
991 		break;
992 	case ACB_ADAPTER_TYPE_B: {
993 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
994 
995 			qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
996 		}
997 		break;
998 	case ACB_ADAPTER_TYPE_C: {
999 			struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
1000 
1001 			qbuffer=(struct QBUFFER *)&phbcmu->message_rbuffer;
1002 		}
1003 		break;
1004 	}
1005 	return(qbuffer);
1006 }
1007 /*
1008 ************************************************************************
1009 ************************************************************************
1010 */
1011 static struct QBUFFER * arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb)
1012 {
1013 	struct QBUFFER *qbuffer=NULL;
1014 
1015 	switch (acb->adapter_type) {
1016 	case ACB_ADAPTER_TYPE_A: {
1017 			struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
1018 
1019 			qbuffer=(struct QBUFFER *)&phbamu->message_wbuffer;
1020 		}
1021 		break;
1022 	case ACB_ADAPTER_TYPE_B: {
1023 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1024 
1025 			qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
1026 		}
1027 		break;
1028 	case ACB_ADAPTER_TYPE_C: {
1029 			struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
1030 
1031 			qbuffer=(struct QBUFFER *)&phbcmu->message_wbuffer;
1032 		}
1033 		break;
1034 	}
1035 	return(qbuffer);
1036 }
1037 /*
1038 **************************************************************************
1039 **************************************************************************
1040 */
1041 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1042 {
1043 	switch (acb->adapter_type) {
1044 	case ACB_ADAPTER_TYPE_A: {
1045 			/* let IOP know data has been read */
1046 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
1047 		}
1048 		break;
1049 	case ACB_ADAPTER_TYPE_B: {
1050 			/* let IOP know data has been read */
1051 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
1052 		}
1053 		break;
1054 	case ACB_ADAPTER_TYPE_C: {
1055 			/* let IOP know data has been read */
1056 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
1057 		}
1058 	}
1059 }
1060 /*
1061 **************************************************************************
1062 **************************************************************************
1063 */
1064 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1065 {
1066 	switch (acb->adapter_type) {
1067 	case ACB_ADAPTER_TYPE_A: {
1068 			/*
1069 			** push inbound doorbell tell iop, driver data write ok
1070 			** and wait reply on next hwinterrupt for next Qbuffer post
1071 			*/
1072 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
1073 		}
1074 		break;
1075 	case ACB_ADAPTER_TYPE_B: {
1076 			/*
1077 			** push inbound doorbell tell iop, driver data write ok
1078 			** and wait reply on next hwinterrupt for next Qbuffer post
1079 			*/
1080 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
1081 		}
1082 		break;
1083 	case ACB_ADAPTER_TYPE_C: {
1084 			/*
1085 			** push inbound doorbell tell iop, driver data write ok
1086 			** and wait reply on next hwinterrupt for next Qbuffer post
1087 			*/
1088 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
1089 		}
1090 		break;
1091 	}
1092 }
1093 /*
1094 **********************************************************************
1095 **********************************************************************
1096 */
1097 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
1098 {
1099 	u_int8_t *pQbuffer;
1100 	struct QBUFFER *pwbuffer;
1101 	u_int8_t * iop_data;
1102 	int32_t allxfer_len=0;
1103 
1104 	pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1105 	iop_data=(u_int8_t *)pwbuffer->data;
1106 	if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
1107 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1108 		while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1109 			&& (allxfer_len<124)) {
1110 			pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1111 			memcpy(iop_data, pQbuffer, 1);
1112 			acb->wqbuf_firstindex++;
1113 			acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */
1114 			iop_data++;
1115 			allxfer_len++;
1116 		}
1117 		pwbuffer->data_len=allxfer_len;
1118 		/*
1119 		** push inbound doorbell and wait reply at hwinterrupt routine for next Qbuffer post
1120 		*/
1121 		arcmsr_iop_message_wrote(acb);
1122 	}
1123 }
1124 /*
1125 ************************************************************************
1126 ************************************************************************
1127 */
1128 static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
1129 {
1130 	acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1131 	CHIP_REG_WRITE32(HBA_MessageUnit,
1132 	0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1133 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
1134 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1135 			, acb->pci_unit);
1136 	}
1137 	return;
1138 }
1139 /*
1140 ************************************************************************
1141 ************************************************************************
1142 */
1143 static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
1144 {
1145 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1146 	CHIP_REG_WRITE32(HBB_DOORBELL,
1147 	0, drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
1148 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
1149 		kprintf( "arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1150 			, acb->pci_unit);
1151 	}
1152 }
1153 /*
1154 ************************************************************************
1155 ************************************************************************
1156 */
1157 static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb)
1158 {
1159 	acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1160 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1161 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1162 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
1163 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
1164 	}
1165 }
1166 /*
1167 ************************************************************************
1168 ************************************************************************
1169 */
1170 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1171 {
1172 	switch (acb->adapter_type) {
1173 	case ACB_ADAPTER_TYPE_A: {
1174 			arcmsr_stop_hba_bgrb(acb);
1175 		}
1176 		break;
1177 	case ACB_ADAPTER_TYPE_B: {
1178 			arcmsr_stop_hbb_bgrb(acb);
1179 		}
1180 		break;
1181 	case ACB_ADAPTER_TYPE_C: {
1182 			arcmsr_stop_hbc_bgrb(acb);
1183 		}
1184 		break;
1185 	}
1186 }
1187 /*
1188 ************************************************************************
1189 ************************************************************************
1190 */
1191 static void arcmsr_poll(struct cam_sim * psim)
1192 {
1193 	struct AdapterControlBlock *acb;
1194 	int	mutex;
1195 
1196 	acb = (struct AdapterControlBlock *)cam_sim_softc(psim);
1197 	mutex = lockstatus(&acb->qbuffer_lock, curthread);
1198 	if( mutex == 0 )
1199 		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1200 	arcmsr_interrupt(acb);
1201 	if( mutex == 0 )
1202 		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1203 }
1204 /*
1205 **************************************************************************
1206 **************************************************************************
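** arcmsr_iop2drv_data_wrote_handle: copy message data the IOP just posted
** into the driver's rqbuffer ring, or set ACB_F_IOPDATA_OVERFLOW if there is
** not enough room for it.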
1207 */
1208 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1209 {
1210 	struct QBUFFER *prbuffer;
1211 	u_int8_t *pQbuffer;
1212 	u_int8_t *iop_data;
1213 	int my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
1214 
1215 	/* check whether this IOP data would overflow my rqbuffer */
1216 	rqbuf_lastindex=acb->rqbuf_lastindex;
1217 	rqbuf_firstindex=acb->rqbuf_firstindex;
1218 	prbuffer=arcmsr_get_iop_rqbuffer(acb);
1219 	iop_data=(u_int8_t *)prbuffer->data;
1220 	iop_len=prbuffer->data_len;
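	/*
	** my_empty_len is the free space left in the rqbuffer ring; one slot is
	** kept empty so a full ring can be told apart from an empty one
	** (ARCMSR_MAX_QBUFFER is a power of two, so the mask does the wrap).
	*/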
1221 	my_empty_len=(rqbuf_firstindex-rqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1222 	if(my_empty_len>=iop_len) {
1223 		while(iop_len > 0) {
1224 			pQbuffer=&acb->rqbuffer[rqbuf_lastindex];
1225 			memcpy(pQbuffer, iop_data, 1);
1226 			rqbuf_lastindex++;
1227 			rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;/*if last index number set it to 0 */
1228 			iop_data++;
1229 			iop_len--;
1230 		}
1231 		acb->rqbuf_lastindex=rqbuf_lastindex;
1232 		arcmsr_iop_message_read(acb);
1233 		/*signature, let IOP know data has been read */
1234 	} else {
1235 		acb->acb_flags|=ACB_F_IOPDATA_OVERFLOW;
1236 	}
1237 }
1238 /*
1239 **************************************************************************
1240 **************************************************************************
1241 */
1242 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
1243 {
1244 	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
1245 	/*
1246 	*****************************************************************
1247 	**   check if there are any mail packages from the user-space program
1248 	**   in my post bag; now is the time to send them to Areca's firmware
1249 	*****************************************************************
1250 	*/
1251 	if(acb->wqbuf_firstindex!=acb->wqbuf_lastindex) {
1252 		u_int8_t *pQbuffer;
1253 		struct QBUFFER *pwbuffer;
1254 		u_int8_t *iop_data;
1255 		int allxfer_len=0;
1256 
1257 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1258 		pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1259 		iop_data=(u_int8_t *)pwbuffer->data;
1260 		while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1261 			&& (allxfer_len<124)) {
1262 			pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1263 			memcpy(iop_data, pQbuffer, 1);
1264 			acb->wqbuf_firstindex++;
1265 			acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */
1266 			iop_data++;
1267 			allxfer_len++;
1268 		}
1269 		pwbuffer->data_len=allxfer_len;
1270 		/*
1271 		** push inbound doorbell tell iop driver data write ok
1272 		** and wait reply on next hwinterrupt for next Qbuffer post
1273 		*/
1274 		arcmsr_iop_message_wrote(acb);
1275 	}
1276 	if(acb->wqbuf_firstindex==acb->wqbuf_lastindex) {
1277 		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
1278 	}
1279 }
1280 
1281 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb)
1282 {
1283 /*
1284 	if (ccb->ccb_h.status != CAM_REQ_CMP)
1285 		kprintf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x, failure status=%x\n",ccb->ccb_h.target_id,ccb->ccb_h.target_lun,ccb->ccb_h.status);
1286 	else
1287 		kprintf("arcmsr_rescanLun_cb: Rescan lun successfully!\n");
1288 */
1289 	xpt_free_path(ccb->ccb_h.path);
1290 }
1291 
1292 static void	arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun)
1293 {
1294 	struct cam_path     *path;
1295 	union ccb            ccb;
1296 
1297 	if (xpt_create_path(&path, xpt_periph, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP)
1298 		return;
1299 /*	kprintf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */
1300 	bzero(&ccb, sizeof(union ccb));
1301 	xpt_setup_ccb(&ccb.ccb_h, path, 5);
1302 	ccb.ccb_h.func_code = XPT_SCAN_LUN;
1303 	ccb.ccb_h.cbfcnp = arcmsr_rescanLun_cb;
1304 	ccb.crcn.flags = CAM_FLAG_NONE;
1305 	xpt_action(&ccb);
1306 }
1307 
1308 
1309 static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun)
1310 {
1311 	struct CommandControlBlock *srb;
1312 	u_int32_t intmask_org;
1313 	int i;
1314 
1315 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1316 	/* disable all outbound interrupts */
1317 	intmask_org = arcmsr_disable_allintr(acb);
1318 	for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++)
1319 	{
1320 		srb = acb->psrb_pool[i];
1321 		if (srb->srb_state == ARCMSR_SRB_START)
1322 		{
1323 			if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun))
1324 			{
1325 				srb->srb_state = ARCMSR_SRB_ABORTED;
1326 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
1327 				arcmsr_srb_complete(srb, 1);
1328 				kprintf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb);
1329 			}
1330 		}
1331 	}
1332 	/* enable outbound Post Queue, outbound doorbell Interrupt */
1333 	arcmsr_enable_allintr(acb, intmask_org);
1334 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1335 }
1336 
1337 
1338 /*
1339 **************************************************************************
1340 **************************************************************************
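** arcmsr_dr_handle: read the firmware's device map out of the message
** buffer, compare it with the cached copy, and rescan or abort the LUNs that
** have been hot-plugged or removed.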
1341 */
1342 static void arcmsr_dr_handle(struct AdapterControlBlock *acb) {
1343 	u_int32_t	devicemap;
1344 	u_int32_t	target, lun;
1345 	u_int32_t	deviceMapCurrent[4]={0};
1346 	u_int8_t	*pDevMap;
1347 
1348 	switch (acb->adapter_type) {
1349 	case ACB_ADAPTER_TYPE_A:
1350 			devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1351 			for (target= 0; target < 4; target++)
1352 			{
1353 				deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0],  devicemap);
1354 				devicemap += 4;
1355 			}
1356 			break;
1357 
1358 	case ACB_ADAPTER_TYPE_B:
1359 			devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1360 			for (target= 0; target < 4; target++)
1361 			{
1362 				deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1],  devicemap);
1363 				devicemap += 4;
1364 			}
1365 			break;
1366 
1367 	case ACB_ADAPTER_TYPE_C:
1368 			devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1369 			for (target= 0; target < 4; target++)
1370 			{
1371 				deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0],  devicemap);
1372 				devicemap += 4;
1373 			}
1374 			break;
1375 	}
1376 
1377 	if(acb->acb_flags & ACB_F_BUS_HANG_ON)
1378 	{
1379 		acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
1380 	}
1381 	/*
1382 	** adapter posted CONFIG message
1383 	** copy the new map, note if there are differences with the current map
1384 	*/
1385 	pDevMap = (u_int8_t *)&deviceMapCurrent[0];
1386 	for (target= 0; target < ARCMSR_MAX_TARGETID - 1; target++)
1387 	{
1388 		if (*pDevMap != acb->device_map[target])
1389 		{
1390 			u_int8_t difference, bit_check;
1391 
1392 			difference= *pDevMap ^ acb->device_map[target];
1393 			for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++)
1394 			{
1395 				bit_check=(1 << lun);	/*check bit from 0....31*/
1396 				if(difference & bit_check)
1397 				{
1398 					if(acb->device_map[target] & bit_check)
1399 					{/* unit departed */
1400 						kprintf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun);
1401 						arcmsr_abort_dr_ccbs(acb, target, lun);
1402 						arcmsr_rescan_lun(acb, target, lun);
1403 						acb->devstate[target][lun] = ARECA_RAID_GONE;
1404 					}
1405 					else
1406 					{/* unit arrived */
1407 						kprintf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n",target,lun);
1408 						arcmsr_rescan_lun(acb, target, lun);
1409 						acb->devstate[target][lun] = ARECA_RAID_GOOD;
1410 					}
1411 				}
1412 			}
1413 /*			kprintf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */
1414 			acb->device_map[target]= *pDevMap;
1415 		}
1416 		pDevMap++;
1417 	}
1418 }
1419 /*
1420 **************************************************************************
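** arcmsr_hba_message_isr: acknowledge the outbound MESSAGE0 interrupt on type A adapters
** and run the device-map handler when the firmware posts a GET_CONFIG reply.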
1421 **************************************************************************
1422 */
1423 static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) {
1424 	u_int32_t outbound_message;
1425 
1426 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1427 	outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]);
1428 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1429 		arcmsr_dr_handle( acb );
1430 }
1431 /*
1432 **************************************************************************
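** arcmsr_hbb_message_isr: clear the iop2drv message doorbell on type B adapters
** and run the device-map handler when the firmware posts a GET_CONFIG reply.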
1433 **************************************************************************
1434 */
1435 static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) {
1436 	u_int32_t outbound_message;
1437 
1438 	/* clear interrupts */
1439 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1440 	outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]);
1441 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1442 		arcmsr_dr_handle( acb );
1443 }
1444 /*
1445 **************************************************************************
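** arcmsr_hbc_message_isr: clear the message-done doorbell on type C adapters
** and run the device-map handler when the firmware posts a GET_CONFIG reply.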
1446 **************************************************************************
1447 */
1448 static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) {
1449 	u_int32_t outbound_message;
1450 
1451 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
1452 	outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]);
1453 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1454 		arcmsr_dr_handle( acb );
1455 }
1456 /*
1457 **************************************************************************
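** arcmsr_hba_doorbell_isr: service the ioctl data doorbells on type A adapters
** (the firmware has written data for us to read, or has read the data we wrote).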
1458 **************************************************************************
1459 */
1460 static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
1461 {
1462 	u_int32_t outbound_doorbell;
1463 
1464 	/*
1465 	*******************************************************************
1466 	**  Maybe we need to check here whether wrqbuffer_lock is held.
1467 	**  DOORBELL: ding! dong!
1468 	**  Check whether there is any mail that needs to be fetched from the firmware.
1469 	*******************************************************************
1470 	*/
1471 	outbound_doorbell=CHIP_REG_READ32(HBA_MessageUnit,
1472 	0, outbound_doorbell);
1473 	CHIP_REG_WRITE32(HBA_MessageUnit,
1474 	0, outbound_doorbell, outbound_doorbell); /* clear doorbell interrupt */
1475 	if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
1476 		arcmsr_iop2drv_data_wrote_handle(acb);
1477 	}
1478 	if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
1479 		arcmsr_iop2drv_data_read_handle(acb);
1480 	}
1481 }
1482 /*
1483 **************************************************************************
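** arcmsr_hbc_doorbell_isr: service the ioctl data doorbells and the message-done
** doorbell on type C adapters.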
1484 **************************************************************************
1485 */
1486 static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
1487 {
1488 	u_int32_t outbound_doorbell;
1489 
1490 	/*
1491 	*******************************************************************
1492 	**  Maybe we need to check here whether wrqbuffer_lock is held.
1493 	**  DOORBELL: ding! dong!
1494 	**  Check whether there is any mail that needs to be fetched from the firmware.
1495 	*******************************************************************
1496 	*/
1497 	outbound_doorbell=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
1498 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /* clear doorbell interrupt */
1499 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
1500 		arcmsr_iop2drv_data_wrote_handle(acb);
1501 	}
1502 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
1503 		arcmsr_iop2drv_data_read_handle(acb);
1504 	}
1505 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
1506 		arcmsr_hbc_message_isr(acb);    /* messenger of "driver to iop commands" */
1507 	}
1508 }
1509 /*
1510 **************************************************************************
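** arcmsr_hba_postqueue_isr: drain the outbound post queue of completed srbs on type A
** adapters and hand each one to arcmsr_drain_donequeue().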
1511 **************************************************************************
1512 */
1513 static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1514 {
1515 	u_int32_t flag_srb;
1516 	u_int16_t error;
1517 
1518 	/*
1519 	*****************************************************************************
1520 	**               areca cdb command done
1521 	*****************************************************************************
1522 	*/
1523 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1524 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1525 	while((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
1526 		0, outbound_queueport)) != 0xFFFFFFFF) {
1527 		/* check whether the command completed without error */
1528 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
1529 		arcmsr_drain_donequeue(acb, flag_srb, error);
1530 	}	/*drain reply FIFO*/
1531 }
1532 /*
1533 **************************************************************************
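** arcmsr_hbb_postqueue_isr: drain the done_qbuffer ring of completed srbs on type B
** adapters and hand each one to arcmsr_drain_donequeue().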
1534 **************************************************************************
1535 */
1536 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1537 {
1538 	struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1539 	u_int32_t flag_srb;
1540 	int index;
1541 	u_int16_t error;
1542 
1543 	/*
1544 	*****************************************************************************
1545 	**               areca cdb command done
1546 	*****************************************************************************
1547 	*/
1548 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1549 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1550 	index=phbbmu->doneq_index;
1551 	while((flag_srb=phbbmu->done_qbuffer[index]) != 0) {
1552 		phbbmu->done_qbuffer[index]=0;
1553 		index++;
1554 		index %= ARCMSR_MAX_HBB_POSTQUEUE;	/* wrap index back to 0 after the last entry */
1555 		phbbmu->doneq_index=index;
1556 		/* check whether the command completed without error */
1557 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
1558 		arcmsr_drain_donequeue(acb, flag_srb, error);
1559 	}	/*drain reply FIFO*/
1560 }
1561 /*
1562 **************************************************************************
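** arcmsr_hbc_postqueue_isr: drain completed srbs on type C adapters, throttling the
** firmware once ARCMSR_HBC_ISR_THROTTLING_LEVEL replies have been handled in one pass.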
1563 **************************************************************************
1564 */
1565 static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
1566 {
1567 	u_int32_t flag_srb,throttling=0;
1568 	u_int16_t error;
1569 
1570 	/*
1571 	*****************************************************************************
1572 	**               areca cdb command done
1573 	*****************************************************************************
1574 	*/
1575 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1576 
1577 	while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1578 
1579 		flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
1580 		/* check whether the command completed without error */
1581 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
1582 		arcmsr_drain_donequeue(acb, flag_srb, error);
1583 		if(throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
1584 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
1585 			break;
1586 		}
1587 		throttling++;
1588 	}	/*drain reply FIFO*/
1589 }
1590 /*
1591 **********************************************************************
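** arcmsr_handle_hba_isr: type A interrupt dispatcher; returns early when the shared irq
** is not ours, otherwise clears the status and services doorbell, post queue and
** message interrupts.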
1592 **********************************************************************
1593 */
1594 static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb)
1595 {
1596 	u_int32_t outbound_intStatus;
1597 	/*
1598 	*********************************************
1599 	**   check outbound intstatus
1600 	*********************************************
1601 	*/
1602 	outbound_intStatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
1603 	if(!outbound_intStatus) {
1604 		/* it must be a shared irq */
1605 		return;
1606 	}
1607 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intStatus);/*clear interrupt*/
1608 	/* MU doorbell interrupts*/
1609 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
1610 		arcmsr_hba_doorbell_isr(acb);
1611 	}
1612 	/* MU post queue interrupts*/
1613 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
1614 		arcmsr_hba_postqueue_isr(acb);
1615 	}
1616 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1617 		arcmsr_hba_message_isr(acb);
1618 	}
1619 }
1620 /*
1621 **********************************************************************
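** arcmsr_handle_hbb_isr: type B interrupt dispatcher; clears the iop2drv doorbell and
** services the data transfer, post queue and message interrupts.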
1622 **********************************************************************
1623 */
1624 static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb)
1625 {
1626 	u_int32_t outbound_doorbell;
1627 	/*
1628 	*********************************************
1629 	**   check outbound intstatus
1630 	*********************************************
1631 	*/
1632 	outbound_doorbell=CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & acb->outbound_int_enable;
1633 	if(!outbound_doorbell) {
1634 		/* it must be a shared irq */
1635 		return;
1636 	}
1637 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */
1638 	CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell);
1639 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1640 	/* MU ioctl transfer doorbell interrupts*/
1641 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
1642 		arcmsr_iop2drv_data_wrote_handle(acb);
1643 	}
1644 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
1645 		arcmsr_iop2drv_data_read_handle(acb);
1646 	}
1647 	/* MU post queue interrupts*/
1648 	if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1649 		arcmsr_hbb_postqueue_isr(acb);
1650 	}
1651 	if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1652 		arcmsr_hbb_message_isr(acb);
1653 	}
1654 }
1655 /*
1656 **********************************************************************
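** arcmsr_handle_hbc_isr: type C interrupt dispatcher; services the doorbell and post
** queue interrupts reported in host_int_status.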
1657 **********************************************************************
1658 */
1659 static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb)
1660 {
1661 	u_int32_t host_interrupt_status;
1662 	/*
1663 	*********************************************
1664 	**   check outbound intstatus
1665 	*********************************************
1666 	*/
1667 	host_interrupt_status=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status);
1668 	if(!host_interrupt_status) {
1669 		/* it must be a shared irq */
1670 		return;
1671 	}
1672 	/* MU doorbell interrupts*/
1673 	if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
1674 		arcmsr_hbc_doorbell_isr(acb);
1675 	}
1676 	/* MU post queue interrupts*/
1677 	if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1678 		arcmsr_hbc_postqueue_isr(acb);
1679 	}
1680 }
1681 /*
1682 ******************************************************************************
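** arcmsr_interrupt: route the interrupt to the handler that matches the adapter type.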
1683 ******************************************************************************
1684 */
1685 static void arcmsr_interrupt(struct AdapterControlBlock *acb)
1686 {
1687 	switch (acb->adapter_type) {
1688 	case ACB_ADAPTER_TYPE_A:
1689 		arcmsr_handle_hba_isr(acb);
1690 		break;
1691 	case ACB_ADAPTER_TYPE_B:
1692 		arcmsr_handle_hbb_isr(acb);
1693 		break;
1694 	case ACB_ADAPTER_TYPE_C:
1695 		arcmsr_handle_hbc_isr(acb);
1696 		break;
1697 	default:
1698 		kprintf("arcmsr%d: interrupt service,"
1699 		" unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type);
1700 		break;
1701 	}
1702 }
1703 /*
1704 **********************************************************************
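** arcmsr_intr_handler: registered interrupt entry point; runs arcmsr_interrupt()
** with qbuffer_lock held.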
1705 **********************************************************************
1706 */
1707 static void arcmsr_intr_handler(void *arg)
1708 {
1709 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg;
1710 
1711 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1712 	arcmsr_interrupt(acb);
1713 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1714 }
1715 /*
1716 ******************************************************************************
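** arcmsr_polling_devmap: periodically ask the firmware for its device map (GET_CONFIG)
** and re-arm the callout every 5 seconds until the adapter is stopped.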
1717 ******************************************************************************
1718 */
1719 static void	arcmsr_polling_devmap(void* arg)
1720 {
1721 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
1722 	switch (acb->adapter_type) {
1723 	case ACB_ADAPTER_TYPE_A:
1724 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1725 		break;
1726 
1727 	case ACB_ADAPTER_TYPE_B:
1728 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
1729 		break;
1730 
1731 	case ACB_ADAPTER_TYPE_C:
1732 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1733 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1734 		break;
1735 	}
1736 
1737 	if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)
1738 	{
1739 		callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb);	/* poll again in 5 seconds */
1740 	}
1741 }
1742 
1743 /*
1744 *******************************************************************************
1745 **
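** arcmsr_iop_parking: stop the adapter background rebuild and flush the adapter cache
** with interrupts masked; used when parking the IOP (e.g. on the SAY_GOODBYE message).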
1746 *******************************************************************************
1747 */
1748 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
1749 {
1750 	u_int32_t intmask_org;
1751 
1752 	if(acb!=NULL) {
1753 		/* stop adapter background rebuild */
1754 		if(acb->acb_flags & ACB_F_MSG_START_BGRB) {
1755 			intmask_org = arcmsr_disable_allintr(acb);
1756 			arcmsr_stop_adapter_bgrb(acb);
1757 			arcmsr_flush_adapter_cache(acb);
1758 			arcmsr_enable_allintr(acb, intmask_org);
1759 		}
1760 	}
1761 }
1762 /*
1763 ***********************************************************************
1764 **
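** arcmsr_iop_ioctlcmd: handle the character-device ioctl interface; validates the
** "ARCMSR" signature and then moves message data between the driver qbuffers and the IOP.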
1765 ************************************************************************
1766 */
1767 u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
1768 {
1769 	struct CMD_MESSAGE_FIELD * pcmdmessagefld;
1770 	u_int32_t retvalue=EINVAL;
1771 
1772 	pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg;
1773 	if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
1774 		return retvalue;
1775 	}
1776 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1777 	switch(ioctl_cmd) {
1778 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
1779 			u_int8_t * pQbuffer;
1780 			u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
1781 			u_int32_t allxfer_len=0;
1782 
1783 			while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex)
1784 				&& (allxfer_len<1031)) {
1785 				/* copy one byte from the read qbuffer to the user message buffer */
1786 				pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex];
1787 				memcpy(ptmpQbuffer, pQbuffer, 1);
1788 				acb->rqbuf_firstindex++;
1789 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1790 				/* wrap index back to 0 after the last entry */
1791 				ptmpQbuffer++;
1792 				allxfer_len++;
1793 			}
1794 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1795 				struct QBUFFER * prbuffer;
1796 				u_int8_t * iop_data;
1797 				u_int32_t iop_len;
1798 
1799 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1800 				prbuffer=arcmsr_get_iop_rqbuffer(acb);
1801 				iop_data=(u_int8_t *)prbuffer->data;
1802 				iop_len=(u_int32_t)prbuffer->data_len;
1803 				/* this IOP data cannot overflow the ring buffer again here, so just copy it */
1804 				while(iop_len>0) {
1805 					pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
1806 					memcpy(pQbuffer, iop_data, 1);
1807 					acb->rqbuf_lastindex++;
1808 					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1809 					/*if last index number set it to 0 */
1810 					iop_data++;
1811 					iop_len--;
1812 				}
1813 				arcmsr_iop_message_read(acb);
1814 				/* acknowledge to the IOP that the data has been read */
1815 			}
1816 			pcmdmessagefld->cmdmessage.Length=allxfer_len;
1817 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1818 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1819 		}
1820 		break;
1821 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1822 			u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1823 			u_int8_t * pQbuffer;
1824 			u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
1825 
1826 			user_len=pcmdmessagefld->cmdmessage.Length;
1827 			/*check if data xfer length of this request will overflow my array qbuffer */
1828 			wqbuf_lastindex=acb->wqbuf_lastindex;
1829 			wqbuf_firstindex=acb->wqbuf_firstindex;
1830 			if(wqbuf_lastindex!=wqbuf_firstindex) {
1831 				arcmsr_post_ioctldata2iop(acb);
1832 				pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1833 			} else {
1834 				my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1835 				if(my_empty_len>=user_len) {
1836 					while(user_len>0) {
1837 						/*copy srb data to wqbuffer*/
1838 						pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex];
1839 						memcpy(pQbuffer, ptmpuserbuffer, 1);
1840 						acb->wqbuf_lastindex++;
1841 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1842 						/*if last index number set it to 0 */
1843 						ptmpuserbuffer++;
1844 						user_len--;
1845 					}
1846 					/* post first Qbuffer */
1847 					if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
1848 						acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED;
1849 						arcmsr_post_ioctldata2iop(acb);
1850 					}
1851 					pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1852 				} else {
1853 					pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1854 				}
1855 			}
1856 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1857 		}
1858 		break;
1859 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
1860 			u_int8_t * pQbuffer=acb->rqbuffer;
1861 
1862 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1863 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1864 				arcmsr_iop_message_read(acb);
1865 				/* acknowledge to the IOP that the data has been read */
1866 			}
1867 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
1868 			acb->rqbuf_firstindex=0;
1869 			acb->rqbuf_lastindex=0;
1870 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1871 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1872 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1873 		}
1874 		break;
1875 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
1876 		{
1877 			u_int8_t * pQbuffer=acb->wqbuffer;
1878 
1879 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1880 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1881 				arcmsr_iop_message_read(acb);
1882 				/* acknowledge to the IOP that the data has been read */
1883 			}
1884 			acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
1885 			acb->wqbuf_firstindex=0;
1886 			acb->wqbuf_lastindex=0;
1887 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1888 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1889 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1890 		}
1891 		break;
1892 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1893 			u_int8_t * pQbuffer;
1894 
1895 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1896 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1897 				arcmsr_iop_message_read(acb);
1898 				/* acknowledge to the IOP that the data has been read */
1899 			}
1900 			acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
1901 					|ACB_F_MESSAGE_RQBUFFER_CLEARED
1902 					|ACB_F_MESSAGE_WQBUFFER_READ);
1903 			acb->rqbuf_firstindex=0;
1904 			acb->rqbuf_lastindex=0;
1905 			acb->wqbuf_firstindex=0;
1906 			acb->wqbuf_lastindex=0;
1907 			pQbuffer=acb->rqbuffer;
1908 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
1909 			pQbuffer=acb->wqbuffer;
1910 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
1911 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1912 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1913 		}
1914 		break;
1915 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
1916 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F;
1917 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1918 		}
1919 		break;
1920 	case ARCMSR_MESSAGE_SAY_HELLO: {
1921 			const char *hello_string = "Hello! I am ARCMSR";
1922 			u_int8_t *puserbuffer = (u_int8_t *)pcmdmessagefld->messagedatabuffer;
1923 
1924 			/*
1925 			** memcpy() always returns its destination, so the old error test could
1926 			** never indicate failure; just copy the greeting and report success.
1927 			*/
1928 			memcpy(puserbuffer, hello_string, strlen(hello_string));
1929 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1930 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1931 		}
1932 		break;
1933 	case ARCMSR_MESSAGE_SAY_GOODBYE: {
1934 			arcmsr_iop_parking(acb);
1935 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1936 		}
1937 		break;
1938 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
1939 			arcmsr_flush_adapter_cache(acb);
1940 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1941 		}
1942 		break;
1943 	}
1944 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1945 	return (retvalue);
1946 }
1947 /*
1948 **************************************************************************
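** arcmsr_free_srb: return a completed srb to the free pool, taking qbuffer_lock
** if the caller does not already hold it.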
1949 **************************************************************************
1950 */
1951 static void arcmsr_free_srb(struct CommandControlBlock *srb)
1952 {
1953 	struct AdapterControlBlock	*acb;
1954 	int	mutex;
1955 
1956 	acb = srb->acb;
1957 	mutex = lockstatus(&acb->qbuffer_lock, curthread);
1958 	if( mutex == 0 )
1959 		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1960 	srb->srb_state=ARCMSR_SRB_DONE;
1961 	srb->srb_flags=0;
1962 	acb->srbworkingQ[acb->workingsrb_doneindex]=srb;
1963 	acb->workingsrb_doneindex++;
1964 	acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM;
1965 	if( mutex == 0 )
1966 		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1967 }
1968 /*
1969 **************************************************************************
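** arcmsr_get_freesrb: take the next srb from the free pool, or return NULL when the
** pool is empty; takes qbuffer_lock if the caller does not already hold it.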
1970 **************************************************************************
1971 */
1972 struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb)
1973 {
1974 	struct CommandControlBlock *srb=NULL;
1975 	u_int32_t workingsrb_startindex, workingsrb_doneindex;
1976 	int	mutex;
1977 
1978 	mutex = lockstatus(&acb->qbuffer_lock, curthread);
1979 	if( mutex == 0 )
1980 		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1981 	workingsrb_doneindex=acb->workingsrb_doneindex;
1982 	workingsrb_startindex=acb->workingsrb_startindex;
1983 	srb=acb->srbworkingQ[workingsrb_startindex];
1984 	workingsrb_startindex++;
1985 	workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM;
1986 	if(workingsrb_doneindex!=workingsrb_startindex) {
1987 		acb->workingsrb_startindex=workingsrb_startindex;
1988 	} else {
1989 		srb=NULL;
1990 	}
1991 	if( mutex == 0 )
1992 		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1993 	return(srb);
1994 }
1995 /*
1996 **************************************************************************
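** arcmsr_iop_message_xfer: handle Areca pass-through messages carried in
** WRITE_BUFFER/READ_BUFFER CDBs sent to the virtual device.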
1997 **************************************************************************
1998 */
1999 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb)
2000 {
2001 	struct CMD_MESSAGE_FIELD * pcmdmessagefld;
2002 	int retvalue = 0, transfer_len = 0;
2003 	char *buffer;
2004 	u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 |
2005 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 |
2006 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8  |
2007 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8];
2008 					/* 4 bytes: Areca io control code */
2009 	if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2010 		buffer = pccb->csio.data_ptr;
2011 		transfer_len = pccb->csio.dxfer_len;
2012 	} else {
2013 		retvalue = ARCMSR_MESSAGE_FAIL;
2014 		goto message_out;
2015 	}
2016 	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
2017 		retvalue = ARCMSR_MESSAGE_FAIL;
2018 		goto message_out;
2019 	}
2020 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
2021 	switch(controlcode) {
2022 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
2023 			u_int8_t *pQbuffer;
2024 			u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
2025 			int32_t allxfer_len = 0;
2026 
2027 			while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
2028 				&& (allxfer_len < 1031)) {
2029 				pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
2030 				memcpy(ptmpQbuffer, pQbuffer, 1);
2031 				acb->rqbuf_firstindex++;
2032 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
2033 				ptmpQbuffer++;
2034 				allxfer_len++;
2035 			}
2036 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2037 				struct QBUFFER  *prbuffer;
2038 				u_int8_t  *iop_data;
2039 				int32_t iop_len;
2040 
2041 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2042 				prbuffer=arcmsr_get_iop_rqbuffer(acb);
2043 				iop_data = (u_int8_t *)prbuffer->data;
2044 				iop_len =(u_int32_t)prbuffer->data_len;
2045 				while (iop_len > 0) {
2046 					pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex];
2047 					memcpy(pQbuffer, iop_data, 1);
2048 					acb->rqbuf_lastindex++;
2049 					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2050 					iop_data++;
2051 					iop_len--;
2052 				}
2053 				arcmsr_iop_message_read(acb);
2054 			}
2055 			pcmdmessagefld->cmdmessage.Length = allxfer_len;
2056 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2057 			retvalue=ARCMSR_MESSAGE_SUCCESS;
2058 		}
2059 		break;
2060 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2061 			int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
2062 			u_int8_t *pQbuffer;
2063 			u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
2064 
2065 			user_len = pcmdmessagefld->cmdmessage.Length;
2066 			wqbuf_lastindex = acb->wqbuf_lastindex;
2067 			wqbuf_firstindex = acb->wqbuf_firstindex;
2068 			if (wqbuf_lastindex != wqbuf_firstindex) {
2069 				arcmsr_post_ioctldata2iop(acb);
2070 				/* has error, report sense data */
2071 				if(pccb->csio.sense_len) {
2072 					((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2073 					/* Valid, ErrorCode */
2074 					((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2075 					/* FileMark, EndOfMedia, IncorrectLength, Reserved, SenseKey */
2076 					((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2077 					/* AdditionalSenseLength */
2078 					((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2079 					/* AdditionalSenseCode */
2080 				}
2081 				retvalue = ARCMSR_MESSAGE_FAIL;
2082 			} else {
2083 				my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
2084 						&(ARCMSR_MAX_QBUFFER - 1);
2085 				if (my_empty_len >= user_len) {
2086 					while (user_len > 0) {
2087 						pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
2088 						memcpy(pQbuffer, ptmpuserbuffer, 1);
2089 						acb->wqbuf_lastindex++;
2090 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2091 						ptmpuserbuffer++;
2092 						user_len--;
2093 					}
2094 					if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2095 						acb->acb_flags &=
2096 						~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2097 						arcmsr_post_ioctldata2iop(acb);
2098 					}
2099 				} else {
2100 					/* has error, report sense data */
2101 					if(pccb->csio.sense_len) {
2102 						((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2103 						/* Valid, ErrorCode */
2104 						((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2105 						/* FileMark, EndOfMedia, IncorrectLength, Reserved, SenseKey */
2106 						((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2107 						/* AdditionalSenseLength */
2108 						((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2109 						/* AdditionalSenseCode */
2110 					}
2111 					retvalue = ARCMSR_MESSAGE_FAIL;
2112 				}
2113 			}
2114 		}
2115 		break;
2116 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2117 			u_int8_t *pQbuffer = acb->rqbuffer;
2118 
2119 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2120 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2121 				arcmsr_iop_message_read(acb);
2122 			}
2123 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2124 			acb->rqbuf_firstindex = 0;
2125 			acb->rqbuf_lastindex = 0;
2126 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2127 			pcmdmessagefld->cmdmessage.ReturnCode =
2128 			ARCMSR_MESSAGE_RETURNCODE_OK;
2129 		}
2130 		break;
2131 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
2132 			u_int8_t *pQbuffer = acb->wqbuffer;
2133 
2134 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2135 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2136 				arcmsr_iop_message_read(acb);
2137 			}
2138 			acb->acb_flags |=
2139 				(ACB_F_MESSAGE_WQBUFFER_CLEARED |
2140 					ACB_F_MESSAGE_WQBUFFER_READ);
2141 			acb->wqbuf_firstindex = 0;
2142 			acb->wqbuf_lastindex = 0;
2143 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2144 			pcmdmessagefld->cmdmessage.ReturnCode =
2145 				ARCMSR_MESSAGE_RETURNCODE_OK;
2146 		}
2147 		break;
2148 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
2149 			u_int8_t *pQbuffer;
2150 
2151 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2152 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2153 				arcmsr_iop_message_read(acb);
2154 			}
2155 			acb->acb_flags |=
2156 				(ACB_F_MESSAGE_WQBUFFER_CLEARED
2157 				| ACB_F_MESSAGE_RQBUFFER_CLEARED
2158 				| ACB_F_MESSAGE_WQBUFFER_READ);
2159 			acb->rqbuf_firstindex = 0;
2160 			acb->rqbuf_lastindex = 0;
2161 			acb->wqbuf_firstindex = 0;
2162 			acb->wqbuf_lastindex = 0;
2163 			pQbuffer = acb->rqbuffer;
2164 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2165 			pQbuffer = acb->wqbuffer;
2166 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2167 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2168 		}
2169 		break;
2170 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
2171 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
2172 		}
2173 		break;
2174 	case ARCMSR_MESSAGE_SAY_HELLO: {
2175 			const char *hello_string = "Hello! I am ARCMSR";
2176 
2177 			memcpy(pcmdmessagefld->messagedatabuffer, hello_string,
2178 				strlen(hello_string));
2179 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2180 		}
2181 		break;
2182 	case ARCMSR_MESSAGE_SAY_GOODBYE:
2183 		arcmsr_iop_parking(acb);
2184 		break;
2185 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
2186 		arcmsr_flush_adapter_cache(acb);
2187 		break;
2188 	default:
2189 		retvalue = ARCMSR_MESSAGE_FAIL;
2190 	}
2191 message_out:
2192 	return (retvalue);
2193 }
2194 /*
2195 *********************************************************************
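** arcmsr_execute_srb: bus_dmamap_load callback; validates the request, builds the srb
** from the DMA segments, posts it to the adapter and arms the ccb timeout when one
** was requested.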
2196 *********************************************************************
2197 */
2198 static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2199 {
2200 	struct CommandControlBlock *srb=(struct CommandControlBlock *)arg;
2201 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb;
2202 	union ccb * pccb;
2203 	int target, lun;
2204 
2205 	pccb=srb->pccb;
2206 	target=pccb->ccb_h.target_id;
2207 	lun=pccb->ccb_h.target_lun;
2208 #ifdef ARCMSR_DEBUG1
2209 	acb->pktRequestCount++;
2210 #endif
2211 	if(error != 0) {
2212 		if(error != EFBIG) {
2213 			kprintf("arcmsr%d: unexpected error %x"
2214 				" returned from 'bus_dmamap_load' \n"
2215 				, acb->pci_unit, error);
2216 		}
2217 		if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2218 			pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2219 		}
2220 		arcmsr_srb_complete(srb, 0);
2221 		return;
2222 	}
2223 	if(nseg > ARCMSR_MAX_SG_ENTRIES) {
2224 		pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2225 		arcmsr_srb_complete(srb, 0);
2226 		return;
2227 	}
2228 	if(acb->acb_flags & ACB_F_BUS_RESET) {
2229 		kprintf("arcmsr%d: bus reset and return busy \n", acb->pci_unit);
2230 		pccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2231 		arcmsr_srb_complete(srb, 0);
2232 		return;
2233 	}
2234 	if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
2235 		u_int8_t block_cmd, cmd;
2236 
2237 		cmd = pccb->csio.cdb_io.cdb_bytes[0];
2238 		block_cmd= cmd & 0x0f;
2239 		if(block_cmd==0x08 || block_cmd==0x0a) {
2240 			kprintf("arcmsr%d:block 'read/write' command "
2241 				"with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n"
2242 				, acb->pci_unit, cmd, target, lun);
2243 			pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
2244 			arcmsr_srb_complete(srb, 0);
2245 			return;
2246 		}
2247 	}
2248 	if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2249 		if(nseg != 0) {
2250 			bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
2251 		}
2252 		arcmsr_srb_complete(srb, 0);
2253 		return;
2254 	}
2255 	if(acb->srboutstandingcount > ARCMSR_MAX_OUTSTANDING_CMD) {
2256 		xpt_freeze_simq(acb->psim, 1);
2257 		pccb->ccb_h.status = CAM_REQUEUE_REQ;
2258 		acb->acb_flags |= ACB_F_CAM_DEV_QFRZN;
2259 		arcmsr_srb_complete(srb, 0);
2260 		return;
2261 	}
2262 	pccb->ccb_h.status |= CAM_SIM_QUEUED;
2263 	arcmsr_build_srb(srb, dm_segs, nseg);
2264 	arcmsr_post_srb(acb, srb);
2265 	if (pccb->ccb_h.timeout != CAM_TIME_INFINITY)
2266 	{
2267 		arcmsr_callout_init(&srb->ccb_callout);
2268 		callout_reset(&srb->ccb_callout, ((pccb->ccb_h.timeout + (ARCMSR_TIMEOUT_DELAY * 1000)) * hz) / 1000, arcmsr_srb_timeout, srb);
2269 		srb->srb_flags |= SRB_FLAG_TIMER_START;
2270 	}
2271 }
2272 /*
2273 *****************************************************************************************
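** arcmsr_seek_cmd2abort: look for the ccb to be aborted among the outstanding srbs;
** if found, mark it aborted and poll it to completion.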
2274 *****************************************************************************************
2275 */
2276 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb)
2277 {
2278 	struct CommandControlBlock *srb;
2279 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr;
2280 	u_int32_t intmask_org;
2281 	int i=0;
2282 
2283 	acb->num_aborts++;
2284 	/*
2285 	***************************************************************************
2286 	** The upper layer takes care of the abort-command locking just prior to calling us.
2287 	** First determine whether we currently own this command.
2288 	** Start by searching the pool of outstanding srbs. If the command is not
2289 	** found at all and the system only wanted us to abort the command,
2290 	** return success.
2291 	***************************************************************************
2292 	*/
2293 	if(acb->srboutstandingcount!=0) {
2294 		/* disable all outbound interrupt */
2295 		intmask_org=arcmsr_disable_allintr(acb);
2296 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
2297 			srb=acb->psrb_pool[i];
2298 			if(srb->srb_state==ARCMSR_SRB_START) {
2299 				if(srb->pccb==abortccb) {
2300 					srb->srb_state=ARCMSR_SRB_ABORTED;
2301 					kprintf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'"
2302 						"outstanding command \n"
2303 						, acb->pci_unit, abortccb->ccb_h.target_id
2304 						, abortccb->ccb_h.target_lun, srb);
2305 					arcmsr_polling_srbdone(acb, srb);
2306 					/* enable outbound Post Queue, outbound doorbell Interrupt */
2307 					arcmsr_enable_allintr(acb, intmask_org);
2308 					return (TRUE);
2309 				}
2310 			}
2311 		}
2312 		/* enable outbound Post Queue, outbound doorbell Interrupt */
2313 		arcmsr_enable_allintr(acb, intmask_org);
2314 	}
2315 	return(FALSE);
2316 }
2317 /*
2318 ****************************************************************************
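** arcmsr_bus_reset: drain outstanding srbs by servicing interrupts for up to about
** 10 seconds, then reset the IOP.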
2319 ****************************************************************************
2320 */
2321 static void arcmsr_bus_reset(struct AdapterControlBlock *acb)
2322 {
2323 	int retry=0;
2324 
2325 	acb->num_resets++;
2326 	acb->acb_flags |=ACB_F_BUS_RESET;
2327 	while(acb->srboutstandingcount!=0 && retry < 400) {
2328 		arcmsr_interrupt(acb);
2329 		UDELAY(25000);
2330 		retry++;
2331 	}
2332 	arcmsr_iop_reset(acb);
2333 	acb->acb_flags &= ~ACB_F_BUS_RESET;
2334 }
2335 /*
2336 **************************************************************************
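** arcmsr_handle_virtual_command: emulate the virtual pass-through device (target 16):
** answer INQUIRY and route WRITE_BUFFER/READ_BUFFER to the iop message channel.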
2337 **************************************************************************
2338 */
2339 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
2340 		union ccb * pccb)
2341 {
2342 	pccb->ccb_h.status |= CAM_REQ_CMP;
2343 	switch (pccb->csio.cdb_io.cdb_bytes[0]) {
2344 	case INQUIRY: {
2345 		unsigned char inqdata[36];
2346 		char *buffer=pccb->csio.data_ptr;
2347 
2348 		if (pccb->ccb_h.target_lun) {
2349 			pccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2350 			xpt_done(pccb);
2351 			return;
2352 		}
2353 		inqdata[0] = T_PROCESSOR;	/* Periph Qualifier & Periph Dev Type */
2354 		inqdata[1] = 0;				/* rem media bit & Dev Type Modifier */
2355 		inqdata[2] = 0;				/* ISO, ECMA, & ANSI versions */
2356 		inqdata[3] = 0;
2357 		inqdata[4] = 31;			/* length of additional data */
2358 		inqdata[5] = 0;
2359 		inqdata[6] = 0;
2360 		inqdata[7] = 0;
2361 		strncpy(&inqdata[8], "Areca   ", 8);	/* Vendor Identification */
2362 		strncpy(&inqdata[16], "RAID controller ", 16);	/* Product Identification */
2363 		strncpy(&inqdata[32], "R001", 4); /* Product Revision */
2364 		memcpy(buffer, inqdata, sizeof(inqdata));
2365 		xpt_done(pccb);
2366 	}
2367 	break;
2368 	case WRITE_BUFFER:
2369 	case READ_BUFFER: {
2370 		if (arcmsr_iop_message_xfer(acb, pccb)) {
2371 			pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2372 			pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
2373 		}
2374 		xpt_done(pccb);
2375 	}
2376 	break;
2377 	default:
2378 		xpt_done(pccb);
2379 	}
2380 }
2381 /*
2382 *********************************************************************
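** arcmsr_action: CAM SIM action entry point; maps XPT_SCSI_IO requests onto srbs and
** answers the other CCB function codes (path inquiry, abort, reset, transfer settings, ...).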
2383 *********************************************************************
2384 */
2385 static void arcmsr_action(struct cam_sim * psim, union ccb * pccb)
2386 {
2387 	struct AdapterControlBlock *  acb;
2388 
2389 	acb=(struct AdapterControlBlock *) cam_sim_softc(psim);
2390 	if(acb==NULL) {
2391 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2392 		xpt_done(pccb);
2393 		return;
2394 	}
2395 	switch (pccb->ccb_h.func_code) {
2396 	case XPT_SCSI_IO: {
2397 			struct CommandControlBlock *srb;
2398 			int target=pccb->ccb_h.target_id;
2399 
2400 			if(target == 16) {
2401 				/* virtual device for iop message transfer */
2402 				arcmsr_handle_virtual_command(acb, pccb);
2403 				return;
2404 			}
2405 			if((srb=arcmsr_get_freesrb(acb)) == NULL) {
2406 				pccb->ccb_h.status |= CAM_RESRC_UNAVAIL;
2407 				xpt_done(pccb);
2408 				return;
2409 			}
2410 			pccb->ccb_h.arcmsr_ccbsrb_ptr=srb;
2411 			pccb->ccb_h.arcmsr_ccbacb_ptr=acb;
2412 			srb->pccb=pccb;
2413 			if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2414 				if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) {
2415 					/* Single buffer */
2416 					if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) {
2417 						/* Buffer is virtual */
2418 						u_int32_t error;
2419 
2420 						crit_enter();
2421 						error =	bus_dmamap_load(acb->dm_segs_dmat
2422 							, srb->dm_segs_dmamap
2423 							, pccb->csio.data_ptr
2424 							, pccb->csio.dxfer_len
2425 							, arcmsr_execute_srb, srb, /*flags*/0);
2426 						if(error == EINPROGRESS) {
2427 							xpt_freeze_simq(acb->psim, 1);
2428 							pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2429 						}
2430 						crit_exit();
2431 					}
2432 					else {		/* Buffer is physical */
2433 						struct bus_dma_segment seg;
2434 
2435 						seg.ds_addr = (bus_addr_t)pccb->csio.data_ptr;
2436 						seg.ds_len = pccb->csio.dxfer_len;
2437 						arcmsr_execute_srb(srb, &seg, 1, 0);
2438 					}
2439 				} else {
2440 					/* Scatter/gather list */
2441 					struct bus_dma_segment *segs;
2442 
2443 					if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
2444 					|| (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2445 						pccb->ccb_h.status |= CAM_PROVIDE_FAIL;
2446 						xpt_done(pccb);
2447 						kfree(srb, M_DEVBUF);
2448 						return;
2449 					}
2450 					segs=(struct bus_dma_segment *)pccb->csio.data_ptr;
2451 					arcmsr_execute_srb(srb, segs, pccb->csio.sglist_cnt, 0);
2452 				}
2453 			} else {
2454 				arcmsr_execute_srb(srb, NULL, 0, 0);
2455 			}
2456 			break;
2457 		}
2458 	case XPT_TARGET_IO: {
2459 			/* target mode does not yet support vendor specific commands. */
2460 			pccb->ccb_h.status |= CAM_REQ_CMP;
2461 			xpt_done(pccb);
2462 			break;
2463 		}
2464 	case XPT_PATH_INQ: {
2465 			struct ccb_pathinq *cpi= &pccb->cpi;
2466 
2467 			cpi->version_num=1;
2468 			cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE;
2469 			cpi->target_sprt=0;
2470 			cpi->hba_misc=0;
2471 			cpi->hba_eng_cnt=0;
2472 			cpi->max_target=ARCMSR_MAX_TARGETID;        /* 0-16 */
2473 			cpi->max_lun=ARCMSR_MAX_TARGETLUN;	    /* 0-7 */
2474 			cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */
2475 			cpi->bus_id=cam_sim_bus(psim);
2476 			strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2477 			strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN);
2478 			strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
2479 			cpi->unit_number=cam_sim_unit(psim);
2480 			if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
2481 				cpi->base_transfer_speed = 600000;
2482 			else
2483 				cpi->base_transfer_speed = 300000;
2484 			if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2485 			   (acb->vendor_device_id == PCIDevVenIDARC1680))
2486 			{
2487 				cpi->transport = XPORT_SAS;
2488 				cpi->transport_version = 0;
2489 				cpi->protocol_version = SCSI_REV_SPC2;
2490 			}
2491 			else
2492 			{
2493 				cpi->transport = XPORT_SPI;
2494 				cpi->transport_version = 2;
2495 				cpi->protocol_version = SCSI_REV_2;
2496 			}
2497 			cpi->protocol = PROTO_SCSI;
2498 			cpi->ccb_h.status |= CAM_REQ_CMP;
2499 			xpt_done(pccb);
2500 			break;
2501 		}
2502 	case XPT_ABORT: {
2503 			union ccb *pabort_ccb;
2504 
2505 			pabort_ccb=pccb->cab.abort_ccb;
2506 			switch (pabort_ccb->ccb_h.func_code) {
2507 			case XPT_ACCEPT_TARGET_IO:
2508 			case XPT_IMMED_NOTIFY:
2509 			case XPT_CONT_TARGET_IO:
2510 				if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) {
2511 					pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED;
2512 					xpt_done(pabort_ccb);
2513 					pccb->ccb_h.status |= CAM_REQ_CMP;
2514 				} else {
2515 					xpt_print_path(pabort_ccb->ccb_h.path);
2516 					kprintf("Not found\n");
2517 					pccb->ccb_h.status |= CAM_PATH_INVALID;
2518 				}
2519 				break;
2520 			case XPT_SCSI_IO:
2521 				pccb->ccb_h.status |= CAM_UA_ABORT;
2522 				break;
2523 			default:
2524 				pccb->ccb_h.status |= CAM_REQ_INVALID;
2525 				break;
2526 			}
2527 			xpt_done(pccb);
2528 			break;
2529 		}
2530 	case XPT_RESET_BUS:
2531 	case XPT_RESET_DEV: {
2532 			u_int32_t     i;
2533 
2534 			arcmsr_bus_reset(acb);
2535 			for (i=0; i < 500; i++) {
2536 				DELAY(1000);
2537 			}
2538 			pccb->ccb_h.status |= CAM_REQ_CMP;
2539 			xpt_done(pccb);
2540 			break;
2541 		}
2542 	case XPT_TERM_IO: {
2543 			pccb->ccb_h.status |= CAM_REQ_INVALID;
2544 			xpt_done(pccb);
2545 			break;
2546 		}
2547 	case XPT_GET_TRAN_SETTINGS: {
2548 			struct ccb_trans_settings *cts;
2549 
2550 			if(pccb->ccb_h.target_id == 16) {
2551 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2552 				xpt_done(pccb);
2553 				break;
2554 			}
2555 			cts= &pccb->cts;
2556 			{
2557 				struct ccb_trans_settings_scsi *scsi;
2558 				struct ccb_trans_settings_spi *spi;
2559 				struct ccb_trans_settings_sas *sas;
2560 
2561 				scsi = &cts->proto_specific.scsi;
2562 				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2563 				scsi->valid = CTS_SCSI_VALID_TQ;
2564 				cts->protocol = PROTO_SCSI;
2565 
2566 				if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2567 				   (acb->vendor_device_id == PCIDevVenIDARC1680))
2568 				{
2569 					cts->protocol_version = SCSI_REV_SPC2;
2570 					cts->transport_version = 0;
2571 					cts->transport = XPORT_SAS;
2572 					sas = &cts->xport_specific.sas;
2573 					sas->valid = CTS_SAS_VALID_SPEED;
2574 					if(acb->vendor_device_id == PCIDevVenIDARC1880)
2575 						sas->bitrate = 600000;
2576 					else if(acb->vendor_device_id == PCIDevVenIDARC1680)
2577 						sas->bitrate = 300000;
2578 				}
2579 				else
2580 				{
2581 					cts->protocol_version = SCSI_REV_2;
2582 					cts->transport_version = 2;
2583 					cts->transport = XPORT_SPI;
2584 					spi = &cts->xport_specific.spi;
2585 					spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2586 					spi->sync_period=2;
2587 					spi->sync_offset=32;
2588 					spi->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
2589 					spi->valid = CTS_SPI_VALID_DISC
2590 						| CTS_SPI_VALID_SYNC_RATE
2591 						| CTS_SPI_VALID_SYNC_OFFSET
2592 						| CTS_SPI_VALID_BUS_WIDTH;
2593 				}
2594 			}
2595 			pccb->ccb_h.status |= CAM_REQ_CMP;
2596 			xpt_done(pccb);
2597 			break;
2598 		}
2599 	case XPT_SET_TRAN_SETTINGS: {
2600 			pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2601 			xpt_done(pccb);
2602 			break;
2603 		}
2604 	case XPT_CALC_GEOMETRY:
2605 			if(pccb->ccb_h.target_id == 16) {
2606 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2607 				xpt_done(pccb);
2608 				break;
2609 			}
2610 			cam_calc_geometry(&pccb->ccg, 1);
2611 			xpt_done(pccb);
2612 			break;
2613 	default:
2614 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2615 		xpt_done(pccb);
2616 		break;
2617 	}
2618 }
2619 /*
2620 **********************************************************************
2621 **********************************************************************
2622 */
2623 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2624 {
2625 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2626 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2627 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
2628 		kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2629 	}
2630 }
2631 /*
2632 **********************************************************************
2633 **********************************************************************
2634 */
2635 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
2636 {
2637 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2638 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,  ARCMSR_MESSAGE_START_BGRB);
2639 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2640 		kprintf( "arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2641 	}
2642 }
2643 /*
2644 **********************************************************************
2645 **********************************************************************
2646 */
2647 static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb)
2648 {
2649 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2650 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2651 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2652 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
2653 		kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2654 	}
2655 }
2656 /*
2657 **********************************************************************
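** arcmsr_start_adapter_bgrb: tell the firmware to start background rebuild, per adapter type.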
2658 **********************************************************************
2659 */
2660 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2661 {
2662 	switch (acb->adapter_type) {
2663 	case ACB_ADAPTER_TYPE_A:
2664 		arcmsr_start_hba_bgrb(acb);
2665 		break;
2666 	case ACB_ADAPTER_TYPE_B:
2667 		arcmsr_start_hbb_bgrb(acb);
2668 		break;
2669 	case ACB_ADAPTER_TYPE_C:
2670 		arcmsr_start_hbc_bgrb(acb);
2671 		break;
2672 	}
2673 }
2674 /*
2675 **********************************************************************
2676 **
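** arcmsr_polling_hba_srbdone: poll the type A outbound queue for completions
** (used while interrupts are disabled, e.g. during command abort).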
2677 **********************************************************************
2678 */
2679 static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2680 {
2681 	struct CommandControlBlock *srb;
2682 	u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0;
2683 	u_int16_t	error;
2684 
2685 polling_ccb_retry:
2686 	poll_count++;
2687 	outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
2688 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);	/*clear interrupt*/
2689 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2690 	while(1) {
2691 		if((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
2692 			0, outbound_queueport))==0xFFFFFFFF) {
2693 			if(poll_srb_done) {
2694 				break;/*chip FIFO no ccb for completion already*/
2695 			} else {
2696 				UDELAY(25000);
2697 				if ((poll_count > 100) && (poll_srb != NULL)) {
2698 					break;
2699 				}
2700 				goto polling_ccb_retry;
2701 			}
2702 		}
2703 		/* check whether the command completed without error */
2704 		srb=(struct CommandControlBlock *)
2705 			(acb->vir2phy_offset+(flag_srb << 5));	/* frame must be 32 bytes aligned */
2706 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
2707 		poll_srb_done = (srb==poll_srb) ? 1:0;
2708 		if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2709 			if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2710 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2711 					"poll command abort successfully \n"
2712 					, acb->pci_unit
2713 					, srb->pccb->ccb_h.target_id
2714 					, srb->pccb->ccb_h.target_lun, srb);
2715 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2716 				arcmsr_srb_complete(srb, 1);
2717 				continue;
2718 			}
2719 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2720 				"srboutstandingcount=%d \n"
2721 				, acb->pci_unit
2722 				, srb, acb->srboutstandingcount);
2723 			continue;
2724 		}
2725 		arcmsr_report_srb_state(acb, srb, error);
2726 	}	/*drain reply FIFO*/
2727 }
2728 /*
2729 **********************************************************************
2730 **
2731 **********************************************************************
2732 */
2733 static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2734 {
2735 	struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
2736 	struct CommandControlBlock *srb;
2737 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2738 	int index;
2739 	u_int16_t	error;
2740 
2741 polling_ccb_retry:
2742 	poll_count++;
2743 	CHIP_REG_WRITE32(HBB_DOORBELL,
2744 	0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
2745 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2746 	while(1) {
2747 		index=phbbmu->doneq_index;
2748 		if((flag_srb=phbbmu->done_qbuffer[index]) == 0) {
2749 			if(poll_srb_done) {
2750 				break;/*chip FIFO no ccb for completion already*/
2751 			} else {
2752 				UDELAY(25000);
2753 				if ((poll_count > 100) && (poll_srb != NULL)) {
2754 					break;
2755 				}
2756 				goto polling_ccb_retry;
2757 			}
2758 		}
2759 		phbbmu->done_qbuffer[index]=0;
2760 		index++;
2761 		index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
2762 		phbbmu->doneq_index=index;
2763 		/* check whether the command completed without error */
2764 		srb=(struct CommandControlBlock *)
2765 			(acb->vir2phy_offset+(flag_srb << 5));	/* frame must be 32 bytes aligned */
2766 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
2767 		poll_srb_done = (srb==poll_srb) ? 1:0;
2768 		if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2769 			if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2770 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2771 					"poll command abort successfully \n"
2772 					, acb->pci_unit
2773 					, srb->pccb->ccb_h.target_id
2774 					, srb->pccb->ccb_h.target_lun, srb);
2775 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2776 				arcmsr_srb_complete(srb, 1);
2777 				continue;
2778 			}
2779 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2780 				"srboutstandingcount=%d \n"
2781 				, acb->pci_unit
2782 				, srb, acb->srboutstandingcount);
2783 			continue;
2784 		}
2785 		arcmsr_report_srb_state(acb, srb, error);
2786 	}	/*drain reply FIFO*/
2787 }
2788 /*
2789 **********************************************************************
2790 **
2791 **********************************************************************
2792 */
2793 static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2794 {
2795 	struct CommandControlBlock *srb;
2796 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2797 	u_int16_t	error;
2798 
2799 polling_ccb_retry:
2800 	poll_count++;
2801 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2802 	while(1) {
2803 		if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
2804 			if(poll_srb_done) {
2805 				break;/*chip FIFO no ccb for completion already*/
2806 			} else {
2807 				UDELAY(25000);
2808 				if ((poll_count > 100) && (poll_srb != NULL)) {
2809 					break;
2810 				}
2811 				if (acb->srboutstandingcount == 0) {
2812 					break;
2813 				}
2814 				goto polling_ccb_retry;
2815 			}
2816 		}
2817 		flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
2818 		/* check whether the command completed without error */
2819 		srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));	/* frame must be 32 bytes aligned */
2820 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
2821 		if (poll_srb != NULL)
2822 			poll_srb_done = (srb==poll_srb) ? 1:0;
2823 		if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2824 			if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2825 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'poll command abort successfully \n"
2826 						, acb->pci_unit, srb->pccb->ccb_h.target_id, srb->pccb->ccb_h.target_lun, srb);
2827 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2828 				arcmsr_srb_complete(srb, 1);
2829 				continue;
2830 			}
2831 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n"
2832 					, acb->pci_unit, srb, acb->srboutstandingcount);
2833 			continue;
2834 		}
2835 		arcmsr_report_srb_state(acb, srb, error);
2836 	}	/*drain reply FIFO*/
2837 }
2838 /*
2839 **********************************************************************
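** arcmsr_polling_srbdone: dispatch polled completion handling to the routine that
** matches the adapter type.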
2840 **********************************************************************
2841 */
2842 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2843 {
2844 	switch (acb->adapter_type) {
2845 	case ACB_ADAPTER_TYPE_A: {
2846 			arcmsr_polling_hba_srbdone(acb, poll_srb);
2847 		}
2848 		break;
2849 	case ACB_ADAPTER_TYPE_B: {
2850 			arcmsr_polling_hbb_srbdone(acb, poll_srb);
2851 		}
2852 		break;
2853 	case ACB_ADAPTER_TYPE_C: {
2854 			arcmsr_polling_hbc_srbdone(acb, poll_srb);
2855 		}
2856 		break;
2857 	}
2858 }
2859 /*
2860 **********************************************************************
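** arcmsr_get_hba_config: issue GET_CONFIG to a type A adapter and copy the firmware
** model, version, device map and queue parameters into the acb.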
2861 **********************************************************************
2862 */
2863 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
2864 {
2865 	char *acb_firm_model=acb->firm_model;
2866 	char *acb_firm_version=acb->firm_version;
2867 	char *acb_device_map = acb->device_map;
2868 	size_t iop_firm_model=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
2869 	size_t iop_firm_version=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
2870 	size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2871 	int i;
2872 
2873 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2874 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
2875 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2876 	}
2877 	i=0;
2878 	while(i<8) {
2879 		*acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
2880 		/* 8 bytes firm_model, 15, 60-67*/
2881 		acb_firm_model++;
2882 		i++;
2883 	}
2884 	i=0;
2885 	while(i<16) {
2886 		*acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
2887 		/* 16 bytes firm_version, 17, 68-83*/
2888 		acb_firm_version++;
2889 		i++;
2890 	}
2891 	i=0;
2892 	while(i<16) {
2893 		*acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
2894 		acb_device_map++;
2895 		i++;
2896 	}
2897 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2898 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2899 	acb->firm_request_len=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
2900 	acb->firm_numbers_queue=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2901 	acb->firm_sdram_size=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
2902 	acb->firm_ide_channels=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
2903 	acb->firm_cfg_version=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
2904 }
2905 /*
2906 **********************************************************************
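** type B: same GET_CONFIG sequence as type A, but the request is posted
** through the drv2iop doorbell and the rwbuffer is read from the second
** mapped BAR (btag[1]/bhandle[1])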
2907 **********************************************************************
2908 */
2909 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
2910 {
2911 	char *acb_firm_model=acb->firm_model;
2912 	char *acb_firm_version=acb->firm_version;
2913 	char *acb_device_map = acb->device_map;
2914 	size_t iop_firm_model=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
2915 	size_t iop_firm_version=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
2916 	size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2917 	int i;
2918 
2919 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
2920 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2921 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2922 	}
2923 	i=0;
2924 	while(i<8) {
2925 		*acb_firm_model=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i);
2926 		/* 8 bytes firm_model, 15, 60-67*/
2927 		acb_firm_model++;
2928 		i++;
2929 	}
2930 	i=0;
2931 	while(i<16) {
2932 		*acb_firm_version=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i);
2933 		/* 16 bytes firm_version, 17, 68-83*/
2934 		acb_firm_version++;
2935 		i++;
2936 	}
2937 	i=0;
2938 	while(i<16) {
2939 		*acb_device_map=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i);
2940 		acb_device_map++;
2941 		i++;
2942 	}
2943 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2944 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2945 	acb->firm_request_len=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
2946 	acb->firm_numbers_queue=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2947 	acb->firm_sdram_size=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
2948 	acb->firm_ide_channels=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
2949 	acb->firm_cfg_version=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
2950 }
2951 /*
2952 **********************************************************************
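** type C: the GET_CONFIG message must also be followed by a
** DRV2IOP_MESSAGE_CMD_DONE doorbell write before waiting for the
** message interrupt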
2953 **********************************************************************
2954 */
2955 static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb)
2956 {
2957 	char *acb_firm_model=acb->firm_model;
2958 	char *acb_firm_version=acb->firm_version;
2959 	char *acb_device_map = acb->device_map;
2960 	size_t iop_firm_model=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);   /*firm_model,15,60-67*/
2961 	size_t iop_firm_version=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
2962 	size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2963 	int i;
2964 
2965 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2966 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2967 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
2968 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2969 	}
2970 	i=0;
2971 	while(i<8) {
2972 		*acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
2973 		/* 8 bytes firm_model, 15, 60-67*/
2974 		acb_firm_model++;
2975 		i++;
2976 	}
2977 	i=0;
2978 	while(i<16) {
2979 		*acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
2980 		/* 16 bytes firm_version, 17, 68-83*/
2981 		acb_firm_version++;
2982 		i++;
2983 	}
2984 	i=0;
2985 	while(i<16) {
2986 		*acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
2987 		acb_device_map++;
2988 		i++;
2989 	}
2990 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2991 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2992 	acb->firm_request_len	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]);	/*firm_request_len,   1, 04-07*/
2993 	acb->firm_numbers_queue	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]);	/*firm_numbers_queue, 2, 08-11*/
2994 	acb->firm_sdram_size	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]);	/*firm_sdram_size,    3, 12-15*/
2995 	acb->firm_ide_channels	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]);	/*firm_ide_channels,  4, 16-19*/
2996 	acb->firm_cfg_version	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
2997 }
2998 /*
2999 **********************************************************************
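** read the firmware configuration, dispatching by adapter type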
3000 **********************************************************************
3001 */
3002 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
3003 {
3004 	switch (acb->adapter_type) {
3005 	case ACB_ADAPTER_TYPE_A: {
3006 			arcmsr_get_hba_config(acb);
3007 		}
3008 		break;
3009 	case ACB_ADAPTER_TYPE_B: {
3010 			arcmsr_get_hbb_config(acb);
3011 		}
3012 		break;
3013 	case ACB_ADAPTER_TYPE_C: {
3014 			arcmsr_get_hbc_config(acb);
3015 		}
3016 		break;
3017 	}
3018 }
3019 /*
3020 **********************************************************************
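** poll for the FIRMWARE_OK flag, giving up after 2000 * 15ms = 30 seconds;
** type B additionally acknowledges with an end-of-interrupt doorbell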
3021 **********************************************************************
3022 */
3023 static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb)
3024 {
3025 	int	timeout=0;
3026 
3027 	switch (acb->adapter_type) {
3028 	case ACB_ADAPTER_TYPE_A: {
3029 			while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0)
3030 			{
3031 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3032 				{
3033 					kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3034 					return;
3035 				}
3036 				UDELAY(15000); /* wait 15 milli-seconds */
3037 			}
3038 		}
3039 		break;
3040 	case ACB_ADAPTER_TYPE_B: {
3041 			while ((CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0)
3042 			{
3043 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3044 				{
3045 					kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3046 					return;
3047 				}
3048 				UDELAY(15000); /* wait 15 milli-seconds */
3049 			}
3050 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3051 		}
3052 		break;
3053 	case ACB_ADAPTER_TYPE_C: {
3054 			while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0)
3055 			{
3056 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3057 				{
3058 					kprintf( "arcmsr%d: timed out waiting for firmware ready\n", acb->pci_unit);
3059 					return;
3060 				}
3061 				UDELAY(15000); /* wait 15 milli-seconds */
3062 			}
3063 		}
3064 		break;
3065 	}
3066 }
3067 /*
3068 **********************************************************************
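** drain any pending doorbell state and acknowledge it so the message
** Qbuffer between driver and IOP starts out empty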
3069 **********************************************************************
3070 */
3071 static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb)
3072 {
3073 	u_int32_t outbound_doorbell;
3074 
3075 	switch (acb->adapter_type) {
3076 	case ACB_ADAPTER_TYPE_A: {
3077 			/* empty doorbell Qbuffer if the doorbell rang */
3078 			outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
3079 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell);	/*clear doorbell interrupt */
3080 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
3081 
3082 		}
3083 		break;
3084 	case ACB_ADAPTER_TYPE_B: {
3085 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/
3086 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
3087 			/* let IOP know data has been read */
3088 		}
3089 		break;
3090 	case ACB_ADAPTER_TYPE_C: {
3091 			/* empty doorbell Qbuffer if the doorbell rang */
3092 			outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
3093 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell);	/*clear doorbell interrupt */
3094 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
3095 
3096 		}
3097 		break;
3098 	}
3099 }
3100 /*
3101 ************************************************************************
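** tell the IOP where the SRB pool lives: types A/C only need the high
** 32 bits of the pool address (and only when it is non-zero), while
** type B is also given the post/done command queue window; returns
** FALSE on a message timeout, TRUE otherwise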
3102 ************************************************************************
3103 */
3104 static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3105 {
3106 	unsigned long srb_phyaddr;
3107 	u_int32_t srb_phyaddr_hi32;
3108 
3109 	/*
3110 	********************************************************************
3111 	** here we need to tell the iop 331 the high 32 bits of our free srb
3112 	** pool physical address (freesrb.HighPart), if it is not zero
3113 	********************************************************************
3114 	*/
3115 	srb_phyaddr= (unsigned long) acb->srb_phyaddr.phyaddr;
3116 //	srb_phyaddr_hi32=(u_int32_t) ((srb_phyaddr>>16)>>16);
3117 	srb_phyaddr_hi32=acb->srb_phyaddr.B.phyadd_high;
3118 	switch (acb->adapter_type) {
3119 	case ACB_ADAPTER_TYPE_A: {
3120 			if(srb_phyaddr_hi32!=0) {
3121 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3122 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3123 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3124 				if(!arcmsr_hba_wait_msgint_ready(acb)) {
3125 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3126 					return FALSE;
3127 				}
3128 			}
3129 		}
3130 		break;
3131 		/*
3132 		***********************************************************************
3133 		**    if adapter type B, set window of "post command Q"
3134 		***********************************************************************
3135 		*/
3136 	case ACB_ADAPTER_TYPE_B: {
3137 			u_int32_t post_queue_phyaddr;
3138 			struct HBB_MessageUnit *phbbmu;
3139 
3140 			phbbmu=(struct HBB_MessageUnit *)acb->pmu;
3141 			phbbmu->postq_index=0;
3142 			phbbmu->doneq_index=0;
3143 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW);
3144 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3145 				kprintf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit);
3146 				return FALSE;
3147 			}
3148 			post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE
3149 			+ offsetof(struct HBB_MessageUnit, post_qbuffer);
3150 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */
3151 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normally zero */
3152 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* post command Q base address */
3153 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* done command Q base = postQ base + (256+8)*4 */
3154 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* post/done Q window size: (256+8)*4 bytes */
3155 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG);
3156 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3157 				kprintf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit);
3158 				return FALSE;
3159 			}
3160 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE);
3161 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3162 				kprintf( "arcmsr%d: 'start driver mode' timeout \n", acb->pci_unit);
3163 				return FALSE;
3164 			}
3165 		}
3166 		break;
3167 	case ACB_ADAPTER_TYPE_C: {
3168 			if(srb_phyaddr_hi32!=0) {
3169 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3170 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3171 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3172 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3173 				if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3174 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3175 					return FALSE;
3176 				}
3177 			}
3178 		}
3179 		break;
3180 	}
3181 	return (TRUE);
3182 }
3183 /*
3184 ************************************************************************
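** only type B adapters need the explicit ACTIVE_EOI_MODE message;
** types A and C are a no-op here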
3185 ************************************************************************
3186 */
3187 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
3188 {
3189 	switch (acb->adapter_type)
3190 	{
3191 	case ACB_ADAPTER_TYPE_A:
3192 	case ACB_ADAPTER_TYPE_C:
3193 		break;
3194 	case ACB_ADAPTER_TYPE_B: {
3195 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
3196 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3197 				kprintf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit);
3198 
3199 				return;
3200 			}
3201 		}
3202 		break;
3203 	}
3204 }
3205 /*
3206 **********************************************************************
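** bring the IOP online: mask interrupts, wait for firmware, confirm the
** SRB layout, read the firmware spec, start background rebuild, drain
** the doorbell, enable EOI mode and finally re-enable interrupts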
3207 **********************************************************************
3208 */
3209 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
3210 {
3211 	u_int32_t intmask_org;
3212 
3213 	/* disable all outbound interrupt */
3214 	intmask_org=arcmsr_disable_allintr(acb);
3215 	arcmsr_wait_firmware_ready(acb);
3216 	arcmsr_iop_confirm(acb);
3217 	arcmsr_get_firmware_spec(acb);
3218 	/*start background rebuild*/
3219 	arcmsr_start_adapter_bgrb(acb);
3220 	/* empty doorbell Qbuffer if the doorbell rang */
3221 	arcmsr_clear_doorbell_queue_buffer(acb);
3222 	arcmsr_enable_eoi_mode(acb);
3223 	/* enable outbound Post Queue, outbound doorbell Interrupt */
3224 	arcmsr_enable_allintr(acb, intmask_org);
3225 	acb->acb_flags |=ACB_F_IOP_INITED;
3226 }
3227 /*
3228 **********************************************************************
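** bus_dmamap_load callback: carve the coherent allocation into
** ARCMSR_MAX_FREESRB_NUM SRBs, create a dmamap and record the (shifted)
** physical address for each, and remember the virtual-to-physical
** offset of the pool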
3229 **********************************************************************
3230 */
3231 static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3232 {
3233 	struct AdapterControlBlock *acb=arg;
3234 	struct CommandControlBlock *srb_tmp;
3235 	u_int8_t * dma_memptr;
3236 	u_int32_t i;
3237 	unsigned long srb_phyaddr=(unsigned long)segs->ds_addr;
3238 
3239 	dma_memptr=acb->uncacheptr;
3240 	acb->srb_phyaddr.phyaddr=srb_phyaddr;
3241 	srb_tmp=(struct CommandControlBlock *)dma_memptr;
3242 	for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3243 		if(bus_dmamap_create(acb->dm_segs_dmat,
3244 			 /*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) {
3245 			acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
3246 			kprintf("arcmsr%d:"
3247 			" srb dmamap bus_dmamap_create error\n", acb->pci_unit);
3248 			return;
3249 		}
3250 		srb_tmp->cdb_shifted_phyaddr=(acb->adapter_type==ACB_ADAPTER_TYPE_C)?srb_phyaddr:(srb_phyaddr >> 5);
3251 		srb_tmp->acb=acb;
3252 		acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp;
3253 		srb_phyaddr=srb_phyaddr+SRB_SIZE;
3254 		srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp+SRB_SIZE);
3255 	}
3256 	acb->vir2phy_offset=(unsigned long)srb_tmp-srb_phyaddr;
3257 }
3258 /*
3259 ************************************************************************
3260 **
3261 **
3262 ************************************************************************
3263 */
3264 static void arcmsr_free_resource(struct AdapterControlBlock *acb)
3265 {
3266 	/* remove the control device */
3267 	if(acb->ioctl_dev != NULL) {
3268 		destroy_dev(acb->ioctl_dev);
3269 	}
3270 	bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap);
3271 	bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap);
3272 	bus_dma_tag_destroy(acb->srb_dmat);
3273 	bus_dma_tag_destroy(acb->dm_segs_dmat);
3274 	bus_dma_tag_destroy(acb->parent_dmat);
3275 }
3276 /*
3277 ************************************************************************
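** classify the adapter by PCI device id, create the DMA tags, allocate
** and map the SRB pool, enable bus mastering and map the register BARs
** for the detected adapter type, then initialize the IOP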
3278 ************************************************************************
3279 */
3280 static u_int32_t arcmsr_initialize(device_t dev)
3281 {
3282 	struct AdapterControlBlock *acb=device_get_softc(dev);
3283 	u_int16_t pci_command;
3284 	int i, j,max_coherent_size;
3285 	u_int32_t vendor_dev_id;
3286 
3287 	vendor_dev_id = pci_get_devid(dev);
3288 	acb->vendor_device_id = vendor_dev_id;
3289 	switch (vendor_dev_id) {
3290 	case PCIDevVenIDARC1880:
3291 	case PCIDevVenIDARC1882:
3292 	case PCIDevVenIDARC1213:
3293 	case PCIDevVenIDARC1223: {
3294 			acb->adapter_type=ACB_ADAPTER_TYPE_C;
3295 			acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
3296 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3297 		}
3298 		break;
3299 	case PCIDevVenIDARC1200:
3300 	case PCIDevVenIDARC1201: {
3301 			acb->adapter_type=ACB_ADAPTER_TYPE_B;
3302 			acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
3303 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE+(sizeof(struct HBB_MessageUnit));
3304 		}
3305 		break;
3306 	case PCIDevVenIDARC1110:
3307 	case PCIDevVenIDARC1120:
3308 	case PCIDevVenIDARC1130:
3309 	case PCIDevVenIDARC1160:
3310 	case PCIDevVenIDARC1170:
3311 	case PCIDevVenIDARC1210:
3312 	case PCIDevVenIDARC1220:
3313 	case PCIDevVenIDARC1230:
3314 	case PCIDevVenIDARC1231:
3315 	case PCIDevVenIDARC1260:
3316 	case PCIDevVenIDARC1261:
3317 	case PCIDevVenIDARC1270:
3318 	case PCIDevVenIDARC1280:
3319 	case PCIDevVenIDARC1212:
3320 	case PCIDevVenIDARC1222:
3321 	case PCIDevVenIDARC1380:
3322 	case PCIDevVenIDARC1381:
3323 	case PCIDevVenIDARC1680:
3324 	case PCIDevVenIDARC1681: {
3325 			acb->adapter_type=ACB_ADAPTER_TYPE_A;
3326 			acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
3327 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3328 		}
3329 		break;
3330 	default: {
3331 			kprintf("arcmsr%d:"
3332 			" unknown RAID adapter type \n", device_get_unit(dev));
3333 			return ENOMEM;
3334 		}
3335 	}
3336 	if(bus_dma_tag_create(  /*parent*/	NULL,
3337 				/*alignment*/	1,
3338 				/*boundary*/	0,
3339 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3340 				/*highaddr*/	BUS_SPACE_MAXADDR,
3341 				/*filter*/	NULL,
3342 				/*filterarg*/	NULL,
3343 				/*maxsize*/	BUS_SPACE_MAXSIZE_32BIT,
3344 				/*nsegments*/	BUS_SPACE_UNRESTRICTED,
3345 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3346 				/*flags*/	0,
3347 						&acb->parent_dmat) != 0)
3348 	{
3349 		kprintf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3350 		return ENOMEM;
3351 	}
3352 	/* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
3353 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3354 				/*alignment*/	1,
3355 				/*boundary*/	0,
3356 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3357 				/*highaddr*/	BUS_SPACE_MAXADDR,
3358 				/*filter*/	NULL,
3359 				/*filterarg*/	NULL,
3360 				/*maxsize*/	ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM,
3361 				/*nsegments*/	ARCMSR_MAX_SG_ENTRIES,
3362 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3363 				/*flags*/	0,
3364 						&acb->dm_segs_dmat) != 0)
3365 	{
3366 		bus_dma_tag_destroy(acb->parent_dmat);
3367 		kprintf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3368 		return ENOMEM;
3369 	}
3370 	/* DMA tag for our srb structures.... Allocate the freesrb memory */
3371 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3372 				/*alignment*/	0x20,
3373 				/*boundary*/	0,
3374 				/*lowaddr*/	BUS_SPACE_MAXADDR_32BIT,
3375 				/*highaddr*/	BUS_SPACE_MAXADDR,
3376 				/*filter*/	NULL,
3377 				/*filterarg*/	NULL,
3378 				/*maxsize*/	max_coherent_size,
3379 				/*nsegments*/	1,
3380 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3381 				/*flags*/	0,
3382 						&acb->srb_dmat) != 0)
3383 	{
3384 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3385 		bus_dma_tag_destroy(acb->parent_dmat);
3386 		kprintf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3387 		return ENXIO;
3388 	}
3389 	/* Allocation for our srbs */
3390 	if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) {
3391 		bus_dma_tag_destroy(acb->srb_dmat);
3392 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3393 		bus_dma_tag_destroy(acb->parent_dmat);
3394 		kprintf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev));
3395 		return ENXIO;
3396 	}
3397 	/* And permanently map them */
3398 	if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) {
3399 		bus_dma_tag_destroy(acb->srb_dmat);
3400 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3401 		bus_dma_tag_destroy(acb->parent_dmat);
3402 		kprintf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev));
3403 		return ENXIO;
3404 	}
3405 	pci_command=pci_read_config(dev, PCIR_COMMAND, 2);
3406 	pci_command |= PCIM_CMD_BUSMASTEREN;
3407 	pci_command |= PCIM_CMD_PERRESPEN;
3408 	pci_command |= PCIM_CMD_MWRICEN;
3409 	/* Enable Busmaster/Mem */
3410 	pci_command |= PCIM_CMD_MEMEN;
3411 	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
3412 	switch(acb->adapter_type) {
3413 	case ACB_ADAPTER_TYPE_A: {
3414 			u_int32_t rid0=PCIR_BAR(0);
3415 			vm_offset_t	mem_base0;
3416 
3417 			acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, 0x1000, RF_ACTIVE);
3418 			if(acb->sys_res_arcmsr[0] == NULL) {
3419 				arcmsr_free_resource(acb);
3420 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3421 				return ENOMEM;
3422 			}
3423 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3424 				arcmsr_free_resource(acb);
3425 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3426 				return ENXIO;
3427 			}
3428 			mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3429 			if(mem_base0==0) {
3430 				arcmsr_free_resource(acb);
3431 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3432 				return ENXIO;
3433 			}
3434 			acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3435 			acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3436 			acb->pmu=(struct MessageUnit_UNION *)mem_base0;
3437 		}
3438 		break;
3439 	case ACB_ADAPTER_TYPE_B: {
3440 			struct HBB_MessageUnit *phbbmu;
3441 			struct CommandControlBlock *freesrb;
3442 			u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) };
3443 			vm_offset_t	mem_base[]={0,0};
3444 			for(i=0; i<2; i++) {
3445 				if(i==0) {
3446 					acb->sys_res_arcmsr[i]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i],
3447 											0ul, ~0ul, sizeof(struct HBB_DOORBELL), RF_ACTIVE);
3448 				} else {
3449 					acb->sys_res_arcmsr[i]=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i],
3450 											0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE);
3451 				}
3452 				if(acb->sys_res_arcmsr[i] == NULL) {
3453 					arcmsr_free_resource(acb);
3454 					kprintf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i);
3455 					return ENOMEM;
3456 				}
3457 				if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) {
3458 					arcmsr_free_resource(acb);
3459 					kprintf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i);
3460 					return ENXIO;
3461 				}
3462 				mem_base[i]=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]);
3463 				if(mem_base[i]==0) {
3464 					arcmsr_free_resource(acb);
3465 					kprintf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i);
3466 					return ENXIO;
3467 				}
3468 				acb->btag[i]=rman_get_bustag(acb->sys_res_arcmsr[i]);
3469 				acb->bhandle[i]=rman_get_bushandle(acb->sys_res_arcmsr[i]);
3470 			}
3471 			freesrb=(struct CommandControlBlock *)acb->uncacheptr;
3472 //			acb->pmu=(struct MessageUnit_UNION *)&freesrb[ARCMSR_MAX_FREESRB_NUM];
3473 			acb->pmu=(struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE);
3474 			phbbmu=(struct HBB_MessageUnit *)acb->pmu;
3475 			phbbmu->hbb_doorbell=(struct HBB_DOORBELL *)mem_base[0];
3476 			phbbmu->hbb_rwbuffer=(struct HBB_RWBUFFER *)mem_base[1];
3477 		}
3478 		break;
3479 	case ACB_ADAPTER_TYPE_C: {
3480 			u_int32_t rid0=PCIR_BAR(1);
3481 			vm_offset_t	mem_base0;
3482 
3483 			acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE);
3484 			if(acb->sys_res_arcmsr[0] == NULL) {
3485 				arcmsr_free_resource(acb);
3486 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3487 				return ENOMEM;
3488 			}
3489 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3490 				arcmsr_free_resource(acb);
3491 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3492 				return ENXIO;
3493 			}
3494 			mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3495 			if(mem_base0==0) {
3496 				arcmsr_free_resource(acb);
3497 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3498 				return ENXIO;
3499 			}
3500 			acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3501 			acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3502 			acb->pmu=(struct MessageUnit_UNION *)mem_base0;
3503 		}
3504 		break;
3505 	}
3506 	if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) {
3507 		arcmsr_free_resource(acb);
3508 		kprintf("arcmsr%d: map free srb failure!\n", device_get_unit(dev));
3509 		return ENXIO;
3510 	}
3511 	acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
3512 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
3513 	/*
3514 	********************************************************************
3515 	** init raid volume state
3516 	********************************************************************
3517 	*/
3518 	for(i=0;i<ARCMSR_MAX_TARGETID;i++) {
3519 		for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) {
3520 			acb->devstate[i][j]=ARECA_RAID_GONE;
3521 		}
3522 	}
3523 	arcmsr_iop_init(acb);
3524 	return(0);
3525 }
3526 /*
3527 ************************************************************************
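** attach: initialize the hardware, hook up the (MSI or INTx) interrupt,
** register a CAM SIM/bus/path, create the /dev/arcmsrN control node and
** start the periodic device-map poll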
3528 ************************************************************************
3529 */
3530 static int arcmsr_attach(device_t dev)
3531 {
3532 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3533 	u_int32_t unit=device_get_unit(dev);
3534 	struct ccb_setasync csa;
3535 	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
3536 	struct resource	*irqres;
3537 	int	rid;
3538 	u_int irq_flags;
3539 
3540 	if(acb == NULL) {
3541 		kprintf("arcmsr%d: cannot allocate softc\n", unit);
3542 		return (ENOMEM);
3543 	}
3544 	ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock");
3545 	if(arcmsr_initialize(dev)) {
3546 		kprintf("arcmsr%d: initialize failure!\n", unit);
3547 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3548 		return ENXIO;
3549 	}
3550 	/* After setting up the adapter, map our interrupt */
3551 	rid=0;
3552 	acb->irq_type = pci_alloc_1intr(dev, arcmsr_msi_enable, &rid,
3553 	    &irq_flags);
3554 	irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1,
3555 	    irq_flags);
3556 	if(irqres == NULL ||
3557 		bus_setup_intr(dev, irqres, INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih, NULL)) {
3558 		arcmsr_free_resource(acb);
3559 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3560 		kprintf("arcmsr%d: unable to register interrupt handler!\n", unit);
3561 		return ENXIO;
3562 	}
3563 	acb->irqres=irqres;
3564 	acb->pci_dev=dev;
3565 	acb->pci_unit=unit;
3566 	/*
3567 	 * Now let the CAM generic SCSI layer find the SCSI devices on
3568 	 * the bus and start the queue running from the idle loop.
3569 	 * Create the device queue for our SIM(s);
3570 	 * (MAX_START_JOB - 1) is the maximum number of simultaneous transactions.
3571 	 */
3572 	devq=cam_simq_alloc(ARCMSR_MAX_START_JOB);
3573 	if(devq == NULL) {
3574 	    arcmsr_free_resource(acb);
3575 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3576 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3577 			pci_release_msi(dev);
3578 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3579 		kprintf("arcmsr%d: cam_simq_alloc failure!\n", unit);
3580 		return ENXIO;
3581 	}
3582 	acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->qbuffer_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
3583 	cam_simq_release(devq);
3584 	if(acb->psim == NULL) {
3585 		arcmsr_free_resource(acb);
3586 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3587 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3588 			pci_release_msi(dev);
3589 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3590 		kprintf("arcmsr%d: cam_sim_alloc failure!\n", unit);
3591 		return ENXIO;
3592 	}
3593 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3594 	if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
3595 		arcmsr_free_resource(acb);
3596 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3597 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3598 			pci_release_msi(dev);
3599 		cam_sim_free(acb->psim);
3600 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3601 		kprintf("arcmsr%d: xpt_bus_register failure!\n", unit);
3602 		return ENXIO;
3603 	}
3604 	if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
3605 		arcmsr_free_resource(acb);
3606 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3607 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3608 			pci_release_msi(dev);
3609 		xpt_bus_deregister(cam_sim_path(acb->psim));
3610 		cam_sim_free(acb->psim);
3611 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3612 		kprintf("arcmsr%d: xpt_create_path failure!\n", unit);
3613 		return ENXIO;
3614 	}
3615 	/*
3616 	****************************************************
3617 	*/
3618 	xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5);
3619 	csa.ccb_h.func_code=XPT_SASYNC_CB;
3620 	csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE;
3621 	csa.callback=arcmsr_async;
3622 	csa.callback_arg=acb->psim;
3623 	xpt_action((union ccb *)&csa);
3624 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3625 	/* Create the control device.  */
3626 	acb->ioctl_dev=make_dev(&arcmsr_ops, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit);
3627 
3628 	acb->ioctl_dev->si_drv1=acb;
3629 	(void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
3630 	arcmsr_callout_init(&acb->devmap_callout);
3631 	callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb);
3632 	return (0);
3633 }
3634 
3635 /*
3636 ************************************************************************
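** probe: match Areca device ids and set the device description;
** MSI is disabled for the SAS 6G (type C) adapters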
3637 ************************************************************************
3638 */
3639 static int arcmsr_probe(device_t dev)
3640 {
3641 	u_int32_t id;
3642 	static char buf[256];
3643 	char x_type[]={"X-TYPE"};
3644 	char *type;
3645 	int raid6 = 1;
3646 
3647 	if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
3648 		return (ENXIO);
3649 	}
3650 	switch(id=pci_get_devid(dev)) {
3651 	case PCIDevVenIDARC1110:
3652 	case PCIDevVenIDARC1200:
3653 	case PCIDevVenIDARC1201:
3654 	case PCIDevVenIDARC1210:
3655 		raid6 = 0;
3656 		/*FALLTHRU*/
3657 	case PCIDevVenIDARC1120:
3658 	case PCIDevVenIDARC1130:
3659 	case PCIDevVenIDARC1160:
3660 	case PCIDevVenIDARC1170:
3661 	case PCIDevVenIDARC1220:
3662 	case PCIDevVenIDARC1230:
3663 	case PCIDevVenIDARC1231:
3664 	case PCIDevVenIDARC1260:
3665 	case PCIDevVenIDARC1261:
3666 	case PCIDevVenIDARC1270:
3667 	case PCIDevVenIDARC1280:
3668 		type = "SATA";
3669 		break;
3670 	case PCIDevVenIDARC1212:
3671 	case PCIDevVenIDARC1222:
3672 	case PCIDevVenIDARC1380:
3673 	case PCIDevVenIDARC1381:
3674 	case PCIDevVenIDARC1680:
3675 	case PCIDevVenIDARC1681:
3676 		type = "SAS 3G";
3677 		break;
3678 	case PCIDevVenIDARC1880:
3679 	case PCIDevVenIDARC1882:
3680 	case PCIDevVenIDARC1213:
3681 	case PCIDevVenIDARC1223:
3682 		type = "SAS 6G";
3683 		arcmsr_msi_enable = 0;
3684 		break;
3685 	default:
3686 		type = x_type;
3687 		break;
3688 	}
3689 	if(type == x_type)
3690 		return(ENXIO);
3691 	ksprintf(buf, "Areca %s Host Adapter RAID Controller%s", type, raid6 ? " (RAID6 capable)" : "");
3692 	device_set_desc_copy(dev, buf);
3693 	return (BUS_PROBE_DEFAULT);
3694 }
3695 /*
3696 ************************************************************************
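** shutdown: stop background rebuild, flush the adapter cache and abort
** any SRBs still outstanding so CAM gets their CCBs back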
3697 ************************************************************************
3698 */
3699 static int arcmsr_shutdown(device_t dev)
3700 {
3701 	u_int32_t  i;
3702 	struct CommandControlBlock *srb;
3703 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3704 
3705 	/* stop adapter background rebuild */
3706 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3707 	/* disable all outbound interrupt */
3708 	arcmsr_disable_allintr(acb);
3709 	arcmsr_stop_adapter_bgrb(acb);
3710 	arcmsr_flush_adapter_cache(acb);
3711 	/* abort all outstanding commands */
3712 	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
3713 	acb->acb_flags &= ~ACB_F_IOP_INITED;
3714 	if(acb->srboutstandingcount!=0) {
3715 		/*clear and abort all outbound posted Q*/
3716 		arcmsr_done4abort_postqueue(acb);
3717 		/* tell the iop 331 that all outstanding commands have been aborted */
3718 		arcmsr_abort_allcmd(acb);
3719 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3720 			srb=acb->psrb_pool[i];
3721 			if(srb->srb_state==ARCMSR_SRB_START) {
3722 				srb->srb_state=ARCMSR_SRB_ABORTED;
3723 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3724 				arcmsr_srb_complete(srb, 1);
3725 			}
3726 		}
3727 	}
3728 	acb->srboutstandingcount=0;
3729 	acb->workingsrb_doneindex=0;
3730 	acb->workingsrb_startindex=0;
3731 #ifdef ARCMSR_DEBUG1
3732 	acb->pktRequestCount = 0;
3733 	acb->pktReturnCount = 0;
3734 #endif
3735 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3736 	return (0);
3737 }
3738 /*
3739 ************************************************************************
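** detach: undo attach in reverse order - callout, interrupt, adapter
** shutdown, DMA/memory/IRQ resources, then the CAM path, bus and SIM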
3740 ************************************************************************
3741 */
3742 static int arcmsr_detach(device_t dev)
3743 {
3744 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3745 	int i;
3746 
3747 	callout_stop(&acb->devmap_callout);
3748 	bus_teardown_intr(dev, acb->irqres, acb->ih);
3749 	arcmsr_shutdown(dev);
3750 	arcmsr_free_resource(acb);
3751 	for(i=0; (i<2) && (acb->sys_res_arcmsr[i]!=NULL); i++) {
3752 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]);
3753 	}
3754 	bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3755 	if (acb->irq_type == PCI_INTR_TYPE_MSI)
3756 		pci_release_msi(dev);
3757 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3758 	xpt_async(AC_LOST_DEVICE, acb->ppath, NULL);
3759 	xpt_free_path(acb->ppath);
3760 	xpt_bus_deregister(cam_sim_path(acb->psim));
3761 	cam_sim_free(acb->psim);
3762 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3763 	ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3764 	return (0);
3765 }
3766 
3767 #ifdef ARCMSR_DEBUG1
3768 static void arcmsr_dump_data(struct AdapterControlBlock *acb)
3769 {
3770 	if((acb->pktRequestCount - acb->pktReturnCount) == 0)
3771 		return;
3772 	kprintf("Command Request Count   =0x%x\n",acb->pktRequestCount);
3773 	kprintf("Command Return Count    =0x%x\n",acb->pktReturnCount);
3774 	kprintf("Command (Req-Rtn) Count =0x%x\n",(acb->pktRequestCount - acb->pktReturnCount));
3775 	kprintf("Queued Command Count    =0x%x\n",acb->srboutstandingcount);
3776 }
3777 #endif
3778