xref: /dragonfly/sys/dev/raid/arcmsr/arcmsr.c (revision 25a2db75)
1 /*
2 *****************************************************************************************
3 **        O.S   : FreeBSD
4 **   FILE NAME  : arcmsr.c
5 **        BY    : Erich Chen, Ching Huang
6 **   Description: SCSI RAID Device Driver for
7 **                ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x) SATA/SAS RAID HOST Adapter
8 **                ARCMSR RAID Host adapter
9 **                [RAID controller:INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set]
10 ******************************************************************************************
11 ************************************************************************
12 **
13 ** Copyright (c) 2004-2010 ARECA Co. Ltd.
14 **        Erich Chen, Taipei Taiwan All rights reserved.
15 **
16 ** Redistribution and use in source and binary forms, with or without
17 ** modification, are permitted provided that the following conditions
18 ** are met:
19 ** 1. Redistributions of source code must retain the above copyright
20 **    notice, this list of conditions and the following disclaimer.
21 ** 2. Redistributions in binary form must reproduce the above copyright
22 **    notice, this list of conditions and the following disclaimer in the
23 **    documentation and/or other materials provided with the distribution.
24 ** 3. The name of the author may not be used to endorse or promote products
25 **    derived from this software without specific prior written permission.
26 **
27 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT
32 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
34 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
36 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 **************************************************************************
38 ** History
39 **
40 **        REV#         DATE             NAME             DESCRIPTION
41 **     1.00.00.00   03/31/2004      Erich Chen           First release
42 **     1.20.00.02   11/29/2004      Erich Chen           bug fix with arcmsr_bus_reset when PHY error
43 **     1.20.00.03   04/19/2005      Erich Chen           add SATA 24 Ports adapter type support
44 **                                                       clean unused function
45 **     1.20.00.12   09/12/2005      Erich Chen           bug fix with abort command handling,
46 **                                                       firmware version check
47 **                                                       and firmware update notify for hardware bug fix
48 **                                                       handling if none zero high part physical address
49 **                                                       of srb resource
50 **     1.20.00.13   08/18/2006      Erich Chen           remove pending srb and report busy
51 **                                                       add iop message xfer
52 **                                                       with scsi pass-through command
53 **                                                       add new device id of sas raid adapters
54 **                                                       code fit for SPARC64 & PPC
55 **     1.20.00.14   02/05/2007      Erich Chen           bug fix for incorrect ccb_h.status report
56 **                                                       and cause g_vfs_done() read write error
57 **     1.20.00.15   10/10/2007      Erich Chen           support new RAID adapter type ARC120x
58 **     1.20.00.16   10/10/2009      Erich Chen           Bug fix for RAID adapter type ARC120x
59 **                                                       bus_dmamem_alloc() with BUS_DMA_ZERO
60 **     1.20.00.17   07/15/2010      Ching Huang          Added support ARC1880
61 **                                                       report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed,
62 **                                                       prevent cam_periph_error removing all LUN devices of one Target id
63 **                                                       for any one LUN device failed
64 **     1.20.00.18   10/14/2010      Ching Huang          Fixed "inquiry data fails comparion at DV1 step"
65 **                  10/25/2010      Ching Huang          Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B
66 **     1.20.00.19   11/11/2010      Ching Huang          Fixed arcmsr driver prevent arcsas support for Areca SAS HBA ARC13x0
67 **     1.20.00.20   12/08/2010      Ching Huang          Avoid calling atomic_set_int function
68 **     1.20.00.21   02/08/2011      Ching Huang          Implement I/O request timeout
69 **                  02/14/2011      Ching Huang          Modified pktRequestCount
70 **     1.20.00.21   03/03/2011      Ching Huang          if a command timeout, then wait its ccb back before free it
71 **     1.20.00.22   07/04/2011      Ching Huang          Fixed multiple MTX panic
72 **     1.20.00.23   10/28/2011      Ching Huang          Added TIMEOUT_DELAY in case of too many HDDs need to start
73 **     1.20.00.23   11/08/2011      Ching Huang          Added report device transfer speed
74 **     1.20.00.23   01/30/2012      Ching Huang          Fixed Request requeued and Retrying command
75 **     1.20.00.24   06/11/2012      Ching Huang          Fixed return sense data condition
76 **     1.20.00.25   08/17/2012      Ching Huang          Fixed hotplug device no function on type A adapter
77 ******************************************************************************************
78 * $FreeBSD: src/sys/dev/arcmsr/arcmsr.c,v 1.43 2012/09/04 05:15:54 delphij Exp $
79 */
80 #if 0
81 #define ARCMSR_DEBUG1			1
82 #endif
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/malloc.h>
86 #include <sys/kernel.h>
87 #include <sys/bus.h>
88 #include <sys/queue.h>
89 #include <sys/stat.h>
90 #include <sys/kthread.h>
91 #include <sys/module.h>
92 #include <sys/proc.h>
93 #include <sys/lock.h>
94 #include <sys/sysctl.h>
95 #include <sys/thread2.h>
96 #include <sys/poll.h>
97 #include <sys/device.h>
98 #include <vm/vm.h>
99 #include <vm/vm_param.h>
100 #include <vm/pmap.h>
101 
102 #include <machine/atomic.h>
103 #include <sys/conf.h>
104 #include <sys/rman.h>
105 
106 #include <bus/cam/cam.h>
107 #include <bus/cam/cam_ccb.h>
108 #include <bus/cam/cam_sim.h>
109 #include <bus/cam/cam_periph.h>
110 #include <bus/cam/cam_xpt_periph.h>
111 #include <bus/cam/cam_xpt_sim.h>
112 #include <bus/cam/cam_debug.h>
113 #include <bus/cam/scsi/scsi_all.h>
114 #include <bus/cam/scsi/scsi_message.h>
115 /*
116 **************************************************************************
117 **************************************************************************
118 */
119 #include <sys/endian.h>
120 #include <bus/pci/pcivar.h>
121 #include <bus/pci/pcireg.h>
122 #define ARCMSR_LOCK_INIT(l, s)	lockinit(l, s, 0, LK_CANRECURSE)
123 #define ARCMSR_LOCK_DESTROY(l)	lockuninit(l)
124 #define ARCMSR_LOCK_ACQUIRE(l)	lockmgr(l, LK_EXCLUSIVE)
125 #define ARCMSR_LOCK_RELEASE(l)	lockmgr(l, LK_RELEASE)
126 #define ARCMSR_LOCK_TRY(l)	lockmgr(&l, LK_EXCLUSIVE|LK_NOWAIT);
127 #define arcmsr_htole32(x)	htole32(x)
128 typedef struct lock		arcmsr_lock_t;
129 
130 #if !defined(CAM_NEW_TRAN_CODE)
131 #define	CAM_NEW_TRAN_CODE	1
132 #endif
133 
134 #define arcmsr_callout_init(a)	callout_init_mp(a);
135 
136 #define ARCMSR_DRIVER_VERSION			"Driver Version 1.20.00.25 2012-08-17"
137 #include <dev/raid/arcmsr/arcmsr.h>
138 #define	SRB_SIZE						((sizeof(struct CommandControlBlock)+0x1f) & 0xffe0)
139 #define ARCMSR_SRBS_POOL_SIZE           (SRB_SIZE * ARCMSR_MAX_FREESRB_NUM)
140 /*
141 **************************************************************************
142 **************************************************************************
143 */
144 #define CHIP_REG_READ32(s, b, r)	bus_space_read_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r))
145 #define CHIP_REG_WRITE32(s, b, r, d)	bus_space_write_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r), d)
146 /*
147 **************************************************************************
148 **************************************************************************
149 */
150 static void arcmsr_free_srb(struct CommandControlBlock *srb);
151 static struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb);
152 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb);
153 static int arcmsr_probe(device_t dev);
154 static int arcmsr_attach(device_t dev);
155 static int arcmsr_detach(device_t dev);
156 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
157 static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
158 static int arcmsr_shutdown(device_t dev);
159 static void arcmsr_interrupt(struct AdapterControlBlock *acb);
160 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
161 static void arcmsr_free_resource(struct AdapterControlBlock *acb);
162 static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
163 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
164 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
165 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
166 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
167 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb);
168 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
169 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
170 static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
171 static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
172 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t * dm_segs, u_int32_t nseg);
173 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb);
174 static int arcmsr_resume(device_t dev);
175 static int arcmsr_suspend(device_t dev);
176 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
177 static void	arcmsr_polling_devmap(void* arg);
178 static void	arcmsr_srb_timeout(void* arg);
179 #ifdef ARCMSR_DEBUG1
180 static void arcmsr_dump_data(struct AdapterControlBlock *acb);
181 #endif
182 /*
183 **************************************************************************
184 **************************************************************************
185 */
186 static void UDELAY(u_int32_t us) { DELAY(us); }
187 /*
188 **************************************************************************
189 **************************************************************************
190 */
191 static bus_dmamap_callback_t arcmsr_map_free_srb;
192 static bus_dmamap_callback_t arcmsr_execute_srb;
193 /*
194 **************************************************************************
195 **************************************************************************
196 */
197 static d_open_t	arcmsr_open;
198 static d_close_t arcmsr_close;
199 static d_ioctl_t arcmsr_ioctl;
200 
201 static device_method_t arcmsr_methods[]={
202 	DEVMETHOD(device_probe,		arcmsr_probe),
203 	DEVMETHOD(device_attach,	arcmsr_attach),
204 	DEVMETHOD(device_detach,	arcmsr_detach),
205 	DEVMETHOD(device_shutdown,	arcmsr_shutdown),
206 	DEVMETHOD(device_suspend,	arcmsr_suspend),
207 	DEVMETHOD(device_resume,	arcmsr_resume),
208 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
209 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
210 	DEVMETHOD_END
211 };
212 
213 static driver_t arcmsr_driver={
214 	"arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
215 };
216 
217 static devclass_t arcmsr_devclass;
218 DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, NULL, NULL);
219 MODULE_VERSION(arcmsr, 1);
220 MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
221 MODULE_DEPEND(arcmsr, cam, 1, 1, 1);
222 #ifndef BUS_DMA_COHERENT
223 	#define	BUS_DMA_COHERENT	0x04	/* hint: map memory in a coherent way */
224 #endif
225 
226 static struct dev_ops arcmsr_ops = {
227 	{ "arcmsr", 0, D_MPSAFE },
228 	.d_open =	arcmsr_open,		        /* open     */
229 	.d_close =	arcmsr_close,		        /* close    */
230 	.d_ioctl =	arcmsr_ioctl,		        /* ioctl    */
231 };
232 
233 static int	arcmsr_msi_enable = 1;
234 TUNABLE_INT("hw.arcmsr.msi.enable", &arcmsr_msi_enable);
235 
236 
237 /*
238 **************************************************************************
239 **************************************************************************
240 */
241 
242 static int
243 arcmsr_open(struct dev_open_args *ap)
244 {
245 	cdev_t dev = ap->a_head.a_dev;
246 	struct AdapterControlBlock *acb=dev->si_drv1;
247 
248 	if(acb==NULL) {
249 		return ENXIO;
250 	}
251 	return (0);
252 }
253 
254 /*
255 **************************************************************************
256 **************************************************************************
257 */
258 
259 static int
260 arcmsr_close(struct dev_close_args *ap)
261 {
262 	cdev_t dev = ap->a_head.a_dev;
263 	struct AdapterControlBlock *acb=dev->si_drv1;
264 
265 	if(acb==NULL) {
266 		return ENXIO;
267 	}
268 	return 0;
269 }
270 
271 /*
272 **************************************************************************
273 **************************************************************************
274 */
275 
276 static int
277 arcmsr_ioctl(struct dev_ioctl_args *ap)
278 {
279 	cdev_t dev = ap->a_head.a_dev;
280 	u_long ioctl_cmd = ap->a_cmd;
281 	caddr_t arg = ap->a_data;
282 	struct AdapterControlBlock *acb=dev->si_drv1;
283 
284 	if(acb==NULL) {
285 		return ENXIO;
286 	}
287 	return (arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
288 }
289 
290 /*
291 **********************************************************************
292 **********************************************************************
293 */
294 static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb)
295 {
296 	u_int32_t intmask_org=0;
297 
298 	switch (acb->adapter_type) {
299 	case ACB_ADAPTER_TYPE_A: {
300 			/* disable all outbound interrupt */
301 			intmask_org=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */
302 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
303 		}
304 		break;
305 	case ACB_ADAPTER_TYPE_B: {
306 			/* disable all outbound interrupt */
307 			intmask_org=CHIP_REG_READ32(HBB_DOORBELL,
308 			0, iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */
309 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, 0); /* disable all interrupt */
310 		}
311 		break;
312 	case ACB_ADAPTER_TYPE_C: {
313 			/* disable all outbound interrupt */
314 			intmask_org=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask)	; /* disable outbound message0 int */
315 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
316 		}
317 		break;
318 	}
319 	return (intmask_org);
320 }
321 /*
322 **********************************************************************
323 **********************************************************************
324 */
325 static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org)
326 {
327 	u_int32_t mask;
328 
329 	switch (acb->adapter_type) {
330 	case ACB_ADAPTER_TYPE_A: {
331 			/* enable outbound Post Queue, outbound doorbell Interrupt */
332 			mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
333 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask);
334 			acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
335 		}
336 		break;
337 	case ACB_ADAPTER_TYPE_B: {
338 			/* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
339 			mask=(ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
340 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/
341 			acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
342 		}
343 		break;
344 	case ACB_ADAPTER_TYPE_C: {
345 			/* enable outbound Post Queue, outbound doorbell Interrupt */
346 			mask=~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
347 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask);
348 			acb->outbound_int_enable= ~(intmask_org & mask) & 0x0000000f;
349 		}
350 		break;
351 	}
352 }
353 /*
354 **********************************************************************
355 **********************************************************************
356 */
357 static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
358 {
359 	u_int32_t Index;
360 	u_int8_t Retries=0x00;
361 
362 	do {
363 		for(Index=0; Index < 100; Index++) {
364 			if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
365 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/
366 				return TRUE;
367 			}
368 			UDELAY(10000);
369 		}/*max 1 seconds*/
370 	}while(Retries++ < 20);/*max 20 sec*/
371 	return (FALSE);
372 }
373 /*
374 **********************************************************************
375 **********************************************************************
376 */
377 static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
378 {
379 	u_int32_t Index;
380 	u_int8_t Retries=0x00;
381 
382 	do {
383 		for(Index=0; Index < 100; Index++) {
384 			if(CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
385 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/
386 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
387 				return TRUE;
388 			}
389 			UDELAY(10000);
390 		}/*max 1 seconds*/
391 	}while(Retries++ < 20);/*max 20 sec*/
392 	return (FALSE);
393 }
394 /*
395 **********************************************************************
396 **********************************************************************
397 */
398 static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb)
399 {
400 	u_int32_t Index;
401 	u_int8_t Retries=0x00;
402 
403 	do {
404 		for(Index=0; Index < 100; Index++) {
405 			if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
406 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/
407 				return TRUE;
408 			}
409 			UDELAY(10000);
410 		}/*max 1 seconds*/
411 	}while(Retries++ < 20);/*max 20 sec*/
412 	return (FALSE);
413 }
414 /*
415 ************************************************************************
416 ************************************************************************
417 */
418 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
419 {
420 	int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */
421 
422 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
423 	do {
424 		if(arcmsr_hba_wait_msgint_ready(acb)) {
425 			break;
426 		} else {
427 			retry_count--;
428 		}
429 	}while(retry_count!=0);
430 }
431 /*
432 ************************************************************************
433 ************************************************************************
434 */
435 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
436 {
437 	int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */
438 
439 	CHIP_REG_WRITE32(HBB_DOORBELL,
440 	0, drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
441 	do {
442 		if(arcmsr_hbb_wait_msgint_ready(acb)) {
443 			break;
444 		} else {
445 			retry_count--;
446 		}
447 	}while(retry_count!=0);
448 }
449 /*
450 ************************************************************************
451 ************************************************************************
452 */
453 static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
454 {
455 	int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */
456 
457 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
458 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
459 	do {
460 		if(arcmsr_hbc_wait_msgint_ready(acb)) {
461 			break;
462 		} else {
463 			retry_count--;
464 		}
465 	}while(retry_count!=0);
466 }
467 /*
468 ************************************************************************
469 ************************************************************************
470 */
471 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
472 {
473 	switch (acb->adapter_type) {
474 	case ACB_ADAPTER_TYPE_A: {
475 			arcmsr_flush_hba_cache(acb);
476 		}
477 		break;
478 	case ACB_ADAPTER_TYPE_B: {
479 			arcmsr_flush_hbb_cache(acb);
480 		}
481 		break;
482 	case ACB_ADAPTER_TYPE_C: {
483 			arcmsr_flush_hbc_cache(acb);
484 		}
485 		break;
486 	}
487 }
488 /*
489 *******************************************************************************
490 *******************************************************************************
491 */
492 static int arcmsr_suspend(device_t dev)
493 {
494 	struct AdapterControlBlock	*acb = device_get_softc(dev);
495 
496 	/* flush controller */
497 	arcmsr_iop_parking(acb);
498 	/* disable all outbound interrupt */
499 	arcmsr_disable_allintr(acb);
500 	return(0);
501 }
502 /*
503 *******************************************************************************
504 *******************************************************************************
505 */
506 static int arcmsr_resume(device_t dev)
507 {
508 	struct AdapterControlBlock	*acb = device_get_softc(dev);
509 
510 	arcmsr_iop_init(acb);
511 	return(0);
512 }
513 /*
514 *********************************************************************************
515 *********************************************************************************
516 */
517 static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg)
518 {
519 }
520 /*
521 **********************************************************************
522 **********************************************************************
523 */
524 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
525 {
526 	struct AdapterControlBlock *acb=srb->acb;
527 	union ccb * pccb=srb->pccb;
528 
529 	if(srb->srb_flags & SRB_FLAG_TIMER_START)
530 		callout_stop(&srb->ccb_callout);
531 	if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
532 		bus_dmasync_op_t op;
533 
534 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
535 			op = BUS_DMASYNC_POSTREAD;
536 		} else {
537 			op = BUS_DMASYNC_POSTWRITE;
538 		}
539 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
540 		bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
541 	}
542 	if(stand_flag==1) {
543 		atomic_subtract_int(&acb->srboutstandingcount, 1);
544 		if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && (
545 		acb->srboutstandingcount < ARCMSR_RELEASE_SIMQ_LEVEL)) {
546 			acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN;
547 			pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
548 		}
549 	}
550 	if(srb->srb_state != ARCMSR_SRB_TIMEOUT)
551 		arcmsr_free_srb(srb);
552 #ifdef ARCMSR_DEBUG1
553 	acb->pktReturnCount++;
554 #endif
555 	xpt_done(pccb);
556 	return;
557 }
558 /*
559 **********************************************************************
560 **********************************************************************
561 */
562 static void arcmsr_report_sense_info(struct CommandControlBlock *srb)
563 {
564 	union ccb * pccb=srb->pccb;
565 
566 	pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
567 	pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
568 	if(pccb->csio.sense_len) {
569 		memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data));
570 		memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData,
571 		get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data)));
572 		((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */
573 		pccb->ccb_h.status |= CAM_AUTOSNS_VALID;
574 	}
575 }
576 /*
577 *********************************************************************
578 *********************************************************************
579 */
580 static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
581 {
582 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
583 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
584 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
585 	}
586 }
587 /*
588 *********************************************************************
589 *********************************************************************
590 */
591 static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
592 {
593 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
594 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
595 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
596 	}
597 }
598 /*
599 *********************************************************************
600 *********************************************************************
601 */
602 static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb)
603 {
604 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
605 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
606 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
607 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
608 	}
609 }
610 /*
611 *********************************************************************
612 *********************************************************************
613 */
614 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
615 {
616 	switch (acb->adapter_type) {
617 	case ACB_ADAPTER_TYPE_A: {
618 			arcmsr_abort_hba_allcmd(acb);
619 		}
620 		break;
621 	case ACB_ADAPTER_TYPE_B: {
622 			arcmsr_abort_hbb_allcmd(acb);
623 		}
624 		break;
625 	case ACB_ADAPTER_TYPE_C: {
626 			arcmsr_abort_hbc_allcmd(acb);
627 		}
628 		break;
629 	}
630 }
631 /*
632 **************************************************************************
633 **************************************************************************
634 */
635 static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error)
636 {
637 	int target, lun;
638 
639 	target=srb->pccb->ccb_h.target_id;
640 	lun=srb->pccb->ccb_h.target_lun;
641 	if(error == FALSE) {
642 		if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
643 			acb->devstate[target][lun]=ARECA_RAID_GOOD;
644 		}
645 		srb->pccb->ccb_h.status |= CAM_REQ_CMP;
646 		arcmsr_srb_complete(srb, 1);
647 	} else {
648 		switch(srb->arcmsr_cdb.DeviceStatus) {
649 		case ARCMSR_DEV_SELECT_TIMEOUT: {
650 				if(acb->devstate[target][lun]==ARECA_RAID_GOOD) {
651 					kprintf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun);
652 				}
653 				acb->devstate[target][lun]=ARECA_RAID_GONE;
654 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
655 				arcmsr_srb_complete(srb, 1);
656 			}
657 			break;
658 		case ARCMSR_DEV_ABORTED:
659 		case ARCMSR_DEV_INIT_FAIL: {
660 				acb->devstate[target][lun]=ARECA_RAID_GONE;
661 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
662 				arcmsr_srb_complete(srb, 1);
663 			}
664 			break;
665 		case SCSISTAT_CHECK_CONDITION: {
666 				acb->devstate[target][lun]=ARECA_RAID_GOOD;
667 				arcmsr_report_sense_info(srb);
668 				arcmsr_srb_complete(srb, 1);
669 			}
670 			break;
671 		default:
672 			kprintf("arcmsr%d: scsi id=%d lun=%d isr got command error done, but got unknown DeviceStatus=0x%x\n"
673 					, acb->pci_unit, target, lun ,srb->arcmsr_cdb.DeviceStatus);
674 			acb->devstate[target][lun]=ARECA_RAID_GONE;
675 			srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
676 			/*unknown error or crc error just for retry*/
677 			arcmsr_srb_complete(srb, 1);
678 			break;
679 		}
680 	}
681 }
682 /*
683 **************************************************************************
684 **************************************************************************
685 */
686 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error)
687 {
688 	struct CommandControlBlock *srb;
689 
690 	/* check if command done with no error*/
691 	switch (acb->adapter_type) {
692 	case ACB_ADAPTER_TYPE_C:
693 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
694 		break;
695 	case ACB_ADAPTER_TYPE_A:
696 	case ACB_ADAPTER_TYPE_B:
697 	default:
698 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
699 		break;
700 	}
701 	if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
702 		if(srb->srb_state == ARCMSR_SRB_TIMEOUT) {
703 			arcmsr_free_srb(srb);
704 			kprintf("arcmsr%d: srb='%p' return srb has been timeouted\n", acb->pci_unit, srb);
705 			return;
706 		}
707 		kprintf("arcmsr%d: return srb has been completed\n"
708 			"srb='%p' srb_state=0x%x outstanding srb count=%d \n",
709 			acb->pci_unit, srb, srb->srb_state, acb->srboutstandingcount);
710 		return;
711 	}
712 	arcmsr_report_srb_state(acb, srb, error);
713 }
714 /*
715 **************************************************************************
716 **************************************************************************
717 */
718 static void	arcmsr_srb_timeout(void* arg)
719 {
720 	struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
721 	struct AdapterControlBlock *acb;
722 	int target, lun;
723 	u_int8_t cmd;
724 
725 	target=srb->pccb->ccb_h.target_id;
726 	lun=srb->pccb->ccb_h.target_lun;
727 	acb = srb->acb;
728 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
729 	if(srb->srb_state == ARCMSR_SRB_START)
730 	{
731 		cmd = srb->pccb->csio.cdb_io.cdb_bytes[0];
732 		srb->srb_state = ARCMSR_SRB_TIMEOUT;
733 		srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT;
734 		arcmsr_srb_complete(srb, 1);
735 		kprintf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command time out!\n",
736 				 acb->pci_unit, target, lun, cmd, srb);
737 	}
738 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
739 #ifdef ARCMSR_DEBUG1
740 	arcmsr_dump_data(acb);
741 #endif
742 }
743 
744 /*
745 **********************************************************************
746 **********************************************************************
747 */
748 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
749 {
750 	int i=0;
751 	u_int32_t flag_srb;
752 	u_int16_t error;
753 
754 	switch (acb->adapter_type) {
755 	case ACB_ADAPTER_TYPE_A: {
756 			u_int32_t outbound_intstatus;
757 
758 			/*clear and abort all outbound posted Q*/
759 			outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
760 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
761 			while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
762                 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
763 				arcmsr_drain_donequeue(acb, flag_srb, error);
764 			}
765 		}
766 		break;
767 	case ACB_ADAPTER_TYPE_B: {
768 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
769 
770 			/*clear all outbound posted Q*/
771 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
772 			for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
773 				if((flag_srb=phbbmu->done_qbuffer[i])!=0) {
774 					phbbmu->done_qbuffer[i]=0;
775 			error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
776 					arcmsr_drain_donequeue(acb, flag_srb, error);
777 				}
778 				phbbmu->post_qbuffer[i]=0;
779 			}/*drain reply FIFO*/
780 			phbbmu->doneq_index=0;
781 			phbbmu->postq_index=0;
782 		}
783 		break;
784 	case ACB_ADAPTER_TYPE_C: {
785 
786 			while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
787 				flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
788                 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
789 				arcmsr_drain_donequeue(acb, flag_srb, error);
790 			}
791 		}
792 		break;
793 	}
794 }
795 /*
796 ****************************************************************************
797 ****************************************************************************
798 */
799 static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
800 {
801 	struct CommandControlBlock *srb;
802 	u_int32_t intmask_org;
803 	u_int32_t i=0;
804 
805 	if(acb->srboutstandingcount>0) {
806 		/* disable all outbound interrupt */
807 		intmask_org=arcmsr_disable_allintr(acb);
808 		/*clear and abort all outbound posted Q*/
809 		arcmsr_done4abort_postqueue(acb);
810 		/* talk to iop 331 outstanding command aborted*/
811 		arcmsr_abort_allcmd(acb);
812 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
813 			srb=acb->psrb_pool[i];
814 			if(srb->srb_state==ARCMSR_SRB_START) {
815 				srb->srb_state=ARCMSR_SRB_ABORTED;
816 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
817 				arcmsr_srb_complete(srb, 1);
818 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p' aborted\n"
819 						, acb->pci_unit, srb->pccb->ccb_h.target_id
820 						, srb->pccb->ccb_h.target_lun, srb);
821 			}
822 		}
823 		/* enable all outbound interrupt */
824 		arcmsr_enable_allintr(acb, intmask_org);
825 	}
826 	acb->srboutstandingcount=0;
827 	acb->workingsrb_doneindex=0;
828 	acb->workingsrb_startindex=0;
829 #ifdef ARCMSR_DEBUG1
830 	acb->pktRequestCount = 0;
831 	acb->pktReturnCount = 0;
832 #endif
833 }
834 /*
835 **********************************************************************
836 **********************************************************************
837 */
838 static void arcmsr_build_srb(struct CommandControlBlock *srb,
839 		bus_dma_segment_t *dm_segs, u_int32_t nseg)
840 {
841 	struct ARCMSR_CDB * arcmsr_cdb= &srb->arcmsr_cdb;
842 	u_int8_t * psge=(u_int8_t *)&arcmsr_cdb->u;
843 	u_int32_t address_lo, address_hi;
844 	union ccb * pccb=srb->pccb;
845 	struct ccb_scsiio * pcsio= &pccb->csio;
846 	u_int32_t arccdbsize=0x30;
847 
848 	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
849 	arcmsr_cdb->Bus=0;
850 	arcmsr_cdb->TargetID=pccb->ccb_h.target_id;
851 	arcmsr_cdb->LUN=pccb->ccb_h.target_lun;
852 	arcmsr_cdb->Function=1;
853 	arcmsr_cdb->CdbLength=(u_int8_t)pcsio->cdb_len;
854 	arcmsr_cdb->Context=0;
855 	bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
856 	if(nseg != 0) {
857 		struct AdapterControlBlock *acb=srb->acb;
858 		bus_dmasync_op_t op;
859 		u_int32_t length, i, cdb_sgcount=0;
860 
861 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
862 			op=BUS_DMASYNC_PREREAD;
863 		} else {
864 			op=BUS_DMASYNC_PREWRITE;
865 			arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_WRITE;
866 			srb->srb_flags|=SRB_FLAG_WRITE;
867 		}
868 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
869 		for(i=0;i<nseg;i++) {
870 			/* Get the physical address of the current data pointer */
871 			length=arcmsr_htole32(dm_segs[i].ds_len);
872 			address_lo=arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
873 			address_hi=arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
874 			if(address_hi==0) {
875 				struct SG32ENTRY * pdma_sg=(struct SG32ENTRY *)psge;
876 				pdma_sg->address=address_lo;
877 				pdma_sg->length=length;
878 				psge += sizeof(struct SG32ENTRY);
879 				arccdbsize += sizeof(struct SG32ENTRY);
880 			} else {
881 				u_int32_t sg64s_size=0, tmplength=length;
882 
883 				while(1) {
884 					u_int64_t span4G, length0;
885 					struct SG64ENTRY * pdma_sg=(struct SG64ENTRY *)psge;
886 
887 					span4G=(u_int64_t)address_lo + tmplength;
888 					pdma_sg->addresshigh=address_hi;
889 					pdma_sg->address=address_lo;
890 					if(span4G > 0x100000000) {
891 						/*see if cross 4G boundary*/
892 						length0=0x100000000-address_lo;
893 						pdma_sg->length=(u_int32_t)length0|IS_SG64_ADDR;
894 						address_hi=address_hi+1;
895 						address_lo=0;
896 						tmplength=tmplength-(u_int32_t)length0;
897 						sg64s_size += sizeof(struct SG64ENTRY);
898 						psge += sizeof(struct SG64ENTRY);
899 						cdb_sgcount++;
900 					} else {
901 						pdma_sg->length=tmplength|IS_SG64_ADDR;
902 						sg64s_size += sizeof(struct SG64ENTRY);
903 						psge += sizeof(struct SG64ENTRY);
904 						break;
905 					}
906 				}
907 				arccdbsize += sg64s_size;
908 			}
909 			cdb_sgcount++;
910 		}
911 		arcmsr_cdb->sgcount=(u_int8_t)cdb_sgcount;
912 		arcmsr_cdb->DataLength=pcsio->dxfer_len;
913 		if( arccdbsize > 256) {
914 			arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_SGL_BSIZE;
915 		}
916 	} else {
917 		arcmsr_cdb->DataLength = 0;
918 	}
919 	srb->arc_cdb_size=arccdbsize;
920 }
921 /*
922 **************************************************************************
923 **************************************************************************
924 */
925 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
926 {
927 	u_int32_t cdb_shifted_phyaddr=(u_int32_t) srb->cdb_shifted_phyaddr;
928 	struct ARCMSR_CDB * arcmsr_cdb=(struct ARCMSR_CDB *)&srb->arcmsr_cdb;
929 
930 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD);
931 	atomic_add_int(&acb->srboutstandingcount, 1);
932 	srb->srb_state=ARCMSR_SRB_START;
933 
934 	switch (acb->adapter_type) {
935 	case ACB_ADAPTER_TYPE_A: {
936 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
937 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
938 			} else {
939 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr);
940 			}
941 		}
942 		break;
943 	case ACB_ADAPTER_TYPE_B: {
944 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
945 			int ending_index, index;
946 
947 			index=phbbmu->postq_index;
948 			ending_index=((index+1)%ARCMSR_MAX_HBB_POSTQUEUE);
949 			phbbmu->post_qbuffer[ending_index]=0;
950 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
951 				phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE;
952 			} else {
953 				phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr;
954 			}
955 			index++;
956 			index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
957 			phbbmu->postq_index=index;
958 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
959 		}
960 		break;
961     case ACB_ADAPTER_TYPE_C:
962         {
963             u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;
964 
965             arc_cdb_size=(srb->arc_cdb_size>0x300)?0x300:srb->arc_cdb_size;
966             ccb_post_stamp=(cdb_shifted_phyaddr | ((arc_cdb_size-1) >> 6) | 1);
967 			cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
968             if(cdb_phyaddr_hi32)
969             {
970 			    CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32);
971 			    CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
972             }
973             else
974             {
975 			    CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
976             }
977         }
978         break;
979 	}
980 }
981 /*
982 ************************************************************************
983 ************************************************************************
984 */
985 static struct QBUFFER * arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb)
986 {
987 	struct QBUFFER *qbuffer=NULL;
988 
989 	switch (acb->adapter_type) {
990 	case ACB_ADAPTER_TYPE_A: {
991 			struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
992 
993 			qbuffer=(struct QBUFFER *)&phbamu->message_rbuffer;
994 		}
995 		break;
996 	case ACB_ADAPTER_TYPE_B: {
997 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
998 
999 			qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
1000 		}
1001 		break;
1002 	case ACB_ADAPTER_TYPE_C: {
1003 			struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
1004 
1005 			qbuffer=(struct QBUFFER *)&phbcmu->message_rbuffer;
1006 		}
1007 		break;
1008 	}
1009 	return(qbuffer);
1010 }
1011 /*
1012 ************************************************************************
1013 ************************************************************************
1014 */
1015 static struct QBUFFER * arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb)
1016 {
1017 	struct QBUFFER *qbuffer=NULL;
1018 
1019 	switch (acb->adapter_type) {
1020 	case ACB_ADAPTER_TYPE_A: {
1021 			struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
1022 
1023 			qbuffer=(struct QBUFFER *)&phbamu->message_wbuffer;
1024 		}
1025 		break;
1026 	case ACB_ADAPTER_TYPE_B: {
1027 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1028 
1029 			qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
1030 		}
1031 		break;
1032 	case ACB_ADAPTER_TYPE_C: {
1033 			struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
1034 
1035 			qbuffer=(struct QBUFFER *)&phbcmu->message_wbuffer;
1036 		}
1037 		break;
1038 	}
1039 	return(qbuffer);
1040 }
1041 /*
1042 **************************************************************************
1043 **************************************************************************
1044 */
1045 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1046 {
1047 	switch (acb->adapter_type) {
1048 	case ACB_ADAPTER_TYPE_A: {
1049 			/* let IOP know data has been read */
1050 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
1051 		}
1052 		break;
1053 	case ACB_ADAPTER_TYPE_B: {
1054 			/* let IOP know data has been read */
1055 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
1056 		}
1057 		break;
1058 	case ACB_ADAPTER_TYPE_C: {
1059 			/* let IOP know data has been read */
1060 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
1061 		}
1062 	}
1063 }
1064 /*
1065 **************************************************************************
1066 **************************************************************************
1067 */
1068 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1069 {
1070 	switch (acb->adapter_type) {
1071 	case ACB_ADAPTER_TYPE_A: {
1072 			/*
1073 			** push inbound doorbell tell iop, driver data write ok
1074 			** and wait reply on next hwinterrupt for next Qbuffer post
1075 			*/
1076 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
1077 		}
1078 		break;
1079 	case ACB_ADAPTER_TYPE_B: {
1080 			/*
1081 			** push inbound doorbell tell iop, driver data write ok
1082 			** and wait reply on next hwinterrupt for next Qbuffer post
1083 			*/
1084 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
1085 		}
1086 		break;
1087 	case ACB_ADAPTER_TYPE_C: {
1088 			/*
1089 			** push inbound doorbell tell iop, driver data write ok
1090 			** and wait reply on next hwinterrupt for next Qbuffer post
1091 			*/
1092 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
1093 		}
1094 		break;
1095 	}
1096 }
1097 /*
1098 **********************************************************************
1099 **********************************************************************
1100 */
1101 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
1102 {
1103 	u_int8_t *pQbuffer;
1104 	struct QBUFFER *pwbuffer;
1105 	u_int8_t * iop_data;
1106 	int32_t allxfer_len=0;
1107 
1108 	pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1109 	iop_data=(u_int8_t *)pwbuffer->data;
1110 	if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
1111 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1112 		while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1113 			&& (allxfer_len<124)) {
1114 			pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1115 			memcpy(iop_data, pQbuffer, 1);
1116 			acb->wqbuf_firstindex++;
1117 			acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */
1118 			iop_data++;
1119 			allxfer_len++;
1120 		}
1121 		pwbuffer->data_len=allxfer_len;
1122 		/*
1123 		** push inbound doorbell and wait reply at hwinterrupt routine for next Qbuffer post
1124 		*/
1125 		arcmsr_iop_message_wrote(acb);
1126 	}
1127 }
1128 /*
1129 ************************************************************************
1130 ************************************************************************
1131 */
1132 static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
1133 {
1134 	acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1135 	CHIP_REG_WRITE32(HBA_MessageUnit,
1136 	0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1137 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
1138 		kprintf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
1139 			, acb->pci_unit);
1140 	}
1141 	return;
1142 }
1143 /*
1144 ************************************************************************
1145 ************************************************************************
1146 */
1147 static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
1148 {
1149 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1150 	CHIP_REG_WRITE32(HBB_DOORBELL,
1151 	0, drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
1152 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
1153 		kprintf( "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
1154 			, acb->pci_unit);
1155 	}
1156 }
1157 /*
1158 ************************************************************************
1159 ************************************************************************
1160 */
1161 static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb)
1162 {
1163 	acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1164 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1165 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1166 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
1167 		kprintf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n", acb->pci_unit);
1168 	}
1169 }
1170 /*
1171 ************************************************************************
1172 ************************************************************************
1173 */
1174 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1175 {
1176 	switch (acb->adapter_type) {
1177 	case ACB_ADAPTER_TYPE_A: {
1178 			arcmsr_stop_hba_bgrb(acb);
1179 		}
1180 		break;
1181 	case ACB_ADAPTER_TYPE_B: {
1182 			arcmsr_stop_hbb_bgrb(acb);
1183 		}
1184 		break;
1185 	case ACB_ADAPTER_TYPE_C: {
1186 			arcmsr_stop_hbc_bgrb(acb);
1187 		}
1188 		break;
1189 	}
1190 }
1191 /*
1192 ************************************************************************
1193 ************************************************************************
1194 */
1195 static void arcmsr_poll(struct cam_sim * psim)
1196 {
1197 	struct AdapterControlBlock *acb;
1198 	int	mutex;
1199 
1200 	acb = (struct AdapterControlBlock *)cam_sim_softc(psim);
1201 	mutex = lockstatus(&acb->qbuffer_lock, curthread);
1202 	if( mutex == 0 )
1203 		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1204 	arcmsr_interrupt(acb);
1205 	if( mutex == 0 )
1206 		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1207 }
1208 /*
1209 **************************************************************************
1210 **************************************************************************
1211 */
1212 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1213 {
1214 	struct QBUFFER *prbuffer;
1215 	u_int8_t *pQbuffer;
1216 	u_int8_t *iop_data;
1217 	int my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
1218 
1219 	/*check this iop data if overflow my rqbuffer*/
1220 	rqbuf_lastindex=acb->rqbuf_lastindex;
1221 	rqbuf_firstindex=acb->rqbuf_firstindex;
1222 	prbuffer=arcmsr_get_iop_rqbuffer(acb);
1223 	iop_data=(u_int8_t *)prbuffer->data;
1224 	iop_len=prbuffer->data_len;
1225 	my_empty_len=(rqbuf_firstindex-rqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1226 	if(my_empty_len>=iop_len) {
1227 		while(iop_len > 0) {
1228 			pQbuffer=&acb->rqbuffer[rqbuf_lastindex];
1229 			memcpy(pQbuffer, iop_data, 1);
1230 			rqbuf_lastindex++;
1231 			rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;/*if last index number set it to 0 */
1232 			iop_data++;
1233 			iop_len--;
1234 		}
1235 		acb->rqbuf_lastindex=rqbuf_lastindex;
1236 		arcmsr_iop_message_read(acb);
1237 		/*signature, let IOP know data has been read */
1238 	} else {
1239 		acb->acb_flags|=ACB_F_IOPDATA_OVERFLOW;
1240 	}
1241 }
1242 /*
1243 **************************************************************************
1244 **************************************************************************
1245 */
1246 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
1247 {
1248 	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
1249 	/*
1250 	*****************************************************************
1251 	**   check if there are any mail packages from user space program
1252 	**   in my post bag, now is the time to send them into Areca's firmware
1253 	*****************************************************************
1254 	*/
1255 	if(acb->wqbuf_firstindex!=acb->wqbuf_lastindex) {
1256 		u_int8_t *pQbuffer;
1257 		struct QBUFFER *pwbuffer;
1258 		u_int8_t *iop_data;
1259 		int allxfer_len=0;
1260 
1261 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1262 		pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1263 		iop_data=(u_int8_t *)pwbuffer->data;
1264 		while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1265 			&& (allxfer_len<124)) {
1266 			pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1267 			memcpy(iop_data, pQbuffer, 1);
1268 			acb->wqbuf_firstindex++;
1269 			acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */
1270 			iop_data++;
1271 			allxfer_len++;
1272 		}
1273 		pwbuffer->data_len=allxfer_len;
1274 		/*
1275 		** push inbound doorbell tell iop driver data write ok
1276 		** and wait reply on next hwinterrupt for next Qbuffer post
1277 		*/
1278 		arcmsr_iop_message_wrote(acb);
1279 	}
1280 	if(acb->wqbuf_firstindex==acb->wqbuf_lastindex) {
1281 		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
1282 	}
1283 }
1284 
1285 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb)
1286 {
1287 /*
1288 	if (ccb->ccb_h.status != CAM_REQ_CMP)
1289 		kprintf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x, failure status=%x\n",ccb->ccb_h.target_id,ccb->ccb_h.target_lun,ccb->ccb_h.status);
1290 	else
1291 		kprintf("arcmsr_rescanLun_cb: Rescan lun successfully!\n");
1292 */
1293 	xpt_free_path(ccb->ccb_h.path);
1294 }
1295 
1296 static void	arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun)
1297 {
1298 	struct cam_path     *path;
1299 	union ccb            ccb;
1300 
1301 	if (xpt_create_path(&path, xpt_periph, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP)
1302 		return;
1303 /*	kprintf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */
1304 	bzero(&ccb, sizeof(union ccb));
1305 	xpt_setup_ccb(&ccb.ccb_h, path, 5);
1306 	ccb.ccb_h.func_code = XPT_SCAN_LUN;
1307 	ccb.ccb_h.cbfcnp = arcmsr_rescanLun_cb;
1308 	ccb.crcn.flags = CAM_FLAG_NONE;
1309 	xpt_action(&ccb);
1310 }
1311 
1312 
1313 static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun)
1314 {
1315 	struct CommandControlBlock *srb;
1316 	u_int32_t intmask_org;
1317 	int i;
1318 
1319 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1320 	/* disable all outbound interrupts */
1321 	intmask_org = arcmsr_disable_allintr(acb);
1322 	for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++)
1323 	{
1324 		srb = acb->psrb_pool[i];
1325 		if (srb->srb_state == ARCMSR_SRB_START)
1326 		{
1327 		if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun))
1328             {
1329 			srb->srb_state = ARCMSR_SRB_ABORTED;
1330 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
1331 			arcmsr_srb_complete(srb, 1);
1332 				kprintf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb);
1333 		}
1334 		}
1335 	}
1336 	/* enable outbound Post Queue, outbound doorbell Interrupt */
1337 	arcmsr_enable_allintr(acb, intmask_org);
1338 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1339 }
1340 
1341 
1342 /*
1343 **************************************************************************
1344 **************************************************************************
1345 */
1346 static void arcmsr_dr_handle(struct AdapterControlBlock *acb) {
1347 	u_int32_t	devicemap;
1348 	u_int32_t	target, lun;
1349     u_int32_t	deviceMapCurrent[4]={0};
1350     u_int8_t	*pDevMap;
1351 
1352 	switch (acb->adapter_type) {
1353 	case ACB_ADAPTER_TYPE_A:
1354 			devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1355 			for (target= 0; target < 4; target++)
1356 			{
1357 		deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0],  devicemap);
1358 		devicemap += 4;
1359 			}
1360 			break;
1361 
1362 	case ACB_ADAPTER_TYPE_B:
1363 			devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1364 			for (target= 0; target < 4; target++)
1365 			{
1366 		deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1],  devicemap);
1367 		devicemap += 4;
1368 			}
1369 			break;
1370 
1371 	case ACB_ADAPTER_TYPE_C:
1372 			devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1373 			for (target= 0; target < 4; target++)
1374 			{
1375 		deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0],  devicemap);
1376 		devicemap += 4;
1377 			}
1378 			break;
1379 	}
1380 
1381 		if(acb->acb_flags & ACB_F_BUS_HANG_ON)
1382 		{
1383 			acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
1384 		}
1385 		/*
1386 		** adapter posted CONFIG message
1387 		** copy the new map, note if there are differences with the current map
1388 		*/
1389 		pDevMap = (u_int8_t	*)&deviceMapCurrent[0];
1390 		for (target= 0; target < ARCMSR_MAX_TARGETID - 1; target++)
1391 		{
1392 			if (*pDevMap != acb->device_map[target])
1393 			{
1394                 u_int8_t difference, bit_check;
1395 
1396                 difference= *pDevMap ^ acb->device_map[target];
1397                 for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++)
1398                 {
1399                     bit_check=(1 << lun);						/*check bit from 0....31*/
1400                     if(difference & bit_check)
1401                     {
1402                         if(acb->device_map[target] & bit_check)
1403                         {/* unit departed */
1404 							kprintf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun);
1405 							arcmsr_abort_dr_ccbs(acb, target, lun);
1406 				arcmsr_rescan_lun(acb, target, lun);
1407 						acb->devstate[target][lun] = ARECA_RAID_GONE;
1408                         }
1409                         else
1410                         {/* unit arrived */
1411 							kprintf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n",target,lun);
1412 				arcmsr_rescan_lun(acb, target, lun);
1413 						acb->devstate[target][lun] = ARECA_RAID_GOOD;
1414                         }
1415                     }
1416                 }
1417 /*				kprintf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */
1418 				acb->device_map[target]= *pDevMap;
1419 			}
1420 			pDevMap++;
1421 		}
1422 }
1423 /*
1424 **************************************************************************
1425 **************************************************************************
1426 */
1427 static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) {
1428 	u_int32_t outbound_message;
1429 
1430 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1431 	outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]);
1432 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1433 		arcmsr_dr_handle( acb );
1434 }
1435 /*
1436 **************************************************************************
1437 **************************************************************************
1438 */
1439 static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) {
1440 	u_int32_t outbound_message;
1441 
1442 	/* clear interrupts */
1443 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1444 	outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]);
1445 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1446 		arcmsr_dr_handle( acb );
1447 }
1448 /*
1449 **************************************************************************
1450 **************************************************************************
1451 */
1452 static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) {
1453 	u_int32_t outbound_message;
1454 
1455 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
1456 	outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]);
1457 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1458 		arcmsr_dr_handle( acb );
1459 }
1460 /*
1461 **************************************************************************
1462 **************************************************************************
1463 */
1464 static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
1465 {
1466 	u_int32_t outbound_doorbell;
1467 
1468 	/*
1469 	*******************************************************************
1470 	**  Maybe here we need to check wrqbuffer_lock is lock or not
1471 	**  DOORBELL: din! don!
1472 	**  check if there are any mail need to pack from firmware
1473 	*******************************************************************
1474 	*/
1475 	outbound_doorbell=CHIP_REG_READ32(HBA_MessageUnit,
1476 	0, outbound_doorbell);
1477 	CHIP_REG_WRITE32(HBA_MessageUnit,
1478 	0, outbound_doorbell, outbound_doorbell); /* clear doorbell interrupt */
1479 	if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
1480 		arcmsr_iop2drv_data_wrote_handle(acb);
1481 	}
1482 	if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
1483 		arcmsr_iop2drv_data_read_handle(acb);
1484 	}
1485 }
1486 /*
1487 **************************************************************************
1488 **************************************************************************
1489 */
1490 static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
1491 {
1492 	u_int32_t outbound_doorbell;
1493 
1494 	/*
1495 	*******************************************************************
1496 	**  Maybe here we need to check wrqbuffer_lock is lock or not
1497 	**  DOORBELL: din! don!
1498 	**  check if there are any mail need to pack from firmware
1499 	*******************************************************************
1500 	*/
1501 	outbound_doorbell=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
1502 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /* clear doorbell interrupt */
1503 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
1504 		arcmsr_iop2drv_data_wrote_handle(acb);
1505 	}
1506 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
1507 		arcmsr_iop2drv_data_read_handle(acb);
1508 	}
1509 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
1510 		arcmsr_hbc_message_isr(acb);    /* messenger of "driver to iop commands" */
1511 	}
1512 }
1513 /*
1514 **************************************************************************
1515 **************************************************************************
1516 */
1517 static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1518 {
1519 	u_int32_t flag_srb;
1520 	u_int16_t error;
1521 
1522 	/*
1523 	*****************************************************************************
1524 	**               areca cdb command done
1525 	*****************************************************************************
1526 	*/
1527 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1528 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1529 	while((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
1530 		0, outbound_queueport)) != 0xFFFFFFFF) {
1531 		/* check if command done with no error*/
1532         error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
1533 		arcmsr_drain_donequeue(acb, flag_srb, error);
1534 	}	/*drain reply FIFO*/
1535 }
1536 /*
1537 **************************************************************************
1538 **************************************************************************
1539 */
1540 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1541 {
1542 	struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1543 	u_int32_t flag_srb;
1544 	int index;
1545 	u_int16_t error;
1546 
1547 	/*
1548 	*****************************************************************************
1549 	**               areca cdb command done
1550 	*****************************************************************************
1551 	*/
1552 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1553 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1554 	index=phbbmu->doneq_index;
1555 	while((flag_srb=phbbmu->done_qbuffer[index]) != 0) {
1556 		phbbmu->done_qbuffer[index]=0;
1557 		index++;
1558 		index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
1559 		phbbmu->doneq_index=index;
1560 		/* check if command done with no error*/
1561         error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
1562 		arcmsr_drain_donequeue(acb, flag_srb, error);
1563 	}	/*drain reply FIFO*/
1564 }
1565 /*
1566 **************************************************************************
1567 **************************************************************************
1568 */
1569 static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
1570 {
1571 	u_int32_t flag_srb,throttling=0;
1572 	u_int16_t error;
1573 
1574 	/*
1575 	*****************************************************************************
1576 	**               areca cdb command done
1577 	*****************************************************************************
1578 	*/
1579 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1580 
1581 	while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1582 
1583 		flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
1584 		/* check if command done with no error*/
1585         error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
1586 		arcmsr_drain_donequeue(acb, flag_srb, error);
1587         if(throttling==ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
1588             CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
1589             break;
1590         }
1591         throttling++;
1592 	}	/*drain reply FIFO*/
1593 }
1594 /*
1595 **********************************************************************
1596 **********************************************************************
1597 */
1598 static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb)
1599 {
1600 	u_int32_t outbound_intStatus;
1601 	/*
1602 	*********************************************
1603 	**   check outbound intstatus
1604 	*********************************************
1605 	*/
1606 	outbound_intStatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
1607 	if(!outbound_intStatus) {
1608 		/*it must be share irq*/
1609 		return;
1610 	}
1611 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intStatus);/*clear interrupt*/
1612 	/* MU doorbell interrupts*/
1613 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
1614 		arcmsr_hba_doorbell_isr(acb);
1615 	}
1616 	/* MU post queue interrupts*/
1617 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
1618 		arcmsr_hba_postqueue_isr(acb);
1619 	}
1620 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1621 		arcmsr_hba_message_isr(acb);
1622 	}
1623 }
1624 /*
1625 **********************************************************************
1626 **********************************************************************
1627 */
1628 static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb)
1629 {
1630 	u_int32_t outbound_doorbell;
1631 	/*
1632 	*********************************************
1633 	**   check outbound intstatus
1634 	*********************************************
1635 	*/
1636 	outbound_doorbell=CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & acb->outbound_int_enable;
1637 	if(!outbound_doorbell) {
1638 		/*it must be share irq*/
1639 		return;
1640 	}
1641 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */
1642 	CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell);
1643 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1644 	/* MU ioctl transfer doorbell interrupts*/
1645 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
1646 		arcmsr_iop2drv_data_wrote_handle(acb);
1647 	}
1648 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
1649 		arcmsr_iop2drv_data_read_handle(acb);
1650 	}
1651 	/* MU post queue interrupts*/
1652 	if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1653 		arcmsr_hbb_postqueue_isr(acb);
1654 	}
1655 	if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1656 		arcmsr_hbb_message_isr(acb);
1657 	}
1658 }
1659 /*
1660 **********************************************************************
1661 **********************************************************************
1662 */
1663 static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb)
1664 {
1665 	u_int32_t host_interrupt_status;
1666 	/*
1667 	*********************************************
1668 	**   check outbound intstatus
1669 	*********************************************
1670 	*/
1671 	host_interrupt_status=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status);
1672 	if(!host_interrupt_status) {
1673 		/*it must be share irq*/
1674 		return;
1675 	}
1676 	/* MU doorbell interrupts*/
1677 	if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
1678 		arcmsr_hbc_doorbell_isr(acb);
1679 	}
1680 	/* MU post queue interrupts*/
1681 	if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1682 		arcmsr_hbc_postqueue_isr(acb);
1683 	}
1684 }
1685 /*
1686 ******************************************************************************
1687 ******************************************************************************
1688 */
1689 static void arcmsr_interrupt(struct AdapterControlBlock *acb)
1690 {
1691 	switch (acb->adapter_type) {
1692 	case ACB_ADAPTER_TYPE_A:
1693 		arcmsr_handle_hba_isr(acb);
1694 		break;
1695 	case ACB_ADAPTER_TYPE_B:
1696 		arcmsr_handle_hbb_isr(acb);
1697 		break;
1698 	case ACB_ADAPTER_TYPE_C:
1699 		arcmsr_handle_hbc_isr(acb);
1700 		break;
1701 	default:
1702 		kprintf("arcmsr%d: interrupt service,"
1703 		" unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type);
1704 		break;
1705 	}
1706 }
1707 /*
1708 **********************************************************************
1709 **********************************************************************
1710 */
1711 static void arcmsr_intr_handler(void *arg)
1712 {
1713 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg;
1714 
1715 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1716 	arcmsr_interrupt(acb);
1717 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1718 }
1719 /*
1720 ******************************************************************************
1721 ******************************************************************************
1722 */
1723 static void	arcmsr_polling_devmap(void* arg)
1724 {
1725 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
1726 	switch (acb->adapter_type) {
1727 	case ACB_ADAPTER_TYPE_A:
1728 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1729 		break;
1730 
1731 	case ACB_ADAPTER_TYPE_B:
1732 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
1733 		break;
1734 
1735 	case ACB_ADAPTER_TYPE_C:
1736 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1737 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1738 		break;
1739 	}
1740 
1741 	if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)
1742 	{
1743 		callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb);	/* polling per 5 seconds */
1744 	}
1745 }
1746 
1747 /*
1748 *******************************************************************************
1749 **
1750 *******************************************************************************
1751 */
1752 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
1753 {
1754 	u_int32_t intmask_org;
1755 
1756 	if(acb!=NULL) {
1757 		/* stop adapter background rebuild */
1758 		if(acb->acb_flags & ACB_F_MSG_START_BGRB) {
1759 			intmask_org = arcmsr_disable_allintr(acb);
1760 			arcmsr_stop_adapter_bgrb(acb);
1761 			arcmsr_flush_adapter_cache(acb);
1762 			arcmsr_enable_allintr(acb, intmask_org);
1763 		}
1764 	}
1765 }
1766 /*
1767 ***********************************************************************
1768 **
1769 ************************************************************************
1770 */
1771 u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
1772 {
1773 	struct CMD_MESSAGE_FIELD * pcmdmessagefld;
1774 	u_int32_t retvalue=EINVAL;
1775 
1776 	pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg;
1777 	if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
1778 		return retvalue;
1779 	}
1780 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1781 	switch(ioctl_cmd) {
1782 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
1783 			u_int8_t * pQbuffer;
1784 			u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
1785 			u_int32_t allxfer_len=0;
1786 
1787 			while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex)
1788 				&& (allxfer_len<1031)) {
1789 				/*copy READ QBUFFER to srb*/
1790 				pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex];
1791 				memcpy(ptmpQbuffer, pQbuffer, 1);
1792 				acb->rqbuf_firstindex++;
1793 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1794 				/*if last index number set it to 0 */
1795 				ptmpQbuffer++;
1796 				allxfer_len++;
1797 			}
1798 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1799 				struct QBUFFER * prbuffer;
1800 				u_int8_t * iop_data;
1801 				u_int32_t iop_len;
1802 
1803 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1804 				prbuffer=arcmsr_get_iop_rqbuffer(acb);
1805 				iop_data=(u_int8_t *)prbuffer->data;
1806 				iop_len=(u_int32_t)prbuffer->data_len;
1807 				/*this iop data does no chance to make me overflow again here, so just do it*/
1808 				while(iop_len>0) {
1809 					pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
1810 					memcpy(pQbuffer, iop_data, 1);
1811 					acb->rqbuf_lastindex++;
1812 					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1813 					/*if last index number set it to 0 */
1814 					iop_data++;
1815 					iop_len--;
1816 				}
1817 				arcmsr_iop_message_read(acb);
1818 				/*signature, let IOP know data has been readed */
1819 			}
1820 			pcmdmessagefld->cmdmessage.Length=allxfer_len;
1821 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1822 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1823 		}
1824 		break;
1825 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1826 			u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1827 			u_int8_t * pQbuffer;
1828 			u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
1829 
1830 			user_len=pcmdmessagefld->cmdmessage.Length;
1831 			/*check if data xfer length of this request will overflow my array qbuffer */
1832 			wqbuf_lastindex=acb->wqbuf_lastindex;
1833 			wqbuf_firstindex=acb->wqbuf_firstindex;
1834 			if(wqbuf_lastindex!=wqbuf_firstindex) {
1835 				arcmsr_post_ioctldata2iop(acb);
1836 				pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1837 			} else {
1838 				my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1839 				if(my_empty_len>=user_len) {
1840 					while(user_len>0) {
1841 						/*copy srb data to wqbuffer*/
1842 						pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex];
1843 						memcpy(pQbuffer, ptmpuserbuffer, 1);
1844 						acb->wqbuf_lastindex++;
1845 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1846 						/*if last index number set it to 0 */
1847 						ptmpuserbuffer++;
1848 						user_len--;
1849 					}
1850 					/*post fist Qbuffer*/
1851 					if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
1852 						acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED;
1853 						arcmsr_post_ioctldata2iop(acb);
1854 					}
1855 					pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1856 				} else {
1857 					pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1858 				}
1859 			}
1860 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1861 		}
1862 		break;
1863 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
1864 			u_int8_t * pQbuffer=acb->rqbuffer;
1865 
1866 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1867 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1868 				arcmsr_iop_message_read(acb);
1869 				/*signature, let IOP know data has been readed */
1870 			}
1871 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
1872 			acb->rqbuf_firstindex=0;
1873 			acb->rqbuf_lastindex=0;
1874 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1875 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1876 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1877 		}
1878 		break;
1879 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
1880 		{
1881 			u_int8_t * pQbuffer=acb->wqbuffer;
1882 
1883 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1884 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1885                 arcmsr_iop_message_read(acb);
1886 				/*signature, let IOP know data has been readed */
1887 			}
1888 			acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
1889 			acb->wqbuf_firstindex=0;
1890 			acb->wqbuf_lastindex=0;
1891 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1892 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1893 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1894 		}
1895 		break;
1896 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1897 			u_int8_t * pQbuffer;
1898 
1899 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1900 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1901                 arcmsr_iop_message_read(acb);
1902 				/*signature, let IOP know data has been readed */
1903 			}
1904 			acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
1905 					|ACB_F_MESSAGE_RQBUFFER_CLEARED
1906 					|ACB_F_MESSAGE_WQBUFFER_READ);
1907 			acb->rqbuf_firstindex=0;
1908 			acb->rqbuf_lastindex=0;
1909 			acb->wqbuf_firstindex=0;
1910 			acb->wqbuf_lastindex=0;
1911 			pQbuffer=acb->rqbuffer;
1912 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
1913 			pQbuffer=acb->wqbuffer;
1914 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
1915 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1916 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1917 		}
1918 		break;
1919 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
1920 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F;
1921 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1922 		}
1923 		break;
1924 	case ARCMSR_MESSAGE_SAY_HELLO: {
1925 			u_int8_t * hello_string="Hello! I am ARCMSR";
1926 			u_int8_t * puserbuffer=(u_int8_t *)pcmdmessagefld->messagedatabuffer;
1927 
1928 			if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) {
1929 				pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1930 				ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1931 				return ENOIOCTL;
1932 			}
1933 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1934 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1935 		}
1936 		break;
1937 	case ARCMSR_MESSAGE_SAY_GOODBYE: {
1938 			arcmsr_iop_parking(acb);
1939 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1940 		}
1941 		break;
1942 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
1943 			arcmsr_flush_adapter_cache(acb);
1944 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1945 		}
1946 		break;
1947 	}
1948 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1949 	return (retvalue);
1950 }
1951 /*
1952 **************************************************************************
1953 **************************************************************************
1954 */
1955 static void arcmsr_free_srb(struct CommandControlBlock *srb)
1956 {
1957 	struct AdapterControlBlock	*acb;
1958 	int	mutex;
1959 
1960 	acb = srb->acb;
1961 	mutex = lockstatus(&acb->qbuffer_lock, curthread);
1962 	if( mutex == 0 )
1963 		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1964 	srb->srb_state=ARCMSR_SRB_DONE;
1965 	srb->srb_flags=0;
1966 	acb->srbworkingQ[acb->workingsrb_doneindex]=srb;
1967 	acb->workingsrb_doneindex++;
1968 	acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM;
1969 	if( mutex == 0 )
1970 		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1971 }
1972 /*
1973 **************************************************************************
1974 **************************************************************************
1975 */
1976 struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb)
1977 {
1978 	struct CommandControlBlock *srb=NULL;
1979 	u_int32_t workingsrb_startindex, workingsrb_doneindex;
1980 	int	mutex;
1981 
1982 	mutex = lockstatus(&acb->qbuffer_lock, curthread);
1983 	if( mutex == 0 )
1984 		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1985 	workingsrb_doneindex=acb->workingsrb_doneindex;
1986 	workingsrb_startindex=acb->workingsrb_startindex;
1987 	srb=acb->srbworkingQ[workingsrb_startindex];
1988 	workingsrb_startindex++;
1989 	workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM;
1990 	if(workingsrb_doneindex!=workingsrb_startindex) {
1991 		acb->workingsrb_startindex=workingsrb_startindex;
1992 	} else {
1993 		srb=NULL;
1994 	}
1995 	if( mutex == 0 )
1996 		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1997 	return(srb);
1998 }
1999 /*
2000 **************************************************************************
2001 **************************************************************************
2002 */
2003 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb)
2004 {
2005 	struct CMD_MESSAGE_FIELD * pcmdmessagefld;
2006 	int retvalue = 0, transfer_len = 0;
2007 	char *buffer;
2008 	u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 |
2009 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 |
2010 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8  |
2011 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8];
2012 					/* 4 bytes: Areca io control code */
2013 	if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2014 		buffer = pccb->csio.data_ptr;
2015 		transfer_len = pccb->csio.dxfer_len;
2016 	} else {
2017 		retvalue = ARCMSR_MESSAGE_FAIL;
2018 		goto message_out;
2019 	}
2020 	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
2021 		retvalue = ARCMSR_MESSAGE_FAIL;
2022 		goto message_out;
2023 	}
2024 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
2025 	switch(controlcode) {
2026 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
2027 			u_int8_t *pQbuffer;
2028 			u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
2029 			int32_t allxfer_len = 0;
2030 
2031 			while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
2032 				&& (allxfer_len < 1031)) {
2033 				pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
2034 				memcpy(ptmpQbuffer, pQbuffer, 1);
2035 				acb->rqbuf_firstindex++;
2036 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
2037 				ptmpQbuffer++;
2038 				allxfer_len++;
2039 			}
2040 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2041 				struct QBUFFER  *prbuffer;
2042 				u_int8_t  *iop_data;
2043 				int32_t iop_len;
2044 
2045 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2046 				prbuffer=arcmsr_get_iop_rqbuffer(acb);
2047 				iop_data = (u_int8_t *)prbuffer->data;
2048 				iop_len =(u_int32_t)prbuffer->data_len;
2049 				while (iop_len > 0) {
2050 			        pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
2051 					memcpy(pQbuffer, iop_data, 1);
2052 					acb->rqbuf_lastindex++;
2053 					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2054 					iop_data++;
2055 					iop_len--;
2056 				}
2057 				arcmsr_iop_message_read(acb);
2058 			}
2059 			pcmdmessagefld->cmdmessage.Length = allxfer_len;
2060 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2061 			retvalue=ARCMSR_MESSAGE_SUCCESS;
2062 		}
2063 		break;
2064 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2065 			int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
2066 			u_int8_t *pQbuffer;
2067 			u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
2068 
2069 			user_len = pcmdmessagefld->cmdmessage.Length;
2070 			wqbuf_lastindex = acb->wqbuf_lastindex;
2071 			wqbuf_firstindex = acb->wqbuf_firstindex;
2072 			if (wqbuf_lastindex != wqbuf_firstindex) {
2073 				arcmsr_post_ioctldata2iop(acb);
2074 				/* has error report sensedata */
2075 			    if(pccb->csio.sense_len) {
2076 				((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2077 				/* Valid,ErrorCode */
2078 				((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2079 				/* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2080 				((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2081 				/* AdditionalSenseLength */
2082 				((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2083 				/* AdditionalSenseCode */
2084 				}
2085 				retvalue = ARCMSR_MESSAGE_FAIL;
2086 			} else {
2087 				my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
2088 						&(ARCMSR_MAX_QBUFFER - 1);
2089 				if (my_empty_len >= user_len) {
2090 					while (user_len > 0) {
2091 						pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
2092 						memcpy(pQbuffer, ptmpuserbuffer, 1);
2093 						acb->wqbuf_lastindex++;
2094 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2095 						ptmpuserbuffer++;
2096 						user_len--;
2097 					}
2098 					if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2099 						acb->acb_flags &=
2100 						~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2101 						arcmsr_post_ioctldata2iop(acb);
2102 					}
2103 				} else {
2104 					/* has error report sensedata */
2105 					if(pccb->csio.sense_len) {
2106 					((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2107 					/* Valid,ErrorCode */
2108 					((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2109 					/* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2110 					((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2111 					/* AdditionalSenseLength */
2112 					((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2113 					/* AdditionalSenseCode */
2114 					}
2115 					retvalue = ARCMSR_MESSAGE_FAIL;
2116 				}
2117 			}
2118 		}
2119 		break;
2120 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2121 			u_int8_t *pQbuffer = acb->rqbuffer;
2122 
2123 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2124 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2125 				arcmsr_iop_message_read(acb);
2126 			}
2127 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2128 			acb->rqbuf_firstindex = 0;
2129 			acb->rqbuf_lastindex = 0;
2130 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2131 			pcmdmessagefld->cmdmessage.ReturnCode =
2132 			ARCMSR_MESSAGE_RETURNCODE_OK;
2133 		}
2134 		break;
2135 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
2136 			u_int8_t *pQbuffer = acb->wqbuffer;
2137 
2138 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2139 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2140 				arcmsr_iop_message_read(acb);
2141 			}
2142 			acb->acb_flags |=
2143 				(ACB_F_MESSAGE_WQBUFFER_CLEARED |
2144 					ACB_F_MESSAGE_WQBUFFER_READ);
2145 			acb->wqbuf_firstindex = 0;
2146 			acb->wqbuf_lastindex = 0;
2147 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2148 			pcmdmessagefld->cmdmessage.ReturnCode =
2149 				ARCMSR_MESSAGE_RETURNCODE_OK;
2150 		}
2151 		break;
2152 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
2153 			u_int8_t *pQbuffer;
2154 
2155 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2156 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2157 				arcmsr_iop_message_read(acb);
2158 			}
2159 			acb->acb_flags |=
2160 				(ACB_F_MESSAGE_WQBUFFER_CLEARED
2161 				| ACB_F_MESSAGE_RQBUFFER_CLEARED
2162 				| ACB_F_MESSAGE_WQBUFFER_READ);
2163 			acb->rqbuf_firstindex = 0;
2164 			acb->rqbuf_lastindex = 0;
2165 			acb->wqbuf_firstindex = 0;
2166 			acb->wqbuf_lastindex = 0;
2167 			pQbuffer = acb->rqbuffer;
2168 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2169 			pQbuffer = acb->wqbuffer;
2170 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2171 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2172 		}
2173 		break;
2174 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
2175 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
2176 		}
2177 		break;
2178 	case ARCMSR_MESSAGE_SAY_HELLO: {
2179 			int8_t * hello_string = "Hello! I am ARCMSR";
2180 
2181 			memcpy(pcmdmessagefld->messagedatabuffer, hello_string
2182 				, (int16_t)strlen(hello_string));
2183 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2184 		}
2185 		break;
2186 	case ARCMSR_MESSAGE_SAY_GOODBYE:
2187 		arcmsr_iop_parking(acb);
2188 		break;
2189 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
2190 		arcmsr_flush_adapter_cache(acb);
2191 		break;
2192 	default:
2193 		retvalue = ARCMSR_MESSAGE_FAIL;
2194 	}
2195 message_out:
2196 	return (retvalue);
2197 }
2198 /*
2199 *********************************************************************
2200 *********************************************************************
2201 */
2202 static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2203 {
2204 	struct CommandControlBlock *srb=(struct CommandControlBlock *)arg;
2205 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb;
2206 	union ccb * pccb;
2207 	int target, lun;
2208 
2209 	pccb=srb->pccb;
2210 	target=pccb->ccb_h.target_id;
2211 	lun=pccb->ccb_h.target_lun;
2212 #ifdef ARCMSR_DEBUG1
2213 	acb->pktRequestCount++;
2214 #endif
2215 	if(error != 0) {
2216 		if(error != EFBIG) {
2217 			kprintf("arcmsr%d: unexpected error %x"
2218 				" returned from 'bus_dmamap_load' \n"
2219 				, acb->pci_unit, error);
2220 		}
2221 		if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2222 			pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2223 		}
2224 		arcmsr_srb_complete(srb, 0);
2225 		return;
2226 	}
2227 	if(nseg > ARCMSR_MAX_SG_ENTRIES) {
2228 		pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2229 		arcmsr_srb_complete(srb, 0);
2230 		return;
2231 	}
2232 	if(acb->acb_flags & ACB_F_BUS_RESET) {
2233 		kprintf("arcmsr%d: bus reset and return busy \n", acb->pci_unit);
2234 		pccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2235 		arcmsr_srb_complete(srb, 0);
2236 		return;
2237 	}
2238 	if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
2239 		u_int8_t block_cmd, cmd;
2240 
2241 		cmd = pccb->csio.cdb_io.cdb_bytes[0];
2242 		block_cmd= cmd & 0x0f;
2243 		if(block_cmd==0x08 || block_cmd==0x0a) {
2244 			kprintf("arcmsr%d:block 'read/write' command "
2245 				"with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n"
2246 				, acb->pci_unit, cmd, target, lun);
2247 			pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
2248 			arcmsr_srb_complete(srb, 0);
2249 			return;
2250 		}
2251 	}
2252 	if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2253 		if(nseg != 0) {
2254 			bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
2255 		}
2256 		arcmsr_srb_complete(srb, 0);
2257 		return;
2258 	}
2259 	if(acb->srboutstandingcount > ARCMSR_MAX_OUTSTANDING_CMD) {
2260 		xpt_freeze_simq(acb->psim, 1);
2261 		pccb->ccb_h.status = CAM_REQUEUE_REQ;
2262 		acb->acb_flags |= ACB_F_CAM_DEV_QFRZN;
2263 		arcmsr_srb_complete(srb, 0);
2264 		return;
2265 	}
2266 	pccb->ccb_h.status |= CAM_SIM_QUEUED;
2267 	arcmsr_build_srb(srb, dm_segs, nseg);
2268 	arcmsr_post_srb(acb, srb);
2269 	if (pccb->ccb_h.timeout != CAM_TIME_INFINITY)
2270 	{
2271 		arcmsr_callout_init(&srb->ccb_callout);
2272 		callout_reset(&srb->ccb_callout, ((pccb->ccb_h.timeout + (ARCMSR_TIMEOUT_DELAY * 1000)) * hz) / 1000, arcmsr_srb_timeout, srb);
2273 		srb->srb_flags |= SRB_FLAG_TIMER_START;
2274 	}
2275 }
2276 /*
2277 *****************************************************************************************
2278 *****************************************************************************************
2279 */
2280 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb)
2281 {
2282 	struct CommandControlBlock *srb;
2283 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr;
2284 	u_int32_t intmask_org;
2285 	int i=0;
2286 
2287 	acb->num_aborts++;
2288 	/*
2289 	***************************************************************************
2290 	** It is the upper layer do abort command this lock just prior to calling us.
2291 	** First determine if we currently own this command.
2292 	** Start by searching the device queue. If not found
2293 	** at all, and the system wanted us to just abort the
2294 	** command return success.
2295 	***************************************************************************
2296 	*/
2297 	if(acb->srboutstandingcount!=0) {
2298 		/* disable all outbound interrupt */
2299 		intmask_org=arcmsr_disable_allintr(acb);
2300 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
2301 			srb=acb->psrb_pool[i];
2302 			if(srb->srb_state==ARCMSR_SRB_START) {
2303 				if(srb->pccb==abortccb) {
2304 					srb->srb_state=ARCMSR_SRB_ABORTED;
2305 					kprintf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'"
2306 						"outstanding command \n"
2307 						, acb->pci_unit, abortccb->ccb_h.target_id
2308 						, abortccb->ccb_h.target_lun, srb);
2309 					arcmsr_polling_srbdone(acb, srb);
2310 					/* enable outbound Post Queue, outbound doorbell Interrupt */
2311 					arcmsr_enable_allintr(acb, intmask_org);
2312 					return (TRUE);
2313 				}
2314 			}
2315 		}
2316 		/* enable outbound Post Queue, outbound doorbell Interrupt */
2317 		arcmsr_enable_allintr(acb, intmask_org);
2318 	}
2319 	return(FALSE);
2320 }
2321 /*
2322 ****************************************************************************
2323 ****************************************************************************
2324 */
2325 static void arcmsr_bus_reset(struct AdapterControlBlock *acb)
2326 {
2327 	int retry=0;
2328 
2329 	acb->num_resets++;
2330 	acb->acb_flags |=ACB_F_BUS_RESET;
2331 	while(acb->srboutstandingcount!=0 && retry < 400) {
2332 		arcmsr_interrupt(acb);
2333 		UDELAY(25000);
2334 		retry++;
2335 	}
2336 	arcmsr_iop_reset(acb);
2337 	acb->acb_flags &= ~ACB_F_BUS_RESET;
2338 }
2339 /*
2340 **************************************************************************
2341 **************************************************************************
2342 */
2343 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
2344 		union ccb * pccb)
2345 {
2346 	pccb->ccb_h.status |= CAM_REQ_CMP;
2347 	switch (pccb->csio.cdb_io.cdb_bytes[0]) {
2348 	case INQUIRY: {
2349 		unsigned char inqdata[36];
2350 		char *buffer=pccb->csio.data_ptr;
2351 
2352 		if (pccb->ccb_h.target_lun) {
2353 			pccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2354 			xpt_done(pccb);
2355 			return;
2356 		}
2357 		inqdata[0] = T_PROCESSOR;	/* Periph Qualifier & Periph Dev Type */
2358 		inqdata[1] = 0;				/* rem media bit & Dev Type Modifier */
2359 		inqdata[2] = 0;				/* ISO, ECMA, & ANSI versions */
2360 		inqdata[3] = 0;
2361 		inqdata[4] = 31;			/* length of additional data */
2362 		inqdata[5] = 0;
2363 		inqdata[6] = 0;
2364 		inqdata[7] = 0;
2365 		strncpy(&inqdata[8], "Areca   ", 8);	/* Vendor Identification */
2366 		strncpy(&inqdata[16], "RAID controller ", 16);	/* Product Identification */
2367 		strncpy(&inqdata[32], "R001", 4); /* Product Revision */
2368 		memcpy(buffer, inqdata, sizeof(inqdata));
2369 		xpt_done(pccb);
2370 	}
2371 	break;
2372 	case WRITE_BUFFER:
2373 	case READ_BUFFER: {
2374 		if (arcmsr_iop_message_xfer(acb, pccb)) {
2375 			pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2376 			pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
2377 		}
2378 		xpt_done(pccb);
2379 	}
2380 	break;
2381 	default:
2382 		xpt_done(pccb);
2383 	}
2384 }
2385 /*
2386 *********************************************************************
2387 *********************************************************************
2388 */
2389 static void arcmsr_action(struct cam_sim * psim, union ccb * pccb)
2390 {
2391 	struct AdapterControlBlock *  acb;
2392 
2393 	acb=(struct AdapterControlBlock *) cam_sim_softc(psim);
2394 	if(acb==NULL) {
2395 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2396 		xpt_done(pccb);
2397 		return;
2398 	}
2399 	switch (pccb->ccb_h.func_code) {
2400 	case XPT_SCSI_IO: {
2401 			struct CommandControlBlock *srb;
2402 			int target=pccb->ccb_h.target_id;
2403 
2404 			if(target == 16) {
2405 				/* virtual device for iop message transfer */
2406 				arcmsr_handle_virtual_command(acb, pccb);
2407 				return;
2408 			}
2409 			if((srb=arcmsr_get_freesrb(acb)) == NULL) {
2410 				pccb->ccb_h.status |= CAM_RESRC_UNAVAIL;
2411 				xpt_done(pccb);
2412 				return;
2413 			}
2414 			pccb->ccb_h.arcmsr_ccbsrb_ptr=srb;
2415 			pccb->ccb_h.arcmsr_ccbacb_ptr=acb;
2416 			srb->pccb=pccb;
2417 			if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2418 				if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) {
2419 					/* Single buffer */
2420 					if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) {
2421 						/* Buffer is virtual */
2422 						u_int32_t error;
2423 
2424 						crit_enter();
2425 						error =	bus_dmamap_load(acb->dm_segs_dmat
2426 							, srb->dm_segs_dmamap
2427 							, pccb->csio.data_ptr
2428 							, pccb->csio.dxfer_len
2429 							, arcmsr_execute_srb, srb, /*flags*/0);
2430 						if(error == EINPROGRESS) {
2431 							xpt_freeze_simq(acb->psim, 1);
2432 							pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2433 						}
2434 						crit_exit();
2435 					}
2436 					else {		/* Buffer is physical */
2437 						struct bus_dma_segment seg;
2438 
2439 						seg.ds_addr = (bus_addr_t)pccb->csio.data_ptr;
2440 						seg.ds_len = pccb->csio.dxfer_len;
2441 						arcmsr_execute_srb(srb, &seg, 1, 0);
2442 					}
2443 				} else {
2444 					/* Scatter/gather list */
2445 					struct bus_dma_segment *segs;
2446 
2447 					if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
2448 					|| (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2449 						pccb->ccb_h.status |= CAM_PROVIDE_FAIL;
2450 						xpt_done(pccb);
2451 						kfree(srb, M_DEVBUF);
2452 						return;
2453 					}
2454 					segs=(struct bus_dma_segment *)pccb->csio.data_ptr;
2455 					arcmsr_execute_srb(srb, segs, pccb->csio.sglist_cnt, 0);
2456 				}
2457 			} else {
2458 				arcmsr_execute_srb(srb, NULL, 0, 0);
2459 			}
2460 			break;
2461 		}
2462 	case XPT_TARGET_IO: {
2463 			/* target mode not yet support vendor specific commands. */
2464 			pccb->ccb_h.status |= CAM_REQ_CMP;
2465 			xpt_done(pccb);
2466 			break;
2467 		}
2468 	case XPT_PATH_INQ: {
2469 			struct ccb_pathinq *cpi= &pccb->cpi;
2470 
2471 			cpi->version_num=1;
2472 			cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE;
2473 			cpi->target_sprt=0;
2474 			cpi->hba_misc=0;
2475 			cpi->hba_eng_cnt=0;
2476 			cpi->max_target=ARCMSR_MAX_TARGETID;        /* 0-16 */
2477 			cpi->max_lun=ARCMSR_MAX_TARGETLUN;	    /* 0-7 */
2478 			cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */
2479 			cpi->bus_id=cam_sim_bus(psim);
2480 			strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2481 			strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN);
2482 			strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
2483 			cpi->unit_number=cam_sim_unit(psim);
2484 		#ifdef	CAM_NEW_TRAN_CODE
2485 			if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
2486 				cpi->base_transfer_speed = 600000;
2487 			else
2488 				cpi->base_transfer_speed = 300000;
2489 			if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2490 			   (acb->vendor_device_id == PCIDevVenIDARC1680))
2491 			{
2492 				cpi->transport = XPORT_SAS;
2493 				cpi->transport_version = 0;
2494 				cpi->protocol_version = SCSI_REV_SPC2;
2495 			}
2496 			else
2497 			{
2498 				cpi->transport = XPORT_SPI;
2499 				cpi->transport_version = 2;
2500 				cpi->protocol_version = SCSI_REV_2;
2501 			}
2502 			cpi->protocol = PROTO_SCSI;
2503 		#endif
2504 			cpi->ccb_h.status |= CAM_REQ_CMP;
2505 			xpt_done(pccb);
2506 			break;
2507 		}
2508 	case XPT_ABORT: {
2509 			union ccb *pabort_ccb;
2510 
2511 			pabort_ccb=pccb->cab.abort_ccb;
2512 			switch (pabort_ccb->ccb_h.func_code) {
2513 			case XPT_ACCEPT_TARGET_IO:
2514 			case XPT_IMMED_NOTIFY:
2515 			case XPT_CONT_TARGET_IO:
2516 				if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) {
2517 					pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED;
2518 					xpt_done(pabort_ccb);
2519 					pccb->ccb_h.status |= CAM_REQ_CMP;
2520 				} else {
2521 					xpt_print_path(pabort_ccb->ccb_h.path);
2522 					kprintf("Not found\n");
2523 					pccb->ccb_h.status |= CAM_PATH_INVALID;
2524 				}
2525 				break;
2526 			case XPT_SCSI_IO:
2527 				pccb->ccb_h.status |= CAM_UA_ABORT;
2528 				break;
2529 			default:
2530 				pccb->ccb_h.status |= CAM_REQ_INVALID;
2531 				break;
2532 			}
2533 			xpt_done(pccb);
2534 			break;
2535 		}
2536 	case XPT_RESET_BUS:
2537 	case XPT_RESET_DEV: {
2538 			u_int32_t     i;
2539 
2540 			arcmsr_bus_reset(acb);
2541 			for (i=0; i < 500; i++) {
2542 				DELAY(1000);
2543 			}
2544 			pccb->ccb_h.status |= CAM_REQ_CMP;
2545 			xpt_done(pccb);
2546 			break;
2547 		}
2548 	case XPT_TERM_IO: {
2549 			pccb->ccb_h.status |= CAM_REQ_INVALID;
2550 			xpt_done(pccb);
2551 			break;
2552 		}
2553 	case XPT_GET_TRAN_SETTINGS: {
2554 			struct ccb_trans_settings *cts;
2555 
2556 			if(pccb->ccb_h.target_id == 16) {
2557 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2558 				xpt_done(pccb);
2559 				break;
2560 			}
2561 			cts= &pccb->cts;
2562 		#ifdef	CAM_NEW_TRAN_CODE
2563 			{
2564 				struct ccb_trans_settings_scsi *scsi;
2565 				struct ccb_trans_settings_spi *spi;
2566 				struct ccb_trans_settings_sas *sas;
2567 
2568 				scsi = &cts->proto_specific.scsi;
2569 				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2570 				scsi->valid = CTS_SCSI_VALID_TQ;
2571 				cts->protocol = PROTO_SCSI;
2572 
2573 				if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2574 				   (acb->vendor_device_id == PCIDevVenIDARC1680))
2575 				{
2576 					cts->protocol_version = SCSI_REV_SPC2;
2577 					cts->transport_version = 0;
2578 					cts->transport = XPORT_SAS;
2579 					sas = &cts->xport_specific.sas;
2580 					sas->valid = CTS_SAS_VALID_SPEED;
2581 					if(acb->vendor_device_id == PCIDevVenIDARC1880)
2582 						sas->bitrate = 600000;
2583 					else if(acb->vendor_device_id == PCIDevVenIDARC1680)
2584 						sas->bitrate = 300000;
2585 				}
2586 				else
2587 				{
2588 					cts->protocol_version = SCSI_REV_2;
2589 					cts->transport_version = 2;
2590 					cts->transport = XPORT_SPI;
2591 					spi = &cts->xport_specific.spi;
2592 					spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2593 					spi->sync_period=2;
2594 					spi->sync_offset=32;
2595 					spi->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
2596 					spi->valid = CTS_SPI_VALID_DISC
2597 						| CTS_SPI_VALID_SYNC_RATE
2598 						| CTS_SPI_VALID_SYNC_OFFSET
2599 						| CTS_SPI_VALID_BUS_WIDTH;
2600 				}
2601 			}
2602 		#else
2603 			{
2604 				cts->flags=(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
2605 				cts->sync_period=2;
2606 				cts->sync_offset=32;
2607 				cts->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
2608 				cts->valid=CCB_TRANS_SYNC_RATE_VALID |
2609 				CCB_TRANS_SYNC_OFFSET_VALID |
2610 				CCB_TRANS_BUS_WIDTH_VALID |
2611 				CCB_TRANS_DISC_VALID |
2612 				CCB_TRANS_TQ_VALID;
2613 			}
2614 		#endif
2615 			pccb->ccb_h.status |= CAM_REQ_CMP;
2616 			xpt_done(pccb);
2617 			break;
2618 		}
2619 	case XPT_SET_TRAN_SETTINGS: {
2620 			pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2621 			xpt_done(pccb);
2622 			break;
2623 		}
2624 	case XPT_CALC_GEOMETRY:
2625 			if(pccb->ccb_h.target_id == 16) {
2626 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2627 				xpt_done(pccb);
2628 				break;
2629 			}
2630 			cam_calc_geometry(&pccb->ccg, 1);
2631 			xpt_done(pccb);
2632 			break;
2633 	default:
2634 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2635 		xpt_done(pccb);
2636 		break;
2637 	}
2638 }
2639 /*
2640 **********************************************************************
2641 **********************************************************************
2642 */
2643 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2644 {
2645 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2646 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2647 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
2648 		kprintf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
2649 	}
2650 }
2651 /*
2652 **********************************************************************
2653 **********************************************************************
2654 */
2655 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
2656 {
2657 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2658 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,  ARCMSR_MESSAGE_START_BGRB);
2659 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2660 		kprintf( "arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
2661 	}
2662 }
2663 /*
2664 **********************************************************************
2665 **********************************************************************
2666 */
2667 static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb)
2668 {
2669 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2670 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2671 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2672 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
2673 		kprintf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
2674 	}
2675 }
2676 /*
2677 **********************************************************************
2678 **********************************************************************
2679 */
2680 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2681 {
2682 	switch (acb->adapter_type) {
2683 	case ACB_ADAPTER_TYPE_A:
2684 		arcmsr_start_hba_bgrb(acb);
2685 		break;
2686 	case ACB_ADAPTER_TYPE_B:
2687 		arcmsr_start_hbb_bgrb(acb);
2688 		break;
2689 	case ACB_ADAPTER_TYPE_C:
2690 		arcmsr_start_hbc_bgrb(acb);
2691 		break;
2692 	}
2693 }
2694 /*
2695 **********************************************************************
2696 **
2697 **********************************************************************
2698 */
2699 static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2700 {
2701 	struct CommandControlBlock *srb;
2702 	u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0;
2703 	u_int16_t	error;
2704 
2705 polling_ccb_retry:
2706 	poll_count++;
2707 	outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
2708 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);	/*clear interrupt*/
2709 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2710 	while(1) {
2711 		if((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
2712 			0, outbound_queueport))==0xFFFFFFFF) {
2713 			if(poll_srb_done) {
2714 				break;/*chip FIFO no ccb for completion already*/
2715 			} else {
2716 				UDELAY(25000);
2717 				if ((poll_count > 100) && (poll_srb != NULL)) {
2718 					break;
2719 				}
2720 				goto polling_ccb_retry;
2721 			}
2722 		}
2723 		/* check if command done with no error*/
2724 		srb=(struct CommandControlBlock *)
2725 			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
2726         error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
2727 		poll_srb_done = (srb==poll_srb) ? 1:0;
2728 		if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2729 			if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2730 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2731 					"poll command abort successfully \n"
2732 					, acb->pci_unit
2733 					, srb->pccb->ccb_h.target_id
2734 					, srb->pccb->ccb_h.target_lun, srb);
2735 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2736 				arcmsr_srb_complete(srb, 1);
2737 				continue;
2738 			}
2739 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2740 				"srboutstandingcount=%d \n"
2741 				, acb->pci_unit
2742 				, srb, acb->srboutstandingcount);
2743 			continue;
2744 		}
2745 		arcmsr_report_srb_state(acb, srb, error);
2746 	}	/*drain reply FIFO*/
2747 }
2748 /*
2749 **********************************************************************
2750 **
2751 **********************************************************************
2752 */
2753 static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2754 {
2755 	struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
2756 	struct CommandControlBlock *srb;
2757 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2758 	int index;
2759 	u_int16_t	error;
2760 
2761 polling_ccb_retry:
2762 	poll_count++;
2763 	CHIP_REG_WRITE32(HBB_DOORBELL,
2764 	0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
2765 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2766 	while(1) {
2767 		index=phbbmu->doneq_index;
2768 		if((flag_srb=phbbmu->done_qbuffer[index]) == 0) {
2769 			if(poll_srb_done) {
2770 				break;/*chip FIFO no ccb for completion already*/
2771 			} else {
2772 				UDELAY(25000);
2773 			    if ((poll_count > 100) && (poll_srb != NULL)) {
2774 					break;
2775 				}
2776 				goto polling_ccb_retry;
2777 			}
2778 		}
2779 		phbbmu->done_qbuffer[index]=0;
2780 		index++;
2781 		index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
2782 		phbbmu->doneq_index=index;
2783 		/* check if command done with no error*/
2784 		srb=(struct CommandControlBlock *)
2785 			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
2786         error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
2787 		poll_srb_done = (srb==poll_srb) ? 1:0;
2788 		if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2789 			if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2790 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2791 					"poll command abort successfully \n"
2792 					, acb->pci_unit
2793 					, srb->pccb->ccb_h.target_id
2794 					, srb->pccb->ccb_h.target_lun, srb);
2795 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2796 				arcmsr_srb_complete(srb, 1);
2797 				continue;
2798 			}
2799 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2800 				"srboutstandingcount=%d \n"
2801 				, acb->pci_unit
2802 				, srb, acb->srboutstandingcount);
2803 			continue;
2804 		}
2805 		arcmsr_report_srb_state(acb, srb, error);
2806 	}	/*drain reply FIFO*/
2807 }
2808 /*
2809 **********************************************************************
2810 **
2811 **********************************************************************
2812 */
2813 static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2814 {
2815 	struct CommandControlBlock *srb;
2816 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2817 	u_int16_t	error;
2818 
2819 polling_ccb_retry:
2820 	poll_count++;
2821 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2822 	while(1) {
2823 		if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
2824 			if(poll_srb_done) {
2825 				break;/*chip FIFO no ccb for completion already*/
2826 			} else {
2827 				UDELAY(25000);
2828 			    if ((poll_count > 100) && (poll_srb != NULL)) {
2829 					break;
2830 				}
2831 			    if (acb->srboutstandingcount == 0) {
2832 				    break;
2833 			    }
2834 				goto polling_ccb_retry;
2835 			}
2836 		}
2837 		flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
2838 		/* check if command done with no error*/
2839 		srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
2840         error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
2841 		if (poll_srb != NULL)
2842 			poll_srb_done = (srb==poll_srb) ? 1:0;
2843 		if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2844 			if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2845 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'poll command abort successfully \n"
2846 						, acb->pci_unit, srb->pccb->ccb_h.target_id, srb->pccb->ccb_h.target_lun, srb);
2847 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2848 				arcmsr_srb_complete(srb, 1);
2849 				continue;
2850 			}
2851 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n"
2852 					, acb->pci_unit, srb, acb->srboutstandingcount);
2853 			continue;
2854 		}
2855 		arcmsr_report_srb_state(acb, srb, error);
2856 	}	/*drain reply FIFO*/
2857 }
2858 /*
2859 **********************************************************************
2860 **********************************************************************
2861 */
2862 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2863 {
2864 	switch (acb->adapter_type) {
2865 	case ACB_ADAPTER_TYPE_A: {
2866 			arcmsr_polling_hba_srbdone(acb, poll_srb);
2867 		}
2868 		break;
2869 	case ACB_ADAPTER_TYPE_B: {
2870 			arcmsr_polling_hbb_srbdone(acb, poll_srb);
2871 		}
2872 		break;
2873 	case ACB_ADAPTER_TYPE_C: {
2874 			arcmsr_polling_hbc_srbdone(acb, poll_srb);
2875 		}
2876 		break;
2877 	}
2878 }
2879 /*
2880 **********************************************************************
2881 **********************************************************************
2882 */
2883 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
2884 {
2885 	char *acb_firm_model=acb->firm_model;
2886 	char *acb_firm_version=acb->firm_version;
2887 	char *acb_device_map = acb->device_map;
2888 	size_t iop_firm_model=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
2889 	size_t iop_firm_version=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
2890 	size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2891 	int i;
2892 
2893 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2894 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
2895 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2896 	}
2897 	i=0;
2898 	while(i<8) {
2899 		*acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
2900 		/* 8 bytes firm_model, 15, 60-67*/
2901 		acb_firm_model++;
2902 		i++;
2903 	}
2904 	i=0;
2905 	while(i<16) {
2906 		*acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
2907 		/* 16 bytes firm_version, 17, 68-83*/
2908 		acb_firm_version++;
2909 		i++;
2910 	}
2911 	i=0;
2912 	while(i<16) {
2913 		*acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
2914 		acb_device_map++;
2915 		i++;
2916 	}
2917 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2918 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2919 	acb->firm_request_len=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
2920 	acb->firm_numbers_queue=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2921 	acb->firm_sdram_size=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
2922 	acb->firm_ide_channels=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
2923 	acb->firm_cfg_version=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
2924 }
2925 /*
2926 **********************************************************************
2927 **********************************************************************
2928 */
2929 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
2930 {
2931 	char *acb_firm_model=acb->firm_model;
2932 	char *acb_firm_version=acb->firm_version;
2933 	char *acb_device_map = acb->device_map;
2934 	size_t iop_firm_model=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
2935 	size_t iop_firm_version=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
2936 	size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2937 	int i;
2938 
2939 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
2940 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2941 		kprintf( "arcmsr%d: wait" "'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2942 	}
2943 	i=0;
2944 	while(i<8) {
2945 		*acb_firm_model=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i);
2946 		/* 8 bytes firm_model, 15, 60-67*/
2947 		acb_firm_model++;
2948 		i++;
2949 	}
2950 	i=0;
2951 	while(i<16) {
2952 		*acb_firm_version=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i);
2953 		/* 16 bytes firm_version, 17, 68-83*/
2954 		acb_firm_version++;
2955 		i++;
2956 	}
2957 	i=0;
2958 	while(i<16) {
2959 		*acb_device_map=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i);
2960 		acb_device_map++;
2961 		i++;
2962 	}
2963 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2964 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2965 	acb->firm_request_len=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
2966 	acb->firm_numbers_queue=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2967 	acb->firm_sdram_size=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
2968 	acb->firm_ide_channels=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
2969 	acb->firm_cfg_version=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
2970 }
2971 /*
2972 **********************************************************************
2973 **********************************************************************
2974 */
2975 static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb)
2976 {
2977 	char *acb_firm_model=acb->firm_model;
2978 	char *acb_firm_version=acb->firm_version;
2979 	char *acb_device_map = acb->device_map;
2980 	size_t iop_firm_model=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);   /*firm_model,15,60-67*/
2981 	size_t iop_firm_version=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
2982 	size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2983 	int i;
2984 
2985 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2986 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2987 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
2988 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2989 	}
2990 	i=0;
2991 	while(i<8) {
2992 		*acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
2993 		/* 8 bytes firm_model, 15, 60-67*/
2994 		acb_firm_model++;
2995 		i++;
2996 	}
2997 	i=0;
2998 	while(i<16) {
2999 		*acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
3000 		/* 16 bytes firm_version, 17, 68-83*/
3001 		acb_firm_version++;
3002 		i++;
3003 	}
3004 	i=0;
3005 	while(i<16) {
3006 		*acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
3007 		acb_device_map++;
3008 		i++;
3009 	}
3010 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
3011 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
3012 	acb->firm_request_len	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]);	/*firm_request_len,   1, 04-07*/
3013 	acb->firm_numbers_queue	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]);	/*firm_numbers_queue, 2, 08-11*/
3014 	acb->firm_sdram_size	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]);	/*firm_sdram_size,    3, 12-15*/
3015 	acb->firm_ide_channels	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]);	/*firm_ide_channels,  4, 16-19*/
3016 	acb->firm_cfg_version	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
3017 }
3018 /*
3019 **********************************************************************
3020 **********************************************************************
3021 */
3022 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
3023 {
3024 	switch (acb->adapter_type) {
3025 	case ACB_ADAPTER_TYPE_A: {
3026 			arcmsr_get_hba_config(acb);
3027 		}
3028 		break;
3029 	case ACB_ADAPTER_TYPE_B: {
3030 			arcmsr_get_hbb_config(acb);
3031 		}
3032 		break;
3033 	case ACB_ADAPTER_TYPE_C: {
3034 			arcmsr_get_hbc_config(acb);
3035 		}
3036 		break;
3037 	}
3038 }
3039 /*
3040 **********************************************************************
3041 **********************************************************************
3042 */
3043 static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb)
3044 {
3045 	int	timeout=0;
3046 
3047 	switch (acb->adapter_type) {
3048 	case ACB_ADAPTER_TYPE_A: {
3049 			while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0)
3050 			{
3051 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3052 				{
3053 					kprintf( "arcmsr%d:timed out waiting for firmware \n", acb->pci_unit);
3054 					return;
3055 				}
3056 				UDELAY(15000); /* wait 15 milli-seconds */
3057 			}
3058 		}
3059 		break;
3060 	case ACB_ADAPTER_TYPE_B: {
3061 			while ((CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0)
3062 			{
3063 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3064 				{
3065 					kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3066 					return;
3067 				}
3068 				UDELAY(15000); /* wait 15 milli-seconds */
3069 			}
3070 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3071 		}
3072 		break;
3073 	case ACB_ADAPTER_TYPE_C: {
3074 			while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0)
3075 			{
3076 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3077 				{
3078 					kprintf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit);
3079 					return;
3080 				}
3081 				UDELAY(15000); /* wait 15 milli-seconds */
3082 			}
3083 		}
3084 		break;
3085 	}
3086 }
3087 /*
3088 **********************************************************************
3089 **********************************************************************
3090 */
3091 static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb)
3092 {
3093 	u_int32_t outbound_doorbell;
3094 
3095 	switch (acb->adapter_type) {
3096 	case ACB_ADAPTER_TYPE_A: {
3097 			/* empty doorbell Qbuffer if door bell ringed */
3098 			outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
3099 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell);	/*clear doorbell interrupt */
3100 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
3101 
3102 		}
3103 		break;
3104 	case ACB_ADAPTER_TYPE_B: {
3105 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/
3106 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
3107 			/* let IOP know data has been read */
3108 		}
3109 		break;
3110 	case ACB_ADAPTER_TYPE_C: {
3111 			/* empty doorbell Qbuffer if door bell ringed */
3112 			outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
3113 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell);	/*clear doorbell interrupt */
3114 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
3115 
3116 		}
3117 		break;
3118 	}
3119 }
3120 /*
3121 ************************************************************************
3122 ************************************************************************
3123 */
3124 static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3125 {
3126 	unsigned long srb_phyaddr;
3127 	u_int32_t srb_phyaddr_hi32;
3128 
3129 	/*
3130 	********************************************************************
3131 	** here we need to tell iop 331 our freesrb.HighPart
3132 	** if freesrb.HighPart is not zero
3133 	********************************************************************
3134 	*/
3135 	srb_phyaddr= (unsigned long) acb->srb_phyaddr.phyaddr;
3136 //	srb_phyaddr_hi32=(u_int32_t) ((srb_phyaddr>>16)>>16);
3137 	srb_phyaddr_hi32=acb->srb_phyaddr.B.phyadd_high;
3138 	switch (acb->adapter_type) {
3139 	case ACB_ADAPTER_TYPE_A: {
3140 			if(srb_phyaddr_hi32!=0) {
3141 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3142 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3143 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3144 				if(!arcmsr_hba_wait_msgint_ready(acb)) {
3145 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3146 					return FALSE;
3147 				}
3148 			}
3149 		}
3150 		break;
3151 		/*
3152 		***********************************************************************
3153 		**    if adapter type B, set window of "post command Q"
3154 		***********************************************************************
3155 		*/
3156 	case ACB_ADAPTER_TYPE_B: {
3157 			u_int32_t post_queue_phyaddr;
3158 			struct HBB_MessageUnit *phbbmu;
3159 
3160 			phbbmu=(struct HBB_MessageUnit *)acb->pmu;
3161 			phbbmu->postq_index=0;
3162 			phbbmu->doneq_index=0;
3163 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW);
3164 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3165 				kprintf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit);
3166 				return FALSE;
3167 			}
3168 			post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE
3169 			+ offsetof(struct HBB_MessageUnit, post_qbuffer);
3170 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */
3171 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normal should be zero */
3172 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ size (256+8)*4 */
3173 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ size (256+8)*4 */
3174 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */
3175 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG);
3176 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3177 				kprintf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit);
3178 				return FALSE;
3179 			}
3180 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE);
3181 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3182 				kprintf( "arcmsr%d: 'start diver mode' timeout \n", acb->pci_unit);
3183 				return FALSE;
3184 			}
3185 		}
3186 		break;
3187 	case ACB_ADAPTER_TYPE_C: {
3188 			if(srb_phyaddr_hi32!=0) {
3189 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3190 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3191 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3192 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3193 				if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3194 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3195 					return FALSE;
3196 				}
3197 			}
3198 		}
3199 		break;
3200 	}
3201 	return (TRUE);
3202 }
3203 /*
3204 ************************************************************************
3205 ************************************************************************
3206 */
3207 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
3208 {
3209 	switch (acb->adapter_type)
3210 	{
3211 	case ACB_ADAPTER_TYPE_A:
3212 	case ACB_ADAPTER_TYPE_C:
3213 		break;
3214 	case ACB_ADAPTER_TYPE_B: {
3215 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
3216 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3217 				kprintf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit);
3218 
3219 				return;
3220 			}
3221 		}
3222 		break;
3223 	}
3224 }
3225 /*
3226 **********************************************************************
3227 **********************************************************************
3228 */
3229 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
3230 {
3231 	u_int32_t intmask_org;
3232 
3233 	/* disable all outbound interrupt */
3234 	intmask_org=arcmsr_disable_allintr(acb);
3235 	arcmsr_wait_firmware_ready(acb);
3236 	arcmsr_iop_confirm(acb);
3237 	arcmsr_get_firmware_spec(acb);
3238 	/*start background rebuild*/
3239 	arcmsr_start_adapter_bgrb(acb);
3240 	/* empty doorbell Qbuffer if door bell ringed */
3241 	arcmsr_clear_doorbell_queue_buffer(acb);
3242 	arcmsr_enable_eoi_mode(acb);
3243 	/* enable outbound Post Queue, outbound doorbell Interrupt */
3244 	arcmsr_enable_allintr(acb, intmask_org);
3245 	acb->acb_flags |=ACB_F_IOP_INITED;
3246 }
3247 /*
3248 **********************************************************************
3249 **********************************************************************
3250 */
3251 static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3252 {
3253 	struct AdapterControlBlock *acb=arg;
3254 	struct CommandControlBlock *srb_tmp;
3255 	u_int8_t * dma_memptr;
3256 	u_int32_t i;
3257 	unsigned long srb_phyaddr=(unsigned long)segs->ds_addr;
3258 
3259 	dma_memptr=acb->uncacheptr;
3260 	acb->srb_phyaddr.phyaddr=srb_phyaddr;
3261 	srb_tmp=(struct CommandControlBlock *)dma_memptr;
3262 	for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3263 		if(bus_dmamap_create(acb->dm_segs_dmat,
3264 			 /*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) {
3265 			acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
3266 			kprintf("arcmsr%d:"
3267 			" srb dmamap bus_dmamap_create error\n", acb->pci_unit);
3268 			return;
3269 		}
3270 		srb_tmp->cdb_shifted_phyaddr=(acb->adapter_type==ACB_ADAPTER_TYPE_C)?srb_phyaddr:(srb_phyaddr >> 5);
3271 		srb_tmp->acb=acb;
3272 		acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp;
3273 		srb_phyaddr=srb_phyaddr+SRB_SIZE;
3274 		srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp+SRB_SIZE);
3275 	}
3276 	acb->vir2phy_offset=(unsigned long)srb_tmp-srb_phyaddr;
3277 }
3278 /*
3279 ************************************************************************
3280 **
3281 **
3282 ************************************************************************
3283 */
3284 static void arcmsr_free_resource(struct AdapterControlBlock *acb)
3285 {
3286 	/* remove the control device */
3287 	if(acb->ioctl_dev != NULL) {
3288 		destroy_dev(acb->ioctl_dev);
3289 	}
3290 	bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap);
3291 	bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap);
3292 	bus_dma_tag_destroy(acb->srb_dmat);
3293 	bus_dma_tag_destroy(acb->dm_segs_dmat);
3294 	bus_dma_tag_destroy(acb->parent_dmat);
3295 }
3296 /*
3297 ************************************************************************
3298 ************************************************************************
3299 */
3300 static u_int32_t arcmsr_initialize(device_t dev)
3301 {
3302 	struct AdapterControlBlock *acb=device_get_softc(dev);
3303 	u_int16_t pci_command;
3304 	int i, j,max_coherent_size;
3305 	u_int32_t vendor_dev_id;
3306 
3307 	vendor_dev_id = pci_get_devid(dev);
3308 	acb->vendor_device_id = vendor_dev_id;
3309 	switch (vendor_dev_id) {
3310 	case PCIDevVenIDARC1880:
3311 	case PCIDevVenIDARC1882:
3312 	case PCIDevVenIDARC1213:
3313 	case PCIDevVenIDARC1223: {
3314 			acb->adapter_type=ACB_ADAPTER_TYPE_C;
3315 			acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
3316 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3317 		}
3318 		break;
3319 	case PCIDevVenIDARC1200:
3320 	case PCIDevVenIDARC1201: {
3321 			acb->adapter_type=ACB_ADAPTER_TYPE_B;
3322 			acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
3323 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE+(sizeof(struct HBB_MessageUnit));
3324 		}
3325 		break;
3326 	case PCIDevVenIDARC1110:
3327 	case PCIDevVenIDARC1120:
3328 	case PCIDevVenIDARC1130:
3329 	case PCIDevVenIDARC1160:
3330 	case PCIDevVenIDARC1170:
3331 	case PCIDevVenIDARC1210:
3332 	case PCIDevVenIDARC1220:
3333 	case PCIDevVenIDARC1230:
3334 	case PCIDevVenIDARC1231:
3335 	case PCIDevVenIDARC1260:
3336 	case PCIDevVenIDARC1261:
3337 	case PCIDevVenIDARC1270:
3338 	case PCIDevVenIDARC1280:
3339 	case PCIDevVenIDARC1212:
3340 	case PCIDevVenIDARC1222:
3341 	case PCIDevVenIDARC1380:
3342 	case PCIDevVenIDARC1381:
3343 	case PCIDevVenIDARC1680:
3344 	case PCIDevVenIDARC1681: {
3345 			acb->adapter_type=ACB_ADAPTER_TYPE_A;
3346 			acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
3347 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3348 		}
3349 		break;
3350 	default: {
3351 			kprintf("arcmsr%d:"
3352 			" unknown RAID adapter type \n", device_get_unit(dev));
3353 			return ENOMEM;
3354 		}
3355 	}
3356 	if(bus_dma_tag_create(  /*parent*/	NULL,
3357 				/*alignemnt*/	1,
3358 				/*boundary*/	0,
3359 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3360 				/*highaddr*/	BUS_SPACE_MAXADDR,
3361 				/*filter*/	NULL,
3362 				/*filterarg*/	NULL,
3363 				/*maxsize*/	BUS_SPACE_MAXSIZE_32BIT,
3364 				/*nsegments*/	BUS_SPACE_UNRESTRICTED,
3365 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3366 				/*flags*/	0,
3367 						&acb->parent_dmat) != 0)
3368 	{
3369 		kprintf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3370 		return ENOMEM;
3371 	}
3372 	/* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
3373 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3374 				/*alignment*/	1,
3375 				/*boundary*/	0,
3376 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3377 				/*highaddr*/	BUS_SPACE_MAXADDR,
3378 				/*filter*/	NULL,
3379 				/*filterarg*/	NULL,
3380 				/*maxsize*/	ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM,
3381 				/*nsegments*/	ARCMSR_MAX_SG_ENTRIES,
3382 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3383 				/*flags*/	0,
3384 						&acb->dm_segs_dmat) != 0)
3385 	{
3386 		bus_dma_tag_destroy(acb->parent_dmat);
3387 		kprintf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3388 		return ENOMEM;
3389 	}
3390 	/* DMA tag for our srb structures.... Allocate the freesrb memory */
3391 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3392 				/*alignment*/	0x20,
3393 				/*boundary*/	0,
3394 				/*lowaddr*/	BUS_SPACE_MAXADDR_32BIT,
3395 				/*highaddr*/	BUS_SPACE_MAXADDR,
3396 				/*filter*/	NULL,
3397 				/*filterarg*/	NULL,
3398 				/*maxsize*/	max_coherent_size,
3399 				/*nsegments*/	1,
3400 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3401 				/*flags*/	0,
3402 						&acb->srb_dmat) != 0)
3403 	{
3404 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3405 		bus_dma_tag_destroy(acb->parent_dmat);
3406 		kprintf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3407 		return ENXIO;
3408 	}
3409 	/* Allocation for our srbs */
3410 	if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) {
3411 		bus_dma_tag_destroy(acb->srb_dmat);
3412 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3413 		bus_dma_tag_destroy(acb->parent_dmat);
3414 		kprintf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev));
3415 		return ENXIO;
3416 	}
3417 	/* And permanently map them */
3418 	if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) {
3419 		bus_dma_tag_destroy(acb->srb_dmat);
3420 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3421 		bus_dma_tag_destroy(acb->parent_dmat);
3422 		kprintf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev));
3423 		return ENXIO;
3424 	}
3425 	pci_command=pci_read_config(dev, PCIR_COMMAND, 2);
3426 	pci_command |= PCIM_CMD_BUSMASTEREN;
3427 	pci_command |= PCIM_CMD_PERRESPEN;
3428 	pci_command |= PCIM_CMD_MWRICEN;
3429 	/* Enable Busmaster/Mem */
3430 	pci_command |= PCIM_CMD_MEMEN;
3431 	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
3432 	switch(acb->adapter_type) {
3433 	case ACB_ADAPTER_TYPE_A: {
3434 			u_int32_t rid0=PCIR_BAR(0);
3435 			vm_offset_t	mem_base0;
3436 
3437 			acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, 0x1000, RF_ACTIVE);
3438 			if(acb->sys_res_arcmsr[0] == NULL) {
3439 				arcmsr_free_resource(acb);
3440 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3441 				return ENOMEM;
3442 			}
3443 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3444 				arcmsr_free_resource(acb);
3445 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3446 				return ENXIO;
3447 			}
3448 			mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3449 			if(mem_base0==0) {
3450 				arcmsr_free_resource(acb);
3451 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3452 				return ENXIO;
3453 			}
3454 			acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3455 			acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3456 			acb->pmu=(struct MessageUnit_UNION *)mem_base0;
3457 		}
3458 		break;
3459 	case ACB_ADAPTER_TYPE_B: {
3460 			struct HBB_MessageUnit *phbbmu;
3461 			struct CommandControlBlock *freesrb;
3462 			u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) };
3463 			vm_offset_t	mem_base[]={0,0};
3464 			for(i=0; i<2; i++) {
3465 				if(i==0) {
3466 					acb->sys_res_arcmsr[i]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i],
3467 											0ul, ~0ul, sizeof(struct HBB_DOORBELL), RF_ACTIVE);
3468 				} else {
3469 					acb->sys_res_arcmsr[i]=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i],
3470 											0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE);
3471 				}
3472 				if(acb->sys_res_arcmsr[i] == NULL) {
3473 					arcmsr_free_resource(acb);
3474 					kprintf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i);
3475 					return ENOMEM;
3476 				}
3477 				if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) {
3478 					arcmsr_free_resource(acb);
3479 					kprintf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i);
3480 					return ENXIO;
3481 				}
3482 				mem_base[i]=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]);
3483 				if(mem_base[i]==0) {
3484 					arcmsr_free_resource(acb);
3485 					kprintf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i);
3486 					return ENXIO;
3487 				}
3488 				acb->btag[i]=rman_get_bustag(acb->sys_res_arcmsr[i]);
3489 				acb->bhandle[i]=rman_get_bushandle(acb->sys_res_arcmsr[i]);
3490 			}
3491 			freesrb=(struct CommandControlBlock *)acb->uncacheptr;
3492 //			acb->pmu=(struct MessageUnit_UNION *)&freesrb[ARCMSR_MAX_FREESRB_NUM];
3493 			acb->pmu=(struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE);
3494 			phbbmu=(struct HBB_MessageUnit *)acb->pmu;
3495 			phbbmu->hbb_doorbell=(struct HBB_DOORBELL *)mem_base[0];
3496 			phbbmu->hbb_rwbuffer=(struct HBB_RWBUFFER *)mem_base[1];
3497 		}
3498 		break;
3499 	case ACB_ADAPTER_TYPE_C: {
3500 			u_int32_t rid0=PCIR_BAR(1);
3501 			vm_offset_t	mem_base0;
3502 
3503 			acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE);
3504 			if(acb->sys_res_arcmsr[0] == NULL) {
3505 				arcmsr_free_resource(acb);
3506 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3507 				return ENOMEM;
3508 			}
3509 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3510 				arcmsr_free_resource(acb);
3511 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3512 				return ENXIO;
3513 			}
3514 			mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3515 			if(mem_base0==0) {
3516 				arcmsr_free_resource(acb);
3517 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3518 				return ENXIO;
3519 			}
3520 			acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3521 			acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3522 			acb->pmu=(struct MessageUnit_UNION *)mem_base0;
3523 		}
3524 		break;
3525 	}
3526 	if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) {
3527 		arcmsr_free_resource(acb);
3528 		kprintf("arcmsr%d: map free srb failure!\n", device_get_unit(dev));
3529 		return ENXIO;
3530 	}
3531 	acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
3532 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
3533 	/*
3534 	********************************************************************
3535 	** init raid volume state
3536 	********************************************************************
3537 	*/
3538 	for(i=0;i<ARCMSR_MAX_TARGETID;i++) {
3539 		for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) {
3540 			acb->devstate[i][j]=ARECA_RAID_GONE;
3541 		}
3542 	}
3543 	arcmsr_iop_init(acb);
3544 	return(0);
3545 }
3546 /*
3547 ************************************************************************
3548 ************************************************************************
3549 */
3550 static int arcmsr_attach(device_t dev)
3551 {
3552 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3553 	u_int32_t unit=device_get_unit(dev);
3554 	struct ccb_setasync csa;
3555 	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
3556 	struct resource	*irqres;
3557 	int	rid;
3558 	u_int irq_flags;
3559 
3560 	if(acb == NULL) {
3561 		kprintf("arcmsr%d: cannot allocate softc\n", unit);
3562 		return (ENOMEM);
3563 	}
3564 	ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock");
3565 	if(arcmsr_initialize(dev)) {
3566 		kprintf("arcmsr%d: initialize failure!\n", unit);
3567 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3568 		return ENXIO;
3569 	}
3570 	/* After setting up the adapter, map our interrupt */
3571 	rid=0;
3572 	acb->irq_type = pci_alloc_1intr(dev, arcmsr_msi_enable, &rid,
3573 	    &irq_flags);
3574 	irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1,
3575 	    irq_flags);
3576 	if(irqres == NULL ||
3577 		bus_setup_intr(dev, irqres, INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih, NULL)) {
3578 		arcmsr_free_resource(acb);
3579 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3580 		kprintf("arcmsr%d: unable to register interrupt handler!\n", unit);
3581 		return ENXIO;
3582 	}
3583 	acb->irqres=irqres;
3584 	acb->pci_dev=dev;
3585 	acb->pci_unit=unit;
3586 	/*
3587 	 * Now let the CAM generic SCSI layer find the SCSI devices on
3588 	 * the bus *  start queue to reset to the idle loop. *
3589 	 * Create device queue of SIM(s) *  (MAX_START_JOB - 1) :
3590 	 * max_sim_transactions
3591 	*/
3592 	devq=cam_simq_alloc(ARCMSR_MAX_START_JOB);
3593 	if(devq == NULL) {
3594 	    arcmsr_free_resource(acb);
3595 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3596 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3597 			pci_release_msi(dev);
3598 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3599 		kprintf("arcmsr%d: cam_simq_alloc failure!\n", unit);
3600 		return ENXIO;
3601 	}
3602 	acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->qbuffer_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
3603 	cam_simq_release(devq);
3604 	if(acb->psim == NULL) {
3605 		arcmsr_free_resource(acb);
3606 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3607 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3608 			pci_release_msi(dev);
3609 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3610 		kprintf("arcmsr%d: cam_sim_alloc failure!\n", unit);
3611 		return ENXIO;
3612 	}
3613 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3614 	if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
3615 		arcmsr_free_resource(acb);
3616 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3617 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3618 			pci_release_msi(dev);
3619 		cam_sim_free(acb->psim);
3620 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3621 		kprintf("arcmsr%d: xpt_bus_register failure!\n", unit);
3622 		return ENXIO;
3623 	}
3624 	if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
3625 		arcmsr_free_resource(acb);
3626 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3627 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3628 			pci_release_msi(dev);
3629 		xpt_bus_deregister(cam_sim_path(acb->psim));
3630 		cam_sim_free(acb->psim);
3631 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3632 		kprintf("arcmsr%d: xpt_create_path failure!\n", unit);
3633 		return ENXIO;
3634 	}
3635 	/*
3636 	****************************************************
3637 	*/
3638 	xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5);
3639 	csa.ccb_h.func_code=XPT_SASYNC_CB;
3640 	csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE;
3641 	csa.callback=arcmsr_async;
3642 	csa.callback_arg=acb->psim;
3643 	xpt_action((union ccb *)&csa);
3644 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3645 	/* Create the control device.  */
3646 	acb->ioctl_dev=make_dev(&arcmsr_ops, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit);
3647 
3648 	acb->ioctl_dev->si_drv1=acb;
3649 	(void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
3650 	arcmsr_callout_init(&acb->devmap_callout);
3651 	callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb);
3652 	return (0);
3653 }
3654 
3655 /*
3656 ************************************************************************
3657 ************************************************************************
3658 */
3659 static int arcmsr_probe(device_t dev)
3660 {
3661 	u_int32_t id;
3662 	static char buf[256];
3663 	char x_type[]={"X-TYPE"};
3664 	char *type;
3665 	int raid6 = 1;
3666 
3667 	if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
3668 		return (ENXIO);
3669 	}
3670 	switch(id=pci_get_devid(dev)) {
3671 	case PCIDevVenIDARC1110:
3672 	case PCIDevVenIDARC1200:
3673 	case PCIDevVenIDARC1201:
3674 	case PCIDevVenIDARC1210:
3675 		raid6 = 0;
3676 		/*FALLTHRU*/
3677 	case PCIDevVenIDARC1120:
3678 	case PCIDevVenIDARC1130:
3679 	case PCIDevVenIDARC1160:
3680 	case PCIDevVenIDARC1170:
3681 	case PCIDevVenIDARC1220:
3682 	case PCIDevVenIDARC1230:
3683 	case PCIDevVenIDARC1231:
3684 	case PCIDevVenIDARC1260:
3685 	case PCIDevVenIDARC1261:
3686 	case PCIDevVenIDARC1270:
3687 	case PCIDevVenIDARC1280:
3688 		type = "SATA";
3689 		break;
3690 	case PCIDevVenIDARC1212:
3691 	case PCIDevVenIDARC1222:
3692 	case PCIDevVenIDARC1380:
3693 	case PCIDevVenIDARC1381:
3694 	case PCIDevVenIDARC1680:
3695 	case PCIDevVenIDARC1681:
3696 		type = "SAS 3G";
3697 		break;
3698 	case PCIDevVenIDARC1880:
3699 	case PCIDevVenIDARC1882:
3700 	case PCIDevVenIDARC1213:
3701 	case PCIDevVenIDARC1223:
3702 		type = "SAS 6G";
3703 		arcmsr_msi_enable = 0;
3704 		break;
3705 	default:
3706 		type = x_type;
3707 		break;
3708 	}
3709 	if(type == x_type)
3710 		return(ENXIO);
3711 	ksprintf(buf, "Areca %s Host Adapter RAID Controller%s", type, raid6 ? " (RAID6 capable)" : "");
3712 	device_set_desc_copy(dev, buf);
3713 	return (BUS_PROBE_DEFAULT);
3714 }
3715 /*
3716 ************************************************************************
3717 ************************************************************************
3718 */
3719 static int arcmsr_shutdown(device_t dev)
3720 {
3721 	u_int32_t  i;
3722 	struct CommandControlBlock *srb;
3723 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3724 
3725 	/* stop adapter background rebuild */
3726 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3727 	/* disable all outbound interrupt */
3728 	arcmsr_disable_allintr(acb);
3729 	arcmsr_stop_adapter_bgrb(acb);
3730 	arcmsr_flush_adapter_cache(acb);
3731 	/* abort all outstanding command */
3732 	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
3733 	acb->acb_flags &= ~ACB_F_IOP_INITED;
3734 	if(acb->srboutstandingcount!=0) {
3735 		/*clear and abort all outbound posted Q*/
3736 		arcmsr_done4abort_postqueue(acb);
3737 		/* talk to iop 331 outstanding command aborted*/
3738 		arcmsr_abort_allcmd(acb);
3739 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3740 			srb=acb->psrb_pool[i];
3741 			if(srb->srb_state==ARCMSR_SRB_START) {
3742 				srb->srb_state=ARCMSR_SRB_ABORTED;
3743 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3744 				arcmsr_srb_complete(srb, 1);
3745 			}
3746 		}
3747 	}
3748 	acb->srboutstandingcount=0;
3749 	acb->workingsrb_doneindex=0;
3750 	acb->workingsrb_startindex=0;
3751 #ifdef ARCMSR_DEBUG1
3752 	acb->pktRequestCount = 0;
3753 	acb->pktReturnCount = 0;
3754 #endif
3755 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3756 	return (0);
3757 }
3758 /*
3759 ************************************************************************
3760 ************************************************************************
3761 */
3762 static int arcmsr_detach(device_t dev)
3763 {
3764 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3765 	int i;
3766 
3767 	callout_stop(&acb->devmap_callout);
3768 	bus_teardown_intr(dev, acb->irqres, acb->ih);
3769 	arcmsr_shutdown(dev);
3770 	arcmsr_free_resource(acb);
3771 	for(i=0; (acb->sys_res_arcmsr[i]!=NULL) && (i<2); i++) {
3772 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]);
3773 	}
3774 	bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3775 	if (acb->irq_type == PCI_INTR_TYPE_MSI)
3776 		pci_release_msi(dev);
3777 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3778 	xpt_async(AC_LOST_DEVICE, acb->ppath, NULL);
3779 	xpt_free_path(acb->ppath);
3780 	xpt_bus_deregister(cam_sim_path(acb->psim));
3781 	cam_sim_free(acb->psim);
3782 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3783 	ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3784 	return (0);
3785 }
3786 
3787 #ifdef ARCMSR_DEBUG1
3788 static void arcmsr_dump_data(struct AdapterControlBlock *acb)
3789 {
3790 	if((acb->pktRequestCount - acb->pktReturnCount) == 0)
3791 		return;
3792 	printf("Command Request Count   =0x%x\n",acb->pktRequestCount);
3793 	printf("Command Return Count    =0x%x\n",acb->pktReturnCount);
3794 	printf("Command (Req-Rtn) Count =0x%x\n",(acb->pktRequestCount - acb->pktReturnCount));
3795 	printf("Queued Command Count    =0x%x\n",acb->srboutstandingcount);
3796 }
3797 #endif
3798