xref: /dragonfly/sys/dev/raid/arcmsr/arcmsr.c (revision 38c2ea22)
1 /*
2 *****************************************************************************************
3 **        O.S   : FreeBSD
4 **   FILE NAME  : arcmsr.c
5 **        BY    : Erich Chen, Ching Huang
6 **   Description: SCSI RAID Device Driver for
7 **                ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x) SATA/SAS RAID HOST Adapter
8 **                ARCMSR RAID Host adapter
9 **                [RAID controller:INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set]
10 ******************************************************************************************
11 ************************************************************************
12 **
13 ** Copyright (c) 2004-2010 ARECA Co. Ltd.
14 **        Erich Chen, Taipei Taiwan All rights reserved.
15 **
16 ** Redistribution and use in source and binary forms, with or without
17 ** modification, are permitted provided that the following conditions
18 ** are met:
19 ** 1. Redistributions of source code must retain the above copyright
20 **    notice, this list of conditions and the following disclaimer.
21 ** 2. Redistributions in binary form must reproduce the above copyright
22 **    notice, this list of conditions and the following disclaimer in the
23 **    documentation and/or other materials provided with the distribution.
24 ** 3. The name of the author may not be used to endorse or promote products
25 **    derived from this software without specific prior written permission.
26 **
27 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT
32 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
34 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
36 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 **************************************************************************
38 ** History
39 **
40 **        REV#         DATE	            NAME	         DESCRIPTION
41 **     1.00.00.00	03/31/2004		Erich Chen			 First release
42 **     1.20.00.02	11/29/2004		Erich Chen			 bug fix with arcmsr_bus_reset when PHY error
43 **     1.20.00.03	04/19/2005		Erich Chen			 add SATA 24 Ports adapter type support
44 **                                                       clean unused function
45 **     1.20.00.12	09/12/2005		Erich Chen        	 bug fix with abort command handling,
46 **                                                       firmware version check
47 **                                                       and firmware update notify for hardware bug fix
48 **                                                       handling of a non-zero high part physical address
49 **                                                       of the srb resource
50 **     1.20.00.13	08/18/2006		Erich Chen			 remove pending srb and report busy
51 **                                                       add iop message xfer
52 **                                                       with scsi pass-through command
53 **                                                       add new device id of sas raid adapters
54 **                                                       code fit for SPARC64 & PPC
55 **     1.20.00.14	02/05/2007		Erich Chen			 bug fix for incorrect ccb_h.status report
56 **                                                       and cause g_vfs_done() read write error
57 **     1.20.00.15	10/10/2007		Erich Chen			 support new RAID adapter type ARC120x
58 **     1.20.00.16	10/10/2009		Erich Chen			 Bug fix for RAID adapter type ARC120x
59 **                                                       bus_dmamem_alloc() with BUS_DMA_ZERO
60 **     1.20.00.17   07/15/2010         Ching Huang       Added support ARC1880
61 **							 report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed,
62 **							 prevent cam_periph_error from removing all LUN devices of one Target id
63 **							 when any one LUN device failed
64 **     1.20.00.18	10/14/2010		Ching Huang			 Fixed "inquiry data fails comparison at DV1 step"
65 **               	10/25/2010		Ching Huang			 Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B
66 **     1.20.00.19	11/11/2010		Ching Huang			 Fixed arcmsr driver prevent arcsas support for Areca SAS HBA ARC13x0
67 **     1.20.00.20	12/08/2010		Ching Huang			 Avoid calling atomic_set_int function
68 **     1.20.00.21	02/08/2011		Ching Huang			 Implement I/O request timeout
69 **               	02/14/2011		Ching Huang			 Modified pktRequestCount
70 **     1.20.00.21	03/03/2011		Ching Huang			 if a command times out, wait for its ccb to come back before freeing it
71 **     1.20.00.22	07/04/2011		Ching Huang			 Fixed multiple MTX panic
72 ******************************************************************************************
73 * $FreeBSD: src/sys/dev/arcmsr/arcmsr.c,v 1.38 2011/08/16 08:41:37 delphij Exp $
74 */
75 #if 0
76 #define ARCMSR_DEBUG1			1
77 #endif
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/malloc.h>
81 #include <sys/kernel.h>
82 #include <sys/bus.h>
83 #include <sys/queue.h>
84 #include <sys/stat.h>
85 #include <sys/devicestat.h>
86 #include <sys/kthread.h>
87 #include <sys/module.h>
88 #include <sys/proc.h>
89 #include <sys/lock.h>
90 #include <sys/sysctl.h>
91 #include <sys/thread2.h>
92 #include <sys/poll.h>
93 #include <sys/device.h>
94 #include <vm/vm.h>
95 #include <vm/vm_param.h>
96 #include <vm/pmap.h>
97 
98 #include <machine/atomic.h>
99 #include <sys/conf.h>
100 #include <sys/rman.h>
101 
102 #include <bus/cam/cam.h>
103 #include <bus/cam/cam_ccb.h>
104 #include <bus/cam/cam_sim.h>
105 #include <bus/cam/cam_periph.h>
106 #include <bus/cam/cam_xpt_periph.h>
107 #include <bus/cam/cam_xpt_sim.h>
108 #include <bus/cam/cam_debug.h>
109 #include <bus/cam/scsi/scsi_all.h>
110 #include <bus/cam/scsi/scsi_message.h>
111 /*
112 **************************************************************************
113 **************************************************************************
114 */
115 #include <sys/endian.h>
116 #include <bus/pci/pcivar.h>
117 #include <bus/pci/pcireg.h>
118 #define ARCMSR_LOCK_INIT(l, s)	lockinit(l, s, 0, LK_CANRECURSE)
119 #define ARCMSR_LOCK_DESTROY(l)	lockuninit(l)
120 #define ARCMSR_LOCK_ACQUIRE(l)	lockmgr(l, LK_EXCLUSIVE)
121 #define ARCMSR_LOCK_RELEASE(l)	lockmgr(l, LK_RELEASE)
122 #define ARCMSR_LOCK_TRY(l)	lockmgr(l, LK_EXCLUSIVE|LK_NOWAIT)
123 #define arcmsr_htole32(x)	htole32(x)
124 typedef struct lock		arcmsr_lock_t;
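/*
** The wrappers above map the driver's locking onto DragonFly lockmgr locks.
** A minimal usage sketch (mirroring what arcmsr_poll and arcmsr_srb_timeout
** actually do with the per-adapter qbuffer_lock):
**
**	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
**	arcmsr_interrupt(acb);
**	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
*/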
125 
126 #if !defined(CAM_NEW_TRAN_CODE)
127 #define	CAM_NEW_TRAN_CODE	1
128 #endif
129 
130 #define arcmsr_callout_init(a)	callout_init_mp(a)
131 
132 #define ARCMSR_DRIVER_VERSION			"Driver Version 1.20.00.22 2011-07-04"
133 #include <dev/raid/arcmsr/arcmsr.h>
134 #define	SRB_SIZE						((sizeof(struct CommandControlBlock)+0x1f) & 0xffe0)
135 #define ARCMSR_SRBS_POOL_SIZE           (SRB_SIZE * ARCMSR_MAX_FREESRB_NUM)
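/*
** SRB_SIZE rounds each CommandControlBlock up to a 32-byte boundary, since the
** IOP expects SRB frames to be 32-byte aligned (see arcmsr_drain_donequeue),
** and ARCMSR_SRBS_POOL_SIZE is that rounded size times the maximum number of
** free SRBs, presumably so the whole pool fits in one DMA allocation.
*/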
136 /*
137 **************************************************************************
138 **************************************************************************
139 */
140 #define CHIP_REG_READ32(s, b, r)	bus_space_read_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r))
141 #define CHIP_REG_WRITE32(s, b, r, d)	bus_space_write_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r), d)
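/*
** CHIP_REG_READ32/WRITE32 turn a named register of a message-unit structure
** into a bus_space access through tag/handle pair 'b'; note they implicitly
** use the local variable 'acb'.  For example,
**	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, v)
** expands to
**	bus_space_write_4(acb->btag[0], acb->bhandle[0],
**	    offsetof(struct HBA_MessageUnit, inbound_doorbell), v)
*/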
142 /*
143 **************************************************************************
144 **************************************************************************
145 */
146 static void arcmsr_free_srb(struct CommandControlBlock *srb);
147 static struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb);
148 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb);
149 static int arcmsr_probe(device_t dev);
150 static int arcmsr_attach(device_t dev);
151 static int arcmsr_detach(device_t dev);
152 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
153 static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
154 static int arcmsr_shutdown(device_t dev);
155 static void arcmsr_interrupt(struct AdapterControlBlock *acb);
156 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
157 static void arcmsr_free_resource(struct AdapterControlBlock *acb);
158 static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
159 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
160 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
161 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
162 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
163 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb);
164 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
165 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
166 static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
167 static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
168 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t * dm_segs, u_int32_t nseg);
169 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb);
170 static int arcmsr_resume(device_t dev);
171 static int arcmsr_suspend(device_t dev);
172 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
173 static void	arcmsr_polling_devmap(void* arg);
174 static void	arcmsr_srb_timeout(void* arg);
175 #ifdef ARCMSR_DEBUG1
176 static void arcmsr_dump_data(struct AdapterControlBlock *acb);
177 #endif
178 /*
179 **************************************************************************
180 **************************************************************************
181 */
182 static void UDELAY(u_int32_t us) { DELAY(us); }
183 /*
184 **************************************************************************
185 **************************************************************************
186 */
187 static bus_dmamap_callback_t arcmsr_map_free_srb;
188 static bus_dmamap_callback_t arcmsr_execute_srb;
189 /*
190 **************************************************************************
191 **************************************************************************
192 */
193 static d_open_t	arcmsr_open;
194 static d_close_t arcmsr_close;
195 static d_ioctl_t arcmsr_ioctl;
196 
197 static device_method_t arcmsr_methods[]={
198 	DEVMETHOD(device_probe,		arcmsr_probe),
199 	DEVMETHOD(device_attach,	arcmsr_attach),
200 	DEVMETHOD(device_detach,	arcmsr_detach),
201 	DEVMETHOD(device_shutdown,	arcmsr_shutdown),
202 	DEVMETHOD(device_suspend,	arcmsr_suspend),
203 	DEVMETHOD(device_resume,	arcmsr_resume),
204 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
205 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
206 	{ 0, 0 }
207 };
208 
209 static driver_t arcmsr_driver={
210 	"arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
211 };
212 
213 static devclass_t arcmsr_devclass;
214 DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, NULL, NULL);
215 MODULE_VERSION(arcmsr, 1);
216 MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
217 MODULE_DEPEND(arcmsr, cam, 1, 1, 1);
218 #ifndef BUS_DMA_COHERENT
219 	#define	BUS_DMA_COHERENT	0x04	/* hint: map memory in a coherent way */
220 #endif
221 
222 static struct dev_ops arcmsr_ops = {
223 	{ "arcmsr", 0, 0 },
224 	.d_open =	arcmsr_open,		        /* open     */
225 	.d_close =	arcmsr_close,		        /* close    */
226 	.d_ioctl =	arcmsr_ioctl,		        /* ioctl    */
227 };
228 
229 static int	arcmsr_msi_enable = 1;
230 TUNABLE_INT("hw.arcmsr.msi.enable", &arcmsr_msi_enable);
231 
232 
233 /*
234 **************************************************************************
235 **************************************************************************
236 */
237 
238 static int
239 arcmsr_open(struct dev_open_args *ap)
240 {
241 	cdev_t dev = ap->a_head.a_dev;
242 	struct AdapterControlBlock *acb=dev->si_drv1;
243 
244 	if(acb==NULL) {
245 		return ENXIO;
246 	}
247 	return 0;
248 }
249 
250 /*
251 **************************************************************************
252 **************************************************************************
253 */
254 
255 static int
256 arcmsr_close(struct dev_close_args *ap)
257 {
258 	cdev_t dev = ap->a_head.a_dev;
259 	struct AdapterControlBlock *acb=dev->si_drv1;
260 
261 	if(acb==NULL) {
262 		return ENXIO;
263 	}
264 	return 0;
265 }
266 
267 /*
268 **************************************************************************
269 **************************************************************************
270 */
271 
272 static int
273 arcmsr_ioctl(struct dev_ioctl_args *ap)
274 {
275 	cdev_t dev = ap->a_head.a_dev;
276 	u_long ioctl_cmd = ap->a_cmd;
277 	caddr_t arg = ap->a_data;
278 	struct AdapterControlBlock *acb=dev->si_drv1;
279 
280 	if(acb==NULL) {
281 		return ENXIO;
282 	}
283 	return(arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
284 }
285 
286 /*
287 **********************************************************************
288 **********************************************************************
289 */
290 static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb)
291 {
292 	u_int32_t intmask_org=0;
293 
294 	switch (acb->adapter_type) {
295 	case ACB_ADAPTER_TYPE_A: {
296 			/* disable all outbound interrupt */
297 			intmask_org=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */
298 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
299 		}
300 		break;
301 	case ACB_ADAPTER_TYPE_B: {
302 			/* disable all outbound interrupt */
303 			intmask_org=CHIP_REG_READ32(HBB_DOORBELL,
304 			0, iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */
305 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, 0); /* disable all interrupt */
306 		}
307 		break;
308 	case ACB_ADAPTER_TYPE_C: {
309 			/* disable all outbound interrupt */
310 			intmask_org=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask)	; /* disable outbound message0 int */
311 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
312 		}
313 		break;
314 	}
315 	return(intmask_org);
316 }
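/*
** arcmsr_disable_allintr returns the mask that was in effect so callers such
** as arcmsr_iop_reset and arcmsr_abort_dr_ccbs can restore it afterwards with
** arcmsr_enable_allintr(acb, intmask_org).
*/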
317 /*
318 **********************************************************************
319 **********************************************************************
320 */
321 static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org)
322 {
323 	u_int32_t mask;
324 
325 	switch (acb->adapter_type) {
326 	case ACB_ADAPTER_TYPE_A: {
327 			/* enable outbound Post Queue, outbound doorbell Interrupt */
328 			mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
329 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask);
330 			acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
331 		}
332 		break;
333 	case ACB_ADAPTER_TYPE_B: {
334 			/* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
335 			mask=(ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
336 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/
337 			acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
338 		}
339 		break;
340 	case ACB_ADAPTER_TYPE_C: {
341 			/* enable outbound Post Queue, outbound doorbell Interrupt */
342 			mask=~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
343 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask);
344 			acb->outbound_int_enable= ~(intmask_org & mask) & 0x0000000f;
345 		}
346 		break;
347 	}
348 	return;
349 }
350 /*
351 **********************************************************************
352 **********************************************************************
353 */
354 static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
355 {
356 	u_int32_t Index;
357 	u_int8_t Retries=0x00;
358 
359 	do {
360 		for(Index=0; Index < 100; Index++) {
361 			if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
362 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/
363 				return TRUE;
364 			}
365 			UDELAY(10000);
366 		}/* max 1 second */
367 	}while(Retries++ < 20);/*max 20 sec*/
368 	return FALSE;
369 }
370 /*
371 **********************************************************************
372 **********************************************************************
373 */
374 static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
375 {
376 	u_int32_t Index;
377 	u_int8_t Retries=0x00;
378 
379 	do {
380 		for(Index=0; Index < 100; Index++) {
381 			if(CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
382 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/
383 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
384 				return TRUE;
385 			}
386 			UDELAY(10000);
387 		}/* max 1 second */
388 	}while(Retries++ < 20);/*max 20 sec*/
389 	return FALSE;
390 }
391 /*
392 **********************************************************************
393 **********************************************************************
394 */
395 static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb)
396 {
397 	u_int32_t Index;
398 	u_int8_t Retries=0x00;
399 
400 	do {
401 		for(Index=0; Index < 100; Index++) {
402 			if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
403 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/
404 				return TRUE;
405 			}
406 			UDELAY(10000);
407 		}/* max 1 second */
408 	}while(Retries++ < 20);/*max 20 sec*/
409 	return FALSE;
410 }
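/*
** All three wait_msgint_ready variants poll the same way: 100 polls of 10 ms
** (about 1 second) per attempt, retried up to 20 times, so a message handshake
** gets roughly 20 seconds before FALSE is returned to the caller.
*/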
411 /*
412 ************************************************************************
413 ************************************************************************
414 */
415 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
416 {
417 	int retry_count=30;	/* allow the adapter cache flush up to 10 minutes (30 retries x 20 seconds) */
418 
419 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
420 	do {
421 		if(arcmsr_hba_wait_msgint_ready(acb)) {
422 			break;
423 		} else {
424 			retry_count--;
425 		}
426 	}while(retry_count!=0);
427 	return;
428 }
429 /*
430 ************************************************************************
431 ************************************************************************
432 */
433 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
434 {
435 	int retry_count=30;	/* allow the adapter cache flush up to 10 minutes (30 retries x 20 seconds) */
436 
437 	CHIP_REG_WRITE32(HBB_DOORBELL,
438 	0, drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
439 	do {
440 		if(arcmsr_hbb_wait_msgint_ready(acb)) {
441 			break;
442 		} else {
443 			retry_count--;
444 		}
445 	}while(retry_count!=0);
446 	return;
447 }
448 /*
449 ************************************************************************
450 ************************************************************************
451 */
452 static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
453 {
454 	int retry_count=30;	/* allow the adapter cache flush up to 10 minutes (30 retries x 20 seconds) */
455 
456 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
457 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
458 	do {
459 		if(arcmsr_hbc_wait_msgint_ready(acb)) {
460 			break;
461 		} else {
462 			retry_count--;
463 		}
464 	}while(retry_count!=0);
465 	return;
466 }
467 /*
468 ************************************************************************
469 ************************************************************************
470 */
471 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
472 {
473 	switch (acb->adapter_type) {
474 	case ACB_ADAPTER_TYPE_A: {
475 			arcmsr_flush_hba_cache(acb);
476 		}
477 		break;
478 	case ACB_ADAPTER_TYPE_B: {
479 			arcmsr_flush_hbb_cache(acb);
480 		}
481 		break;
482 	case ACB_ADAPTER_TYPE_C: {
483 			arcmsr_flush_hbc_cache(acb);
484 		}
485 		break;
486 	}
487 	return;
488 }
489 /*
490 *******************************************************************************
491 *******************************************************************************
492 */
493 static int arcmsr_suspend(device_t dev)
494 {
495 	struct AdapterControlBlock	*acb = device_get_softc(dev);
496 
497 	/* flush controller */
498 	arcmsr_iop_parking(acb);
499 	/* disable all outbound interrupt */
500 	arcmsr_disable_allintr(acb);
501 	return(0);
502 }
503 /*
504 *******************************************************************************
505 *******************************************************************************
506 */
507 static int arcmsr_resume(device_t dev)
508 {
509 	struct AdapterControlBlock	*acb = device_get_softc(dev);
510 
511 	arcmsr_iop_init(acb);
512 	return(0);
513 }
514 /*
515 *********************************************************************************
516 *********************************************************************************
517 */
518 static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg)
519 {
520 	struct AdapterControlBlock *acb;
521 	u_int8_t target_id, target_lun;
522 	struct cam_sim * sim;
523 
524 	sim=(struct cam_sim *) cb_arg;
525 	acb =(struct AdapterControlBlock *) cam_sim_softc(sim);
526 	switch (code) {
527 	case AC_LOST_DEVICE:
528 		target_id=xpt_path_target_id(path);
529 		target_lun=xpt_path_lun_id(path);
530 		if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) {
531 			break;
532 		}
533 		kprintf("%s:scsi id=%d lun=%d device lost \n", device_get_name(acb->pci_dev), target_id, target_lun);
534 		break;
535 	default:
536 		break;
537 	}
538 }
539 /*
540 **********************************************************************
541 **********************************************************************
542 */
543 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
544 {
545 	struct AdapterControlBlock *acb=srb->acb;
546 	union ccb * pccb=srb->pccb;
547 
548 	if(srb->srb_flags & SRB_FLAG_TIMER_START)
549 		callout_stop(&srb->ccb_callout);
550 	if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
551 		bus_dmasync_op_t op;
552 
553 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
554 			op = BUS_DMASYNC_POSTREAD;
555 		} else {
556 			op = BUS_DMASYNC_POSTWRITE;
557 		}
558 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
559 		bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
560 	}
561 	if(stand_flag==1) {
562 		atomic_subtract_int(&acb->srboutstandingcount, 1);
563 		if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && (
564 		acb->srboutstandingcount < ARCMSR_RELEASE_SIMQ_LEVEL)) {
565 			acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN;
566 			pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
567 		}
568 	}
569 	if(srb->srb_state != ARCMSR_SRB_TIMEOUT)
570 		arcmsr_free_srb(srb);
571 #ifdef ARCMSR_DEBUG1
572 	acb->pktReturnCount++;
573 #endif
574 	xpt_done(pccb);
575 	return;
576 }
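/*
** Completion path: the data buffer is synced POSTREAD/POSTWRITE and its DMA
** map unloaded before xpt_done() hands the ccb back to CAM.  When the SIM
** queue had been frozen and the outstanding count drops below
** ARCMSR_RELEASE_SIMQ_LEVEL, CAM_RELEASE_SIMQ is set so CAM resumes issuing
** commands.  A timed-out srb is intentionally not freed here; it is freed
** once the adapter finally returns it (see arcmsr_drain_donequeue).
*/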
577 /*
578 **********************************************************************
579 **********************************************************************
580 */
581 static void arcmsr_report_sense_info(struct CommandControlBlock *srb)
582 {
583 	union ccb * pccb=srb->pccb;
584 
585 	pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
586 	pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
587 	if(&pccb->csio.sense_data) {
588 		memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data));
589 		memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData,
590 		get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data)));
591 		((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */
592 		pccb->ccb_h.status |= CAM_AUTOSNS_VALID;
593 	}
594 	return;
595 }
596 /*
597 *********************************************************************
598 *********************************************************************
599 */
600 static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
601 {
602 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
603 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
604 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
605 	}
606 	return;
607 }
608 /*
609 *********************************************************************
610 *********************************************************************
611 */
612 static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
613 {
614 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
615 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
616 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
617 	}
618 	return;
619 }
620 /*
621 *********************************************************************
622 *********************************************************************
623 */
624 static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb)
625 {
626 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
627 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
628 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
629 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
630 	}
631 	return;
632 }
633 /*
634 *********************************************************************
635 *********************************************************************
636 */
637 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
638 {
639 	switch (acb->adapter_type) {
640 	case ACB_ADAPTER_TYPE_A: {
641 			arcmsr_abort_hba_allcmd(acb);
642 		}
643 		break;
644 	case ACB_ADAPTER_TYPE_B: {
645 			arcmsr_abort_hbb_allcmd(acb);
646 		}
647 		break;
648 	case ACB_ADAPTER_TYPE_C: {
649 			arcmsr_abort_hbc_allcmd(acb);
650 		}
651 		break;
652 	}
653 	return;
654 }
655 /*
656 **************************************************************************
657 **************************************************************************
658 */
659 static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error)
660 {
661 	int target, lun;
662 
663 	target=srb->pccb->ccb_h.target_id;
664 	lun=srb->pccb->ccb_h.target_lun;
665 	if(error == FALSE) {
666 		if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
667 			acb->devstate[target][lun]=ARECA_RAID_GOOD;
668 		}
669 		srb->pccb->ccb_h.status |= CAM_REQ_CMP;
670 		arcmsr_srb_complete(srb, 1);
671 	} else {
672 		switch(srb->arcmsr_cdb.DeviceStatus) {
673 		case ARCMSR_DEV_SELECT_TIMEOUT: {
674 				if(acb->devstate[target][lun]==ARECA_RAID_GOOD) {
675 					kprintf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun);
676 				}
677 				acb->devstate[target][lun]=ARECA_RAID_GONE;
678 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
679 				arcmsr_srb_complete(srb, 1);
680 			}
681 			break;
682 		case ARCMSR_DEV_ABORTED:
683 		case ARCMSR_DEV_INIT_FAIL: {
684 				acb->devstate[target][lun]=ARECA_RAID_GONE;
685 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
686 				arcmsr_srb_complete(srb, 1);
687 			}
688 			break;
689 		case SCSISTAT_CHECK_CONDITION: {
690 				acb->devstate[target][lun]=ARECA_RAID_GOOD;
691 				arcmsr_report_sense_info(srb);
692 				arcmsr_srb_complete(srb, 1);
693 			}
694 			break;
695 		default:
696 			kprintf("arcmsr%d: scsi id=%d lun=%d isr got command error done, but with unknown DeviceStatus=0x%x \n"
697 					, acb->pci_unit, target, lun, srb->arcmsr_cdb.DeviceStatus);
698 			acb->devstate[target][lun]=ARECA_RAID_GONE;
699 			srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
700 			/* unknown error or CRC error, just retry */
701 			arcmsr_srb_complete(srb, 1);
702 			break;
703 		}
704 	}
705 	return;
706 }
707 /*
708 **************************************************************************
709 **************************************************************************
710 */
711 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error)
712 {
713 	struct CommandControlBlock *srb;
714 
715 	/* check if command done with no error*/
716 	switch (acb->adapter_type) {
717 	case ACB_ADAPTER_TYPE_C:
718 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
719 		break;
720 	case ACB_ADAPTER_TYPE_A:
721 	case ACB_ADAPTER_TYPE_B:
722 	default:
723 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
724 		break;
725 	}
726 	if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
727 		if(srb->srb_state == ARCMSR_SRB_TIMEOUT) {
728 			arcmsr_free_srb(srb);
729 			kprintf("arcmsr%d: srb='%p' returned srb had already timed out\n", acb->pci_unit, srb);
730 			return;
731 		}
732 		kprintf("arcmsr%d: returned srb has already been completed\n"
733 			"srb='%p' srb_state=0x%x outstanding srb count=%d \n",
734 			acb->pci_unit, srb, srb->srb_state, acb->srboutstandingcount);
735 		return;
736 	}
737 	arcmsr_report_srb_state(acb, srb, error);
738 	return;
739 }
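/*
** flag_srb is the raw value the adapter placed in its reply queue.  For type
** A/B adapters it appears to be the SRB frame physical address shifted right
** by 5 (frames are 32-byte aligned), hence the '<< 5' above; for type C the
** low bits carry status/size information, hence the '& 0xFFFFFFE0' mask.
** Adding acb->vir2phy_offset then converts the frame's physical address back
** into the driver's virtual CommandControlBlock pointer.
*/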
740 /*
741 **************************************************************************
742 **************************************************************************
743 */
744 static void	arcmsr_srb_timeout(void* arg)
745 {
746 	struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
747 	struct AdapterControlBlock *acb;
748 	int target, lun;
749 	u_int8_t cmd;
750 
751 	target=srb->pccb->ccb_h.target_id;
752 	lun=srb->pccb->ccb_h.target_lun;
753 	acb = srb->acb;
754 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
755 	if(srb->srb_state == ARCMSR_SRB_START)
756 	{
757 		cmd = srb->pccb->csio.cdb_io.cdb_bytes[0];
758 		srb->srb_state = ARCMSR_SRB_TIMEOUT;
759 		srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT;
760 		arcmsr_srb_complete(srb, 1);
761 		kprintf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command time out!\n",
762 				 acb->pci_unit, target, lun, cmd, srb);
763 	}
764 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
765 #ifdef ARCMSR_DEBUG1
766 	arcmsr_dump_data(acb);
767 #endif
768 }
769 
770 /*
771 **********************************************************************
772 **********************************************************************
773 */
774 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
775 {
776 	int i=0;
777 	u_int32_t flag_srb;
778 	u_int16_t error;
779 
780 	switch (acb->adapter_type) {
781 	case ACB_ADAPTER_TYPE_A: {
782 			u_int32_t outbound_intstatus;
783 
784 			/*clear and abort all outbound posted Q*/
785 			outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
786 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
787 			while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
788 				error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
789 				arcmsr_drain_donequeue(acb, flag_srb, error);
790 			}
791 		}
792 		break;
793 	case ACB_ADAPTER_TYPE_B: {
794 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
795 
796 			/*clear all outbound posted Q*/
797 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
798 			for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
799 				if((flag_srb=phbbmu->done_qbuffer[i])!=0) {
800 					phbbmu->done_qbuffer[i]=0;
801 					error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
802 					arcmsr_drain_donequeue(acb, flag_srb, error);
803 				}
804 				phbbmu->post_qbuffer[i]=0;
805 			}/*drain reply FIFO*/
806 			phbbmu->doneq_index=0;
807 			phbbmu->postq_index=0;
808 		}
809 		break;
810 	case ACB_ADAPTER_TYPE_C: {
811 
812 			while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
813 				flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
814 				error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
815 				arcmsr_drain_donequeue(acb, flag_srb, error);
816 			}
817 		}
818 		break;
819 	}
820 	return;
821 }
822 /*
823 ****************************************************************************
824 ****************************************************************************
825 */
826 static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
827 {
828 	struct CommandControlBlock *srb;
829 	u_int32_t intmask_org;
830 	u_int32_t i=0;
831 
832 	if(acb->srboutstandingcount>0) {
833 		/* disable all outbound interrupt */
834 		intmask_org=arcmsr_disable_allintr(acb);
835 		/*clear and abort all outbound posted Q*/
836 		arcmsr_done4abort_postqueue(acb);
837 		/* talk to iop 331 outstanding command aborted*/
838 		arcmsr_abort_allcmd(acb);
839 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
840 			srb=acb->psrb_pool[i];
841 			if(srb->srb_state==ARCMSR_SRB_START) {
842 				srb->srb_state=ARCMSR_SRB_ABORTED;
843 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
844 				arcmsr_srb_complete(srb, 1);
845 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p' aborted\n"
846 						, acb->pci_unit, srb->pccb->ccb_h.target_id
847 						, srb->pccb->ccb_h.target_lun, srb);
848 			}
849 		}
850 		/* enable all outbound interrupt */
851 		arcmsr_enable_allintr(acb, intmask_org);
852 	}
853 	acb->srboutstandingcount=0;
854 	acb->workingsrb_doneindex=0;
855 	acb->workingsrb_startindex=0;
856 #ifdef ARCMSR_DEBUG1
857 	acb->pktRequestCount = 0;
858 	acb->pktReturnCount = 0;
859 #endif
860 	return;
861 }
862 /*
863 **********************************************************************
864 **********************************************************************
865 */
866 static void arcmsr_build_srb(struct CommandControlBlock *srb,
867 		bus_dma_segment_t *dm_segs, u_int32_t nseg)
868 {
869 	struct ARCMSR_CDB * arcmsr_cdb= &srb->arcmsr_cdb;
870 	u_int8_t * psge=(u_int8_t *)&arcmsr_cdb->u;
871 	u_int32_t address_lo, address_hi;
872 	union ccb * pccb=srb->pccb;
873 	struct ccb_scsiio * pcsio= &pccb->csio;
874 	u_int32_t arccdbsize=0x30;
875 
876 	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
877 	arcmsr_cdb->Bus=0;
878 	arcmsr_cdb->TargetID=pccb->ccb_h.target_id;
879 	arcmsr_cdb->LUN=pccb->ccb_h.target_lun;
880 	arcmsr_cdb->Function=1;
881 	arcmsr_cdb->CdbLength=(u_int8_t)pcsio->cdb_len;
882 	arcmsr_cdb->Context=0;
883 	bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
884 	if(nseg != 0) {
885 		struct AdapterControlBlock *acb=srb->acb;
886 		bus_dmasync_op_t op;
887 		u_int32_t length, i, cdb_sgcount=0;
888 
889 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
890 			op=BUS_DMASYNC_PREREAD;
891 		} else {
892 			op=BUS_DMASYNC_PREWRITE;
893 			arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_WRITE;
894 			srb->srb_flags|=SRB_FLAG_WRITE;
895 		}
896 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
897 		for(i=0;i<nseg;i++) {
898 			/* Get the physical address of the current data pointer */
899 			length=arcmsr_htole32(dm_segs[i].ds_len);
900 			address_lo=arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
901 			address_hi=arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
902 			if(address_hi==0) {
903 				struct SG32ENTRY * pdma_sg=(struct SG32ENTRY *)psge;
904 				pdma_sg->address=address_lo;
905 				pdma_sg->length=length;
906 				psge += sizeof(struct SG32ENTRY);
907 				arccdbsize += sizeof(struct SG32ENTRY);
908 			} else {
909 				u_int32_t sg64s_size=0, tmplength=length;
910 
911 				while(1) {
912 					u_int64_t span4G, length0;
913 					struct SG64ENTRY * pdma_sg=(struct SG64ENTRY *)psge;
914 
915 					span4G=(u_int64_t)address_lo + tmplength;
916 					pdma_sg->addresshigh=address_hi;
917 					pdma_sg->address=address_lo;
918 					if(span4G > 0x100000000) {
919 						/*see if cross 4G boundary*/
920 						length0=0x100000000-address_lo;
921 						pdma_sg->length=(u_int32_t)length0|IS_SG64_ADDR;
922 						address_hi=address_hi+1;
923 						address_lo=0;
924 						tmplength=tmplength-(u_int32_t)length0;
925 						sg64s_size += sizeof(struct SG64ENTRY);
926 						psge += sizeof(struct SG64ENTRY);
927 						cdb_sgcount++;
928 					} else {
929 						pdma_sg->length=tmplength|IS_SG64_ADDR;
930 						sg64s_size += sizeof(struct SG64ENTRY);
931 						psge += sizeof(struct SG64ENTRY);
932 						break;
933 					}
934 				}
935 				arccdbsize += sg64s_size;
936 			}
937 			cdb_sgcount++;
938 		}
939 		arcmsr_cdb->sgcount=(u_int8_t)cdb_sgcount;
940 		arcmsr_cdb->DataLength=pcsio->dxfer_len;
941 		if( arccdbsize > 256) {
942 			arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_SGL_BSIZE;
943 		}
944 	} else {
945 		arcmsr_cdb->DataLength = 0;
946 	}
947 	srb->arc_cdb_size=arccdbsize;
948 	return;
949 }
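/*
** arcmsr_build_srb translates the CAM ccb into an Areca CDB frame: 32-bit SG
** entries are used while the high address bits are zero, 64-bit entries
** (tagged with IS_SG64_ADDR) otherwise, and an entry that would span a 4GB
** boundary is split at that boundary.  If the frame grows beyond 256 bytes,
** ARCMSR_CDB_FLAG_SGL_BSIZE is set, presumably so the IOP fetches the larger
** frame when the srb is posted.
*/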
950 /*
951 **************************************************************************
952 **************************************************************************
953 */
954 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
955 {
956 	u_int32_t cdb_shifted_phyaddr=(u_int32_t) srb->cdb_shifted_phyaddr;
957 	struct ARCMSR_CDB * arcmsr_cdb=(struct ARCMSR_CDB *)&srb->arcmsr_cdb;
958 
959 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD);
960 	atomic_add_int(&acb->srboutstandingcount, 1);
961 	srb->srb_state=ARCMSR_SRB_START;
962 
963 	switch (acb->adapter_type) {
964 	case ACB_ADAPTER_TYPE_A: {
965 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
966 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
967 			} else {
968 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr);
969 			}
970 		}
971 		break;
972 	case ACB_ADAPTER_TYPE_B: {
973 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
974 			int ending_index, index;
975 
976 			index=phbbmu->postq_index;
977 			ending_index=((index+1)%ARCMSR_MAX_HBB_POSTQUEUE);
978 			phbbmu->post_qbuffer[ending_index]=0;
979 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
980 				phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE;
981 			} else {
982 				phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr;
983 			}
984 			index++;
985 			index %= ARCMSR_MAX_HBB_POSTQUEUE;	/* wrap back to 0 past the last index */
986 			phbbmu->postq_index=index;
987 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
988 		}
989 		break;
990 	case ACB_ADAPTER_TYPE_C:
991 		{
992 			u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;
993 
994 			arc_cdb_size=(srb->arc_cdb_size>0x300)?0x300:srb->arc_cdb_size;
995 			ccb_post_stamp=(cdb_shifted_phyaddr | ((arc_cdb_size-1) >> 6) | 1);
996 			cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
997 			if(cdb_phyaddr_hi32)
998 			{
999 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_high, cdb_phyaddr_hi32);
1000 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_low, ccb_post_stamp);
1001 			}
1002 			else
1003 			{
1004 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_low, ccb_post_stamp);
1005 			}
1006 		}
1007 		break;
1008 	}
1009 	return;
1010 }
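/*
** Posting conventions differ per adapter type: type A writes the (shifted)
** frame address to a single inbound queue register, type B places it in the
** software post queue of the message unit and rings the drv2iop doorbell,
** and type C encodes the frame length in the low bits written to
** inbound_queueport_low (((arc_cdb_size - 1) >> 6) | 1, apparently in 64-byte
** units), writing the high address half first whenever it is non-zero.
*/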
1011 /*
1012 ************************************************************************
1013 ************************************************************************
1014 */
1015 static struct QBUFFER * arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb)
1016 {
1017 	struct QBUFFER *qbuffer=NULL;
1018 
1019 	switch (acb->adapter_type) {
1020 	case ACB_ADAPTER_TYPE_A: {
1021 			struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
1022 
1023 			qbuffer=(struct QBUFFER *)&phbamu->message_rbuffer;
1024 		}
1025 		break;
1026 	case ACB_ADAPTER_TYPE_B: {
1027 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1028 
1029 			qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
1030 		}
1031 		break;
1032 	case ACB_ADAPTER_TYPE_C: {
1033 			struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
1034 
1035 			qbuffer=(struct QBUFFER *)&phbcmu->message_rbuffer;
1036 		}
1037 		break;
1038 	}
1039 	return(qbuffer);
1040 }
1041 /*
1042 ************************************************************************
1043 ************************************************************************
1044 */
1045 static struct QBUFFER * arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb)
1046 {
1047 	struct QBUFFER *qbuffer=NULL;
1048 
1049 	switch (acb->adapter_type) {
1050 	case ACB_ADAPTER_TYPE_A: {
1051 			struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
1052 
1053 			qbuffer=(struct QBUFFER *)&phbamu->message_wbuffer;
1054 		}
1055 		break;
1056 	case ACB_ADAPTER_TYPE_B: {
1057 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1058 
1059 			qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
1060 		}
1061 		break;
1062 	case ACB_ADAPTER_TYPE_C: {
1063 			struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
1064 
1065 			qbuffer=(struct QBUFFER *)&phbcmu->message_wbuffer;
1066 		}
1067 		break;
1068 	}
1069 	return(qbuffer);
1070 }
1071 /*
1072 **************************************************************************
1073 **************************************************************************
1074 */
1075 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1076 {
1077 	switch (acb->adapter_type) {
1078 	case ACB_ADAPTER_TYPE_A: {
1079 			/* let IOP know data has been read */
1080 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
1081 		}
1082 		break;
1083 	case ACB_ADAPTER_TYPE_B: {
1084 			/* let IOP know data has been read */
1085 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
1086 		}
1087 		break;
1088 	case ACB_ADAPTER_TYPE_C: {
1089 			/* let IOP know data has been read */
1090 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
1091 		}
1092 	}
1093 	return;
1094 }
1095 /*
1096 **************************************************************************
1097 **************************************************************************
1098 */
1099 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1100 {
1101 	switch (acb->adapter_type) {
1102 	case ACB_ADAPTER_TYPE_A: {
1103 			/*
1104 			** push the inbound doorbell to tell the IOP the driver data write is done,
1105 			** then wait for the reply at the next hw interrupt before the next Qbuffer post
1106 			*/
1107 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
1108 		}
1109 		break;
1110 	case ACB_ADAPTER_TYPE_B: {
1111 			/*
1112 			** push the inbound doorbell to tell the IOP the driver data write is done,
1113 			** then wait for the reply at the next hw interrupt before the next Qbuffer post
1114 			*/
1115 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
1116 		}
1117 		break;
1118 	case ACB_ADAPTER_TYPE_C: {
1119 			/*
1120 			** push the inbound doorbell to tell the IOP the driver data write is done,
1121 			** then wait for the reply at the next hw interrupt before the next Qbuffer post
1122 			*/
1123 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
1124 		}
1125 		break;
1126 	}
1127 }
1128 /*
1129 **********************************************************************
1130 **********************************************************************
1131 */
1132 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
1133 {
1134 	u_int8_t *pQbuffer;
1135 	struct QBUFFER *pwbuffer;
1136 	u_int8_t * iop_data;
1137 	int32_t allxfer_len=0;
1138 
1139 	pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1140 	iop_data=(u_int8_t *)pwbuffer->data;
1141 	if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
1142 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1143 		while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1144 			&& (allxfer_len<124)) {
1145 			pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1146 			memcpy(iop_data, pQbuffer, 1);
1147 			acb->wqbuf_firstindex++;
1148 			acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /* wrap back to 0 past the last index */
1149 			iop_data++;
1150 			allxfer_len++;
1151 		}
1152 		pwbuffer->data_len=allxfer_len;
1153 		/*
1154 		** push the inbound doorbell and wait for the reply at the hw interrupt routine before the next Qbuffer post
1155 		*/
1156 		arcmsr_iop_message_wrote(acb);
1157 	}
1158 	return;
1159 }
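/*
** The ioctl write path drains the driver's circular wqbuffer into the IOP's
** message write buffer at most 124 bytes at a time (the QBUFFER data area),
** then rings the doorbell via arcmsr_iop_message_wrote(); the next chunk is
** sent from the doorbell interrupt once the IOP acknowledges the read.
*/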
1160 /*
1161 ************************************************************************
1162 ************************************************************************
1163 */
1164 static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
1165 {
1166 	acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1167 	CHIP_REG_WRITE32(HBA_MessageUnit,
1168 	0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1169 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
1170 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1171 			, acb->pci_unit);
1172 	}
1173 	return;
1174 }
1175 /*
1176 ************************************************************************
1177 ************************************************************************
1178 */
1179 static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
1180 {
1181 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1182 	CHIP_REG_WRITE32(HBB_DOORBELL,
1183 	0, drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
1184 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
1185 		kprintf( "arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1186 			, acb->pci_unit);
1187 	}
1188 	return;
1189 }
1190 /*
1191 ************************************************************************
1192 ************************************************************************
1193 */
1194 static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb)
1195 {
1196 	acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1197 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1198 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1199 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
1200 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
1201 	}
1202 	return;
1203 }
1204 /*
1205 ************************************************************************
1206 ************************************************************************
1207 */
1208 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1209 {
1210 	switch (acb->adapter_type) {
1211 	case ACB_ADAPTER_TYPE_A: {
1212 			arcmsr_stop_hba_bgrb(acb);
1213 		}
1214 		break;
1215 	case ACB_ADAPTER_TYPE_B: {
1216 			arcmsr_stop_hbb_bgrb(acb);
1217 		}
1218 		break;
1219 	case ACB_ADAPTER_TYPE_C: {
1220 			arcmsr_stop_hbc_bgrb(acb);
1221 		}
1222 		break;
1223 	}
1224 	return;
1225 }
1226 /*
1227 ************************************************************************
1228 ************************************************************************
1229 */
1230 static void arcmsr_poll(struct cam_sim * psim)
1231 {
1232 	struct AdapterControlBlock *acb;
1233 	int	mutex;
1234 
1235 	acb = (struct AdapterControlBlock *)cam_sim_softc(psim);
1236 	mutex = lockstatus(&acb->qbuffer_lock, curthread);
1237 	if( mutex == 0 )
1238 		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1239 	arcmsr_interrupt(acb);
1240 	if( mutex == 0 )
1241 		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1242 	return;
1243 }
1244 /*
1245 **************************************************************************
1246 **************************************************************************
1247 */
1248 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1249 {
1250 	struct QBUFFER *prbuffer;
1251 	u_int8_t *pQbuffer;
1252 	u_int8_t *iop_data;
1253 	int my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
1254 
1255 	/* check whether this IOP data would overflow my rqbuffer */
1256 	rqbuf_lastindex=acb->rqbuf_lastindex;
1257 	rqbuf_firstindex=acb->rqbuf_firstindex;
1258 	prbuffer=arcmsr_get_iop_rqbuffer(acb);
1259 	iop_data=(u_int8_t *)prbuffer->data;
1260 	iop_len=prbuffer->data_len;
1261 	my_empty_len=(rqbuf_firstindex-rqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1262 	if(my_empty_len>=iop_len) {
1263 		while(iop_len > 0) {
1264 			pQbuffer=&acb->rqbuffer[rqbuf_lastindex];
1265 			memcpy(pQbuffer, iop_data, 1);
1266 			rqbuf_lastindex++;
1267 			rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;	/* wrap back to 0 past the last index */
1268 			iop_data++;
1269 			iop_len--;
1270 		}
1271 		acb->rqbuf_lastindex=rqbuf_lastindex;
1272 		arcmsr_iop_message_read(acb);
1273 		/*signature, let IOP know data has been read */
1274 	} else {
1275 		acb->acb_flags|=ACB_F_IOPDATA_OVERFLOW;
1276 	}
1277 	return;
1278 }
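/*
** my_empty_len above is the usual ring-buffer free-space computation,
** (first - last - 1) masked to the buffer size, which assumes
** ARCMSR_MAX_QBUFFER is a power of two.  If the incoming IOP data would not
** fit, ACB_F_IOPDATA_OVERFLOW is set and the data is left in the IOP buffer,
** presumably to be picked up once the rqbuffer has been drained.
*/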
1279 /*
1280 **************************************************************************
1281 **************************************************************************
1282 */
1283 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
1284 {
1285 	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
1286 	/*
1287 	*****************************************************************
1288 	**   check whether the user space program has left any mail packages
1289 	**   in my post bag; if so, now is the time to send them to Areca's firmware
1290 	*****************************************************************
1291 	*/
1292 	if(acb->wqbuf_firstindex!=acb->wqbuf_lastindex) {
1293 		u_int8_t *pQbuffer;
1294 		struct QBUFFER *pwbuffer;
1295 		u_int8_t *iop_data;
1296 		int allxfer_len=0;
1297 
1298 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1299 		pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1300 		iop_data=(u_int8_t *)pwbuffer->data;
1301 		while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1302 			&& (allxfer_len<124)) {
1303 			pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1304 			memcpy(iop_data, pQbuffer, 1);
1305 			acb->wqbuf_firstindex++;
1306 			acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /* wrap back to 0 past the last index */
1307 			iop_data++;
1308 			allxfer_len++;
1309 		}
1310 		pwbuffer->data_len=allxfer_len;
1311 		/*
1312 		** push the inbound doorbell to tell the IOP the driver data write is done,
1313 		** then wait for the reply at the next hw interrupt before the next Qbuffer post
1314 		*/
1315 		arcmsr_iop_message_wrote(acb);
1316 	}
1317 	if(acb->wqbuf_firstindex==acb->wqbuf_lastindex) {
1318 		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
1319 	}
1320 	return;
1321 }
1322 
1323 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb)
1324 {
1325 /*
1326 	if (ccb->ccb_h.status != CAM_REQ_CMP)
1327 		kprintf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x, failure status=%x\n",ccb->ccb_h.target_id,ccb->ccb_h.target_lun,ccb->ccb_h.status);
1328 	else
1329 		kprintf("arcmsr_rescanLun_cb: Rescan lun successfully!\n");
1330 */
1331 	xpt_free_path(ccb->ccb_h.path);
1332 }
1333 
1334 static void	arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun)
1335 {
1336 	struct cam_path     *path;
1337 	union ccb            ccb;
1338 
1339 	if (xpt_create_path(&path, xpt_periph, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP)
1340 		return;
1341 /*	kprintf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */
1342 	bzero(&ccb, sizeof(union ccb));
1343 	xpt_setup_ccb(&ccb.ccb_h, path, 5);
1344 	ccb.ccb_h.func_code = XPT_SCAN_LUN;
1345 	ccb.ccb_h.cbfcnp = arcmsr_rescanLun_cb;
1346 	ccb.crcn.flags = CAM_FLAG_NONE;
1347 	xpt_action(&ccb);
1348 	return;
1349 }
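/*
** The rescan ccb above is built on the stack with XPT_SCAN_LUN; the path
** created for it is released later in arcmsr_rescanLun_cb once the scan
** completes.
*/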
1350 
1351 
1352 static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun)
1353 {
1354 	struct CommandControlBlock *srb;
1355 	u_int32_t intmask_org;
1356 	int i;
1357 
1358 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1359 	/* disable all outbound interrupts */
1360 	intmask_org = arcmsr_disable_allintr(acb);
1361 	for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++)
1362 	{
1363 		srb = acb->psrb_pool[i];
1364 		if (srb->srb_state == ARCMSR_SRB_START)
1365 		{
1366 			if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun))
1367 			{
1368 				srb->srb_state = ARCMSR_SRB_ABORTED;
1369 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
1370 				arcmsr_srb_complete(srb, 1);
1371 				kprintf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb);
1372 			}
1373 		}
1374 	}
1375 	/* enable outbound Post Queue, outbound doorbell Interrupt */
1376 	arcmsr_enable_allintr(acb, intmask_org);
1377 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1378 }
1379 
1380 
1381 /*
1382 **************************************************************************
1383 **************************************************************************
1384 */
1385 static void arcmsr_dr_handle(struct AdapterControlBlock *acb) {
1386 	u_int32_t	devicemap;
1387 	u_int32_t	target, lun;
1388 	u_int32_t	deviceMapCurrent[4]={0};
1389 	u_int8_t	*pDevMap;
1390 
1391 	switch (acb->adapter_type) {
1392 	case ACB_ADAPTER_TYPE_A:
1393 		devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1394 		for (target= 0; target < 4; target++)
1395 		{
1396 			deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1397 			devicemap += 4;
1398 		}
1399 		break;
1400 
1401 	case ACB_ADAPTER_TYPE_B:
1402 		devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1403 		for (target= 0; target < 4; target++)
1404 		{
1405 			deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap);
1406 			devicemap += 4;
1407 		}
1408 		break;
1409 
1410 	case ACB_ADAPTER_TYPE_C:
1411 		devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1412 		for (target= 0; target < 4; target++)
1413 		{
1414 			deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1415 			devicemap += 4;
1416 		}
1417 		break;
1418 	}
1419 	if(acb->acb_flags & ACB_F_BUS_HANG_ON)
1420 	{
1421 		acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
1422 	}
1423 	/*
1424 	** adapter posted CONFIG message
1425 	** copy the new map, note if there are differences with the current map
1426 	*/
1427 	pDevMap = (u_int8_t *)&deviceMapCurrent[0];
1428 	for (target= 0; target < ARCMSR_MAX_TARGETID - 1; target++)
1429 	{
1430 		if (*pDevMap != acb->device_map[target])
1431 		{
1432 			u_int8_t difference, bit_check;
1433 
1434 			difference= *pDevMap ^ acb->device_map[target];
1435 			for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++)
1436 			{
1437 				bit_check=(1 << lun);	/* check this target's LUN bits one by one */
1438 				if(difference & bit_check)
1439 				{
1440 					if(acb->device_map[target] & bit_check)
1441 					{/* unit departed */
1442 						kprintf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun);
1443 						arcmsr_abort_dr_ccbs(acb, target, lun);
1444 						arcmsr_rescan_lun(acb, target, lun);
1445 						acb->devstate[target][lun] = ARECA_RAID_GONE;
1446 					}
1447 					else
1448 					{/* unit arrived */
1449 						kprintf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n",target,lun);
1450 						arcmsr_rescan_lun(acb, target, lun);
1451 						acb->devstate[target][lun] = ARECA_RAID_GOOD;
1452 					}
1453 				}
1454 			}
1455 /*			kprintf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */
1456 			acb->device_map[target]= *pDevMap;
1457 		}
1458 		pDevMap++;
1459 	}
1460 }
1461 /*
1462 **************************************************************************
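** arcmsr_hb{a,b,c}_message_isr: acknowledge the outbound message interrupt and,
** when the firmware signals GET_CONFIG completion, run the device-map handler.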
1463 **************************************************************************
1464 */
1465 static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) {
1466 	u_int32_t outbound_message;
1467 
1468 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1469 	outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]);
1470 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1471 		arcmsr_dr_handle( acb );
1472 }
1473 /*
1474 **************************************************************************
1475 **************************************************************************
1476 */
1477 static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) {
1478 	u_int32_t outbound_message;
1479 
1480 	/* clear interrupts */
1481 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1482 	outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]);
1483 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1484 		arcmsr_dr_handle( acb );
1485 }
1486 /*
1487 **************************************************************************
1488 **************************************************************************
1489 */
1490 static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) {
1491 	u_int32_t outbound_message;
1492 
1493 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
1494 	outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]);
1495 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1496 		arcmsr_dr_handle( acb );
1497 }
1498 /*
1499 **************************************************************************
1500 **************************************************************************
1501 */
1502 static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
1503 {
1504 	u_int32_t outbound_doorbell;
1505 
1506 	/*
1507 	*******************************************************************
1508 	**  We may need to verify here whether wrqbuffer_lock is held.
1509 	**  DOORBELL: din! don!
1510 	**  Check whether the firmware has any mail for us to fetch.
1511 	*******************************************************************
1512 	*/
1513 	outbound_doorbell=CHIP_REG_READ32(HBA_MessageUnit,
1514 	0, outbound_doorbell);
1515 	CHIP_REG_WRITE32(HBA_MessageUnit,
1516 	0, outbound_doorbell, outbound_doorbell); /* clear doorbell interrupt */
1517 	if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
1518 		arcmsr_iop2drv_data_wrote_handle(acb);
1519 	}
1520 	if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
1521 		arcmsr_iop2drv_data_read_handle(acb);
1522 	}
1523 	return;
1524 }
1525 /*
1526 **************************************************************************
1527 **************************************************************************
1528 */
1529 static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
1530 {
1531 	u_int32_t outbound_doorbell;
1532 
1533 	/*
1534 	*******************************************************************
1535 	**  We may need to verify here whether wrqbuffer_lock is held.
1536 	**  DOORBELL: din! don!
1537 	**  Check whether the firmware has any mail for us to fetch.
1538 	*******************************************************************
1539 	*/
1540 	outbound_doorbell=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
1541 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /* clear doorbell interrupt */
1542 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
1543 		arcmsr_iop2drv_data_wrote_handle(acb);
1544 	}
1545 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
1546 		arcmsr_iop2drv_data_read_handle(acb);
1547 	}
1548 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
1549 		arcmsr_hbc_message_isr(acb);    /* IOP signals completion of a driver-to-IOP message command */
1550 	}
1551 	return;
1552 }
1553 /*
1554 **************************************************************************
1555 **************************************************************************
1556 */
1557 static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1558 {
1559 	u_int32_t flag_srb;
1560 	u_int16_t error;
1561 
1562 	/*
1563 	*****************************************************************************
1564 	**               areca cdb command done
1565 	*****************************************************************************
1566 	*/
1567 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1568 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1569 	while((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
1570 		0, outbound_queueport)) != 0xFFFFFFFF) {
1571 		/* check if command done with no error*/
1572         error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
1573 		arcmsr_drain_donequeue(acb, flag_srb, error);
1574 	}	/*drain reply FIFO*/
1575 	return;
1576 }
1577 /*
1578 **************************************************************************
1579 **************************************************************************
1580 */
1581 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1582 {
1583 	struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1584 	u_int32_t flag_srb;
1585 	int index;
1586 	u_int16_t error;
1587 
1588 	/*
1589 	*****************************************************************************
1590 	**               areca cdb command done
1591 	*****************************************************************************
1592 	*/
1593 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1594 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1595 	index=phbbmu->doneq_index;
1596 	while((flag_srb=phbbmu->done_qbuffer[index]) != 0) {
1597 		phbbmu->done_qbuffer[index]=0;
1598 		index++;
1599 		index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
1600 		phbbmu->doneq_index=index;
1601 		/* check if command done with no error*/
1602         error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
1603 		arcmsr_drain_donequeue(acb, flag_srb, error);
1604 	}	/*drain reply FIFO*/
1605 	return;
1606 }
1607 /*
1608 **************************************************************************
1609 **************************************************************************
1610 */
1611 static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
1612 {
1613 	u_int32_t flag_srb,throttling=0;
1614 	u_int16_t error;
1615 
1616 	/*
1617 	*****************************************************************************
1618 	**               areca cdb command done
1619 	*****************************************************************************
1620 	*/
1621 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1622 
1623 	while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1624 
1625 		flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
1626 		/* check if command done with no error*/
2627 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
2628 		arcmsr_drain_donequeue(acb, flag_srb, error);
2629 		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
2630 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
2631 			break;
2632 		}
2633 		throttling++;
1634 	}	/*drain reply FIFO*/
1635 	return;
1636 }
1637 /*
1638 **********************************************************************
1639 **********************************************************************
1640 */
1641 static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb)
1642 {
1643 	u_int32_t outbound_intstatus;
1644 	/*
1645 	*********************************************
1646 	**   check outbound intstatus
1647 	*********************************************
1648 	*/
1649 	outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
1650 	if(!outbound_intstatus) {
1651 		/*it must be share irq*/
1652 		return;
1653 	}
1654 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
1655 	/* MU doorbell interrupts*/
1656 	if(outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
1657 		arcmsr_hba_doorbell_isr(acb);
1658 	}
1659 	/* MU post queue interrupts*/
1660 	if(outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
1661 		arcmsr_hba_postqueue_isr(acb);
1662 	}
1663 	if(outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1664 		arcmsr_hba_message_isr(acb);
1665 	}
1666 	return;
1667 }
1668 /*
1669 **********************************************************************
1670 **********************************************************************
1671 */
1672 static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb)
1673 {
1674 	u_int32_t outbound_doorbell;
1675 	/*
1676 	*********************************************
1677 	**   check outbound intstatus
1678 	*********************************************
1679 	*/
1680 	outbound_doorbell=CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & acb->outbound_int_enable;
1681 	if(!outbound_doorbell) {
1682 		/*it must be share irq*/
1683 		return;
1684 	}
1685 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */
1686 	CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell);
1687 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1688 	/* MU ioctl transfer doorbell interrupts*/
1689 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
1690 		arcmsr_iop2drv_data_wrote_handle(acb);
1691 	}
1692 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
1693 		arcmsr_iop2drv_data_read_handle(acb);
1694 	}
1695 	/* MU post queue interrupts*/
1696 	if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1697 		arcmsr_hbb_postqueue_isr(acb);
1698 	}
1699 	if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1700 		arcmsr_hbb_message_isr(acb);
1701 	}
1702 	return;
1703 }
1704 /*
1705 **********************************************************************
1706 **********************************************************************
1707 */
1708 static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb)
1709 {
1710 	u_int32_t host_interrupt_status;
1711 	/*
1712 	*********************************************
1713 	**   check outbound intstatus
1714 	*********************************************
1715 	*/
1716 	host_interrupt_status=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status);
1717 	if(!host_interrupt_status) {
1718 		/*it must be share irq*/
1719 		return;
1720 	}
1721 	/* MU doorbell interrupts*/
1722 	if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
1723 		arcmsr_hbc_doorbell_isr(acb);
1724 	}
1725 	/* MU post queue interrupts*/
1726 	if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1727 		arcmsr_hbc_postqueue_isr(acb);
1728 	}
1729 	return;
1730 }
1731 /*
1732 ******************************************************************************
1733 ******************************************************************************
1734 */
1735 static void arcmsr_interrupt(struct AdapterControlBlock *acb)
1736 {
1737 	switch (acb->adapter_type) {
1738 	case ACB_ADAPTER_TYPE_A:
1739 		arcmsr_handle_hba_isr(acb);
1740 		break;
1741 	case ACB_ADAPTER_TYPE_B:
1742 		arcmsr_handle_hbb_isr(acb);
1743 		break;
1744 	case ACB_ADAPTER_TYPE_C:
1745 		arcmsr_handle_hbc_isr(acb);
1746 		break;
1747 	default:
1748 		kprintf("arcmsr%d: interrupt service,"
1749 		" unknow adapter type =%d\n", acb->pci_unit, acb->adapter_type);
1750 		break;
1751 	}
1752 	return;
1753 }
1754 /*
1755 **********************************************************************
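** arcmsr_intr_handler: common interrupt handler entry; takes qbuffer_lock and
** dispatches to the adapter-type specific service routine via arcmsr_interrupt().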
1756 **********************************************************************
1757 */
1758 static void arcmsr_intr_handler(void *arg)
1759 {
1760 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg;
1761 
1762 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1763 	arcmsr_interrupt(acb);
1764 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1765 }
1766 /*
1767 ******************************************************************************
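** arcmsr_polling_devmap: callout handler that asks the firmware for its current
** device map (GET_CONFIG); the reply is picked up by the message ISRs and handed
** to arcmsr_dr_handle(). It re-arms itself every 5 seconds until the adapter is
** being stopped.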
1768 ******************************************************************************
1769 */
1770 static void	arcmsr_polling_devmap(void* arg)
1771 {
1772 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
1773 	switch (acb->adapter_type) {
1774 	case ACB_ADAPTER_TYPE_A:
1775 		/* type A adapters use the HBA message unit register layout */
1776 		CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1777 		break;
1778 
1779 	case ACB_ADAPTER_TYPE_B:
1780 		CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
1781 		break;
1782 
1783 	case ACB_ADAPTER_TYPE_C:
1784 		CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1785 		CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1786 		break;
1786 	}
1787 
1788 	if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)
1789 	{
1790 		callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb);	/* polling per 5 seconds */
1791 	}
1792 }
1793 
1794 /*
1795 *******************************************************************************
1796 **
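** arcmsr_iop_parking: quiesce the adapter (used by the SAY_GOODBYE message) by
** stopping the background rebuild and flushing the controller cache, with all
** interrupts masked for the duration.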
1797 *******************************************************************************
1798 */
1799 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
1800 {
1801 	u_int32_t intmask_org;
1802 
1803 	if(acb!=NULL) {
1804 		/* stop adapter background rebuild */
1805 		if(acb->acb_flags & ACB_F_MSG_START_BGRB) {
1806 			intmask_org = arcmsr_disable_allintr(acb);
1807 			arcmsr_stop_adapter_bgrb(acb);
1808 			arcmsr_flush_adapter_cache(acb);
1809 			arcmsr_enable_allintr(acb, intmask_org);
1810 		}
1811 	}
1812 }
1813 /*
1814 ***********************************************************************
1815 **
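** arcmsr_iop_ioctlcmd: service the ARCMSR ioctl messages; data is staged through
** the driver's rqbuffer/wqbuffer rings and exchanged with the IOP under
** qbuffer_lock.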
1816 ************************************************************************
1817 */
1818 u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
1819 {
1820 	struct CMD_MESSAGE_FIELD * pcmdmessagefld;
1821 	u_int32_t retvalue=EINVAL;
1822 
1823 	pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg;
1824 	if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
1825 		return retvalue;
1826 	}
1827 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1828 	switch(ioctl_cmd) {
1829 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
1830 			u_int8_t * pQbuffer;
1831 			u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
1832 			u_int32_t allxfer_len=0;
1833 
1834 			while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex)
1835 				&& (allxfer_len<1031)) {
1836 				/*copy READ QBUFFER to srb*/
1837 				pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex];
1838 				memcpy(ptmpQbuffer, pQbuffer, 1);
1839 				acb->rqbuf_firstindex++;
1840 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1841 				/*if last index number set it to 0 */
1842 				ptmpQbuffer++;
1843 				allxfer_len++;
1844 			}
1845 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1846 				struct QBUFFER * prbuffer;
1847 				u_int8_t * iop_data;
1848 				u_int32_t iop_len;
1849 
1850 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1851 				prbuffer=arcmsr_get_iop_rqbuffer(acb);
1852 				iop_data=(u_int8_t *)prbuffer->data;
1853 				iop_len=(u_int32_t)prbuffer->data_len;
1854 				/*this iop data does no chance to make me overflow again here, so just do it*/
1855 				while(iop_len>0) {
1856 					pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
1857 					memcpy(pQbuffer, iop_data, 1);
1858 					acb->rqbuf_lastindex++;
1859 					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1860 					/*if last index number set it to 0 */
1861 					iop_data++;
1862 					iop_len--;
1863 				}
1864 				arcmsr_iop_message_read(acb);
1865 				/*signature, let IOP know data has been read */
1866 			}
1867 			pcmdmessagefld->cmdmessage.Length=allxfer_len;
1868 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1869 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1870 		}
1871 		break;
1872 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1873 			u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1874 			u_int8_t * pQbuffer;
1875 			u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
1876 
1877 			user_len=pcmdmessagefld->cmdmessage.Length;
1878 			/*check if data xfer length of this request will overflow my array qbuffer */
1879 			wqbuf_lastindex=acb->wqbuf_lastindex;
1880 			wqbuf_firstindex=acb->wqbuf_firstindex;
1881 			if(wqbuf_lastindex!=wqbuf_firstindex) {
1882 				arcmsr_post_ioctldata2iop(acb);
1883 				pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1884 			} else {
1885 				my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1886 				if(my_empty_len>=user_len) {
1887 					while(user_len>0) {
1888 						/*copy srb data to wqbuffer*/
1889 						pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex];
1890 						memcpy(pQbuffer, ptmpuserbuffer, 1);
1891 						acb->wqbuf_lastindex++;
1892 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1893 						/*if last index number set it to 0 */
1894 						ptmpuserbuffer++;
1895 						user_len--;
1896 					}
1897 					/*post first Qbuffer*/
1898 					if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
1899 						acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED;
1900 						arcmsr_post_ioctldata2iop(acb);
1901 					}
1902 					pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1903 				} else {
1904 					pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1905 				}
1906 			}
1907 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1908 		}
1909 		break;
1910 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
1911 			u_int8_t * pQbuffer=acb->rqbuffer;
1912 
1913 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1914 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1915 				arcmsr_iop_message_read(acb);
1916 				/*signature, let IOP know data has been read */
1917 			}
1918 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
1919 			acb->rqbuf_firstindex=0;
1920 			acb->rqbuf_lastindex=0;
1921 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1922 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1923 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1924 		}
1925 		break;
1926 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
1927 		{
1928 			u_int8_t * pQbuffer=acb->wqbuffer;
1929 
1930 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1931 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1932 				arcmsr_iop_message_read(acb);
1933 				/*signature, let IOP know data has been read */
1934 			}
1935 			acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
1936 			acb->wqbuf_firstindex=0;
1937 			acb->wqbuf_lastindex=0;
1938 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1939 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1940 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1941 		}
1942 		break;
1943 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1944 			u_int8_t * pQbuffer;
1945 
1946 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1947 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1948 				arcmsr_iop_message_read(acb);
1949 				/*signature, let IOP know data has been read */
1950 			}
1951 			acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
1952 					|ACB_F_MESSAGE_RQBUFFER_CLEARED
1953 					|ACB_F_MESSAGE_WQBUFFER_READ);
1954 			acb->rqbuf_firstindex=0;
1955 			acb->rqbuf_lastindex=0;
1956 			acb->wqbuf_firstindex=0;
1957 			acb->wqbuf_lastindex=0;
1958 			pQbuffer=acb->rqbuffer;
1959 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
1960 			pQbuffer=acb->wqbuffer;
1961 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
1962 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1963 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1964 		}
1965 		break;
1966 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
1967 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F;
1968 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1969 		}
1970 		break;
1971 	case ARCMSR_MESSAGE_SAY_HELLO: {
1972 			u_int8_t * hello_string="Hello! I am ARCMSR";
1973 			u_int8_t * puserbuffer=(u_int8_t *)pcmdmessagefld->messagedatabuffer;
1974 
1975 			/* memcpy() returns its destination pointer, never NULL, so its
1976 			** return value must not be used as an error indicator; just copy. */
1977 			memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string));
1978 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1981 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1982 		}
1983 		break;
1984 	case ARCMSR_MESSAGE_SAY_GOODBYE: {
1985 			arcmsr_iop_parking(acb);
1986 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1987 		}
1988 		break;
1989 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
1990 			arcmsr_flush_adapter_cache(acb);
1991 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1992 		}
1993 		break;
1994 	}
1995 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1996 	return retvalue;
1997 }
1998 /*
1999 **************************************************************************
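** arcmsr_free_srb / arcmsr_get_freesrb: free SRBs live in the circular array
** srbworkingQ; completed SRBs are returned at workingsrb_doneindex and new ones
** are taken from workingsrb_startindex. Both helpers take qbuffer_lock only when
** the caller does not already hold it.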
2000 **************************************************************************
2001 */
2002 static void arcmsr_free_srb(struct CommandControlBlock *srb)
2003 {
2004 	struct AdapterControlBlock	*acb;
2005 	int	mutex;
2006 
2007 	acb = srb->acb;
2008 	mutex = lockstatus(&acb->qbuffer_lock, curthread);
2009 	if( mutex == 0 )
2010 		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2011 	srb->srb_state=ARCMSR_SRB_DONE;
2012 	srb->srb_flags=0;
2013 	acb->srbworkingQ[acb->workingsrb_doneindex]=srb;
2014 	acb->workingsrb_doneindex++;
2015 	acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM;
2016 	if( mutex == 0 )
2017 		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2018 }
2019 /*
2020 **************************************************************************
2021 **************************************************************************
2022 */
2023 struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb)
2024 {
2025 	struct CommandControlBlock *srb=NULL;
2026 	u_int32_t workingsrb_startindex, workingsrb_doneindex;
2027 	int	mutex;
2028 
2029 	mutex = lockstatus(&acb->qbuffer_lock, curthread);
2030 	if( mutex == 0 )
2031 		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2032 	workingsrb_doneindex=acb->workingsrb_doneindex;
2033 	workingsrb_startindex=acb->workingsrb_startindex;
2034 	srb=acb->srbworkingQ[workingsrb_startindex];
2035 	workingsrb_startindex++;
2036 	workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM;
2037 	if(workingsrb_doneindex!=workingsrb_startindex) {
2038 		acb->workingsrb_startindex=workingsrb_startindex;
2039 	} else {
2040 		srb=NULL;
2041 	}
2042 	if( mutex == 0 )
2043 		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2044 	return(srb);
2045 }
2046 /*
2047 **************************************************************************
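** arcmsr_iop_message_xfer: handle the same Areca message protocol when it arrives
** embedded in a SCSI WRITE_BUFFER/READ_BUFFER CDB addressed to the driver's
** virtual processor device; the control code is taken from CDB bytes 5..8.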
2048 **************************************************************************
2049 */
2050 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb)
2051 {
2052 	struct CMD_MESSAGE_FIELD * pcmdmessagefld;
2053 	int retvalue = 0, transfer_len = 0;
2054 	char *buffer;
2055 	u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 |
2056 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 |
2057 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8  |
2058 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8];
2059 					/* 4 bytes: Areca io control code */
2060 	if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2061 		buffer = pccb->csio.data_ptr;
2062 		transfer_len = pccb->csio.dxfer_len;
2063 	} else {
2064 		retvalue = ARCMSR_MESSAGE_FAIL;
2065 		goto message_out;
2066 	}
2067 	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
2068 		retvalue = ARCMSR_MESSAGE_FAIL;
2069 		goto message_out;
2070 	}
2071 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
2072 	switch(controlcode) {
2073 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
2074 			u_int8_t *pQbuffer;
2075 			u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
2076 			int32_t allxfer_len = 0;
2077 
2078 			while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
2079 				&& (allxfer_len < 1031)) {
2080 				pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
2081 				memcpy(ptmpQbuffer, pQbuffer, 1);
2082 				acb->rqbuf_firstindex++;
2083 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
2084 				ptmpQbuffer++;
2085 				allxfer_len++;
2086 			}
2087 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2088 				struct QBUFFER  *prbuffer;
2089 				u_int8_t  *iop_data;
2090 				int32_t iop_len;
2091 
2092 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2093 				prbuffer=arcmsr_get_iop_rqbuffer(acb);
2094 				iop_data = (u_int8_t *)prbuffer->data;
2095 				iop_len =(u_int32_t)prbuffer->data_len;
2096 				while (iop_len > 0) {
2097 			        pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
2098 					memcpy(pQbuffer, iop_data, 1);
2099 					acb->rqbuf_lastindex++;
2100 					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2101 					iop_data++;
2102 					iop_len--;
2103 				}
2104 				arcmsr_iop_message_read(acb);
2105 			}
2106 			pcmdmessagefld->cmdmessage.Length = allxfer_len;
2107 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2108 			retvalue=ARCMSR_MESSAGE_SUCCESS;
2109 		}
2110 		break;
2111 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2112 			int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
2113 			u_int8_t *pQbuffer;
2114 			u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
2115 
2116 			user_len = pcmdmessagefld->cmdmessage.Length;
2117 			wqbuf_lastindex = acb->wqbuf_lastindex;
2118 			wqbuf_firstindex = acb->wqbuf_firstindex;
2119 			if (wqbuf_lastindex != wqbuf_firstindex) {
2120 				arcmsr_post_ioctldata2iop(acb);
2121 				/* has error report sensedata */
2122 			    if(&pccb->csio.sense_data) {
2123 				((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2124 				/* Valid,ErrorCode */
2125 				((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2126 				/* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2127 				((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2128 				/* AdditionalSenseLength */
2129 				((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2130 				/* AdditionalSenseCode */
2131 				}
2132 				retvalue = ARCMSR_MESSAGE_FAIL;
2133 			} else {
2134 				my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
2135 						&(ARCMSR_MAX_QBUFFER - 1);
2136 				if (my_empty_len >= user_len) {
2137 					while (user_len > 0) {
2138 						pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
2139 						memcpy(pQbuffer, ptmpuserbuffer, 1);
2140 						acb->wqbuf_lastindex++;
2141 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2142 						ptmpuserbuffer++;
2143 						user_len--;
2144 					}
2145 					if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2146 						acb->acb_flags &=
2147 						~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2148 						arcmsr_post_ioctldata2iop(acb);
2149 					}
2150 				} else {
2151 					/* report the error through request sense data; the address of the
2152 					** embedded sense_data structure is never NULL, so no check is needed. */
2153 					((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);	/* Valid, ErrorCode */
2154 					((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;		/* FileMark, EndOfMedia, IncorrectLength, Reserved, SenseKey */
2155 					((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;		/* AdditionalSenseLength */
2156 					((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;	/* AdditionalSenseCode */
2162 					retvalue = ARCMSR_MESSAGE_FAIL;
2163 				}
2164 			}
2165 		}
2166 		break;
2167 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2168 			u_int8_t *pQbuffer = acb->rqbuffer;
2169 
2170 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2171 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2172 				arcmsr_iop_message_read(acb);
2173 			}
2174 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2175 			acb->rqbuf_firstindex = 0;
2176 			acb->rqbuf_lastindex = 0;
2177 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2178 			pcmdmessagefld->cmdmessage.ReturnCode =
2179 			ARCMSR_MESSAGE_RETURNCODE_OK;
2180 		}
2181 		break;
2182 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
2183 			u_int8_t *pQbuffer = acb->wqbuffer;
2184 
2185 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2186 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2187 				arcmsr_iop_message_read(acb);
2188 			}
2189 			acb->acb_flags |=
2190 				(ACB_F_MESSAGE_WQBUFFER_CLEARED |
2191 					ACB_F_MESSAGE_WQBUFFER_READ);
2192 			acb->wqbuf_firstindex = 0;
2193 			acb->wqbuf_lastindex = 0;
2194 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2195 			pcmdmessagefld->cmdmessage.ReturnCode =
2196 				ARCMSR_MESSAGE_RETURNCODE_OK;
2197 		}
2198 		break;
2199 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
2200 			u_int8_t *pQbuffer;
2201 
2202 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2203 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2204 				arcmsr_iop_message_read(acb);
2205 			}
2206 			acb->acb_flags |=
2207 				(ACB_F_MESSAGE_WQBUFFER_CLEARED
2208 				| ACB_F_MESSAGE_RQBUFFER_CLEARED
2209 				| ACB_F_MESSAGE_WQBUFFER_READ);
2210 			acb->rqbuf_firstindex = 0;
2211 			acb->rqbuf_lastindex = 0;
2212 			acb->wqbuf_firstindex = 0;
2213 			acb->wqbuf_lastindex = 0;
2214 			pQbuffer = acb->rqbuffer;
2215 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2216 			pQbuffer = acb->wqbuffer;
2217 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2218 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2219 		}
2220 		break;
2221 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
2222 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
2223 		}
2224 		break;
2225 	case ARCMSR_MESSAGE_SAY_HELLO: {
2226 			int8_t * hello_string = "Hello! I am ARCMSR";
2227 
2228 			memcpy(pcmdmessagefld->messagedatabuffer, hello_string
2229 				, (int16_t)strlen(hello_string));
2230 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2231 		}
2232 		break;
2233 	case ARCMSR_MESSAGE_SAY_GOODBYE:
2234 		arcmsr_iop_parking(acb);
2235 		break;
2236 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
2237 		arcmsr_flush_adapter_cache(acb);
2238 		break;
2239 	default:
2240 		retvalue = ARCMSR_MESSAGE_FAIL;
2241 	}
2242 message_out:
2243 	return retvalue;
2244 }
2245 /*
2246 *********************************************************************
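** arcmsr_execute_srb: bus_dmamap_load() callback; validates the request (mapping
** error, segment count, bus reset in progress, departed RAID volume, outstanding
** command limit), then builds the hardware SRB from the DMA segments, posts it,
** and arms a per-SRB timeout when the CCB specifies one.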
2247 *********************************************************************
2248 */
2249 static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2250 {
2251 	struct CommandControlBlock *srb=(struct CommandControlBlock *)arg;
2252 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb;
2253 	union ccb * pccb;
2254 	int target, lun;
2255 
2256 	pccb=srb->pccb;
2257 	target=pccb->ccb_h.target_id;
2258 	lun=pccb->ccb_h.target_lun;
2259 #ifdef ARCMSR_DEBUG1
2260 	acb->pktRequestCount++;
2261 #endif
2262 	if(error != 0) {
2263 		if(error != EFBIG) {
2264 			kprintf("arcmsr%d: unexpected error %x"
2265 				" returned from 'bus_dmamap_load' \n"
2266 				, acb->pci_unit, error);
2267 		}
2268 		if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2269 			pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2270 		}
2271 		arcmsr_srb_complete(srb, 0);
2272 		return;
2273 	}
2274 	if(nseg > ARCMSR_MAX_SG_ENTRIES) {
2275 		pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2276 		arcmsr_srb_complete(srb, 0);
2277 		return;
2278 	}
2279 	if(acb->acb_flags & ACB_F_BUS_RESET) {
2280 		kprintf("arcmsr%d: bus reset and return busy \n", acb->pci_unit);
2281 		pccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2282 		arcmsr_srb_complete(srb, 0);
2283 		return;
2284 	}
2285 	if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
2286 		u_int8_t block_cmd, cmd;
2287 
2288 		cmd = pccb->csio.cdb_io.cdb_bytes[0];
2289 		block_cmd= cmd & 0x0f;
2290 		if(block_cmd==0x08 || block_cmd==0x0a) {
2291 			kprintf("arcmsr%d:block 'read/write' command "
2292 				"with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n"
2293 				, acb->pci_unit, cmd, target, lun);
2294 			pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
2295 			arcmsr_srb_complete(srb, 0);
2296 			return;
2297 		}
2298 	}
2299 	if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2300 		if(nseg != 0) {
2301 			bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
2302 		}
2303 		arcmsr_srb_complete(srb, 0);
2304 		return;
2305 	}
2306 	if(acb->srboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) {
2307 		xpt_freeze_simq(acb->psim, 1);
2308 		pccb->ccb_h.status = CAM_REQUEUE_REQ;
2309 		acb->acb_flags |= ACB_F_CAM_DEV_QFRZN;
2310 		arcmsr_srb_complete(srb, 0);
2311 		return;
2312 	}
2313 	pccb->ccb_h.status |= CAM_SIM_QUEUED;
2314 	arcmsr_build_srb(srb, dm_segs, nseg);
2315 	arcmsr_post_srb(acb, srb);
2316 	if (pccb->ccb_h.timeout != CAM_TIME_INFINITY)
2317 	{
2318 		arcmsr_callout_init(&srb->ccb_callout);
2319 		callout_reset(&srb->ccb_callout, (pccb->ccb_h.timeout * hz ) / 1000, arcmsr_srb_timeout, srb);
2320 		srb->srb_flags |= SRB_FLAG_TIMER_START;
2321 	}
2322 	return;
2323 }
2324 /*
2325 *****************************************************************************************
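** arcmsr_seek_cmd2abort: look for the CCB to be aborted among the outstanding
** SRBs; if found, the SRB is marked aborted and completed via the polling path,
** with all outbound interrupts masked for the duration of the search.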
2326 *****************************************************************************************
2327 */
2328 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb)
2329 {
2330 	struct CommandControlBlock *srb;
2331 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr;
2332 	u_int32_t intmask_org;
2333 	int i=0;
2334 
2335 	acb->num_aborts++;
2336 	/*
2337 	***************************************************************************
2338 	** The upper layer that issues the abort acquires this lock just prior to calling us.
2339 	** First determine if we currently own this command.
2340 	** Start by searching the device queue. If not found
2341 	** at all, and the system wanted us to just abort the
2342 	** command return success.
2343 	***************************************************************************
2344 	*/
2345 	if(acb->srboutstandingcount!=0) {
2346 		/* disable all outbound interrupt */
2347 		intmask_org=arcmsr_disable_allintr(acb);
2348 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
2349 			srb=acb->psrb_pool[i];
2350 			if(srb->srb_state==ARCMSR_SRB_START) {
2351 				if(srb->pccb==abortccb) {
2352 					srb->srb_state=ARCMSR_SRB_ABORTED;
2353 					kprintf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'"
2354 						"outstanding command \n"
2355 						, acb->pci_unit, abortccb->ccb_h.target_id
2356 						, abortccb->ccb_h.target_lun, srb);
2357 					arcmsr_polling_srbdone(acb, srb);
2358 					/* enable outbound Post Queue, outbound doorbell Interrupt */
2359 					arcmsr_enable_allintr(acb, intmask_org);
2360 					return (TRUE);
2361 				}
2362 			}
2363 		}
2364 		/* enable outbound Post Queue, outbound doorbell Interrupt */
2365 		arcmsr_enable_allintr(acb, intmask_org);
2366 	}
2367 	return(FALSE);
2368 }
2369 /*
2370 ****************************************************************************
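** arcmsr_bus_reset: wait (up to 400 x 25 ms) for outstanding SRBs to drain while
** servicing the interrupt path by hand, then reset the IOP.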
2371 ****************************************************************************
2372 */
2373 static void arcmsr_bus_reset(struct AdapterControlBlock *acb)
2374 {
2375 	int retry=0;
2376 
2377 	acb->num_resets++;
2378 	acb->acb_flags |=ACB_F_BUS_RESET;
2379 	while(acb->srboutstandingcount!=0 && retry < 400) {
2380 		arcmsr_interrupt(acb);
2381 		UDELAY(25000);
2382 		retry++;
2383 	}
2384 	arcmsr_iop_reset(acb);
2385 	acb->acb_flags &= ~ACB_F_BUS_RESET;
2386 	return;
2387 }
2388 /*
2389 **************************************************************************
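** arcmsr_handle_virtual_command: emulate a simple processor-type device on the
** virtual target used for IOP message transfer; only INQUIRY and the
** WRITE_BUFFER/READ_BUFFER pass-through commands get special handling, everything
** else is completed immediately.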
2390 **************************************************************************
2391 */
2392 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
2393 		union ccb * pccb)
2394 {
2395 	pccb->ccb_h.status |= CAM_REQ_CMP;
2396 	switch (pccb->csio.cdb_io.cdb_bytes[0]) {
2397 	case INQUIRY: {
2398 		unsigned char inqdata[36];
2399 		char *buffer=pccb->csio.data_ptr;
2400 
2401 		if (pccb->ccb_h.target_lun) {
2402 			pccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2403 			xpt_done(pccb);
2404 			return;
2405 		}
2406 		inqdata[0] = T_PROCESSOR;	/* Periph Qualifier & Periph Dev Type */
2407 		inqdata[1] = 0;				/* rem media bit & Dev Type Modifier */
2408 		inqdata[2] = 0;				/* ISO, ECMA, & ANSI versions */
2409 		inqdata[3] = 0;
2410 		inqdata[4] = 31;			/* length of additional data */
2411 		inqdata[5] = 0;
2412 		inqdata[6] = 0;
2413 		inqdata[7] = 0;
2414 		strncpy(&inqdata[8], "Areca   ", 8);	/* Vendor Identification */
2415 		strncpy(&inqdata[16], "RAID controller ", 16);	/* Product Identification */
2416 		strncpy(&inqdata[32], "R001", 4); /* Product Revision */
2417 		memcpy(buffer, inqdata, sizeof(inqdata));
2418 		xpt_done(pccb);
2419 	}
2420 	break;
2421 	case WRITE_BUFFER:
2422 	case READ_BUFFER: {
2423 		if (arcmsr_iop_message_xfer(acb, pccb)) {
2424 			pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2425 			pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
2426 		}
2427 		xpt_done(pccb);
2428 	}
2429 	break;
2430 	default:
2431 		xpt_done(pccb);
2432 	}
2433 }
2434 /*
2435 *********************************************************************
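** arcmsr_action: CAM SIM action entry point; XPT_SCSI_IO requests are mapped for
** DMA and handed to arcmsr_execute_srb(), other function codes are answered inline.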
2436 *********************************************************************
2437 */
2438 static void arcmsr_action(struct cam_sim * psim, union ccb * pccb)
2439 {
2440 	struct AdapterControlBlock *  acb;
2441 
2442 	acb=(struct AdapterControlBlock *) cam_sim_softc(psim);
2443 	if(acb==NULL) {
2444 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2445 		xpt_done(pccb);
2446 		return;
2447 	}
2448 	switch (pccb->ccb_h.func_code) {
2449 	case XPT_SCSI_IO: {
2450 			struct CommandControlBlock *srb;
2451 			int target=pccb->ccb_h.target_id;
2452 
2453 			if(target == 16) {
2454 				/* virtual device for iop message transfer */
2455 				arcmsr_handle_virtual_command(acb, pccb);
2456 				return;
2457 			}
2458 			if((srb=arcmsr_get_freesrb(acb)) == NULL) {
2459 				pccb->ccb_h.status |= CAM_RESRC_UNAVAIL;
2460 				xpt_done(pccb);
2461 				return;
2462 			}
2463 			pccb->ccb_h.arcmsr_ccbsrb_ptr=srb;
2464 			pccb->ccb_h.arcmsr_ccbacb_ptr=acb;
2465 			srb->pccb=pccb;
2466 			if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2467 				if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) {
2468 					/* Single buffer */
2469 					if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) {
2470 						/* Buffer is virtual */
2471 						u_int32_t error;
2472 
2473 						crit_enter();
2474 						error =	bus_dmamap_load(acb->dm_segs_dmat
2475 							, srb->dm_segs_dmamap
2476 							, pccb->csio.data_ptr
2477 							, pccb->csio.dxfer_len
2478 							, arcmsr_execute_srb, srb, /*flags*/0);
2479 						if(error == EINPROGRESS) {
2480 							xpt_freeze_simq(acb->psim, 1);
2481 							pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2482 						}
2483 						crit_exit();
2484 					}
2485 					else {		/* Buffer is physical */
2486 						struct bus_dma_segment seg;
2487 
2488 						seg.ds_addr = (bus_addr_t)pccb->csio.data_ptr;
2489 						seg.ds_len = pccb->csio.dxfer_len;
2490 						arcmsr_execute_srb(srb, &seg, 1, 0);
2491 					}
2492 				} else {
2493 					/* Scatter/gather list */
2494 					struct bus_dma_segment *segs;
2495 
2496 					if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
2497 					|| (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2498 						pccb->ccb_h.status |= CAM_PROVIDE_FAIL;
2499 						xpt_done(pccb);
2500 						kfree(srb, M_DEVBUF);
2501 						return;
2502 					}
2503 					segs=(struct bus_dma_segment *)pccb->csio.data_ptr;
2504 					arcmsr_execute_srb(srb, segs, pccb->csio.sglist_cnt, 0);
2505 				}
2506 			} else {
2507 				arcmsr_execute_srb(srb, NULL, 0, 0);
2508 			}
2509 			break;
2510 		}
2511 	case XPT_TARGET_IO: {
2512 			/* target mode does not yet support vendor specific commands. */
2513 			pccb->ccb_h.status |= CAM_REQ_CMP;
2514 			xpt_done(pccb);
2515 			break;
2516 		}
2517 	case XPT_PATH_INQ: {
2518 			struct ccb_pathinq *cpi= &pccb->cpi;
2519 
2520 			cpi->version_num=1;
2521 			cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE;
2522 			cpi->target_sprt=0;
2523 			cpi->hba_misc=0;
2524 			cpi->hba_eng_cnt=0;
2525 			cpi->max_target=ARCMSR_MAX_TARGETID;        /* 0-16 */
2526 			cpi->max_lun=ARCMSR_MAX_TARGETLUN;	    /* 0-7 */
2527 			cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */
2528 			cpi->bus_id=cam_sim_bus(psim);
2529 			strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2530 			strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN);
2531 			strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
2532 			cpi->unit_number=cam_sim_unit(psim);
2533 		#ifdef	CAM_NEW_TRAN_CODE
2534 			cpi->transport = XPORT_SPI;
2535 			cpi->transport_version = 2;
2536 			cpi->protocol = PROTO_SCSI;
2537 			cpi->protocol_version = SCSI_REV_2;
2538 		#endif
2539 			cpi->ccb_h.status |= CAM_REQ_CMP;
2540 			xpt_done(pccb);
2541 			break;
2542 		}
2543 	case XPT_ABORT: {
2544 			union ccb *pabort_ccb;
2545 
2546 			pabort_ccb=pccb->cab.abort_ccb;
2547 			switch (pabort_ccb->ccb_h.func_code) {
2548 			case XPT_ACCEPT_TARGET_IO:
2549 			case XPT_IMMED_NOTIFY:
2550 			case XPT_CONT_TARGET_IO:
2551 				if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) {
2552 					pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED;
2553 					xpt_done(pabort_ccb);
2554 					pccb->ccb_h.status |= CAM_REQ_CMP;
2555 				} else {
2556 					xpt_print_path(pabort_ccb->ccb_h.path);
2557 					kprintf("Not found\n");
2558 					pccb->ccb_h.status |= CAM_PATH_INVALID;
2559 				}
2560 				break;
2561 			case XPT_SCSI_IO:
2562 				pccb->ccb_h.status |= CAM_UA_ABORT;
2563 				break;
2564 			default:
2565 				pccb->ccb_h.status |= CAM_REQ_INVALID;
2566 				break;
2567 			}
2568 			xpt_done(pccb);
2569 			break;
2570 		}
2571 	case XPT_RESET_BUS:
2572 	case XPT_RESET_DEV: {
2573 			u_int32_t     i;
2574 
2575 			arcmsr_bus_reset(acb);
2576 			for (i=0; i < 500; i++) {
2577 				DELAY(1000);
2578 			}
2579 			pccb->ccb_h.status |= CAM_REQ_CMP;
2580 			xpt_done(pccb);
2581 			break;
2582 		}
2583 	case XPT_TERM_IO: {
2584 			pccb->ccb_h.status |= CAM_REQ_INVALID;
2585 			xpt_done(pccb);
2586 			break;
2587 		}
2588 	case XPT_GET_TRAN_SETTINGS: {
2589 			struct ccb_trans_settings *cts;
2590 
2591 			if(pccb->ccb_h.target_id == 16) {
2592 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2593 				xpt_done(pccb);
2594 				break;
2595 			}
2596 			cts= &pccb->cts;
2597 		#ifdef	CAM_NEW_TRAN_CODE
2598 			{
2599 				struct ccb_trans_settings_scsi *scsi;
2600 				struct ccb_trans_settings_spi *spi;
2601 
2602 				scsi = &cts->proto_specific.scsi;
2603 				spi = &cts->xport_specific.spi;
2604 				cts->protocol = PROTO_SCSI;
2605 				cts->protocol_version = SCSI_REV_2;
2606 				cts->transport = XPORT_SPI;
2607 				cts->transport_version = 2;
2608 				spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2609 				spi->sync_period=3;
2610 				spi->sync_offset=32;
2611 				spi->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
2612 				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2613 				spi->valid = CTS_SPI_VALID_DISC
2614 					| CTS_SPI_VALID_SYNC_RATE
2615 					| CTS_SPI_VALID_SYNC_OFFSET
2616 					| CTS_SPI_VALID_BUS_WIDTH;
2617 				scsi->valid = CTS_SCSI_VALID_TQ;
2618 			}
2619 		#else
2620 			{
2621 				cts->flags=(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
2622 				cts->sync_period=3;
2623 				cts->sync_offset=32;
2624 				cts->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
2625 				cts->valid=CCB_TRANS_SYNC_RATE_VALID |
2626 				CCB_TRANS_SYNC_OFFSET_VALID |
2627 				CCB_TRANS_BUS_WIDTH_VALID |
2628 				CCB_TRANS_DISC_VALID |
2629 				CCB_TRANS_TQ_VALID;
2630 			}
2631 		#endif
2632 			pccb->ccb_h.status |= CAM_REQ_CMP;
2633 			xpt_done(pccb);
2634 			break;
2635 		}
2636 	case XPT_SET_TRAN_SETTINGS: {
2637 			pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2638 			xpt_done(pccb);
2639 			break;
2640 		}
2641 	case XPT_CALC_GEOMETRY: {
2642 			struct ccb_calc_geometry *ccg;
2643 			u_int32_t size_mb;
2644 			u_int32_t secs_per_cylinder;
2645 
2646 			if(pccb->ccb_h.target_id == 16) {
2647 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2648 				xpt_done(pccb);
2649 				break;
2650 			}
2651 			ccg= &pccb->ccg;
2652 			if (ccg->block_size == 0) {
2653 				pccb->ccb_h.status = CAM_REQ_INVALID;
2654 				xpt_done(pccb);
2655 				break;
2656 			}
2657 			if(((1024L * 1024L)/ccg->block_size) < 0) {
2658 				pccb->ccb_h.status = CAM_REQ_INVALID;
2659 				xpt_done(pccb);
2660 				break;
2661 			}
2662 			size_mb=ccg->volume_size/((1024L * 1024L)/ccg->block_size);
2663 			if(size_mb > 1024 ) {
2664 				ccg->heads=255;
2665 				ccg->secs_per_track=63;
2666 			} else {
2667 				ccg->heads=64;
2668 				ccg->secs_per_track=32;
2669 			}
2670 			secs_per_cylinder=ccg->heads * ccg->secs_per_track;
2671 			ccg->cylinders=ccg->volume_size / secs_per_cylinder;
2672 			pccb->ccb_h.status |= CAM_REQ_CMP;
2673 			xpt_done(pccb);
2674 			break;
2675 		}
2676 	default:
2677 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2678 		xpt_done(pccb);
2679 		break;
2680 	}
2681 	return;
2682 }
2683 /*
2684 **********************************************************************
2685 **********************************************************************
2686 */
2687 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2688 {
2689 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2690 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2691 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
2692 		kprintf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
2693 	}
2694 	return;
2695 }
2696 /*
2697 **********************************************************************
2698 **********************************************************************
2699 */
2700 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
2701 {
2702 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2703 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,  ARCMSR_MESSAGE_START_BGRB);
2704 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2705 		kprintf( "arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
2706 	}
2707 	return;
2708 }
2709 /*
2710 **********************************************************************
2711 **********************************************************************
2712 */
2713 static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb)
2714 {
2715 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2716 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2717 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2718 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
2719 		kprintf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
2720 	}
2721 	return;
2722 }
2723 /*
2724 **********************************************************************
2725 **********************************************************************
2726 */
2727 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2728 {
2729 	switch (acb->adapter_type) {
2730 	case ACB_ADAPTER_TYPE_A:
2731 		arcmsr_start_hba_bgrb(acb);
2732 		break;
2733 	case ACB_ADAPTER_TYPE_B:
2734 		arcmsr_start_hbb_bgrb(acb);
2735 		break;
2736 	case ACB_ADAPTER_TYPE_C:
2737 		arcmsr_start_hbc_bgrb(acb);
2738 		break;
2739 	}
2740 	return;
2741 }
2742 /*
2743 **********************************************************************
2744 **
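** arcmsr_polling_*_srbdone: drain completed SRBs by polling the reply queue
** instead of waiting for an interrupt; poll_srb, when non-NULL, is the SRB whose
** completion is being waited for.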
2745 **********************************************************************
2746 */
2747 static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2748 {
2749 	struct CommandControlBlock *srb;
2750 	u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0;
2751 	u_int16_t	error;
2752 
2753 polling_ccb_retry:
2754 	poll_count++;
2755 	outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
2756 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);	/*clear interrupt*/
2757 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2758 	while(1) {
2759 		if((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
2760 			0, outbound_queueport))==0xFFFFFFFF) {
2761 			if(poll_srb_done) {
2762 				break;/*chip FIFO no ccb for completion already*/
2763 			} else {
2764 				UDELAY(25000);
2765 				if ((poll_count > 100) && (poll_srb != NULL)) {
2766 					break;
2767 				}
2768 				goto polling_ccb_retry;
2769 			}
2770 		}
2771 		/* check if command done with no error*/
2772 		srb=(struct CommandControlBlock *)
2773 			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
2774         error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
2775 		poll_srb_done = (srb==poll_srb) ? 1:0;
2776 		if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2777 			if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2778 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2779 					"poll command abort successfully \n"
2780 					, acb->pci_unit
2781 					, srb->pccb->ccb_h.target_id
2782 					, srb->pccb->ccb_h.target_lun, srb);
2783 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2784 				arcmsr_srb_complete(srb, 1);
2785 				continue;
2786 			}
2787 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2788 				"srboutstandingcount=%d \n"
2789 				, acb->pci_unit
2790 				, srb, acb->srboutstandingcount);
2791 			continue;
2792 		}
2793 		arcmsr_report_srb_state(acb, srb, error);
2794 	}	/*drain reply FIFO*/
2795 	return;
2796 }
2797 /*
2798 **********************************************************************
2799 **
2800 **********************************************************************
2801 */
2802 static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2803 {
2804 	struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
2805 	struct CommandControlBlock *srb;
2806 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2807 	int index;
2808 	u_int16_t	error;
2809 
2810 polling_ccb_retry:
2811 	poll_count++;
2812 	CHIP_REG_WRITE32(HBB_DOORBELL,
2813 	0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
2814 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2815 	while(1) {
2816 		index=phbbmu->doneq_index;
2817 		if((flag_srb=phbbmu->done_qbuffer[index]) == 0) {
2818 			if(poll_srb_done) {
2819 				break;/*chip FIFO no ccb for completion already*/
2820 			} else {
2821 				UDELAY(25000);
2822 			    if ((poll_count > 100) && (poll_srb != NULL)) {
2823 					break;
2824 				}
2825 				goto polling_ccb_retry;
2826 			}
2827 		}
2828 		phbbmu->done_qbuffer[index]=0;
2829 		index++;
2830 		index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
2831 		phbbmu->doneq_index=index;
2832 		/* check if command done with no error*/
2833 		srb=(struct CommandControlBlock *)
2834 			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
2835         error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
2836 		poll_srb_done = (srb==poll_srb) ? 1:0;
2837 		if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2838 			if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2839 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2840 					"poll command abort successfully \n"
2841 					, acb->pci_unit
2842 					, srb->pccb->ccb_h.target_id
2843 					, srb->pccb->ccb_h.target_lun, srb);
2844 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2845 				arcmsr_srb_complete(srb, 1);
2846 				continue;
2847 			}
2848 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2849 				"srboutstandingcount=%d \n"
2850 				, acb->pci_unit
2851 				, srb, acb->srboutstandingcount);
2852 			continue;
2853 		}
2854 		arcmsr_report_srb_state(acb, srb, error);
2855 	}	/*drain reply FIFO*/
2856 	return;
2857 }
2858 /*
2859 **********************************************************************
2860 **
2861 **********************************************************************
2862 */
2863 static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2864 {
2865 	struct CommandControlBlock *srb;
2866 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2867 	u_int16_t	error;
2868 
2869 polling_ccb_retry:
2870 	poll_count++;
2871 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2872 	while(1) {
2873 		if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
2874 			if(poll_srb_done) {
2875 				break;/*chip FIFO no ccb for completion already*/
2876 			} else {
2877 				UDELAY(25000);
2878 			    if ((poll_count > 100) && (poll_srb != NULL)) {
2879 					break;
2880 				}
2881 			    if (acb->srboutstandingcount == 0) {
2882 				    break;
2883 			    }
2884 				goto polling_ccb_retry;
2885 			}
2886 		}
2887 		flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
2888 		/* check if command done with no error*/
2889 		srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
2890         error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
2891 		if (poll_srb != NULL)
2892 			poll_srb_done = (srb==poll_srb) ? 1:0;
2893 		if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2894 			if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2895 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'poll command abort successfully \n"
2896 						, acb->pci_unit, srb->pccb->ccb_h.target_id, srb->pccb->ccb_h.target_lun, srb);
2897 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2898 				arcmsr_srb_complete(srb, 1);
2899 				continue;
2900 			}
2901 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n"
2902 					, acb->pci_unit, srb, acb->srboutstandingcount);
2903 			continue;
2904 		}
2905 		arcmsr_report_srb_state(acb, srb, error);
2906 	}	/*drain reply FIFO*/
2907 	return;
2908 }
2909 /*
2910 **********************************************************************
2911 **********************************************************************
2912 */
2913 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2914 {
2915 	switch (acb->adapter_type) {
2916 	case ACB_ADAPTER_TYPE_A: {
2917 			arcmsr_polling_hba_srbdone(acb, poll_srb);
2918 		}
2919 		break;
2920 	case ACB_ADAPTER_TYPE_B: {
2921 			arcmsr_polling_hbb_srbdone(acb, poll_srb);
2922 		}
2923 		break;
2924 	case ACB_ADAPTER_TYPE_C: {
2925 			arcmsr_polling_hbc_srbdone(acb, poll_srb);
2926 		}
2927 		break;
2928 	}
2929 }
2930 /*
2931 **********************************************************************
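** arcmsr_get_*_config: issue the GET_CONFIG message and copy the firmware model,
** version and device map strings, plus the request length, queue depth, SDRAM
** size and channel count, out of the message unit's msgcode_rwbuffer.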
2932 **********************************************************************
2933 */
2934 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
2935 {
2936 	char *acb_firm_model=acb->firm_model;
2937 	char *acb_firm_version=acb->firm_version;
2938 	char *acb_device_map = acb->device_map;
2939 	size_t iop_firm_model=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
2940 	size_t iop_firm_version=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
2941 	size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2942 	int i;
2943 
2944 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2945 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
2946 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2947 	}
2948 	i=0;
2949 	while(i<8) {
2950 		*acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
2951 		/* 8 bytes firm_model, 15, 60-67*/
2952 		acb_firm_model++;
2953 		i++;
2954 	}
2955 	i=0;
2956 	while(i<16) {
2957 		*acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
2958 		/* 16 bytes firm_version, 17, 68-83*/
2959 		acb_firm_version++;
2960 		i++;
2961 	}
2962 	i=0;
2963 	while(i<16) {
2964 		*acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
2965 		acb_device_map++;
2966 		i++;
2967 	}
2968 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2969 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2970 	acb->firm_request_len=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
2971 	acb->firm_numbers_queue=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2972 	acb->firm_sdram_size=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
2973 	acb->firm_ide_channels=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
2974 	acb->firm_cfg_version=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
2975 	return;
2976 }
2977 /*
2978 **********************************************************************
2979 **********************************************************************
2980 */
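/*
** Type B (HBB) variant of the GET_CONFIG sequence: the request is posted through
** the drv2iop doorbell and the reply is read from the HBB_RWBUFFER region on the
** second memory BAR (btag[1]/bhandle[1]).
*/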
2981 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
2982 {
2983 	char *acb_firm_model=acb->firm_model;
2984 	char *acb_firm_version=acb->firm_version;
2985 	char *acb_device_map = acb->device_map;
2986 	size_t iop_firm_model=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
2987 	size_t iop_firm_version=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
2988 	size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2989 	int i;
2990 
2991 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
2992 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2993 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2994 	}
2995 	i=0;
2996 	while(i<8) {
2997 		*acb_firm_model=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i);
2998 		/* 8 bytes firm_model, 15, 60-67*/
2999 		acb_firm_model++;
3000 		i++;
3001 	}
3002 	i=0;
3003 	while(i<16) {
3004 		*acb_firm_version=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i);
3005 		/* 16 bytes firm_version, 17, 68-83*/
3006 		acb_firm_version++;
3007 		i++;
3008 	}
3009 	i=0;
3010 	while(i<16) {
3011 		*acb_device_map=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i);
3012 		acb_device_map++;
3013 		i++;
3014 	}
3015 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
3016 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
3017 	acb->firm_request_len=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
3018 	acb->firm_numbers_queue=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
3019 	acb->firm_sdram_size=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
3020 	acb->firm_ide_channels=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
3021 	acb->firm_cfg_version=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
3022 	return;
3023 }
3024 /*
3025 **********************************************************************
3026 **********************************************************************
3027 */
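/*
** Type C (HBC, ARC1880) variant of the GET_CONFIG sequence: besides writing
** inbound_msgaddr0, the inbound doorbell must be rung with
** ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE before waiting for the reply.
*/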
3028 static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb)
3029 {
3030 	char *acb_firm_model=acb->firm_model;
3031 	char *acb_firm_version=acb->firm_version;
3032 	char *acb_device_map = acb->device_map;
3033 	size_t iop_firm_model=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);   /*firm_model,15,60-67*/
3034 	size_t iop_firm_version=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
3035 	size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
3036 	int i;
3037 
3038 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
3039 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3040 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3041 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
3042 	}
3043 	i=0;
3044 	while(i<8) {
3045 		*acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
3046 		/* 8 bytes firm_model, 15, 60-67*/
3047 		acb_firm_model++;
3048 		i++;
3049 	}
3050 	i=0;
3051 	while(i<16) {
3052 		*acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
3053 		/* 16 bytes firm_version, 17, 68-83*/
3054 		acb_firm_version++;
3055 		i++;
3056 	}
3057 	i=0;
3058 	while(i<16) {
3059 		*acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
3060 		acb_device_map++;
3061 		i++;
3062 	}
3063 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
3064 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
3065 	acb->firm_request_len	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]);	/*firm_request_len,   1, 04-07*/
3066 	acb->firm_numbers_queue	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]);	/*firm_numbers_queue, 2, 08-11*/
3067 	acb->firm_sdram_size	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]);	/*firm_sdram_size,    3, 12-15*/
3068 	acb->firm_ide_channels	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]);	/*firm_ide_channels,  4, 16-19*/
3069 	acb->firm_cfg_version	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
3070 	return;
3071 }
3072 /*
3073 **********************************************************************
3074 **********************************************************************
3075 */
3076 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
3077 {
3078 	switch (acb->adapter_type) {
3079 	case ACB_ADAPTER_TYPE_A: {
3080 			arcmsr_get_hba_config(acb);
3081 		}
3082 		break;
3083 	case ACB_ADAPTER_TYPE_B: {
3084 			arcmsr_get_hbb_config(acb);
3085 		}
3086 		break;
3087 	case ACB_ADAPTER_TYPE_C: {
3088 			arcmsr_get_hbc_config(acb);
3089 		}
3090 		break;
3091 	}
3092 	return;
3093 }
3094 /*
3095 **********************************************************************
3096 **********************************************************************
3097 */
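/*
** Poll the per-adapter-type "firmware ok" status bit every 15 ms, giving up after
** roughly 30 seconds (2000 iterations).  Type B additionally acknowledges the IOP
** with ARCMSR_DRV2IOP_END_OF_INTERRUPT once the firmware reports ready.
*/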
3098 static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb)
3099 {
3100 	int	timeout=0;
3101 
3102 	switch (acb->adapter_type) {
3103 	case ACB_ADAPTER_TYPE_A: {
3104 			while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0)
3105 			{
3106 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3107 				{
3108 					kprintf("arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3109 					return;
3110 				}
3111 				UDELAY(15000); /* wait 15 milli-seconds */
3112 			}
3113 		}
3114 		break;
3115 	case ACB_ADAPTER_TYPE_B: {
3116 			while ((CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0)
3117 			{
3118 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3119 				{
3120 					kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3121 					return;
3122 				}
3123 				UDELAY(15000); /* wait 15 milli-seconds */
3124 			}
3125 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3126 		}
3127 		break;
3128 	case ACB_ADAPTER_TYPE_C: {
3129 			while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0)
3130 			{
3131 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3132 				{
3133 					kprintf("arcmsr%d: timed out waiting for firmware ready\n", acb->pci_unit);
3134 					return;
3135 				}
3136 				UDELAY(15000); /* wait 15 milli-seconds */
3137 			}
3138 		}
3139 		break;
3140 	}
3141 	return;
3142 }
3143 /*
3144 **********************************************************************
3145 **********************************************************************
3146 */
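/*
** Drain any stale doorbell state left over from before the driver attached:
** acknowledge pending doorbell interrupts and tell the IOP its outbound data has
** been read, so the Qbuffer message path starts out empty.
*/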
3147 static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb)
3148 {
3149 	u_int32_t outbound_doorbell;
3150 
3151 	switch (acb->adapter_type) {
3152 	case ACB_ADAPTER_TYPE_A: {
3153 			/* empty the doorbell Qbuffer if the doorbell has been rung */
3154 			outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
3155 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell);	/*clear doorbell interrupt */
3156 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
3157 
3158 		}
3159 		break;
3160 	case ACB_ADAPTER_TYPE_B: {
3161 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/
3162 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
3163 			/* let IOP know data has been read */
3164 		}
3165 		break;
3166 	case ACB_ADAPTER_TYPE_C: {
3167 			/* empty the doorbell Qbuffer if the doorbell has been rung */
3168 			outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
3169 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell);	/*clear doorbell interrupt */
3170 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
3171 
3172 		}
3173 		break;
3174 	}
3175 	return;
3176 }
3177 /*
3178 ************************************************************************
3179 ************************************************************************
3180 */
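/*
** Report the SRB pool location to the IOP.  For type A/C this only matters when
** the pool sits above 4GB (the high 32 bits of the physical address are passed
** via SET_CONFIG); for type B the post/done command queue window inside the
** HBB_MessageUnit is programmed and the IOP is switched to driver mode.
*/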
3181 static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3182 {
3183 	unsigned long srb_phyaddr;
3184 	u_int32_t srb_phyaddr_hi32;
3185 
3186 	/*
3187 	********************************************************************
3188 	** here we need to tell iop 331 our freesrb.HighPart
3189 	** if freesrb.HighPart is not zero
3190 	********************************************************************
3191 	*/
3192 	srb_phyaddr= (unsigned long) acb->srb_phyaddr.phyaddr;
3193 //	srb_phyaddr_hi32=(u_int32_t) ((srb_phyaddr>>16)>>16);
3194 	srb_phyaddr_hi32=acb->srb_phyaddr.B.phyadd_high;
3195 	switch (acb->adapter_type) {
3196 	case ACB_ADAPTER_TYPE_A: {
3197 			if(srb_phyaddr_hi32!=0) {
3198 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3199 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3200 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3201 				if(!arcmsr_hba_wait_msgint_ready(acb)) {
3202 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3203 					return FALSE;
3204 				}
3205 			}
3206 		}
3207 		break;
3208 		/*
3209 		***********************************************************************
3210 		**    if adapter type B, set window of "post command Q"
3211 		***********************************************************************
3212 		*/
3213 	case ACB_ADAPTER_TYPE_B: {
3214 			u_int32_t post_queue_phyaddr;
3215 			struct HBB_MessageUnit *phbbmu;
3216 
3217 			phbbmu=(struct HBB_MessageUnit *)acb->pmu;
3218 			phbbmu->postq_index=0;
3219 			phbbmu->doneq_index=0;
3220 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW);
3221 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3222 				kprintf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit);
3223 				return FALSE;
3224 			}
3225 			post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE
3226 			+ offsetof(struct HBB_MessageUnit, post_qbuffer);
3227 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */
3228 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normally zero */
3229 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* post command Q base physical address */
3230 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* done command Q base: postQ base + (256+8)*4 bytes */
3231 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* queue size in bytes: (256+8)*4 */
3232 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG);
3233 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3234 				kprintf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit);
3235 				return FALSE;
3236 			}
3237 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE);
3238 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3239 				kprintf("arcmsr%d: 'start driver mode' timeout \n", acb->pci_unit);
3240 				return FALSE;
3241 			}
3242 		}
3243 		break;
3244 	case ACB_ADAPTER_TYPE_C: {
3245 			if(srb_phyaddr_hi32!=0) {
3246 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3247 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3248 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3249 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3250 				if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3251 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3252 					return FALSE;
3253 				}
3254 			}
3255 		}
3256 		break;
3257 	}
3258 	return TRUE;
3259 }
3260 /*
3261 ************************************************************************
3262 ************************************************************************
3263 */
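/*
** Only type B controllers need the explicit end-of-interrupt (EOI) message mode;
** type A and type C are no-ops here.
*/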
3264 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
3265 {
3266 	switch (acb->adapter_type)
3267 	{
3268 	case ACB_ADAPTER_TYPE_A:
3269 	case ACB_ADAPTER_TYPE_C:
3270 		break;
3271 	case ACB_ADAPTER_TYPE_B: {
3272 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
3273 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3274 				kprintf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit);
3275 
3276 				return;
3277 			}
3278 		}
3279 		break;
3280 	}
3281 	return;
3282 }
3283 /*
3284 **********************************************************************
3285 **********************************************************************
3286 */
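/*
** Bring the IOP online: with outbound interrupts masked, wait for the firmware,
** confirm the SRB pool address, fetch the firmware configuration, start the
** background rebuild, clear the doorbell queue, enable EOI mode (type B) and
** finally re-enable outbound interrupts.
*/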
3287 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
3288 {
3289 	u_int32_t intmask_org;
3290 
3291 	/* disable all outbound interrupt */
3292 	intmask_org=arcmsr_disable_allintr(acb);
3293 	arcmsr_wait_firmware_ready(acb);
3294 	arcmsr_iop_confirm(acb);
3295 	arcmsr_get_firmware_spec(acb);
3296 	/*start background rebuild*/
3297 	arcmsr_start_adapter_bgrb(acb);
3298 	/* empty the doorbell Qbuffer if the doorbell has been rung */
3299 	arcmsr_clear_doorbell_queue_buffer(acb);
3300 	arcmsr_enable_eoi_mode(acb);
3301 	/* enable outbound Post Queue, outbound doorbell Interrupt */
3302 	arcmsr_enable_allintr(acb, intmask_org);
3303 	acb->acb_flags |=ACB_F_IOP_INITED;
3304 	return;
3305 }
3306 /*
3307 **********************************************************************
3308 **********************************************************************
3309 */
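/*
** bus_dmamap_load() callback for the coherent SRB pool: carve the pool into
** ARCMSR_MAX_FREESRB_NUM fixed-size SRBs, record each SRB's (shifted) physical
** address, and remember the virtual-to-physical offset used to translate
** completed SRB addresses coming back from the controller.
*/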
3310 static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3311 {
3312 	struct AdapterControlBlock *acb=arg;
3313 	struct CommandControlBlock *srb_tmp;
3314 	u_int8_t * dma_memptr;
3315 	u_int32_t i;
3316 	unsigned long srb_phyaddr=(unsigned long)segs->ds_addr;
3317 
3318 	dma_memptr=acb->uncacheptr;
3319 	acb->srb_phyaddr.phyaddr=srb_phyaddr;
3320 	srb_tmp=(struct CommandControlBlock *)dma_memptr;
3321 	for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3322 		if(bus_dmamap_create(acb->dm_segs_dmat,
3323 			 /*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) {
3324 			acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
3325 			kprintf("arcmsr%d: srb dmamap bus_dmamap_create error\n", acb->pci_unit);
3327 			return;
3328 		}
3329 		srb_tmp->cdb_shifted_phyaddr=(acb->adapter_type==ACB_ADAPTER_TYPE_C)?srb_phyaddr:(srb_phyaddr >> 5);
3330 		srb_tmp->acb=acb;
3331 		acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp;
3332 		srb_phyaddr=srb_phyaddr+SRB_SIZE;
3333 		srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp+SRB_SIZE);
3334 	}
3335 	acb->vir2phy_offset=(unsigned long)srb_tmp-srb_phyaddr;
3336 	return;
3337 }
3338 /*
3339 ************************************************************************
3340 **
3341 **
3342 ************************************************************************
3343 */
3344 static void arcmsr_free_resource(struct AdapterControlBlock *acb)
3345 {
3346 	/* remove the control device */
3347 	if(acb->ioctl_dev != NULL) {
3348 		destroy_dev(acb->ioctl_dev);
3349 	}
3350 	bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap);
3351 	bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap);
3352 	bus_dma_tag_destroy(acb->srb_dmat);
3353 	bus_dma_tag_destroy(acb->dm_segs_dmat);
3354 	bus_dma_tag_destroy(acb->parent_dmat);
3355 	return;
3356 }
3357 /*
3358 ************************************************************************
3359 ************************************************************************
3360 */
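/*
** One-time hardware setup at attach: classify the adapter by PCI device ID,
** create the parent/S-G/SRB DMA tags, allocate and map the coherent SRB pool,
** enable PCI bus mastering, map the message unit registers for the detected
** adapter type and then initialize the IOP.
*/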
3361 static u_int32_t arcmsr_initialize(device_t dev)
3362 {
3363 	struct AdapterControlBlock *acb=device_get_softc(dev);
3364 	u_int16_t pci_command;
3365 	int i, j,max_coherent_size;
3366 
3367 	switch (pci_get_devid(dev)) {
3368 	case PCIDevVenIDARC1880: {
3369 			acb->adapter_type=ACB_ADAPTER_TYPE_C;
3370 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3371 		}
3372 		break;
3373 	case PCIDevVenIDARC1200:
3374 	case PCIDevVenIDARC1201: {
3375 			acb->adapter_type=ACB_ADAPTER_TYPE_B;
3376 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE+(sizeof(struct HBB_MessageUnit));
3377 		}
3378 		break;
3379 	case PCIDevVenIDARC1110:
3380 	case PCIDevVenIDARC1120:
3381 	case PCIDevVenIDARC1130:
3382 	case PCIDevVenIDARC1160:
3383 	case PCIDevVenIDARC1170:
3384 	case PCIDevVenIDARC1210:
3385 	case PCIDevVenIDARC1220:
3386 	case PCIDevVenIDARC1230:
3387 	case PCIDevVenIDARC1231:
3388 	case PCIDevVenIDARC1260:
3389 	case PCIDevVenIDARC1261:
3390 	case PCIDevVenIDARC1270:
3391 	case PCIDevVenIDARC1280:
3392 	case PCIDevVenIDARC1212:
3393 	case PCIDevVenIDARC1222:
3394 	case PCIDevVenIDARC1380:
3395 	case PCIDevVenIDARC1381:
3396 	case PCIDevVenIDARC1680:
3397 	case PCIDevVenIDARC1681: {
3398 			acb->adapter_type=ACB_ADAPTER_TYPE_A;
3399 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3400 		}
3401 		break;
3402 	default: {
3403 			kprintf("arcmsr%d: unknown RAID adapter type \n", device_get_unit(dev));
3405 			return ENOMEM;
3406 		}
3407 	}
3408 	if(bus_dma_tag_create(  /*parent*/	NULL,
3409 				/*alignment*/	1,
3410 				/*boundary*/	0,
3411 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3412 				/*highaddr*/	BUS_SPACE_MAXADDR,
3413 				/*filter*/	NULL,
3414 				/*filterarg*/	NULL,
3415 				/*maxsize*/	BUS_SPACE_MAXSIZE_32BIT,
3416 				/*nsegments*/	BUS_SPACE_UNRESTRICTED,
3417 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3418 				/*flags*/	0,
3419 						&acb->parent_dmat) != 0)
3420 	{
3421 		kprintf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3422 		return ENOMEM;
3423 	}
3424 	/* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
3425 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3426 				/*alignment*/	1,
3427 				/*boundary*/	0,
3428 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3429 				/*highaddr*/	BUS_SPACE_MAXADDR,
3430 				/*filter*/	NULL,
3431 				/*filterarg*/	NULL,
3432 				/*maxsize*/	ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM,
3433 				/*nsegments*/	ARCMSR_MAX_SG_ENTRIES,
3434 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3435 				/*flags*/	0,
3436 						&acb->dm_segs_dmat) != 0)
3437 	{
3438 		bus_dma_tag_destroy(acb->parent_dmat);
3439 		kprintf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3440 		return ENOMEM;
3441 	}
3442 	/* DMA tag for our srb structures; allocate the freesrb memory from it. */
3443 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3444 				/*alignment*/	0x20,
3445 				/*boundary*/	0,
3446 				/*lowaddr*/	BUS_SPACE_MAXADDR_32BIT,
3447 				/*highaddr*/	BUS_SPACE_MAXADDR,
3448 				/*filter*/	NULL,
3449 				/*filterarg*/	NULL,
3450 				/*maxsize*/	max_coherent_size,
3451 				/*nsegments*/	1,
3452 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3453 				/*flags*/	0,
3454 						&acb->srb_dmat) != 0)
3455 	{
3456 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3457 		bus_dma_tag_destroy(acb->parent_dmat);
3458 		kprintf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3459 		return ENXIO;
3460 	}
3461 	/* Allocation for our srbs */
3462 	if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) {
3463 		bus_dma_tag_destroy(acb->srb_dmat);
3464 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3465 		bus_dma_tag_destroy(acb->parent_dmat);
3466 		kprintf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev));
3467 		return ENXIO;
3468 	}
3469 	/* And permanently map them */
3470 	if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) {
3471 		bus_dma_tag_destroy(acb->srb_dmat);
3472 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3473 		bus_dma_tag_destroy(acb->parent_dmat);
3474 		kprintf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev));
3475 		return ENXIO;
3476 	}
3477 	pci_command=pci_read_config(dev, PCIR_COMMAND, 2);
3478 	pci_command |= PCIM_CMD_BUSMASTEREN;
3479 	pci_command |= PCIM_CMD_PERRESPEN;
3480 	pci_command |= PCIM_CMD_MWRICEN;
3481 	/* Enable Busmaster/Mem */
3482 	pci_command |= PCIM_CMD_MEMEN;
3483 	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
3484 	switch(acb->adapter_type) {
3485 	case ACB_ADAPTER_TYPE_A: {
3486 			u_int32_t rid0=PCIR_BAR(0);
3487 			vm_offset_t	mem_base0;
3488 
3489 			acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, 0x1000, RF_ACTIVE);
3490 			if(acb->sys_res_arcmsr[0] == NULL) {
3491 				arcmsr_free_resource(acb);
3492 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3493 				return ENOMEM;
3494 			}
3495 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3496 				arcmsr_free_resource(acb);
3497 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3498 				return ENXIO;
3499 			}
3500 			mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3501 			if(mem_base0==0) {
3502 				arcmsr_free_resource(acb);
3503 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3504 				return ENXIO;
3505 			}
3506 			acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3507 			acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3508 			acb->pmu=(struct MessageUnit_UNION *)mem_base0;
3509 		}
3510 		break;
3511 	case ACB_ADAPTER_TYPE_B: {
3512 			struct HBB_MessageUnit *phbbmu;
3513 			struct CommandControlBlock *freesrb;
3514 			u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) };
3515 			vm_offset_t	mem_base[]={0,0};
3516 			for(i=0; i<2; i++) {
3517 				if(i==0) {
3518 					acb->sys_res_arcmsr[i]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i],
3519 											0ul, ~0ul, sizeof(struct HBB_DOORBELL), RF_ACTIVE);
3520 				} else {
3521 					acb->sys_res_arcmsr[i]=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i],
3522 											0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE);
3523 				}
3524 				if(acb->sys_res_arcmsr[i] == NULL) {
3525 					arcmsr_free_resource(acb);
3526 					kprintf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i);
3527 					return ENOMEM;
3528 				}
3529 				if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) {
3530 					arcmsr_free_resource(acb);
3531 					kprintf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i);
3532 					return ENXIO;
3533 				}
3534 				mem_base[i]=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]);
3535 				if(mem_base[i]==0) {
3536 					arcmsr_free_resource(acb);
3537 					kprintf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i);
3538 					return ENXIO;
3539 				}
3540 				acb->btag[i]=rman_get_bustag(acb->sys_res_arcmsr[i]);
3541 				acb->bhandle[i]=rman_get_bushandle(acb->sys_res_arcmsr[i]);
3542 			}
3543 			freesrb=(struct CommandControlBlock *)acb->uncacheptr;
3544 //			acb->pmu=(struct MessageUnit_UNION *)&freesrb[ARCMSR_MAX_FREESRB_NUM];
3545 			acb->pmu=(struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE);
3546 			phbbmu=(struct HBB_MessageUnit *)acb->pmu;
3547 			phbbmu->hbb_doorbell=(struct HBB_DOORBELL *)mem_base[0];
3548 			phbbmu->hbb_rwbuffer=(struct HBB_RWBUFFER *)mem_base[1];
3549 		}
3550 		break;
3551 	case ACB_ADAPTER_TYPE_C: {
3552 			u_int32_t rid0=PCIR_BAR(1);
3553 			vm_offset_t	mem_base0;
3554 
3555 			acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE);
3556 			if(acb->sys_res_arcmsr[0] == NULL) {
3557 				arcmsr_free_resource(acb);
3558 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3559 				return ENOMEM;
3560 			}
3561 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3562 				arcmsr_free_resource(acb);
3563 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3564 				return ENXIO;
3565 			}
3566 			mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3567 			if(mem_base0==0) {
3568 				arcmsr_free_resource(acb);
3569 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3570 				return ENXIO;
3571 			}
3572 			acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3573 			acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3574 			acb->pmu=(struct MessageUnit_UNION *)mem_base0;
3575 		}
3576 		break;
3577 	}
3578 	if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) {
3579 		arcmsr_free_resource(acb);
3580 		kprintf("arcmsr%d: map free srb failure!\n", device_get_unit(dev));
3581 		return ENXIO;
3582 	}
3583 	acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
3584 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
3585 	/*
3586 	********************************************************************
3587 	** init raid volume state
3588 	********************************************************************
3589 	*/
3590 	for(i=0;i<ARCMSR_MAX_TARGETID;i++) {
3591 		for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) {
3592 			acb->devstate[i][j]=ARECA_RAID_GONE;
3593 		}
3594 	}
3595 	arcmsr_iop_init(acb);
3596 	return(0);
3597 }
3598 /*
3599 ************************************************************************
3600 ************************************************************************
3601 */
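/*
** Device attach: initialize the controller, allocate and hook up the interrupt
** (MSI when enabled), register a CAM SIM/bus/path for the SCSI midlayer, create
** the arcmsr%d control device node and start the device-map polling callout.
*/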
3602 static int arcmsr_attach(device_t dev)
3603 {
3604 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3605 	u_int32_t unit=device_get_unit(dev);
3606 	struct ccb_setasync csa;
3607 	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
3608 	struct resource	*irqres;
3609 	int	rid;
3610 	u_int irq_flags;
3611 
3612 	if(acb == NULL) {
3613 		kprintf("arcmsr%d: cannot allocate softc\n", unit);
3614 		return (ENOMEM);
3615 	}
3616 	ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock");
3617 	if(arcmsr_initialize(dev)) {
3618 		kprintf("arcmsr%d: initialize failure!\n", unit);
3619 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3620 		return ENXIO;
3621 	}
3622 	/* After setting up the adapter, map our interrupt */
3623 	rid=0;
3624 	acb->irq_type = pci_alloc_1intr(dev, arcmsr_msi_enable, &rid,
3625 	    &irq_flags);
3626 	irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1,
3627 	    irq_flags);
3628 	if(irqres == NULL ||
3629 		bus_setup_intr(dev, irqres, INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih, NULL)) {
3630 		arcmsr_free_resource(acb);
3631 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3632 		kprintf("arcmsr%d: unable to register interrupt handler!\n", unit);
3633 		return ENXIO;
3634 	}
3635 	acb->irqres=irqres;
3636 	acb->pci_dev=dev;
3637 	acb->pci_unit=unit;
3638 	/*
3639 	 * Now let the CAM generic SCSI layer find the SCSI devices on
3640 	 * the bus and start the queue running from the idle loop.
3641 	 * Create the device queue for our SIM(s);
3642 	 * (ARCMSR_MAX_START_JOB - 1) is the maximum number of simultaneous transactions.
3643 	*/
3644 	devq=cam_simq_alloc(ARCMSR_MAX_START_JOB);
3645 	if(devq == NULL) {
3646 		arcmsr_free_resource(acb);
3647 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3648 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3649 			pci_release_msi(dev);
3650 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3651 		kprintf("arcmsr%d: cam_simq_alloc failure!\n", unit);
3652 		return ENXIO;
3653 	}
3654 	acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->qbuffer_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
3655 	if(acb->psim == NULL) {
3656 		arcmsr_free_resource(acb);
3657 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3658 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3659 			pci_release_msi(dev);
3660 		cam_simq_release(devq);
3661 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3662 		kprintf("arcmsr%d: cam_sim_alloc failure!\n", unit);
3663 		return ENXIO;
3664 	}
3665 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3666 	if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
3667 		arcmsr_free_resource(acb);
3668 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3669 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3670 			pci_release_msi(dev);
3671 		cam_sim_free(acb->psim);
3672 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3673 		kprintf("arcmsr%d: xpt_bus_register failure!\n", unit);
3674 		return ENXIO;
3675 	}
3676 	if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
3677 		arcmsr_free_resource(acb);
3678 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3679 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3680 			pci_release_msi(dev);
3681 		xpt_bus_deregister(cam_sim_path(acb->psim));
3682 		cam_sim_free(acb->psim);
3683 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3684 		kprintf("arcmsr%d: xpt_create_path failure!\n", unit);
3685 		return ENXIO;
3686 	}
3687 	/*
3688 	****************************************************
3689 	*/
3690 	xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5);
3691 	csa.ccb_h.func_code=XPT_SASYNC_CB;
3692 	csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE;
3693 	csa.callback=arcmsr_async;
3694 	csa.callback_arg=acb->psim;
3695 	xpt_action((union ccb *)&csa);
3696 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3697 	/* Create the control device.  */
3698 	acb->ioctl_dev=make_dev(&arcmsr_ops, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit);
3699 
3700 	acb->ioctl_dev->si_drv1=acb;
3701 	(void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
3702 	arcmsr_callout_init(&acb->devmap_callout);
3703 	callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb);
3704 	return 0;
3705 }
3706 
3707 /*
3708 ************************************************************************
3709 ************************************************************************
3710 */
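/*
** PCI probe: match Areca device IDs, pick a human-readable description (SATA,
** SAS 3G or SAS 6G, with a RAID6-capable note) and disable MSI for the ARC1880.
*/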
3711 static int arcmsr_probe(device_t dev)
3712 {
3713 	u_int32_t id;
3714 	static char buf[256];
3715 	char x_type[]={"X-TYPE"};
3716 	char *type;
3717 	int raid6 = 1;
3718 
3719 	if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
3720 		return (ENXIO);
3721 	}
3722 	switch(id=pci_get_devid(dev)) {
3723 	case PCIDevVenIDARC1110:
3724 	case PCIDevVenIDARC1200:
3725 	case PCIDevVenIDARC1201:
3726 	case PCIDevVenIDARC1210:
3727 		raid6 = 0;
3728 		/*FALLTHRU*/
3729 	case PCIDevVenIDARC1120:
3730 	case PCIDevVenIDARC1130:
3731 	case PCIDevVenIDARC1160:
3732 	case PCIDevVenIDARC1170:
3733 	case PCIDevVenIDARC1220:
3734 	case PCIDevVenIDARC1230:
3735 	case PCIDevVenIDARC1231:
3736 	case PCIDevVenIDARC1260:
3737 	case PCIDevVenIDARC1261:
3738 	case PCIDevVenIDARC1270:
3739 	case PCIDevVenIDARC1280:
3740 		type = "SATA";
3741 		break;
3742 	case PCIDevVenIDARC1212:
3743 	case PCIDevVenIDARC1222:
3744 	case PCIDevVenIDARC1380:
3745 	case PCIDevVenIDARC1381:
3746 	case PCIDevVenIDARC1680:
3747 	case PCIDevVenIDARC1681:
3748 		type = "SAS 3G";
3749 		break;
3750 	case PCIDevVenIDARC1880:
3751 		type = "SAS 6G";
3752 		arcmsr_msi_enable = 0;
3753 		break;
3754 	default:
3755 		type = x_type;
3756 		break;
3757 	}
3758 	if(type == x_type)
3759 		return(ENXIO);
3760 	ksprintf(buf, "Areca %s Host Adapter RAID Controller%s", type, raid6 ? " (RAID6 capable)" : "");
3761 	device_set_desc_copy(dev, buf);
3762 	return 0;
3763 }
3764 /*
3765 ************************************************************************
3766 ************************************************************************
3767 */
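/*
** Shutdown: stop the background rebuild, flush the adapter cache and abort any
** SRBs still outstanding, so the controller is quiescent before power-off or
** detach.
*/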
3768 static int arcmsr_shutdown(device_t dev)
3769 {
3770 	u_int32_t  i;
3771 	u_int32_t intmask_org;
3772 	struct CommandControlBlock *srb;
3773 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3774 
3775 	/* stop adapter background rebuild */
3776 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3777 	/* disable all outbound interrupt */
3778 	intmask_org=arcmsr_disable_allintr(acb);
3779 	arcmsr_stop_adapter_bgrb(acb);
3780 	arcmsr_flush_adapter_cache(acb);
3781 	/* abort all outstanding commands */
3782 	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
3783 	acb->acb_flags &= ~ACB_F_IOP_INITED;
3784 	if(acb->srboutstandingcount!=0) {
3785 		/*clear and abort all outbound posted Q*/
3786 		arcmsr_done4abort_postqueue(acb);
3787 		/* tell iop 331 that all outstanding commands have been aborted */
3788 		arcmsr_abort_allcmd(acb);
3789 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3790 			srb=acb->psrb_pool[i];
3791 			if(srb->srb_state==ARCMSR_SRB_START) {
3792 				srb->srb_state=ARCMSR_SRB_ABORTED;
3793 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3794 				arcmsr_srb_complete(srb, 1);
3795 			}
3796 		}
3797 	}
3798 	acb->srboutstandingcount=0;
3799 	acb->workingsrb_doneindex=0;
3800 	acb->workingsrb_startindex=0;
3801 #ifdef ARCMSR_DEBUG1
3802 	acb->pktRequestCount = 0;
3803 	acb->pktReturnCount = 0;
3804 #endif
3805 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3806 	return (0);
3807 }
3808 /*
3809 ************************************************************************
3810 ************************************************************************
3811 */
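/*
** Detach: stop the devmap callout, tear down the interrupt, run the shutdown
** sequence, release DMA and memory-BAR resources and unregister the CAM SIM
** and path.
*/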
3812 static int arcmsr_detach(device_t dev)
3813 {
3814 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3815 	int i;
3816 
3817 	callout_stop(&acb->devmap_callout);
3818 	bus_teardown_intr(dev, acb->irqres, acb->ih);
3819 	arcmsr_shutdown(dev);
3820 	arcmsr_free_resource(acb);
3821 	for(i=0; (i<2) && (acb->sys_res_arcmsr[i]!=NULL); i++) {	/* check the bound before indexing */
3822 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]);
3823 	}
3824 	bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3825 	if (acb->irq_type == PCI_INTR_TYPE_MSI)
3826 		pci_release_msi(dev);
3827 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3828 	xpt_async(AC_LOST_DEVICE, acb->ppath, NULL);
3829 	xpt_free_path(acb->ppath);
3830 	xpt_bus_deregister(cam_sim_path(acb->psim));
3831 	cam_sim_free(acb->psim);
3832 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3833 	ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3834 	return (0);
3835 }
3836 
3837 #ifdef ARCMSR_DEBUG1
3838 static void arcmsr_dump_data(struct AdapterControlBlock *acb)
3839 {
3840 	if((acb->pktRequestCount - acb->pktReturnCount) == 0)
3841 		return;
3842 	kprintf("Command Request Count   =0x%x\n",acb->pktRequestCount);
3843 	kprintf("Command Return Count    =0x%x\n",acb->pktReturnCount);
3844 	kprintf("Command (Req-Rtn) Count =0x%x\n",(acb->pktRequestCount - acb->pktReturnCount));
3845 	kprintf("Queued Command Count    =0x%x\n",acb->srboutstandingcount);
3846 }
3847 #endif
3848