1 /*
2 ********************************************************************************
3 ** OS : FreeBSD
4 ** FILE NAME : arcmsr.c
5 ** BY : Erich Chen, Ching Huang
6 ** Description: SCSI RAID Device Driver for
7 ** ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x)
8 ** SATA/SAS RAID HOST Adapter
9 ********************************************************************************
10 ********************************************************************************
11 **
12 ** Copyright (C) 2002 - 2012, Areca Technology Corporation All rights reserved.
13 **
14 ** Redistribution and use in source and binary forms, with or without
15 ** modification, are permitted provided that the following conditions
16 ** are met:
17 ** 1. Redistributions of source code must retain the above copyright
18 ** notice, this list of conditions and the following disclaimer.
19 ** 2. Redistributions in binary form must reproduce the above copyright
20 ** notice, this list of conditions and the following disclaimer in the
21 ** documentation and/or other materials provided with the distribution.
22 ** 3. The name of the author may not be used to endorse or promote products
23 ** derived from this software without specific prior written permission.
24 **
25 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
26 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
27 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
28 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
29 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT
30 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
31 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
32 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
33 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
34 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 ********************************************************************************
36 ** History
37 **
38 ** REV# DATE NAME DESCRIPTION
39 ** 1.00.00.00 03/31/2004 Erich Chen First release
40 ** 1.20.00.02 11/29/2004 Erich Chen bug fix with arcmsr_bus_reset when PHY error
41 ** 1.20.00.03 04/19/2005 Erich Chen add SATA 24 Ports adapter type support
42 ** clean unused function
43 ** 1.20.00.12 09/12/2005 Erich Chen bug fix with abort command handling,
44 ** firmware version check
45 ** and firmware update notify for hardware bug fix
46 **                                          handling of non-zero high part physical address
47 ** of srb resource
48 ** 1.20.00.13 08/18/2006 Erich Chen remove pending srb and report busy
49 ** add iop message xfer
50 ** with scsi pass-through command
51 ** add new device id of sas raid adapters
52 ** code fit for SPARC64 & PPC
53 ** 1.20.00.14 02/05/2007 Erich Chen bug fix for incorrect ccb_h.status report
54 ** and cause g_vfs_done() read write error
55 ** 1.20.00.15 10/10/2007 Erich Chen support new RAID adapter type ARC120x
56 ** 1.20.00.16 10/10/2009 Erich Chen Bug fix for RAID adapter type ARC120x
57 ** bus_dmamem_alloc() with BUS_DMA_ZERO
58 ** 1.20.00.17 07/15/2010 Ching Huang Added support ARC1880
59 ** report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed,
60 ** prevent cam_periph_error removing all LUN devices of one Target id
61 ** for any one LUN device failed
62 ** 1.20.00.18   10/14/2010  Ching Huang     Fixed "inquiry data fails comparison at DV1 step"
63 ** 10/25/2010 Ching Huang Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B
64 ** 1.20.00.19 11/11/2010 Ching Huang Fixed arcmsr driver prevent arcsas support for Areca SAS HBA ARC13x0
65 ** 1.20.00.20 12/08/2010 Ching Huang Avoid calling atomic_set_int function
66 ** 1.20.00.21 02/08/2011 Ching Huang Implement I/O request timeout
67 ** 02/14/2011 Ching Huang Modified pktRequestCount
68 ** 1.20.00.21 03/03/2011 Ching Huang if a command timeout, then wait its ccb back before free it
69 ** 1.20.00.22 07/04/2011 Ching Huang Fixed multiple MTX panic
70 ** 1.20.00.23 10/28/2011 Ching Huang Added TIMEOUT_DELAY in case of too many HDDs need to start
71 ** 1.20.00.23 11/08/2011 Ching Huang Added report device transfer speed
72 ** 1.20.00.23 01/30/2012 Ching Huang Fixed Request requeued and Retrying command
73 ** 1.20.00.24 06/11/2012 Ching Huang Fixed return sense data condition
74 ** 1.20.00.25 08/17/2012 Ching Huang Fixed hotplug device no function on type A adapter
75 ** 1.20.00.26 12/14/2012 Ching Huang Added support ARC1214,1224,1264,1284
76 ** 1.20.00.27   05/06/2013  Ching Huang     Fixed outstanding cmd full on ARC-12x4
77 ** 1.20.00.28 09/13/2013 Ching Huang Removed recursive mutex in arcmsr_abort_dr_ccbs
78 ** 1.20.00.29 12/18/2013 Ching Huang Change simq allocation number, support ARC1883
79 ** 1.30.00.00 11/30/2015 Ching Huang Added support ARC1203
80 ** 1.40.00.00 10/26/2017 Ching Huang Added support ARC1884
81 ******************************************************************************************
82 * $FreeBSD: head/sys/dev/arcmsr/arcmsr.c 259565 2013-12-18 19:25:40Z delphij $
83 */
84 #if 0
85 #define ARCMSR_DEBUG1 1
86 #endif
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/malloc.h>
90 #include <sys/kernel.h>
91 #include <sys/bus.h>
92 #include <sys/queue.h>
93 #include <sys/stat.h>
94 #include <sys/kthread.h>
95 #include <sys/module.h>
96 #include <sys/proc.h>
97 #include <sys/lock.h>
98 #include <sys/sysctl.h>
99 #include <sys/thread2.h>
100 #include <sys/poll.h>
101 #include <sys/device.h>
102 #include <vm/vm.h>
103 #include <vm/vm_param.h>
104 #include <vm/pmap.h>
105
106 #include <machine/atomic.h>
107 #include <sys/conf.h>
108 #include <sys/rman.h>
109
110 #include <bus/cam/cam.h>
111 #include <bus/cam/cam_ccb.h>
112 #include <bus/cam/cam_sim.h>
113 #include <bus/cam/cam_periph.h>
114 #include <bus/cam/cam_xpt_periph.h>
115 #include <bus/cam/cam_xpt_sim.h>
116 #include <bus/cam/cam_debug.h>
117 #include <bus/cam/scsi/scsi_all.h>
118 #include <bus/cam/scsi/scsi_message.h>
119 /*
120 **************************************************************************
121 **************************************************************************
122 */
123 #include <sys/endian.h>
124 #include <bus/pci/pcivar.h>
125 #include <bus/pci/pcireg.h>
126
127 #define arcmsr_callout_init(a) callout_init_mp(a);
128
129 #define ARCMSR_DRIVER_VERSION "arcmsr version 1.40.00.00 2017-10-26"
130 #include <dev/raid/arcmsr/arcmsr.h>
131 /*
132 **************************************************************************
133 **************************************************************************
134 */
135 static void arcmsr_free_srb(struct CommandControlBlock *srb);
136 static struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb);
137 static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb);
138 static int arcmsr_probe(device_t dev);
139 static int arcmsr_attach(device_t dev);
140 static int arcmsr_detach(device_t dev);
141 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
142 static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
143 static int arcmsr_shutdown(device_t dev);
144 static void arcmsr_interrupt(struct AdapterControlBlock *acb);
145 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
146 static void arcmsr_free_resource(struct AdapterControlBlock *acb);
147 static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
148 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
149 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
150 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
151 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
152 static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, struct QBUFFER *prbuffer);
153 static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb);
154 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
155 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
156 static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
157 static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
158 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg);
159 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb);
160 static int arcmsr_resume(device_t dev);
161 static int arcmsr_suspend(device_t dev);
162 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
163 static void arcmsr_polling_devmap(void *arg);
164 static void arcmsr_srb_timeout(void *arg);
165 static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb);
166 static void arcmsr_hbe_postqueue_isr(struct AdapterControlBlock *acb);
167 static void arcmsr_teardown_intr(device_t dev, struct AdapterControlBlock *acb);
168 #ifdef ARCMSR_DEBUG1
169 static void arcmsr_dump_data(struct AdapterControlBlock *acb);
170 #endif
171 /*
172 **************************************************************************
173 **************************************************************************
174 */
175 static void UDELAY(u_int32_t us) { DELAY(us); }
176 /*
177 **************************************************************************
178 **************************************************************************
179 */
180 static bus_dmamap_callback_t arcmsr_map_free_srb;
181 static bus_dmamap_callback_t arcmsr_execute_srb;
182 /*
183 **************************************************************************
184 **************************************************************************
185 */
186 static d_open_t arcmsr_open;
187 static d_close_t arcmsr_close;
188 static d_ioctl_t arcmsr_ioctl;
189
190 static device_method_t arcmsr_methods[]={
191 DEVMETHOD(device_probe, arcmsr_probe),
192 DEVMETHOD(device_attach, arcmsr_attach),
193 DEVMETHOD(device_detach, arcmsr_detach),
194 DEVMETHOD(device_shutdown, arcmsr_shutdown),
195 DEVMETHOD(device_suspend, arcmsr_suspend),
196 DEVMETHOD(device_resume, arcmsr_resume),
197 DEVMETHOD(bus_print_child, bus_generic_print_child),
198 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
199 DEVMETHOD_END
200 };
201
202 static driver_t arcmsr_driver={
203 "arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
204 };
205
206 static devclass_t arcmsr_devclass;
207 DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, NULL, NULL);
208 MODULE_VERSION(arcmsr, 1);
209 MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
210 MODULE_DEPEND(arcmsr, cam, 1, 1, 1);
211 #ifndef BUS_DMA_COHERENT
212 #define BUS_DMA_COHERENT 0x04 /* hint: map memory in a coherent way */
213 #endif
214 static struct dev_ops arcmsr_ops = {
215 { "arcmsr", 0, D_MPSAFE },
216 .d_open = arcmsr_open, /* open */
217 .d_close = arcmsr_close, /* close */
218 .d_ioctl = arcmsr_ioctl, /* ioctl */
219 };
220
221 static int arcmsr_msi_enable = 1;
222 TUNABLE_INT("hw.arcmsr.msi.enable", &arcmsr_msi_enable);
223
224 /*
225 **************************************************************************
226 **************************************************************************
227 */
228 static int
229 arcmsr_open(struct dev_open_args *ap)
230 {
231 cdev_t dev = ap->a_head.a_dev;
232 struct AdapterControlBlock *acb = dev->si_drv1;
233
234 if(acb == NULL) {
235 return ENXIO;
236 }
237 return (0);
238 }
239 /*
240 **************************************************************************
241 **************************************************************************
242 */
243 static int
244 arcmsr_close(struct dev_close_args *ap)
245 {
246 cdev_t dev = ap->a_head.a_dev;
247 struct AdapterControlBlock *acb = dev->si_drv1;
248
249 if(acb == NULL) {
250 return ENXIO;
251 }
252 return 0;
253 }
254 /*
255 **************************************************************************
256 **************************************************************************
257 */
258 static int
259 arcmsr_ioctl(struct dev_ioctl_args *ap)
260 {
261 cdev_t dev = ap->a_head.a_dev;
262 u_long ioctl_cmd = ap->a_cmd;
263 caddr_t arg = ap->a_data;
264 struct AdapterControlBlock *acb = dev->si_drv1;
265
266 if(acb == NULL) {
267 return ENXIO;
268 }
269 return (arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
270 }
271 /*
272 **********************************************************************
273 **********************************************************************
274 */
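/*
** Mask all outbound interrupts for the given adapter type and return the
** previous mask so the caller can restore it via arcmsr_enable_allintr().
*/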
275 static u_int32_t arcmsr_disable_allintr(struct AdapterControlBlock *acb)
276 {
277 u_int32_t intmask_org = 0;
278
279 switch (acb->adapter_type) {
280 case ACB_ADAPTER_TYPE_A: {
281 /* disable all outbound interrupt */
282 intmask_org = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */
283 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
284 }
285 break;
286 case ACB_ADAPTER_TYPE_B: {
287 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
288 /* disable all outbound interrupt */
289 intmask_org = READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask)
290 & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */
291 WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask, 0); /* disable all interrupt */
292 }
293 break;
294 case ACB_ADAPTER_TYPE_C: {
295 /* disable all outbound interrupt */
296 intmask_org = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask) ; /* disable outbound message0 int */
297 CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
298 }
299 break;
300 case ACB_ADAPTER_TYPE_D: {
301 /* disable all outbound interrupt */
302 intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable) ; /* disable outbound message0 int */
303 CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE);
304 }
305 break;
306 case ACB_ADAPTER_TYPE_E: {
307 /* disable all outbound interrupt */
308 intmask_org = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask) ; /* disable outbound message0 int */
309 CHIP_REG_WRITE32(HBE_MessageUnit, 0, host_int_mask, intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE);
310 }
311 break;
312 }
313 return (intmask_org);
314 }
315 /*
316 **********************************************************************
317 **********************************************************************
318 */
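/*
** Re-enable the outbound post queue, doorbell and message interrupts,
** starting from the mask previously saved by arcmsr_disable_allintr().
*/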
319 static void arcmsr_enable_allintr(struct AdapterControlBlock *acb, u_int32_t intmask_org)
320 {
321 u_int32_t mask;
322
323 switch (acb->adapter_type) {
324 case ACB_ADAPTER_TYPE_A: {
325 /* enable outbound Post Queue, outbound doorbell Interrupt */
326 mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
327 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask);
328 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
329 }
330 break;
331 case ACB_ADAPTER_TYPE_B: {
332 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
333 /* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
334 mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
335 WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/
336 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
337 }
338 break;
339 case ACB_ADAPTER_TYPE_C: {
340 /* enable outbound Post Queue, outbound doorbell Interrupt */
341 mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
342 CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask);
343 acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
344 }
345 break;
346 case ACB_ADAPTER_TYPE_D: {
347 /* enable outbound Post Queue, outbound doorbell Interrupt */
348 mask = ARCMSR_HBDMU_ALL_INT_ENABLE;
349 CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | mask);
350 CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable);
351 acb->outbound_int_enable = mask;
352 }
353 break;
354 case ACB_ADAPTER_TYPE_E: {
355 /* enable outbound Post Queue, outbound doorbell Interrupt */
356 mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
357 CHIP_REG_WRITE32(HBE_MessageUnit, 0, host_int_mask, intmask_org & mask);
358 acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
359 }
360 break;
361 }
362 }
363 /*
364 **********************************************************************
365 **********************************************************************
366 */
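/*
** Poll the type A outbound interrupt status for a message0 completion and
** clear it when seen. Polls in 10 ms steps for up to about 20 seconds
** (20 retries of 100 iterations) before returning FALSE.
*/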
367 static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
368 {
369 u_int32_t Index;
370 u_int8_t Retries = 0x00;
371
372 do {
373 for(Index=0; Index < 100; Index++) {
374 if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
375 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/
376 return TRUE;
377 }
378 UDELAY(10000);
379 }/*max 1 seconds*/
380 }while(Retries++ < 20);/*max 20 sec*/
381 return (FALSE);
382 }
383 /*
384 **********************************************************************
385 **********************************************************************
386 */
387 static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
388 {
389 u_int32_t Index;
390 u_int8_t Retries = 0x00;
391 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
392
393 do {
394 for(Index=0; Index < 100; Index++) {
395 if(READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
396 WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/
397 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
398 return TRUE;
399 }
400 UDELAY(10000);
401 }/*max 1 seconds*/
402 }while(Retries++ < 20);/*max 20 sec*/
403 return (FALSE);
404 }
405 /*
406 **********************************************************************
407 **********************************************************************
408 */
409 static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb)
410 {
411 u_int32_t Index;
412 u_int8_t Retries = 0x00;
413
414 do {
415 for(Index=0; Index < 100; Index++) {
416 if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
417 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/
418 return TRUE;
419 }
420 UDELAY(10000);
421 }/*max 1 seconds*/
422 }while(Retries++ < 20);/*max 20 sec*/
423 return (FALSE);
424 }
425 /*
426 **********************************************************************
427 **********************************************************************
428 */
429 static u_int8_t arcmsr_hbd_wait_msgint_ready(struct AdapterControlBlock *acb)
430 {
431 u_int32_t Index;
432 u_int8_t Retries = 0x00;
433
434 do {
435 for(Index=0; Index < 100; Index++) {
436 if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) {
437 CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);/*clear interrupt*/
438 return TRUE;
439 }
440 UDELAY(10000);
441 }/*max 1 seconds*/
442 }while(Retries++ < 20);/*max 20 sec*/
443 return (FALSE);
444 }
445 /*
446 **********************************************************************
447 **********************************************************************
448 */
449 static u_int8_t arcmsr_hbe_wait_msgint_ready(struct AdapterControlBlock *acb)
450 {
451 u_int32_t Index, read_doorbell;
452 u_int8_t Retries = 0x00;
453
454 do {
455 for(Index=0; Index < 100; Index++) {
456 read_doorbell = CHIP_REG_READ32(HBE_MessageUnit, 0, iobound_doorbell);
457 if((read_doorbell ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
458 CHIP_REG_WRITE32(HBE_MessageUnit, 0, host_int_status, 0);/*clear interrupt*/
459 acb->in_doorbell = read_doorbell;
460 return TRUE;
461 }
462 UDELAY(10000);
463 }/*max 1 seconds*/
464 }while(Retries++ < 20);/*max 20 sec*/
465 return (FALSE);
466 }
467 /*
468 ************************************************************************
469 ************************************************************************
470 */
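/*
** The arcmsr_flush_hb[a-e]_cache() helpers post a FLUSH_CACHE message to
** the IOP and retry the message-interrupt wait until it is acknowledged.
*/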
471 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
472 {
473 	int retry_count = 30;	/* allow up to 10 minutes to flush the adapter cache */
474
475 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
476 do {
477 if(arcmsr_hba_wait_msgint_ready(acb)) {
478 break;
479 } else {
480 retry_count--;
481 }
482 }while(retry_count != 0);
483 }
484 /*
485 ************************************************************************
486 ************************************************************************
487 */
488 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
489 {
490 	int retry_count = 30;	/* allow up to 10 minutes to flush the adapter cache */
491 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
492
493 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
494 do {
495 if(arcmsr_hbb_wait_msgint_ready(acb)) {
496 break;
497 } else {
498 retry_count--;
499 }
500 }while(retry_count != 0);
501 }
502 /*
503 ************************************************************************
504 ************************************************************************
505 */
506 static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
507 {
508 	int retry_count = 30;	/* allow up to 10 minutes to flush the adapter cache */
509
510 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
511 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
512 do {
513 if(arcmsr_hbc_wait_msgint_ready(acb)) {
514 break;
515 } else {
516 retry_count--;
517 }
518 }while(retry_count != 0);
519 }
520 /*
521 ************************************************************************
522 ************************************************************************
523 */
524 static void arcmsr_flush_hbd_cache(struct AdapterControlBlock *acb)
525 {
526 	int retry_count = 30;	/* allow up to 10 minutes to flush the adapter cache */
527
528 CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
529 do {
530 if(arcmsr_hbd_wait_msgint_ready(acb)) {
531 break;
532 } else {
533 retry_count--;
534 }
535 }while(retry_count != 0);
536 }
537 /*
538 ************************************************************************
539 ************************************************************************
540 */
541 static void arcmsr_flush_hbe_cache(struct AdapterControlBlock *acb)
542 {
543 	int retry_count = 30;	/* allow up to 10 minutes to flush the adapter cache */
544
545 CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
546 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
547 CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell);
548 do {
549 if(arcmsr_hbe_wait_msgint_ready(acb)) {
550 break;
551 } else {
552 retry_count--;
553 }
554 }while(retry_count != 0);
555 }
556 /*
557 ************************************************************************
558 ************************************************************************
559 */
560 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
561 {
562 switch (acb->adapter_type) {
563 case ACB_ADAPTER_TYPE_A: {
564 arcmsr_flush_hba_cache(acb);
565 }
566 break;
567 case ACB_ADAPTER_TYPE_B: {
568 arcmsr_flush_hbb_cache(acb);
569 }
570 break;
571 case ACB_ADAPTER_TYPE_C: {
572 arcmsr_flush_hbc_cache(acb);
573 }
574 break;
575 case ACB_ADAPTER_TYPE_D: {
576 arcmsr_flush_hbd_cache(acb);
577 }
578 break;
579 case ACB_ADAPTER_TYPE_E: {
580 arcmsr_flush_hbe_cache(acb);
581 }
582 break;
583 }
584 }
585 /*
586 *******************************************************************************
587 *******************************************************************************
588 */
589 static int arcmsr_suspend(device_t dev)
590 {
591 struct AdapterControlBlock *acb = device_get_softc(dev);
592
593 /* flush controller */
594 arcmsr_iop_parking(acb);
595 /* disable all outbound interrupt */
596 arcmsr_disable_allintr(acb);
597 return(0);
598 }
599 /*
600 *******************************************************************************
601 *******************************************************************************
602 */
603 static int arcmsr_resume(device_t dev)
604 {
605 struct AdapterControlBlock *acb = device_get_softc(dev);
606
607 arcmsr_iop_init(acb);
608 return(0);
609 }
610 /*
611 **********************************************************************
612 **********************************************************************
613 */
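/*
** Copy the sense data returned in the ARCMSR_CDB into the CCB and flag the
** request as a SCSI CHECK CONDITION with valid autosense data.
*/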
614 static void arcmsr_report_sense_info(struct CommandControlBlock *srb)
615 {
616 union ccb *pccb = srb->pccb;
617
618 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
619 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
620 if(pccb->csio.sense_len) {
621 memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data));
622 memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData,
623 get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data)));
624 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */
625 pccb->ccb_h.status |= CAM_AUTOSNS_VALID;
626 }
627 }
628 /*
629 *********************************************************************
630 *********************************************************************
631 */
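/*
** The arcmsr_abort_hb[a-e]_allcmd() helpers ask the IOP to abort every
** outstanding command and log a warning if the acknowledge never arrives.
*/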
632 static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
633 {
634 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
635 if(!arcmsr_hba_wait_msgint_ready(acb)) {
636 kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
637 }
638 }
639 /*
640 *********************************************************************
641 *********************************************************************
642 */
643 static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
644 {
645 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
646 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
647 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
648 kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
649 }
650 }
651 /*
652 *********************************************************************
653 *********************************************************************
654 */
655 static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb)
656 {
657 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
658 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
659 if(!arcmsr_hbc_wait_msgint_ready(acb)) {
660 kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
661 }
662 }
663 /*
664 *********************************************************************
665 *********************************************************************
666 */
667 static void arcmsr_abort_hbd_allcmd(struct AdapterControlBlock *acb)
668 {
669 CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
670 if(!arcmsr_hbd_wait_msgint_ready(acb)) {
671 kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
672 }
673 }
674 /*
675 *********************************************************************
676 *********************************************************************
677 */
678 static void arcmsr_abort_hbe_allcmd(struct AdapterControlBlock *acb)
679 {
680 CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
681 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
682 CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell);
683 if(!arcmsr_hbe_wait_msgint_ready(acb)) {
684 kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
685 }
686 }
687 /*
688 *********************************************************************
689 *********************************************************************
690 */
691 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
692 {
693 switch (acb->adapter_type) {
694 case ACB_ADAPTER_TYPE_A: {
695 arcmsr_abort_hba_allcmd(acb);
696 }
697 break;
698 case ACB_ADAPTER_TYPE_B: {
699 arcmsr_abort_hbb_allcmd(acb);
700 }
701 break;
702 case ACB_ADAPTER_TYPE_C: {
703 arcmsr_abort_hbc_allcmd(acb);
704 }
705 break;
706 case ACB_ADAPTER_TYPE_D: {
707 arcmsr_abort_hbd_allcmd(acb);
708 }
709 break;
710 case ACB_ADAPTER_TYPE_E: {
711 arcmsr_abort_hbe_allcmd(acb);
712 }
713 break;
714 }
715 }
716 /*
717 **********************************************************************
718 **********************************************************************
719 */
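/*
** Finish an SRB: sync and unload its data DMA map, drop the outstanding
** count, release the frozen SIM queue once the count falls below the
** limit, free the SRB (unless it already timed out) and complete the CCB.
*/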
720 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
721 {
722 struct AdapterControlBlock *acb = srb->acb;
723 union ccb *pccb = srb->pccb;
724
725 if(srb->srb_flags & SRB_FLAG_TIMER_START)
726 callout_stop(&srb->ccb_callout);
727 if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
728 bus_dmasync_op_t op;
729
730 if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
731 op = BUS_DMASYNC_POSTREAD;
732 } else {
733 op = BUS_DMASYNC_POSTWRITE;
734 }
735 bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
736 ARCMSR_LOCK_ACQUIRE(&acb->io_lock);
737 bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
738 ARCMSR_LOCK_RELEASE(&acb->io_lock);
739 }
740 if(stand_flag == 1) {
741 atomic_subtract_int(&acb->srboutstandingcount, 1);
742 if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && (
743 acb->srboutstandingcount < (acb->maxOutstanding -10))) {
744 acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN;
745 pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
746 }
747 }
748 if(srb->srb_state != ARCMSR_SRB_TIMEOUT)
749 arcmsr_free_srb(srb);
750 acb->pktReturnCount++;
751 ARCMSR_LOCK_ACQUIRE(&acb->sim_lock);
752 xpt_done(pccb);
753 ARCMSR_LOCK_RELEASE(&acb->sim_lock);
754 }
755 /*
756 **************************************************************************
757 **************************************************************************
758 */
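/*
** Translate the adapter DeviceStatus of a completed SRB into a CAM status:
** selection timeout, abort and init failure mark the unit gone, CHECK
** CONDITION reports sense data, anything else completes as a parity error.
*/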
759 static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error)
760 {
761 int target, lun;
762
763 target = srb->pccb->ccb_h.target_id;
764 lun = srb->pccb->ccb_h.target_lun;
765 if(error == FALSE) {
766 if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
767 acb->devstate[target][lun] = ARECA_RAID_GOOD;
768 }
769 srb->pccb->ccb_h.status |= CAM_REQ_CMP;
770 arcmsr_srb_complete(srb, 1);
771 } else {
772 switch(srb->arcmsr_cdb.DeviceStatus) {
773 case ARCMSR_DEV_SELECT_TIMEOUT: {
774 if(acb->devstate[target][lun] == ARECA_RAID_GOOD) {
775 kprintf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun);
776 }
777 acb->devstate[target][lun] = ARECA_RAID_GONE;
778 srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
779 arcmsr_srb_complete(srb, 1);
780 }
781 break;
782 case ARCMSR_DEV_ABORTED:
783 case ARCMSR_DEV_INIT_FAIL: {
784 acb->devstate[target][lun] = ARECA_RAID_GONE;
785 srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
786 arcmsr_srb_complete(srb, 1);
787 }
788 break;
789 case SCSISTAT_CHECK_CONDITION: {
790 acb->devstate[target][lun] = ARECA_RAID_GOOD;
791 arcmsr_report_sense_info(srb);
792 arcmsr_srb_complete(srb, 1);
793 }
794 break;
795 default:
796 kprintf("arcmsr%d: scsi id=%d lun=%d isr got command error done,but got unknown DeviceStatus=0x%x \n"
797 , acb->pci_unit, target, lun ,srb->arcmsr_cdb.DeviceStatus);
798 acb->devstate[target][lun] = ARECA_RAID_GONE;
799 srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
800 /*unknown error or crc error just for retry*/
801 arcmsr_srb_complete(srb, 1);
802 break;
803 }
804 }
805 }
806 /*
807 **************************************************************************
808 **************************************************************************
809 */
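/*
** Convert a reply-queue entry back into its SRB: types A/B carry the frame
** address shifted right by 5, types C/D carry the address with status flags
** in the low bits, and type E carries the SMID index into psrb_pool. The
** completion status is then reported for that SRB.
*/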
810 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error)
811 {
812 struct CommandControlBlock *srb;
813
814 /* check if command done with no error*/
815 switch (acb->adapter_type) {
816 case ACB_ADAPTER_TYPE_C:
817 case ACB_ADAPTER_TYPE_D:
818 srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0)); /*frame must be 32 bytes aligned*/
819 break;
820 case ACB_ADAPTER_TYPE_E:
821 srb = acb->psrb_pool[flag_srb];
822 break;
823 case ACB_ADAPTER_TYPE_A:
824 case ACB_ADAPTER_TYPE_B:
825 default:
826 srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
827 break;
828 }
829 if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
830 if(srb->srb_state == ARCMSR_SRB_TIMEOUT) {
831 arcmsr_free_srb(srb);
832 		kprintf("arcmsr%d: srb='%p' returned srb had already timed out\n", acb->pci_unit, srb);
833 return;
834 }
835 	kprintf("arcmsr%d: returned srb has already been completed\n"
836 "srb='%p' srb_state=0x%x outstanding srb count=%d \n",
837 acb->pci_unit, srb, srb->srb_state, acb->srboutstandingcount);
838 return;
839 }
840 arcmsr_report_srb_state(acb, srb, error);
841 }
842 /*
843 **************************************************************************
844 **************************************************************************
845 */
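/*
** Callout handler for a timed-out request: mark the SRB ARCMSR_SRB_TIMEOUT,
** complete the CCB with CAM_CMD_TIMEOUT and leave the SRB allocated until
** the adapter eventually returns it (see arcmsr_drain_donequeue).
*/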
846 static void arcmsr_srb_timeout(void *arg)
847 {
848 struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
849 struct AdapterControlBlock *acb;
850 int target, lun;
851 u_int8_t cmd;
852
853 target = srb->pccb->ccb_h.target_id;
854 lun = srb->pccb->ccb_h.target_lun;
855 acb = srb->acb;
856 if(srb->srb_state == ARCMSR_SRB_START)
857 {
858 cmd = srb->pccb->csio.cdb_io.cdb_bytes[0];
859 srb->srb_state = ARCMSR_SRB_TIMEOUT;
860 srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT;
861 arcmsr_srb_complete(srb, 1);
862 kprintf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command time out!\n",
863 acb->pci_unit, target, lun, cmd, srb);
864 }
865 #ifdef ARCMSR_DEBUG1
866 arcmsr_dump_data(acb);
867 #endif
868 }
869
870 /*
871 **********************************************************************
872 **********************************************************************
873 */
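/*
** Drain whatever is still sitting in the adapter's outbound post/done
** queue and complete those SRBs; used while aborting all commands.
*/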
874 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
875 {
876 int i=0;
877 u_int32_t flag_srb;
878 u_int16_t error;
879
880 switch (acb->adapter_type) {
881 case ACB_ADAPTER_TYPE_A: {
882 u_int32_t outbound_intstatus;
883
884 /*clear and abort all outbound posted Q*/
885 outbound_intstatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
886 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
887 while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
888 error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
889 arcmsr_drain_donequeue(acb, flag_srb, error);
890 }
891 }
892 break;
893 case ACB_ADAPTER_TYPE_B: {
894 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
895
896 /*clear all outbound posted Q*/
897 WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
898 for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
899 if((flag_srb = phbbmu->done_qbuffer[i]) != 0) {
900 phbbmu->done_qbuffer[i] = 0;
901 error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
902 arcmsr_drain_donequeue(acb, flag_srb, error);
903 }
904 phbbmu->post_qbuffer[i] = 0;
905 }/*drain reply FIFO*/
906 phbbmu->doneq_index = 0;
907 phbbmu->postq_index = 0;
908 }
909 break;
910 case ACB_ADAPTER_TYPE_C: {
911
912 while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
913 flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
914 error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
915 arcmsr_drain_donequeue(acb, flag_srb, error);
916 }
917 }
918 break;
919 case ACB_ADAPTER_TYPE_D: {
920 arcmsr_hbd_postqueue_isr(acb);
921 }
922 break;
923 case ACB_ADAPTER_TYPE_E: {
924 arcmsr_hbe_postqueue_isr(acb);
925 }
926 break;
927 }
928 }
929 /*
930 ****************************************************************************
931 ****************************************************************************
932 */
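/*
** Reset the IOP state: with interrupts masked, drain the post queue, tell
** the firmware to abort all outstanding commands, complete any SRB still
** in the START state as CAM_REQ_ABORTED and clear the bookkeeping counters.
*/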
933 static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
934 {
935 struct CommandControlBlock *srb;
936 u_int32_t intmask_org;
937 u_int32_t i=0;
938
939 if(acb->srboutstandingcount>0) {
940 /* disable all outbound interrupt */
941 intmask_org = arcmsr_disable_allintr(acb);
942 /*clear and abort all outbound posted Q*/
943 arcmsr_done4abort_postqueue(acb);
944 /* talk to iop 331 outstanding command aborted*/
945 arcmsr_abort_allcmd(acb);
946 for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
947 srb = acb->psrb_pool[i];
948 if(srb->srb_state == ARCMSR_SRB_START) {
949 srb->srb_state = ARCMSR_SRB_ABORTED;
950 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
951 arcmsr_srb_complete(srb, 1);
952 kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p' aborted\n"
953 , acb->pci_unit, srb->pccb->ccb_h.target_id
954 , (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
955 }
956 }
957 /* enable all outbound interrupt */
958 arcmsr_enable_allintr(acb, intmask_org);
959 }
960 acb->srboutstandingcount = 0;
961 acb->workingsrb_doneindex = 0;
962 acb->workingsrb_startindex = 0;
963 acb->pktRequestCount = 0;
964 acb->pktReturnCount = 0;
965 }
966 /*
967 **********************************************************************
968 **********************************************************************
969 */
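/*
** Build the ARCMSR_CDB for a CAM request: copy the SCSI CDB, then append a
** 32-bit or 64-bit scatter/gather list. 64-bit entries that would cross a
** 4 GB boundary are split in two so no single entry straddles the boundary.
*/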
970 static void arcmsr_build_srb(struct CommandControlBlock *srb,
971 bus_dma_segment_t *dm_segs, u_int32_t nseg)
972 {
973 struct ARCMSR_CDB *arcmsr_cdb = &srb->arcmsr_cdb;
974 u_int8_t *psge = (u_int8_t *)&arcmsr_cdb->u;
975 u_int32_t address_lo, address_hi;
976 union ccb *pccb = srb->pccb;
977 struct ccb_scsiio *pcsio = &pccb->csio;
978 u_int32_t arccdbsize = 0x30;
979
980 memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
981 arcmsr_cdb->Bus = 0;
982 arcmsr_cdb->TargetID = pccb->ccb_h.target_id;
983 arcmsr_cdb->LUN = pccb->ccb_h.target_lun;
984 arcmsr_cdb->Function = 1;
985 arcmsr_cdb->CdbLength = (u_int8_t)pcsio->cdb_len;
986 bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
987 if(nseg != 0) {
988 struct AdapterControlBlock *acb = srb->acb;
989 bus_dmasync_op_t op;
990 u_int32_t length, i, cdb_sgcount = 0;
991
992 if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
993 op = BUS_DMASYNC_PREREAD;
994 } else {
995 op = BUS_DMASYNC_PREWRITE;
996 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
997 srb->srb_flags |= SRB_FLAG_WRITE;
998 }
999 bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
1000 for(i=0; i < nseg; i++) {
1001 /* Get the physical address of the current data pointer */
1002 length = arcmsr_htole32(dm_segs[i].ds_len);
1003 address_lo = arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
1004 address_hi = arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
1005 if(address_hi == 0) {
1006 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
1007 pdma_sg->address = address_lo;
1008 pdma_sg->length = length;
1009 psge += sizeof(struct SG32ENTRY);
1010 arccdbsize += sizeof(struct SG32ENTRY);
1011 } else {
1012 u_int32_t sg64s_size = 0, tmplength = length;
1013
1014 while(1) {
1015 u_int64_t span4G, length0;
1016 struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
1017
1018 span4G = (u_int64_t)address_lo + tmplength;
1019 pdma_sg->addresshigh = address_hi;
1020 pdma_sg->address = address_lo;
1021 if(span4G > 0x100000000) {
1022 /*see if cross 4G boundary*/
1023 length0 = 0x100000000-address_lo;
1024 pdma_sg->length = (u_int32_t)length0 | IS_SG64_ADDR;
1025 address_hi = address_hi+1;
1026 address_lo = 0;
1027 tmplength = tmplength - (u_int32_t)length0;
1028 sg64s_size += sizeof(struct SG64ENTRY);
1029 psge += sizeof(struct SG64ENTRY);
1030 cdb_sgcount++;
1031 } else {
1032 pdma_sg->length = tmplength | IS_SG64_ADDR;
1033 sg64s_size += sizeof(struct SG64ENTRY);
1034 psge += sizeof(struct SG64ENTRY);
1035 break;
1036 }
1037 }
1038 arccdbsize += sg64s_size;
1039 }
1040 cdb_sgcount++;
1041 }
1042 arcmsr_cdb->sgcount = (u_int8_t)cdb_sgcount;
1043 arcmsr_cdb->DataLength = pcsio->dxfer_len;
1044 if( arccdbsize > 256) {
1045 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
1046 }
1047 } else {
1048 arcmsr_cdb->DataLength = 0;
1049 }
1050 srb->arc_cdb_size = arccdbsize;
1051 arcmsr_cdb->msgPages = (arccdbsize/256) + ((arccdbsize % 256) ? 1 : 0);
1052 }
1053 /*
1054 **************************************************************************
1055 **************************************************************************
1056 */
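/*
** Hand a built SRB to the adapter. The inbound queue format differs per
** type: A writes the frame address to a register, B fills a shared post
** buffer and rings a doorbell, C/E encode the frame size in the low bits,
** and D writes an InBound_SRB descriptor and updates the write pointer.
*/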
1057 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
1058 {
1059 u_int32_t cdb_phyaddr_low = (u_int32_t) srb->cdb_phyaddr_low;
1060 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&srb->arcmsr_cdb;
1061
1062 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD);
1063 atomic_add_int(&acb->srboutstandingcount, 1);
1064 srb->srb_state = ARCMSR_SRB_START;
1065
1066 switch (acb->adapter_type) {
1067 case ACB_ADAPTER_TYPE_A: {
1068 if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1069 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low|ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
1070 } else {
1071 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low);
1072 }
1073 }
1074 break;
1075 case ACB_ADAPTER_TYPE_B: {
1076 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
1077 int ending_index, index;
1078
1079 index = phbbmu->postq_index;
1080 ending_index = ((index+1) % ARCMSR_MAX_HBB_POSTQUEUE);
1081 phbbmu->post_qbuffer[ending_index] = 0;
1082 if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1083 phbbmu->post_qbuffer[index] = cdb_phyaddr_low | ARCMSR_SRBPOST_FLAG_SGL_BSIZE;
1084 } else {
1085 phbbmu->post_qbuffer[index] = cdb_phyaddr_low;
1086 }
1087 index++;
1088 index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */
1089 phbbmu->postq_index = index;
1090 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
1091 }
1092 break;
1093 case ACB_ADAPTER_TYPE_C: {
1094 u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;
1095
1096 arc_cdb_size = (srb->arc_cdb_size > 0x300) ? 0x300 : srb->arc_cdb_size;
1097 ccb_post_stamp = (cdb_phyaddr_low | ((arc_cdb_size-1) >> 6) | 1);
1098 cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
1099 if(cdb_phyaddr_hi32)
1100 {
1101 CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32);
1102 CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
1103 }
1104 else
1105 {
1106 CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
1107 }
1108 }
1109 break;
1110 case ACB_ADAPTER_TYPE_D: {
1111 struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
1112 u_int16_t index_stripped;
1113 u_int16_t postq_index;
1114 struct InBound_SRB *pinbound_srb;
1115
1116 ARCMSR_LOCK_ACQUIRE(&acb->postDone_lock);
1117 postq_index = phbdmu->postq_index;
1118 pinbound_srb = (struct InBound_SRB *)&phbdmu->post_qbuffer[postq_index & 0xFF];
1119 pinbound_srb->addressHigh = srb->cdb_phyaddr_high;
1120 pinbound_srb->addressLow = srb->cdb_phyaddr_low;
1121 pinbound_srb->length = srb->arc_cdb_size >> 2;
1122 arcmsr_cdb->Context = srb->cdb_phyaddr_low;
1123 if (postq_index & 0x4000) {
1124 index_stripped = postq_index & 0xFF;
1125 index_stripped += 1;
1126 index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
1127 phbdmu->postq_index = index_stripped ? (index_stripped | 0x4000) : index_stripped;
1128 } else {
1129 index_stripped = postq_index;
1130 index_stripped += 1;
1131 index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
1132 phbdmu->postq_index = index_stripped ? index_stripped : (index_stripped | 0x4000);
1133 }
1134 CHIP_REG_WRITE32(HBD_MessageUnit, 0, inboundlist_write_pointer, postq_index);
1135 ARCMSR_LOCK_RELEASE(&acb->postDone_lock);
1136 }
1137 break;
1138 case ACB_ADAPTER_TYPE_E: {
1139 u_int32_t ccb_post_stamp, arc_cdb_size;
1140
1141 arc_cdb_size = (srb->arc_cdb_size > 0x300) ? 0x300 : srb->arc_cdb_size;
1142 ccb_post_stamp = (srb->smid | ((arc_cdb_size-1) >> 6));
1143 CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_queueport_high, 0);
1144 CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_queueport_low, ccb_post_stamp);
1145 }
1146 break;
1147 }
1148 }
1149 /*
1150 ************************************************************************
1151 ************************************************************************
1152 */
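/*
** arcmsr_get_iop_rqbuffer()/arcmsr_get_iop_wqbuffer() return the IOP
** message read/write buffer of the current adapter's message unit.
*/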
1153 static struct QBUFFER *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
1154 {
1155 struct QBUFFER *qbuffer=NULL;
1156
1157 switch (acb->adapter_type) {
1158 case ACB_ADAPTER_TYPE_A: {
1159 struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;
1160
1161 qbuffer = (struct QBUFFER *)&phbamu->message_rbuffer;
1162 }
1163 break;
1164 case ACB_ADAPTER_TYPE_B: {
1165 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
1166
1167 qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
1168 }
1169 break;
1170 case ACB_ADAPTER_TYPE_C: {
1171 struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;
1172
1173 qbuffer = (struct QBUFFER *)&phbcmu->message_rbuffer;
1174 }
1175 break;
1176 case ACB_ADAPTER_TYPE_D: {
1177 struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
1178
1179 qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_rbuffer;
1180 }
1181 break;
1182 case ACB_ADAPTER_TYPE_E: {
1183 struct HBE_MessageUnit *phbcmu = (struct HBE_MessageUnit *)acb->pmu;
1184
1185 qbuffer = (struct QBUFFER *)&phbcmu->message_rbuffer;
1186 }
1187 break;
1188 }
1189 return(qbuffer);
1190 }
1191 /*
1192 ************************************************************************
1193 ************************************************************************
1194 */
1195 static struct QBUFFER *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
1196 {
1197 struct QBUFFER *qbuffer = NULL;
1198
1199 switch (acb->adapter_type) {
1200 case ACB_ADAPTER_TYPE_A: {
1201 struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;
1202
1203 qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
1204 }
1205 break;
1206 case ACB_ADAPTER_TYPE_B: {
1207 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
1208
1209 qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
1210 }
1211 break;
1212 case ACB_ADAPTER_TYPE_C: {
1213 struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;
1214
1215 qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
1216 }
1217 break;
1218 case ACB_ADAPTER_TYPE_D: {
1219 struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
1220
1221 qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_wbuffer;
1222 }
1223 break;
1224 case ACB_ADAPTER_TYPE_E: {
1225 struct HBE_MessageUnit *phbcmu = (struct HBE_MessageUnit *)acb->pmu;
1226
1227 qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
1228 }
1229 break;
1230 }
1231 return(qbuffer);
1232 }
1233 /*
1234 **************************************************************************
1235 **************************************************************************
1236 */
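/*
** Ring the inbound doorbell to tell the IOP that the driver has consumed
** the current request buffer contents.
*/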
1237 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1238 {
1239 switch (acb->adapter_type) {
1240 case ACB_ADAPTER_TYPE_A: {
1241 /* let IOP know data has been read */
1242 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
1243 }
1244 break;
1245 case ACB_ADAPTER_TYPE_B: {
1246 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
1247 /* let IOP know data has been read */
1248 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
1249 }
1250 break;
1251 case ACB_ADAPTER_TYPE_C: {
1252 /* let IOP know data has been read */
1253 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
1254 }
1255 break;
1256 case ACB_ADAPTER_TYPE_D: {
1257 /* let IOP know data has been read */
1258 CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ);
1259 }
1260 break;
1261 case ACB_ADAPTER_TYPE_E: {
1262 /* let IOP know data has been read */
1263 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
1264 CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell);
1265 }
1266 break;
1267 }
1268 }
1269 /*
1270 **************************************************************************
1271 **************************************************************************
1272 */
1273 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1274 {
1275 switch (acb->adapter_type) {
1276 case ACB_ADAPTER_TYPE_A: {
1277 /*
1278 ** push inbound doorbell tell iop, driver data write ok
1279 ** and wait reply on next hwinterrupt for next Qbuffer post
1280 */
1281 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
1282 }
1283 break;
1284 case ACB_ADAPTER_TYPE_B: {
1285 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
1286 /*
1287 ** push inbound doorbell tell iop, driver data write ok
1288 ** and wait reply on next hwinterrupt for next Qbuffer post
1289 */
1290 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
1291 }
1292 break;
1293 case ACB_ADAPTER_TYPE_C: {
1294 /*
1295 ** push inbound doorbell tell iop, driver data write ok
1296 ** and wait reply on next hwinterrupt for next Qbuffer post
1297 */
1298 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
1299 }
1300 break;
1301 case ACB_ADAPTER_TYPE_D: {
1302 /*
1303 ** push inbound doorbell tell iop, driver data write ok
1304 ** and wait reply on next hwinterrupt for next Qbuffer post
1305 */
1306 CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_IN_READY);
1307 }
1308 break;
1309 case ACB_ADAPTER_TYPE_E: {
1310 /*
1311 ** push inbound doorbell tell iop, driver data write ok
1312 ** and wait reply on next hwinterrupt for next Qbuffer post
1313 */
1314 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
1315 CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell);
1316 }
1317 break;
1318 }
1319 }
1320 /*
1321 ************************************************************************
1322 ************************************************************************
1323 */
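/*
** The arcmsr_stop_hb[a-e]_bgrb() helpers clear ACB_F_MSG_START_BGRB and
** ask the IOP to stop its background rebuild activity.
*/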
1324 static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
1325 {
1326 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1327 CHIP_REG_WRITE32(HBA_MessageUnit,
1328 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1329 if(!arcmsr_hba_wait_msgint_ready(acb)) {
1330 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1331 , acb->pci_unit);
1332 }
1333 }
1334 /*
1335 ************************************************************************
1336 ************************************************************************
1337 */
1338 static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
1339 {
1340 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
1341 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1342 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
1343 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
1344 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1345 , acb->pci_unit);
1346 }
1347 }
1348 /*
1349 ************************************************************************
1350 ************************************************************************
1351 */
1352 static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb)
1353 {
1354 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1355 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1356 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1357 if(!arcmsr_hbc_wait_msgint_ready(acb)) {
1358 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
1359 }
1360 }
1361 /*
1362 ************************************************************************
1363 ************************************************************************
1364 */
1365 static void arcmsr_stop_hbd_bgrb(struct AdapterControlBlock *acb)
1366 {
1367 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1368 CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1369 if(!arcmsr_hbd_wait_msgint_ready(acb)) {
1370 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
1371 }
1372 }
1373 /*
1374 ************************************************************************
1375 ************************************************************************
1376 */
1377 static void arcmsr_stop_hbe_bgrb(struct AdapterControlBlock *acb)
1378 {
1379 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1380 CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1381 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
1382 CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell);
1383 if(!arcmsr_hbe_wait_msgint_ready(acb)) {
1384 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
1385 }
1386 }
1387 /*
1388 ************************************************************************
1389 ************************************************************************
1390 */
1391 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1392 {
1393 switch (acb->adapter_type) {
1394 case ACB_ADAPTER_TYPE_A: {
1395 arcmsr_stop_hba_bgrb(acb);
1396 }
1397 break;
1398 case ACB_ADAPTER_TYPE_B: {
1399 arcmsr_stop_hbb_bgrb(acb);
1400 }
1401 break;
1402 case ACB_ADAPTER_TYPE_C: {
1403 arcmsr_stop_hbc_bgrb(acb);
1404 }
1405 break;
1406 case ACB_ADAPTER_TYPE_D: {
1407 arcmsr_stop_hbd_bgrb(acb);
1408 }
1409 break;
1410 case ACB_ADAPTER_TYPE_E: {
1411 arcmsr_stop_hbe_bgrb(acb);
1412 }
1413 break;
1414 }
1415 }
1416 /*
1417 ************************************************************************
1418 ************************************************************************
1419 */
1420 static void arcmsr_poll(struct cam_sim *psim)
1421 {
1422 struct AdapterControlBlock *acb;
1423 int mutex;
1424
1425 acb = (struct AdapterControlBlock *)cam_sim_softc(psim);
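/*
** arcmsr_poll() may be entered with or without the ISR lock already held.
** lockstatus() returning zero is taken to mean the lock is not held, so
** acquire it only for the duration of this poll; otherwise run the
** interrupt handler under the lock the caller already owns.
*/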
1426 mutex = lockstatus(&acb->isr_lock, curthread);
1427 if( mutex == 0 )
1428 ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
1429 arcmsr_interrupt(acb);
1430 if( mutex == 0 )
1431 ARCMSR_LOCK_RELEASE(&acb->isr_lock);
1432 }
1433 /*
1434 **************************************************************************
1435 **************************************************************************
1436 */
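/*
** Adapter types B and later (see arcmsr_Read_iop_rqbuffer_data below)
** presumably require the IOP queue buffer to be accessed as aligned
** 32-bit words, so the payload is first staged through a small
** kmalloc'd bounce buffer in 4-byte chunks and then copied byte by
** byte into the circular rqbuffer.
*/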
1437 static u_int32_t arcmsr_Read_iop_rqbuffer_data_D(struct AdapterControlBlock *acb,
1438 struct QBUFFER *prbuffer) {
1439
1440 u_int8_t *pQbuffer;
1441 u_int8_t *buf1 = NULL;
1442 u_int32_t *iop_data, *buf2 = NULL;
1443 u_int32_t iop_len, data_len;
1444
1445 iop_data = (u_int32_t *)prbuffer->data;
1446 iop_len = (u_int32_t)prbuffer->data_len;
1447 if ( iop_len > 0 )
1448 {
1449 buf1 = kmalloc(128, M_DEVBUF, M_NOWAIT | M_ZERO);
1450 buf2 = (u_int32_t *)buf1;
1451 if( buf1 == NULL)
1452 return (0);
1453 data_len = iop_len;
1454 while(data_len >= 4)
1455 {
1456 *buf2++ = *iop_data++;
1457 data_len -= 4;
1458 }
1459 if(data_len)
1460 *buf2 = *iop_data;
1461 buf2 = (u_int32_t *)buf1;
1462 }
1463 while (iop_len > 0) {
1464 pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex];
1465 *pQbuffer = *buf1;
1466 acb->rqbuf_lastindex++;
1467 /* wrap the index back to 0 at the end of the ring */
1468 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1469 buf1++;
1470 iop_len--;
1471 }
1472 if(buf2)
1473 kfree( (u_int8_t *)buf2, M_DEVBUF);
1474 /* let IOP know data has been read */
1475 arcmsr_iop_message_read(acb);
1476 return (1);
1477 }
1478 /*
1479 **************************************************************************
1480 **************************************************************************
1481 */
1482 static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
1483 struct QBUFFER *prbuffer) {
1484
1485 u_int8_t *pQbuffer;
1486 u_int8_t *iop_data;
1487 u_int32_t iop_len;
1488
1489 if(acb->adapter_type >= ACB_ADAPTER_TYPE_B) {
1490 return(arcmsr_Read_iop_rqbuffer_data_D(acb, prbuffer));
1491 }
1492 iop_data = (u_int8_t *)prbuffer->data;
1493 iop_len = (u_int32_t)prbuffer->data_len;
1494 while (iop_len > 0) {
1495 pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex];
1496 *pQbuffer = *iop_data;
1497 acb->rqbuf_lastindex++;
1498 /* wrap the index back to 0 at the end of the ring */
1499 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1500 iop_data++;
1501 iop_len--;
1502 }
1503 /* let IOP know data has been read */
1504 arcmsr_iop_message_read(acb);
1505 return (1);
1506 }
1507 /*
1508 **************************************************************************
1509 **************************************************************************
1510 */
1511 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1512 {
1513 struct QBUFFER *prbuffer;
1514 int my_empty_len;
1515
1516 /* check whether this IOP data would overflow my rqbuffer */
1517 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1518 prbuffer = arcmsr_get_iop_rqbuffer(acb);
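/*
** Estimate how much room is left in the circular rqbuffer before
** accepting the IOP payload; if it does not fit, leave the data in the
** IOP and mark ACB_F_IOPDATA_OVERFLOW so it is fetched later.
*/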
1519 my_empty_len = (acb->rqbuf_lastindex - acb->rqbuf_firstindex - 1) &
1520 (ARCMSR_MAX_QBUFFER-1);
1521 if(my_empty_len >= prbuffer->data_len) {
1522 if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
1523 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
1524 } else {
1525 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
1526 }
1527 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1528 }
1529 /*
1530 **********************************************************************
1531 **********************************************************************
1532 */
1533 static void arcmsr_Write_data_2iop_wqbuffer_D(struct AdapterControlBlock *acb)
1534 {
1535 u_int8_t *pQbuffer;
1536 struct QBUFFER *pwbuffer;
1537 u_int8_t *buf1 = NULL;
1538 u_int32_t *iop_data, *buf2 = NULL;
1539 u_int32_t allxfer_len = 0, data_len;
1540
1541 if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
1542 buf1 = kmalloc(128, M_DEVBUF, M_NOWAIT | M_ZERO);
1543 buf2 = (u_int32_t *)buf1;
1544 if( buf1 == NULL)
1545 return;
1546
1547 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1548 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1549 iop_data = (u_int32_t *)pwbuffer->data;
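/*
** At most 124 bytes are moved per message; this is presumably the size
** of the QBUFFER data area, so anything left in wqbuffer waits for the
** next "data read" doorbell from the IOP.
*/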
1550 while((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
1551 && (allxfer_len < 124)) {
1552 pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
1553 *buf1 = *pQbuffer;
1554 acb->wqbuf_firstindex++;
1555 acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1556 buf1++;
1557 allxfer_len++;
1558 }
1559 pwbuffer->data_len = allxfer_len;
1560 data_len = allxfer_len;
1561 buf1 = (u_int8_t *)buf2;
1562 while(data_len >= 4)
1563 {
1564 *iop_data++ = *buf2++;
1565 data_len -= 4;
1566 }
1567 if(data_len)
1568 *iop_data = *buf2;
1569 kfree( buf1, M_DEVBUF);
1570 arcmsr_iop_message_wrote(acb);
1571 }
1572 }
1573 /*
1574 **********************************************************************
1575 **********************************************************************
1576 */
1577 static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb)
1578 {
1579 u_int8_t *pQbuffer;
1580 struct QBUFFER *pwbuffer;
1581 u_int8_t *iop_data;
1582 int32_t allxfer_len=0;
1583
1584 if(acb->adapter_type >= ACB_ADAPTER_TYPE_B) {
1585 arcmsr_Write_data_2iop_wqbuffer_D(acb);
1586 return;
1587 }
1588 if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
1589 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1590 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1591 iop_data = (u_int8_t *)pwbuffer->data;
1592 while((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
1593 && (allxfer_len < 124)) {
1594 pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
1595 *iop_data = *pQbuffer;
1596 acb->wqbuf_firstindex++;
1597 acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1598 iop_data++;
1599 allxfer_len++;
1600 }
1601 pwbuffer->data_len = allxfer_len;
1602 arcmsr_iop_message_wrote(acb);
1603 }
1604 }
1605 /*
1606 **************************************************************************
1607 **************************************************************************
1608 */
1609 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
1610 {
1611 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1612 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
1613 /*
1614 *****************************************************************
1615 ** Check whether there are any mail packages from the user-space program
1616 ** in my post bag; now is the time to send them to Areca's firmware.
1617 *****************************************************************
1618 */
1619 if(acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
1620 arcmsr_Write_data_2iop_wqbuffer(acb);
1621 }
1622 if(acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
1623 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
1624 }
1625 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1626 }
1627 /*
1628 **************************************************************************
1629 **************************************************************************
1630 */
1631 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb)
1632 {
1633 /*
1634 if (ccb->ccb_h.status != CAM_REQ_CMP)
1635 kprintf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x,"
1636 "failure status=%x\n", ccb->ccb_h.target_id,
1637 ccb->ccb_h.target_lun, ccb->ccb_h.status);
1638 else
1639 kprintf("arcmsr_rescanLun_cb: Rescan lun successfully!\n");
1640 */
1641 xpt_free_path(ccb->ccb_h.path);
1642 xpt_free_ccb(&ccb->ccb_h);
1643 }
1644
1645 static void arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun)
1646 {
1647 struct cam_path *path;
1648 union ccb *ccb;
1649
1650 if ((ccb = (union ccb *)xpt_alloc_ccb()) == NULL)
1651 return;
1652 if (xpt_create_path(&path, xpt_periph, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP)
1653 {
1654 xpt_free_ccb(&ccb->ccb_h);
1655 return;
1656 }
1657 xpt_setup_ccb(&ccb->ccb_h, path, 5);
1658 ccb->ccb_h.func_code = XPT_SCAN_LUN;
1659 ccb->ccb_h.cbfcnp = arcmsr_rescanLun_cb;
1660 ccb->crcn.flags = CAM_FLAG_NONE;
1661 xpt_action(ccb);
1662 }
1663
1664
1665 static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun)
1666 {
1667 struct CommandControlBlock *srb;
1668 u_int32_t intmask_org;
1669 int i;
1670
1671 /* disable all outbound interrupts */
1672 intmask_org = arcmsr_disable_allintr(acb);
1673 for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++)
1674 {
1675 srb = acb->psrb_pool[i];
1676 if (srb->srb_state == ARCMSR_SRB_START)
1677 {
1678 if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun))
1679 {
1680 srb->srb_state = ARCMSR_SRB_ABORTED;
1681 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
1682 arcmsr_srb_complete(srb, 1);
1683 kprintf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb);
1684 }
1685 }
1686 }
1687 /* enable outbound Post Queue, outbound doorbell Interrupt */
1688 arcmsr_enable_allintr(acb, intmask_org);
1689 }
1690 /*
1691 **************************************************************************
1692 **************************************************************************
1693 */
1694 static void arcmsr_dr_handle(struct AdapterControlBlock *acb) {
1695 u_int32_t devicemap;
1696 u_int32_t target, lun;
1697 u_int32_t deviceMapCurrent[4]={0};
1698 u_int8_t *pDevMap;
1699
1700 switch (acb->adapter_type) {
1701 case ACB_ADAPTER_TYPE_A:
1702 devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1703 for (target = 0; target < 4; target++)
1704 {
1705 deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1706 devicemap += 4;
1707 }
1708 break;
1709
1710 case ACB_ADAPTER_TYPE_B:
1711 devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1712 for (target = 0; target < 4; target++)
1713 {
1714 deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap);
1715 devicemap += 4;
1716 }
1717 break;
1718
1719 case ACB_ADAPTER_TYPE_C:
1720 devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1721 for (target = 0; target < 4; target++)
1722 {
1723 deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1724 devicemap += 4;
1725 }
1726 break;
1727 case ACB_ADAPTER_TYPE_D:
1728 devicemap = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1729 for (target = 0; target < 4; target++)
1730 {
1731 deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1732 devicemap += 4;
1733 }
1734 break;
1735 case ACB_ADAPTER_TYPE_E:
1736 devicemap = offsetof(struct HBE_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1737 for (target = 0; target < 4; target++)
1738 {
1739 deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1740 devicemap += 4;
1741 }
1742 break;
1743 }
1744
1745 if(acb->acb_flags & ACB_F_BUS_HANG_ON)
1746 {
1747 acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
1748 }
1749 /*
1750 ** adapter posted CONFIG message
1751 ** copy the new map, note if there are differences with the current map
1752 */
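/*
** Each byte of the device map is a per-target bitmask of present LUNs.
** XOR against the cached copy yields the LUNs that changed state:
** a bit that was set and is now clear means the unit departed, a bit
** that was clear and is now set means a new unit arrived.
*/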
1753 pDevMap = (u_int8_t *)&deviceMapCurrent[0];
1754 for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++)
1755 {
1756 if (*pDevMap != acb->device_map[target])
1757 {
1758 u_int8_t difference, bit_check;
1759
1760 difference = *pDevMap ^ acb->device_map[target];
1761 for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++)
1762 {
1763 bit_check = (1 << lun); /*check bit from 0....31*/
1764 if(difference & bit_check)
1765 {
1766 if(acb->device_map[target] & bit_check)
1767 {/* unit departed */
1768 kprintf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun);
1769 arcmsr_abort_dr_ccbs(acb, target, lun);
1770 arcmsr_rescan_lun(acb, target, lun);
1771 acb->devstate[target][lun] = ARECA_RAID_GONE;
1772 }
1773 else
1774 {/* unit arrived */
1775 kprintf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n",target,lun);
1776 arcmsr_rescan_lun(acb, target, lun);
1777 acb->devstate[target][lun] = ARECA_RAID_GOOD;
1778 }
1779 }
1780 }
1781 /* kprintf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */
1782 acb->device_map[target] = *pDevMap;
1783 }
1784 pDevMap++;
1785 }
1786 }
1787 /*
1788 **************************************************************************
1789 **************************************************************************
1790 */
1791 static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) {
1792 u_int32_t outbound_message;
1793
1794 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1795 outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]);
1796 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1797 arcmsr_dr_handle( acb );
1798 }
1799 /*
1800 **************************************************************************
1801 **************************************************************************
1802 */
1803 static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) {
1804 u_int32_t outbound_message;
1805 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
1806
1807 /* clear interrupts */
1808 WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1809 outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]);
1810 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1811 arcmsr_dr_handle( acb );
1812 }
1813 /*
1814 **************************************************************************
1815 **************************************************************************
1816 */
1817 static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) {
1818 u_int32_t outbound_message;
1819
1820 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
1821 outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]);
1822 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1823 arcmsr_dr_handle( acb );
1824 }
1825 /*
1826 **************************************************************************
1827 **************************************************************************
1828 */
1829 static void arcmsr_hbd_message_isr(struct AdapterControlBlock *acb) {
1830 u_int32_t outbound_message;
1831
1832 CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);
1833 outbound_message = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[0]);
1834 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1835 arcmsr_dr_handle( acb );
1836 }
1837 /*
1838 **************************************************************************
1839 **************************************************************************
1840 */
1841 static void arcmsr_hbe_message_isr(struct AdapterControlBlock *acb) {
1842 u_int32_t outbound_message;
1843
1844 CHIP_REG_WRITE32(HBE_MessageUnit, 0, host_int_status, 0);
1845 outbound_message = CHIP_REG_READ32(HBE_MessageUnit, 0, msgcode_rwbuffer[0]);
1846 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1847 arcmsr_dr_handle( acb );
1848 }
1849 /*
1850 **************************************************************************
1851 **************************************************************************
1852 */
1853 static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
1854 {
1855 u_int32_t doorbell_status;
1856
1857 /*
1858 *******************************************************************
1859 ** Maybe here we need to check whether wrqbuffer_lock is held or not.
1860 ** DOORBELL: ding! dong!
1861 ** Check whether the firmware has any mail for us to collect.
1862 *******************************************************************
1863 */
1864 doorbell_status = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
1865 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
1866 if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
1867 arcmsr_iop2drv_data_wrote_handle(acb);
1868 }
1869 if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
1870 arcmsr_iop2drv_data_read_handle(acb);
1871 }
1872 }
1873 /*
1874 **************************************************************************
1875 **************************************************************************
1876 */
1877 static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
1878 {
1879 u_int32_t doorbell_status;
1880
1881 /*
1882 *******************************************************************
1883 ** Maybe here we need to check whether wrqbuffer_lock is held or not.
1884 ** DOORBELL: ding! dong!
1885 ** Check whether the firmware has any mail for us to collect.
1886 *******************************************************************
1887 */
1888 doorbell_status = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
1889 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, doorbell_status); /* clear doorbell interrupt */
1890 if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
1891 arcmsr_iop2drv_data_wrote_handle(acb);
1892 }
1893 if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
1894 arcmsr_iop2drv_data_read_handle(acb);
1895 }
1896 if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
1897 arcmsr_hbc_message_isr(acb); /* messenger of "driver to iop commands" */
1898 }
1899 }
1900 /*
1901 **************************************************************************
1902 **************************************************************************
1903 */
1904 static void arcmsr_hbd_doorbell_isr(struct AdapterControlBlock *acb)
1905 {
1906 u_int32_t doorbell_status;
1907
1908 /*
1909 *******************************************************************
1910 ** Maybe here we need to check whether wrqbuffer_lock is held or not.
1911 ** DOORBELL: ding! dong!
1912 ** Check whether the firmware has any mail for us to collect.
1913 *******************************************************************
1914 */
1915 doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE;
1916 if(doorbell_status)
1917 CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
1918 while( doorbell_status & ARCMSR_HBDMU_F0_DOORBELL_CAUSE ) {
1919 if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_WRITE_OK) {
1920 arcmsr_iop2drv_data_wrote_handle(acb);
1921 }
1922 if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_READ_OK) {
1923 arcmsr_iop2drv_data_read_handle(acb);
1924 }
1925 if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) {
1926 arcmsr_hbd_message_isr(acb); /* messenger of "driver to iop commands" */
1927 }
1928 doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE;
1929 if(doorbell_status)
1930 CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
1931 }
1932 }
1933 /*
1934 **************************************************************************
1935 **************************************************************************
1936 */
1937 static void arcmsr_hbe_doorbell_isr(struct AdapterControlBlock *acb)
1938 {
1939 u_int32_t doorbell_status, in_doorbell;
1940
1941 /*
1942 *******************************************************************
1943 ** Maybe here we need to check whether wrqbuffer_lock is held or not.
1944 ** DOORBELL: ding! dong!
1945 ** Check whether the firmware has any mail for us to collect.
1946 *******************************************************************
1947 */
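/*
** The type E doorbell register appears to hold latched state rather than
** one-shot event bits, so the driver keeps the last value it observed in
** acb->in_doorbell and XORs it against the current value to find the
** bits that changed since the previous interrupt.
*/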
1948 in_doorbell = CHIP_REG_READ32(HBE_MessageUnit, 0, iobound_doorbell);
1949 CHIP_REG_WRITE32(HBE_MessageUnit, 0, host_int_status, 0); /* clear doorbell interrupt */
1950 doorbell_status = in_doorbell ^ acb->in_doorbell;
1951 if(doorbell_status & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
1952 arcmsr_iop2drv_data_wrote_handle(acb);
1953 }
1954 if(doorbell_status & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) {
1955 arcmsr_iop2drv_data_read_handle(acb);
1956 }
1957 if(doorbell_status & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
1958 arcmsr_hbe_message_isr(acb); /* messenger of "driver to iop commands" */
1959 }
1960 acb->in_doorbell = in_doorbell;
1961 }
1962 /*
1963 **************************************************************************
1964 **************************************************************************
1965 */
1966 static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1967 {
1968 u_int32_t flag_srb;
1969 u_int16_t error;
1970
1971 /*
1972 *****************************************************************************
1973 ** areca cdb command done
1974 *****************************************************************************
1975 */
1976 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1977 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1978 while((flag_srb = CHIP_REG_READ32(HBA_MessageUnit,
1979 0, outbound_queueport)) != 0xFFFFFFFF) {
1980 /* check if command done with no error*/
1981 error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
1982 arcmsr_drain_donequeue(acb, flag_srb, error);
1983 } /*drain reply FIFO*/
1984 }
1985 /*
1986 **************************************************************************
1987 **************************************************************************
1988 */
1989 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1990 {
1991 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
1992 u_int32_t flag_srb;
1993 int index;
1994 u_int16_t error;
1995
1996 /*
1997 *****************************************************************************
1998 ** areca cdb command done
1999 *****************************************************************************
2000 */
2001 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
2002 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2003 index = phbbmu->doneq_index;
2004 while((flag_srb = phbbmu->done_qbuffer[index]) != 0) {
2005 phbbmu->done_qbuffer[index] = 0;
2006 index++;
2007 index %= ARCMSR_MAX_HBB_POSTQUEUE; /* wrap the index back to 0 at the end of the ring */
2008 phbbmu->doneq_index = index;
2009 /* check if command done with no error*/
2010 error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
2011 arcmsr_drain_donequeue(acb, flag_srb, error);
2012 } /*drain reply FIFO*/
2013 }
2014 /*
2015 **************************************************************************
2016 **************************************************************************
2017 */
2018 static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
2019 {
2020 u_int32_t flag_srb,throttling = 0;
2021 u_int16_t error;
2022
2023 /*
2024 *****************************************************************************
2025 ** areca cdb command done
2026 *****************************************************************************
2027 */
2028 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2029 do {
2030 flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
2031 if (flag_srb == 0xFFFFFFFF)
2032 break;
2033 /* check if command done with no error*/
2034 error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
2035 arcmsr_drain_donequeue(acb, flag_srb, error);
2036 throttling++;
2037 if(throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
2038 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
2039 throttling = 0;
2040 }
2041 } while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR);
2042 }
2043 /*
2044 **********************************************************************
2045 **
2046 **********************************************************************
2047 */
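/*
** The done-queue index keeps the slot number in its low byte and
** apparently toggles bit 0x4000 every time the index wraps back to
** zero, so consecutive passes over the ring can be told apart. This
** helper advances the index and returns the updated value.
*/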
2048 static uint16_t arcmsr_get_doneq_index(struct HBD_MessageUnit0 *phbdmu)
2049 {
2050 uint16_t doneq_index, index_stripped;
2051
2052 doneq_index = phbdmu->doneq_index;
2053 if (doneq_index & 0x4000) {
2054 index_stripped = doneq_index & 0xFF;
2055 index_stripped += 1;
2056 index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
2057 phbdmu->doneq_index = index_stripped ?
2058 (index_stripped | 0x4000) : index_stripped;
2059 } else {
2060 index_stripped = doneq_index;
2061 index_stripped += 1;
2062 index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
2063 phbdmu->doneq_index = index_stripped ?
2064 index_stripped : (index_stripped | 0x4000);
2065 }
2066 return (phbdmu->doneq_index);
2067 }
2068 /*
2069 **************************************************************************
2070 **************************************************************************
2071 */
2072 static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb)
2073 {
2074 struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
2075 u_int32_t outbound_write_pointer;
2076 u_int32_t addressLow;
2077 uint16_t doneq_index;
2078 u_int16_t error;
2079 /*
2080 *****************************************************************************
2081 ** areca cdb command done
2082 *****************************************************************************
2083 */
2084 if((CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause) &
2085 ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT) == 0)
2086 return;
2087 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
2088 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
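/*
** Slot 0 of done_qbuffer holds the IOP's current write pointer; completed
** entries start at slot 1, hence the "+1" when fetching addressLow below.
** Keep draining until the driver's read index catches up with the IOP's
** write pointer, acknowledging each entry as it is consumed.
*/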
2089 outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow;
2090 doneq_index = phbdmu->doneq_index;
2091 while ((doneq_index & 0xFF) != (outbound_write_pointer & 0xFF)) {
2092 doneq_index = arcmsr_get_doneq_index(phbdmu);
2093 addressLow = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow;
2094 error = (addressLow & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
2095 arcmsr_drain_donequeue(acb, addressLow, error); /*Check if command done with no error */
2096 CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index);
2097 outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow;
2098 }
2099 CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_interrupt_cause, ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT_CLEAR);
2100 CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause); /*Dummy ioread32 to force pci flush */
2101 }
2102 /*
2103 **************************************************************************
2104 **************************************************************************
2105 */
2106 static void arcmsr_hbe_postqueue_isr(struct AdapterControlBlock *acb)
2107 {
2108 u_int16_t error;
2109 uint32_t doneq_index;
2110 uint16_t cmdSMID;
2111
2112 /*
2113 *****************************************************************************
2114 ** areca cdb command done
2115 *****************************************************************************
2116 */
2117 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
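/*
** Type E adapters report completions through a producer/consumer pair:
** walk the completion queue until the local consumer index matches the
** hardware producer index, using the SMID in each entry to locate the
** SRB, then publish the new consumer index back to the adapter.
*/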
2118 doneq_index = acb->doneq_index;
2119 while ((CHIP_REG_READ32(HBE_MessageUnit, 0, reply_post_producer_index) & 0xFFFF) != doneq_index) {
2120 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
2121 error = (acb->pCompletionQ[doneq_index].cmdFlag & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
2122 arcmsr_drain_donequeue(acb, (u_int32_t)cmdSMID, error);
2123 doneq_index++;
2124 if (doneq_index >= acb->completionQ_entry)
2125 doneq_index = 0;
2126 }
2127 acb->doneq_index = doneq_index;
2128 CHIP_REG_WRITE32(HBE_MessageUnit, 0, reply_post_consumer_index, doneq_index);
2129 }
2130 /*
2131 **********************************************************************
2132 **********************************************************************
2133 */
2134 static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb)
2135 {
2136 u_int32_t outbound_intStatus;
2137 /*
2138 *********************************************
2139 ** check outbound intstatus
2140 *********************************************
2141 */
2142 outbound_intStatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
2143 if(!outbound_intStatus) {
2144 /* it must be a shared irq */
2145 return;
2146 }
2147 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intStatus); /*clear interrupt*/
2148 /* MU doorbell interrupts*/
2149 if(outbound_intStatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
2150 arcmsr_hba_doorbell_isr(acb);
2151 }
2152 /* MU post queue interrupts*/
2153 if(outbound_intStatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
2154 arcmsr_hba_postqueue_isr(acb);
2155 }
2156 if(outbound_intStatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
2157 arcmsr_hba_message_isr(acb);
2158 }
2159 }
2160 /*
2161 **********************************************************************
2162 **********************************************************************
2163 */
2164 static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb)
2165 {
2166 u_int32_t outbound_doorbell;
2167 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
2168 /*
2169 *********************************************
2170 ** check outbound intstatus
2171 *********************************************
2172 */
2173 outbound_doorbell = READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & acb->outbound_int_enable;
2174 if(!outbound_doorbell) {
2175 /* it must be a shared irq */
2176 return;
2177 }
2178 WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */
2179 READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell);
2180 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
2181 /* MU ioctl transfer doorbell interrupts*/
2182 if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
2183 arcmsr_iop2drv_data_wrote_handle(acb);
2184 }
2185 if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
2186 arcmsr_iop2drv_data_read_handle(acb);
2187 }
2188 /* MU post queue interrupts*/
2189 if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
2190 arcmsr_hbb_postqueue_isr(acb);
2191 }
2192 if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
2193 arcmsr_hbb_message_isr(acb);
2194 }
2195 }
2196 /*
2197 **********************************************************************
2198 **********************************************************************
2199 */
2200 static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb)
2201 {
2202 u_int32_t host_interrupt_status;
2203 /*
2204 *********************************************
2205 ** check outbound intstatus
2206 *********************************************
2207 */
2208 host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) &
2209 (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
2210 ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
2211 if(!host_interrupt_status) {
2212 /* it must be a shared irq */
2213 return;
2214 }
2215 do {
2216 /* MU doorbell interrupts*/
2217 if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
2218 arcmsr_hbc_doorbell_isr(acb);
2219 }
2220 /* MU post queue interrupts*/
2221 if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
2222 arcmsr_hbc_postqueue_isr(acb);
2223 }
2224 host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status);
2225 } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
2226 }
2227 /*
2228 **********************************************************************
2229 **********************************************************************
2230 */
2231 static void arcmsr_handle_hbd_isr( struct AdapterControlBlock *acb)
2232 {
2233 u_int32_t host_interrupt_status;
2234 u_int32_t intmask_org;
2235 /*
2236 *********************************************
2237 ** check outbound intstatus
2238 *********************************************
2239 */
2240 host_interrupt_status = CHIP_REG_READ32(HBD_MessageUnit, 0, host_int_status) & acb->outbound_int_enable;
2241 if(!(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_INT)) {
2242 /* it must be a shared irq */
2243 return;
2244 }
2245 /* disable outbound interrupt */
2246 intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable) ; /* disable outbound message0 int */
2247 CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE);
2248 /* MU doorbell interrupts*/
2249 if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_DOORBELL_INT) {
2250 arcmsr_hbd_doorbell_isr(acb);
2251 }
2252 /* MU post queue interrupts*/
2253 if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_POSTQUEUE_INT) {
2254 arcmsr_hbd_postqueue_isr(acb);
2255 }
2256 /* enable all outbound interrupt */
2257 CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | ARCMSR_HBDMU_ALL_INT_ENABLE);
2258 // CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable);
2259 }
2260 /*
2261 **********************************************************************
2262 **********************************************************************
2263 */
2264 static void arcmsr_handle_hbe_isr( struct AdapterControlBlock *acb)
2265 {
2266 u_int32_t host_interrupt_status;
2267 /*
2268 *********************************************
2269 ** check outbound intstatus
2270 *********************************************
2271 */
2272 host_interrupt_status = CHIP_REG_READ32(HBE_MessageUnit, 0, host_int_status) &
2273 (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
2274 ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
2275 if(!host_interrupt_status) {
2276 /* it must be a shared irq */
2277 return;
2278 }
2279 do {
2280 /* MU doorbell interrupts*/
2281 if(host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) {
2282 arcmsr_hbe_doorbell_isr(acb);
2283 }
2284 /* MU post queue interrupts*/
2285 if(host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) {
2286 arcmsr_hbe_postqueue_isr(acb);
2287 }
2288 host_interrupt_status = CHIP_REG_READ32(HBE_MessageUnit, 0, host_int_status);
2289 } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
2290 }
2291 /*
2292 ******************************************************************************
2293 ******************************************************************************
2294 */
2295 static void arcmsr_interrupt(struct AdapterControlBlock *acb)
2296 {
2297 switch (acb->adapter_type) {
2298 case ACB_ADAPTER_TYPE_A:
2299 arcmsr_handle_hba_isr(acb);
2300 break;
2301 case ACB_ADAPTER_TYPE_B:
2302 arcmsr_handle_hbb_isr(acb);
2303 break;
2304 case ACB_ADAPTER_TYPE_C:
2305 arcmsr_handle_hbc_isr(acb);
2306 break;
2307 case ACB_ADAPTER_TYPE_D:
2308 arcmsr_handle_hbd_isr(acb);
2309 break;
2310 case ACB_ADAPTER_TYPE_E:
2311 arcmsr_handle_hbe_isr(acb);
2312 break;
2313 default:
2314 kprintf("arcmsr%d: interrupt service,"
2315 " unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type);
2316 break;
2317 }
2318 }
2319 /*
2320 **********************************************************************
2321 **********************************************************************
2322 */
2323 static void arcmsr_intr_handler(void *arg)
2324 {
2325 struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
2326
2327 arcmsr_interrupt(acb);
2328 }
2329 /*
2330 ******************************************************************************
2331 ******************************************************************************
2332 */
2333 static void arcmsr_polling_devmap(void *arg)
2334 {
2335 struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
2336 switch (acb->adapter_type) {
2337 case ACB_ADAPTER_TYPE_A:
2338 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2339 break;
2340
2341 case ACB_ADAPTER_TYPE_B: {
2342 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
2343 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
2344 }
2345 break;
2346
2347 case ACB_ADAPTER_TYPE_C:
2348 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2349 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2350 break;
2351
2352 case ACB_ADAPTER_TYPE_D:
2353 CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2354 break;
2355
2356 case ACB_ADAPTER_TYPE_E:
2357 CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2358 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
2359 CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell);
2360 break;
2361 }
2362
2363 if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)
2364 {
2365 callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb); /* polling per 5 seconds */
2366 }
2367 }
2368
2369 /*
2370 *******************************************************************************
2371 **
2372 *******************************************************************************
2373 */
2374 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
2375 {
2376 u_int32_t intmask_org;
2377
2378 if(acb != NULL) {
2379 /* stop adapter background rebuild */
2380 if(acb->acb_flags & ACB_F_MSG_START_BGRB) {
2381 intmask_org = arcmsr_disable_allintr(acb);
2382 arcmsr_stop_adapter_bgrb(acb);
2383 arcmsr_flush_adapter_cache(acb);
2384 arcmsr_enable_allintr(acb, intmask_org);
2385 }
2386 }
2387 }
2388 /*
2389 ***********************************************************************
2390 **
2391 ************************************************************************
2392 */
2393 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
2394 {
2395 struct CMD_MESSAGE_FIELD *pcmdmessagefld;
2396 u_int32_t retvalue = EINVAL;
2397
2398 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) arg;
2399 if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
2400 return retvalue;
2401 }
2402 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2403 switch(ioctl_cmd) {
2404 case ARCMSR_MESSAGE_READ_RQBUFFER: {
2405 u_int8_t *pQbuffer;
2406 u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer;
2407 u_int32_t allxfer_len=0;
2408
2409 while((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
2410 && (allxfer_len < 1031)) {
2411 /*copy READ QBUFFER to srb*/
2412 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
2413 *ptmpQbuffer = *pQbuffer;
2414 acb->rqbuf_firstindex++;
2415 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
2416 /* wrap the index back to 0 at the end of the ring */
2417 ptmpQbuffer++;
2418 allxfer_len++;
2419 }
2420 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2421 struct QBUFFER *prbuffer;
2422
2423 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2424 prbuffer = arcmsr_get_iop_rqbuffer(acb);
2425 if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
2426 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2427 }
2428 pcmdmessagefld->cmdmessage.Length = allxfer_len;
2429 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2430 retvalue = ARCMSR_MESSAGE_SUCCESS;
2431 }
2432 break;
2433 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2434 u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
2435 u_int8_t *pQbuffer;
2436 u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer;
2437
2438 user_len = pcmdmessagefld->cmdmessage.Length;
2439 /*check if data xfer length of this request will overflow my array qbuffer */
2440 wqbuf_lastindex = acb->wqbuf_lastindex;
2441 wqbuf_firstindex = acb->wqbuf_firstindex;
2442 if(wqbuf_lastindex != wqbuf_firstindex) {
2443 arcmsr_Write_data_2iop_wqbuffer(acb);
2444 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR;
2445 } else {
2446 my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1) &
2447 (ARCMSR_MAX_QBUFFER - 1);
2448 if(my_empty_len >= user_len) {
2449 while(user_len > 0) {
2450 /*copy srb data to wqbuffer*/
2451 pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
2452 *pQbuffer = *ptmpuserbuffer;
2453 acb->wqbuf_lastindex++;
2454 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2455 /* wrap the index back to 0 at the end of the ring */
2456 ptmpuserbuffer++;
2457 user_len--;
2458 }
2459 /* post first Qbuffer */
2460 if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2461 acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2462 arcmsr_Write_data_2iop_wqbuffer(acb);
2463 }
2464 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2465 } else {
2466 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR;
2467 }
2468 }
2469 retvalue = ARCMSR_MESSAGE_SUCCESS;
2470 }
2471 break;
2472 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2473 u_int8_t *pQbuffer = acb->rqbuffer;
2474
2475 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2476 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2477 arcmsr_iop_message_read(acb);
2478 /* signature: let the IOP know the data has been read */
2479 }
2480 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2481 acb->rqbuf_firstindex = 0;
2482 acb->rqbuf_lastindex = 0;
2483 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2484 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2485 retvalue = ARCMSR_MESSAGE_SUCCESS;
2486 }
2487 break;
2488 case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
2489 {
2490 u_int8_t *pQbuffer = acb->wqbuffer;
2491
2492 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2493 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2494 arcmsr_iop_message_read(acb);
2495 /* signature: let the IOP know the data has been read */
2496 }
2497 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
2498 acb->wqbuf_firstindex = 0;
2499 acb->wqbuf_lastindex = 0;
2500 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2501 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2502 retvalue = ARCMSR_MESSAGE_SUCCESS;
2503 }
2504 break;
2505 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
2506 u_int8_t *pQbuffer;
2507
2508 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2509 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2510 arcmsr_iop_message_read(acb);
2511 /* signature: let the IOP know the data has been read */
2512 }
2513 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
2514 |ACB_F_MESSAGE_RQBUFFER_CLEARED
2515 |ACB_F_MESSAGE_WQBUFFER_READ);
2516 acb->rqbuf_firstindex = 0;
2517 acb->rqbuf_lastindex = 0;
2518 acb->wqbuf_firstindex = 0;
2519 acb->wqbuf_lastindex = 0;
2520 pQbuffer = acb->rqbuffer;
2521 memset(pQbuffer, 0, sizeof(struct QBUFFER));
2522 pQbuffer = acb->wqbuffer;
2523 memset(pQbuffer, 0, sizeof(struct QBUFFER));
2524 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2525 retvalue = ARCMSR_MESSAGE_SUCCESS;
2526 }
2527 break;
2528 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
2529 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
2530 retvalue = ARCMSR_MESSAGE_SUCCESS;
2531 }
2532 break;
2533 case ARCMSR_MESSAGE_SAY_HELLO: {
2534 u_int8_t *hello_string = "Hello! I am ARCMSR";
2535 u_int8_t *puserbuffer = (u_int8_t *)pcmdmessagefld->messagedatabuffer;
2536
2537 if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) {
2538 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR;
2539 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2540 return ENOIOCTL;
2541 }
2542 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2543 retvalue = ARCMSR_MESSAGE_SUCCESS;
2544 }
2545 break;
2546 case ARCMSR_MESSAGE_SAY_GOODBYE: {
2547 arcmsr_iop_parking(acb);
2548 retvalue = ARCMSR_MESSAGE_SUCCESS;
2549 }
2550 break;
2551 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
2552 arcmsr_flush_adapter_cache(acb);
2553 retvalue = ARCMSR_MESSAGE_SUCCESS;
2554 }
2555 break;
2556 }
2557 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2558 return (retvalue);
2559 }
2560 /*
2561 **************************************************************************
2562 **************************************************************************
2563 */
2564 static void arcmsr_free_srb(struct CommandControlBlock *srb)
2565 {
2566 struct AdapterControlBlock *acb;
2567
2568 acb = srb->acb;
2569 ARCMSR_LOCK_ACQUIRE(&acb->srb_lock);
2570 srb->srb_state = ARCMSR_SRB_DONE;
2571 srb->srb_flags = 0;
2572 acb->srbworkingQ[acb->workingsrb_doneindex] = srb;
2573 acb->workingsrb_doneindex++;
2574 acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM;
2575 ARCMSR_LOCK_RELEASE(&acb->srb_lock);
2576 }
2577 /*
2578 **************************************************************************
2579 **************************************************************************
2580 */
2581 static struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb)
2582 {
2583 struct CommandControlBlock *srb = NULL;
2584 u_int32_t workingsrb_startindex, workingsrb_doneindex;
2585
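/*
** The free SRBs live in a circular list: workingsrb_startindex is the
** next slot to hand out and workingsrb_doneindex is where completed SRBs
** are returned (see arcmsr_free_srb). If advancing the start index would
** make it collide with the done index, the pool is exhausted and NULL is
** returned.
*/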
2586 ARCMSR_LOCK_ACQUIRE(&acb->srb_lock);
2587 workingsrb_doneindex = acb->workingsrb_doneindex;
2588 workingsrb_startindex = acb->workingsrb_startindex;
2589 srb = acb->srbworkingQ[workingsrb_startindex];
2590 workingsrb_startindex++;
2591 workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM;
2592 if(workingsrb_doneindex != workingsrb_startindex) {
2593 acb->workingsrb_startindex = workingsrb_startindex;
2594 } else {
2595 srb = NULL;
2596 }
2597 ARCMSR_LOCK_RELEASE(&acb->srb_lock);
2598 return(srb);
2599 }
2600 /*
2601 **************************************************************************
2602 **************************************************************************
2603 */
2604 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb)
2605 {
2606 struct CMD_MESSAGE_FIELD *pcmdmessagefld;
2607 int retvalue = 0, transfer_len = 0;
2608 char *buffer;
2609 u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 |
2610 (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 |
2611 (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 |
2612 (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8];
2613 /* 4 bytes: Areca io control code */
2614 if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2615 buffer = pccb->csio.data_ptr;
2616 transfer_len = pccb->csio.dxfer_len;
2617 } else {
2618 retvalue = ARCMSR_MESSAGE_FAIL;
2619 goto message_out;
2620 }
2621 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
2622 retvalue = ARCMSR_MESSAGE_FAIL;
2623 goto message_out;
2624 }
2625 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
2626 switch(controlcode) {
2627 case ARCMSR_MESSAGE_READ_RQBUFFER: {
2628 u_int8_t *pQbuffer;
2629 u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer;
2630 int32_t allxfer_len = 0;
2631
2632 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2633 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
2634 && (allxfer_len < 1031)) {
2635 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
2636 *ptmpQbuffer = *pQbuffer;
2637 acb->rqbuf_firstindex++;
2638 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
2639 ptmpQbuffer++;
2640 allxfer_len++;
2641 }
2642 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2643 struct QBUFFER *prbuffer;
2644
2645 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2646 prbuffer = arcmsr_get_iop_rqbuffer(acb);
2647 if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
2648 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2649 }
2650 pcmdmessagefld->cmdmessage.Length = allxfer_len;
2651 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2652 retvalue = ARCMSR_MESSAGE_SUCCESS;
2653 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2654 }
2655 break;
2656 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2657 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
2658 u_int8_t *pQbuffer;
2659 u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer;
2660
2661 user_len = pcmdmessagefld->cmdmessage.Length;
2662 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2663 wqbuf_lastindex = acb->wqbuf_lastindex;
2664 wqbuf_firstindex = acb->wqbuf_firstindex;
2665 if (wqbuf_lastindex != wqbuf_firstindex) {
2666 arcmsr_Write_data_2iop_wqbuffer(acb);
2667 /* on error, report sense data */
2668 if(pccb->csio.sense_len) {
2669 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2670 /* Valid,ErrorCode */
2671 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2672 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2673 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2674 /* AdditionalSenseLength */
2675 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2676 /* AdditionalSenseCode */
2677 }
2678 retvalue = ARCMSR_MESSAGE_FAIL;
2679 } else {
2680 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
2681 &(ARCMSR_MAX_QBUFFER - 1);
2682 if (my_empty_len >= user_len) {
2683 while (user_len > 0) {
2684 pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
2685 *pQbuffer = *ptmpuserbuffer;
2686 acb->wqbuf_lastindex++;
2687 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2688 ptmpuserbuffer++;
2689 user_len--;
2690 }
2691 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2692 acb->acb_flags &=
2693 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2694 arcmsr_Write_data_2iop_wqbuffer(acb);
2695 }
2696 } else {
2697 /* on error, report sense data */
2698 if(pccb->csio.sense_len) {
2699 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2700 /* Valid,ErrorCode */
2701 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2702 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2703 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2704 /* AdditionalSenseLength */
2705 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2706 /* AdditionalSenseCode */
2707 }
2708 retvalue = ARCMSR_MESSAGE_FAIL;
2709 }
2710 }
2711 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2712 }
2713 break;
2714 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2715 u_int8_t *pQbuffer = acb->rqbuffer;
2716
2717 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2718 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2719 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2720 arcmsr_iop_message_read(acb);
2721 }
2722 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2723 acb->rqbuf_firstindex = 0;
2724 acb->rqbuf_lastindex = 0;
2725 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2726 pcmdmessagefld->cmdmessage.ReturnCode =
2727 ARCMSR_MESSAGE_RETURNCODE_OK;
2728 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2729 }
2730 break;
2731 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
2732 u_int8_t *pQbuffer = acb->wqbuffer;
2733
2734 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2735 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2736 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2737 arcmsr_iop_message_read(acb);
2738 }
2739 acb->acb_flags |=
2740 (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2741 ACB_F_MESSAGE_WQBUFFER_READ);
2742 acb->wqbuf_firstindex = 0;
2743 acb->wqbuf_lastindex = 0;
2744 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2745 pcmdmessagefld->cmdmessage.ReturnCode =
2746 ARCMSR_MESSAGE_RETURNCODE_OK;
2747 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2748 }
2749 break;
2750 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
2751 u_int8_t *pQbuffer;
2752
2753 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2754 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2755 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2756 arcmsr_iop_message_read(acb);
2757 }
2758 acb->acb_flags |=
2759 (ACB_F_MESSAGE_WQBUFFER_CLEARED
2760 | ACB_F_MESSAGE_RQBUFFER_CLEARED
2761 | ACB_F_MESSAGE_WQBUFFER_READ);
2762 acb->rqbuf_firstindex = 0;
2763 acb->rqbuf_lastindex = 0;
2764 acb->wqbuf_firstindex = 0;
2765 acb->wqbuf_lastindex = 0;
2766 pQbuffer = acb->rqbuffer;
2767 memset(pQbuffer, 0, sizeof (struct QBUFFER));
2768 pQbuffer = acb->wqbuffer;
2769 memset(pQbuffer, 0, sizeof (struct QBUFFER));
2770 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2771 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2772 }
2773 break;
2774 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
2775 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
2776 }
2777 break;
2778 case ARCMSR_MESSAGE_SAY_HELLO: {
2779 int8_t *hello_string = "Hello! I am ARCMSR";
2780
2781 memcpy(pcmdmessagefld->messagedatabuffer, hello_string
2782 , (int16_t)strlen(hello_string));
2783 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2784 }
2785 break;
2786 case ARCMSR_MESSAGE_SAY_GOODBYE:
2787 arcmsr_iop_parking(acb);
2788 break;
2789 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
2790 arcmsr_flush_adapter_cache(acb);
2791 break;
2792 default:
2793 retvalue = ARCMSR_MESSAGE_FAIL;
2794 }
2795 message_out:
2796 return (retvalue);
2797 }
2798 /*
2799 *********************************************************************
2800 *********************************************************************
2801 */
2802 static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2803 {
2804 struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
2805 struct AdapterControlBlock *acb = (struct AdapterControlBlock *)srb->acb;
2806 union ccb *pccb;
2807 int target, lun;
2808
2809 pccb = srb->pccb;
2810 target = pccb->ccb_h.target_id;
2811 lun = pccb->ccb_h.target_lun;
2812 acb->pktRequestCount++;
2813 if(error != 0) {
2814 if(error != EFBIG) {
2815 kprintf("arcmsr%d: unexpected error %x"
2816 " returned from 'bus_dmamap_load' \n"
2817 , acb->pci_unit, error);
2818 }
2819 if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2820 pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2821 }
2822 arcmsr_srb_complete(srb, 0);
2823 return;
2824 }
2825 if(nseg > ARCMSR_MAX_SG_ENTRIES) {
2826 pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2827 arcmsr_srb_complete(srb, 0);
2828 return;
2829 }
2830 if(acb->acb_flags & ACB_F_BUS_RESET) {
2831 kprintf("arcmsr%d: bus reset and return busy \n", acb->pci_unit);
2832 pccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2833 arcmsr_srb_complete(srb, 0);
2834 return;
2835 }
2836 if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
2837 u_int8_t block_cmd, cmd;
2838
2839 cmd = pccb->csio.cdb_io.cdb_bytes[0];
2840 block_cmd = cmd & 0x0f;
2841 if(block_cmd == 0x08 || block_cmd == 0x0a) {
2842 kprintf("arcmsr%d:block 'read/write' command "
2843 "with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n"
2844 , acb->pci_unit, cmd, target, lun);
2845 pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
2846 arcmsr_srb_complete(srb, 0);
2847 return;
2848 }
2849 }
2850 if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2851 if(nseg != 0) {
2852 ARCMSR_LOCK_ACQUIRE(&acb->io_lock);
2853 bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
2854 ARCMSR_LOCK_RELEASE(&acb->io_lock);
2855 }
2856 arcmsr_srb_complete(srb, 0);
2857 return;
2858 }
2859 if(acb->srboutstandingcount >= acb->maxOutstanding) {
2860 if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) == 0)
2861 {
2862 xpt_freeze_simq(acb->psim, 1);
2863 acb->acb_flags |= ACB_F_CAM_DEV_QFRZN;
2864 }
2865 pccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2866 pccb->ccb_h.status |= CAM_REQUEUE_REQ;
2867 arcmsr_srb_complete(srb, 0);
2868 return;
2869 }
2870 pccb->ccb_h.status |= CAM_SIM_QUEUED;
2871 arcmsr_build_srb(srb, dm_segs, nseg);
2872 arcmsr_post_srb(acb, srb);
2873 if (pccb->ccb_h.timeout != CAM_TIME_INFINITY)
2874 {
2875 callout_init_lk(&srb->ccb_callout, &srb->acb->isr_lock);
2876 callout_reset(&srb->ccb_callout, ((pccb->ccb_h.timeout + (ARCMSR_TIMEOUT_DELAY * 1000)) * hz) / 1000, arcmsr_srb_timeout, srb);
2877 srb->srb_flags |= SRB_FLAG_TIMER_START;
2878 }
2879 }
2880 /*
2881 *****************************************************************************************
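** arcmsr_seek_cmd2abort: with all outbound interrupts masked, scan the srb pool for an outstanding
** srb whose pccb matches the ccb being aborted; mark it ARCMSR_SRB_ABORTED and poll it to completion.
** Returns TRUE if the command was found, FALSE otherwise.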
2882 *****************************************************************************************
2883 */
2884 static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb)
2885 {
2886 struct CommandControlBlock *srb;
2887 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr;
2888 u_int32_t intmask_org;
2889 int i = 0;
2890
2891 acb->num_aborts++;
2892 /*
2893 ***************************************************************************
2894 ** The upper layer that issues the abort command takes this lock just prior to calling us.
2895 ** First determine if we currently own this command.
2896 ** Start by searching the device queue. If it is not found
2897 ** at all, and the system wanted us to just abort the
2898 ** command, return success.
2899 ***************************************************************************
2900 */
2901 if(acb->srboutstandingcount != 0) {
2902 /* disable all outbound interrupt */
2903 intmask_org = arcmsr_disable_allintr(acb);
2904 for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
2905 srb = acb->psrb_pool[i];
2906 if(srb->srb_state == ARCMSR_SRB_START) {
2907 if(srb->pccb == abortccb) {
2908 srb->srb_state = ARCMSR_SRB_ABORTED;
2909 kprintf("arcmsr%d:scsi id=%d lun=%jx abort srb '%p'"
2910 "outstanding command \n"
2911 , acb->pci_unit, abortccb->ccb_h.target_id
2912 , (uintmax_t)abortccb->ccb_h.target_lun, srb);
2913 arcmsr_polling_srbdone(acb, srb);
2914 /* enable outbound Post Queue, outbound doorbell Interrupt */
2915 arcmsr_enable_allintr(acb, intmask_org);
2916 return (TRUE);
2917 }
2918 }
2919 }
2920 /* enable outbound Post Queue, outbound doorbell Interrupt */
2921 arcmsr_enable_allintr(acb, intmask_org);
2922 }
2923 return(FALSE);
2924 }
2925 /*
2926 ****************************************************************************
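** arcmsr_bus_reset: drain outstanding srbs by servicing the interrupt handler for up to
** 400 * 25 ms (about 10 seconds), then reset the IOP.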
2927 ****************************************************************************
2928 */
2929 static void arcmsr_bus_reset(struct AdapterControlBlock *acb)
2930 {
2931 int retry = 0;
2932
2933 acb->num_resets++;
2934 acb->acb_flags |= ACB_F_BUS_RESET;
2935 while(acb->srboutstandingcount != 0 && retry < 400) {
2936 arcmsr_interrupt(acb);
2937 UDELAY(25000);
2938 retry++;
2939 }
2940 arcmsr_iop_reset(acb);
2941 acb->acb_flags &= ~ACB_F_BUS_RESET;
2942 }
2943 /*
2944 **************************************************************************
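** arcmsr_handle_virtual_command: services the virtual target used for IOP message transfer.
** Only LUN 0 exists; INQUIRY is emulated locally and READ/WRITE BUFFER is routed through
** arcmsr_iop_message_xfer().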
2945 **************************************************************************
2946 */
2947 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
2948 union ccb *pccb)
2949 {
2950 if (pccb->ccb_h.target_lun) {
2951 pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
2952 xpt_done(pccb);
2953 return;
2954 }
2955 pccb->ccb_h.status |= CAM_REQ_CMP;
2956 switch (pccb->csio.cdb_io.cdb_bytes[0]) {
2957 case INQUIRY: {
2958 unsigned char inqdata[36];
2959 char *buffer = pccb->csio.data_ptr;
2960
2961 inqdata[0] = T_PROCESSOR; /* Periph Qualifier & Periph Dev Type */
2962 inqdata[1] = 0; /* rem media bit & Dev Type Modifier */
2963 inqdata[2] = 0; /* ISO, ECMA, & ANSI versions */
2964 inqdata[3] = 0;
2965 inqdata[4] = 31; /* length of additional data */
2966 inqdata[5] = 0;
2967 inqdata[6] = 0;
2968 inqdata[7] = 0;
2969 strncpy(&inqdata[8], "Areca ", 8); /* Vendor Identification */
2970 strncpy(&inqdata[16], "RAID controller ", 16); /* Product Identification */
2971 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
2972 memcpy(buffer, inqdata, sizeof(inqdata));
2973 xpt_done(pccb);
2974 }
2975 break;
2976 case WRITE_BUFFER:
2977 case READ_BUFFER: {
2978 if (arcmsr_iop_message_xfer(acb, pccb)) {
2979 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2980 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
2981 }
2982 xpt_done(pccb);
2983 }
2984 break;
2985 default:
2986 xpt_done(pccb);
2987 }
2988 }
2989 /*
2990 *********************************************************************
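** arcmsr_action: CAM SIM action entry point; dispatches SCSI I/O, path inquiry, abort, reset,
** transfer-setting and geometry requests for this adapter.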
2991 *********************************************************************
2992 */
2993 static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
2994 {
2995 struct AdapterControlBlock *acb;
2996
2997 acb = (struct AdapterControlBlock *) cam_sim_softc(psim);
2998 if(acb == NULL) {
2999 pccb->ccb_h.status |= CAM_REQ_INVALID;
3000 xpt_done(pccb);
3001 return;
3002 }
3003 switch (pccb->ccb_h.func_code) {
3004 case XPT_SCSI_IO: {
3005 struct CommandControlBlock *srb;
3006 int target = pccb->ccb_h.target_id;
3007
3008 if(target == 16) {
3009 /* virtual device for iop message transfer */
3010 arcmsr_handle_virtual_command(acb, pccb);
3011 return;
3012 }
3013 if((srb = arcmsr_get_freesrb(acb)) == NULL) {
3014 pccb->ccb_h.status |= CAM_RESRC_UNAVAIL;
3015 xpt_done(pccb);
3016 return;
3017 }
3018 pccb->ccb_h.arcmsr_ccbsrb_ptr = srb;
3019 pccb->ccb_h.arcmsr_ccbacb_ptr = acb;
3020 srb->pccb = pccb;
3021 if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
3022 if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) {
3023 /* Single buffer */
3024 if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) {
3025 /* Buffer is virtual */
3026 u_int32_t error;
3027
3028 crit_enter();
3029 ARCMSR_LOCK_ACQUIRE(&acb->io_lock);
3030 error = bus_dmamap_load(acb->dm_segs_dmat
3031 , srb->dm_segs_dmamap
3032 , pccb->csio.data_ptr
3033 , pccb->csio.dxfer_len
3034 , arcmsr_execute_srb, srb, /*flags*/0);
3035 ARCMSR_LOCK_RELEASE(&acb->io_lock);
3036 if(error == EINPROGRESS) {
3037 xpt_freeze_simq(acb->psim, 1);
3038 pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
3039 }
3040 crit_exit();
3041 }
3042 else { /* Buffer is physical */
3043 struct bus_dma_segment seg;
3044
3045 seg.ds_addr = (bus_addr_t)pccb->csio.data_ptr;
3046 seg.ds_len = pccb->csio.dxfer_len;
3047 arcmsr_execute_srb(srb, &seg, 1, 0);
3048 }
3049 } else {
3050 /* Scatter/gather list */
3051 struct bus_dma_segment *segs;
3052
3053 if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
3054 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
3055 pccb->ccb_h.status |= CAM_PROVIDE_FAIL;
3056 xpt_done(pccb);
3057 kfree(srb, M_DEVBUF);
3058 return;
3059 }
3060 segs=(struct bus_dma_segment *)pccb->csio.data_ptr;
3061 arcmsr_execute_srb(srb, segs, pccb->csio.sglist_cnt, 0);
3062 }
3063 } else {
3064 arcmsr_execute_srb(srb, NULL, 0, 0);
3065 }
3066 break;
3067 }
3068 case XPT_TARGET_IO: {
3069 /* target mode does not yet support vendor specific commands. */
3070 pccb->ccb_h.status |= CAM_REQ_CMP;
3071 xpt_done(pccb);
3072 break;
3073 }
3074 case XPT_PATH_INQ: {
3075 struct ccb_pathinq *cpi = &pccb->cpi;
3076
3077 cpi->version_num = 1;
3078 cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
3079 cpi->target_sprt = 0;
3080 cpi->hba_misc = 0;
3081 cpi->hba_eng_cnt = 0;
3082 cpi->max_target = ARCMSR_MAX_TARGETID; /* 0-16 */
3083 cpi->max_lun = ARCMSR_MAX_TARGETLUN; /* 0-7 */
3084 cpi->initiator_id = ARCMSR_SCSI_INITIATOR_ID; /* 255 */
3085 cpi->bus_id = cam_sim_bus(psim);
3086 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3087 strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN);
3088 strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
3089 cpi->unit_number = cam_sim_unit(psim);
3090 if(acb->adapter_bus_speed == ACB_BUS_SPEED_12G)
3091 cpi->base_transfer_speed = 1200000;
3092 else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
3093 cpi->base_transfer_speed = 600000;
3094 else
3095 cpi->base_transfer_speed = 300000;
3096 if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
3097 (acb->vendor_device_id == PCIDevVenIDARC1884) ||
3098 (acb->vendor_device_id == PCIDevVenIDARC1680) ||
3099 (acb->vendor_device_id == PCIDevVenIDARC1214))
3100 {
3101 cpi->transport = XPORT_SAS;
3102 cpi->transport_version = 0;
3103 cpi->protocol_version = SCSI_REV_SPC2;
3104 }
3105 else
3106 {
3107 cpi->transport = XPORT_SPI;
3108 cpi->transport_version = 2;
3109 cpi->protocol_version = SCSI_REV_2;
3110 }
3111 cpi->protocol = PROTO_SCSI;
3112 cpi->ccb_h.status |= CAM_REQ_CMP;
3113 xpt_done(pccb);
3114 break;
3115 }
3116 case XPT_ABORT: {
3117 union ccb *pabort_ccb;
3118
3119 pabort_ccb = pccb->cab.abort_ccb;
3120 switch (pabort_ccb->ccb_h.func_code) {
3121 case XPT_ACCEPT_TARGET_IO:
3122 case XPT_IMMED_NOTIFY:
3123 case XPT_CONT_TARGET_IO:
3124 if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) {
3125 pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED;
3126 xpt_done(pabort_ccb);
3127 pccb->ccb_h.status |= CAM_REQ_CMP;
3128 } else {
3129 xpt_print_path(pabort_ccb->ccb_h.path);
3130 kprintf("Not found\n");
3131 pccb->ccb_h.status |= CAM_PATH_INVALID;
3132 }
3133 break;
3134 case XPT_SCSI_IO:
3135 pccb->ccb_h.status |= CAM_UA_ABORT;
3136 break;
3137 default:
3138 pccb->ccb_h.status |= CAM_REQ_INVALID;
3139 break;
3140 }
3141 xpt_done(pccb);
3142 break;
3143 }
3144 case XPT_RESET_BUS:
3145 case XPT_RESET_DEV: {
3146 u_int32_t i;
3147
3148 arcmsr_bus_reset(acb);
3149 for (i=0; i < 500; i++) {
3150 DELAY(1000);
3151 }
3152 pccb->ccb_h.status |= CAM_REQ_CMP;
3153 xpt_done(pccb);
3154 break;
3155 }
3156 case XPT_TERM_IO: {
3157 pccb->ccb_h.status |= CAM_REQ_INVALID;
3158 xpt_done(pccb);
3159 break;
3160 }
3161 case XPT_GET_TRAN_SETTINGS: {
3162 struct ccb_trans_settings *cts;
3163
3164 if(pccb->ccb_h.target_id == 16) {
3165 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
3166 xpt_done(pccb);
3167 break;
3168 }
3169 cts = &pccb->cts;
3170 {
3171 struct ccb_trans_settings_scsi *scsi;
3172 struct ccb_trans_settings_spi *spi;
3173 struct ccb_trans_settings_sas *sas;
3174
3175 scsi = &cts->proto_specific.scsi;
3176 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3177 scsi->valid = CTS_SCSI_VALID_TQ;
3178 cts->protocol = PROTO_SCSI;
3179
3180 if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
3181 (acb->vendor_device_id == PCIDevVenIDARC1884) ||
3182 (acb->vendor_device_id == PCIDevVenIDARC1680) ||
3183 (acb->vendor_device_id == PCIDevVenIDARC1214))
3184 {
3185 cts->protocol_version = SCSI_REV_SPC2;
3186 cts->transport_version = 0;
3187 cts->transport = XPORT_SAS;
3188 sas = &cts->xport_specific.sas;
3189 sas->valid = CTS_SAS_VALID_SPEED;
3190 if (acb->adapter_bus_speed == ACB_BUS_SPEED_12G)
3191 sas->bitrate = 1200000;
3192 else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
3193 sas->bitrate = 600000;
3194 else if(acb->adapter_bus_speed == ACB_BUS_SPEED_3G)
3195 sas->bitrate = 300000;
3196 }
3197 else
3198 {
3199 cts->protocol_version = SCSI_REV_2;
3200 cts->transport_version = 2;
3201 cts->transport = XPORT_SPI;
3202 spi = &cts->xport_specific.spi;
3203 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
3204 if (acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
3205 spi->sync_period = 1;
3206 else
3207 spi->sync_period = 2;
3208 spi->sync_offset = 32;
3209 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3210 spi->valid = CTS_SPI_VALID_DISC
3211 | CTS_SPI_VALID_SYNC_RATE
3212 | CTS_SPI_VALID_SYNC_OFFSET
3213 | CTS_SPI_VALID_BUS_WIDTH;
3214 }
3215 }
3216 pccb->ccb_h.status |= CAM_REQ_CMP;
3217 xpt_done(pccb);
3218 break;
3219 }
3220 case XPT_SET_TRAN_SETTINGS: {
3221 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
3222 xpt_done(pccb);
3223 break;
3224 }
3225 case XPT_CALC_GEOMETRY:
3226 if(pccb->ccb_h.target_id == 16) {
3227 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
3228 xpt_done(pccb);
3229 break;
3230 }
3231 cam_calc_geometry(&pccb->ccg, 1);
3232 xpt_done(pccb);
3233 break;
3234 default:
3235 pccb->ccb_h.status |= CAM_REQ_INVALID;
3236 xpt_done(pccb);
3237 break;
3238 }
3239 }
3240 /*
3241 **********************************************************************
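** arcmsr_start_hba_bgrb: ask a type A controller to start background rebuild via inbound message 0.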
3242 **********************************************************************
3243 */
3244 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
3245 {
3246 acb->acb_flags |= ACB_F_MSG_START_BGRB;
3247 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
3248 if(!arcmsr_hba_wait_msgint_ready(acb)) {
3249 kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
3250 }
3251 }
3252 /*
3253 **********************************************************************
3254 **********************************************************************
3255 */
3256 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
3257 {
3258 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
3259 acb->acb_flags |= ACB_F_MSG_START_BGRB;
3260 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_START_BGRB);
3261 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3262 kprintf( "arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
3263 }
3264 }
3265 /*
3266 **********************************************************************
3267 **********************************************************************
3268 */
3269 static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb)
3270 {
3271 acb->acb_flags |= ACB_F_MSG_START_BGRB;
3272 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
3273 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3274 if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3275 kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
3276 }
3277 }
3278 /*
3279 **********************************************************************
3280 **********************************************************************
3281 */
3282 static void arcmsr_start_hbd_bgrb(struct AdapterControlBlock *acb)
3283 {
3284 acb->acb_flags |= ACB_F_MSG_START_BGRB;
3285 CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
3286 if(!arcmsr_hbd_wait_msgint_ready(acb)) {
3287 kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
3288 }
3289 }
3290 /*
3291 **********************************************************************
3292 **********************************************************************
3293 */
3294 static void arcmsr_start_hbe_bgrb(struct AdapterControlBlock *acb)
3295 {
3296 acb->acb_flags |= ACB_F_MSG_START_BGRB;
3297 CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
3298 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3299 CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell);
3300 if(!arcmsr_hbe_wait_msgint_ready(acb)) {
3301 kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
3302 }
3303 }
3304 /*
3305 **********************************************************************
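** arcmsr_start_adapter_bgrb: start background rebuild, dispatching on the adapter type.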
3306 **********************************************************************
3307 */
3308 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
3309 {
3310 switch (acb->adapter_type) {
3311 case ACB_ADAPTER_TYPE_A:
3312 arcmsr_start_hba_bgrb(acb);
3313 break;
3314 case ACB_ADAPTER_TYPE_B:
3315 arcmsr_start_hbb_bgrb(acb);
3316 break;
3317 case ACB_ADAPTER_TYPE_C:
3318 arcmsr_start_hbc_bgrb(acb);
3319 break;
3320 case ACB_ADAPTER_TYPE_D:
3321 arcmsr_start_hbd_bgrb(acb);
3322 break;
3323 case ACB_ADAPTER_TYPE_E:
3324 arcmsr_start_hbe_bgrb(acb);
3325 break;
3326 }
3327 }
3328 /*
3329 **********************************************************************
3330 **
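** arcmsr_polling_hba_srbdone: drain the type A outbound reply queue for completed srbs;
** used while interrupts are masked, e.g. from abort handling.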
3331 **********************************************************************
3332 */
3333 static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
3334 {
3335 struct CommandControlBlock *srb;
3336 u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0;
3337 u_int16_t error;
3338
3339 polling_ccb_retry:
3340 poll_count++;
3341 outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
3342 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus); /*clear interrupt*/
3343 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3344 while(1) {
3345 if((flag_srb = CHIP_REG_READ32(HBA_MessageUnit,
3346 0, outbound_queueport)) == 0xFFFFFFFF) {
3347 if(poll_srb_done) {
3348 break; /* chip FIFO has no more completed ccb */
3349 } else {
3350 UDELAY(25000);
3351 if ((poll_count > 100) && (poll_srb != NULL)) {
3352 break;
3353 }
3354 goto polling_ccb_retry;
3355 }
3356 }
3357 /* check if command done with no error*/
3358 srb = (struct CommandControlBlock *)
3359 (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
3360 error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
3361 poll_srb_done = (srb == poll_srb) ? 1:0;
3362 if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
3363 if(srb->srb_state == ARCMSR_SRB_ABORTED) {
3364 kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p'"
3365 "poll command abort successfully \n"
3366 , acb->pci_unit
3367 , srb->pccb->ccb_h.target_id
3368 , (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
3369 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3370 arcmsr_srb_complete(srb, 1);
3371 continue;
3372 }
3373 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
3374 "srboutstandingcount=%d \n"
3375 , acb->pci_unit
3376 , srb, acb->srboutstandingcount);
3377 continue;
3378 }
3379 arcmsr_report_srb_state(acb, srb, error);
3380 } /*drain reply FIFO*/
3381 }
3382 /*
3383 **********************************************************************
3384 **
3385 **********************************************************************
3386 */
3387 static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
3388 {
3389 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
3390 struct CommandControlBlock *srb;
3391 u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
3392 int index;
3393 u_int16_t error;
3394
3395 polling_ccb_retry:
3396 poll_count++;
3397 WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
3398 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3399 while(1) {
3400 index = phbbmu->doneq_index;
3401 if((flag_srb = phbbmu->done_qbuffer[index]) == 0) {
3402 if(poll_srb_done) {
3403 break; /* chip FIFO has no more completed ccb */
3404 } else {
3405 UDELAY(25000);
3406 if ((poll_count > 100) && (poll_srb != NULL)) {
3407 break;
3408 }
3409 goto polling_ccb_retry;
3410 }
3411 }
3412 phbbmu->done_qbuffer[index] = 0;
3413 index++;
3414 index %= ARCMSR_MAX_HBB_POSTQUEUE; /* wrap to 0 past the last index */
3415 phbbmu->doneq_index = index;
3416 /* check if command done with no error*/
3417 srb = (struct CommandControlBlock *)
3418 (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
3419 error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
3420 poll_srb_done = (srb == poll_srb) ? 1:0;
3421 if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
3422 if(srb->srb_state == ARCMSR_SRB_ABORTED) {
3423 kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p'"
3424 "poll command abort successfully \n"
3425 , acb->pci_unit
3426 , srb->pccb->ccb_h.target_id
3427 , (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
3428 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3429 arcmsr_srb_complete(srb, 1);
3430 continue;
3431 }
3432 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
3433 "srboutstandingcount=%d \n"
3434 , acb->pci_unit
3435 , srb, acb->srboutstandingcount);
3436 continue;
3437 }
3438 arcmsr_report_srb_state(acb, srb, error);
3439 } /*drain reply FIFO*/
3440 }
3441 /*
3442 **********************************************************************
3443 **
3444 **********************************************************************
3445 */
3446 static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
3447 {
3448 struct CommandControlBlock *srb;
3449 u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
3450 u_int16_t error;
3451
3452 polling_ccb_retry:
3453 poll_count++;
3454 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3455 while(1) {
3456 if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
3457 if(poll_srb_done) {
3458 break; /* chip FIFO has no more completed ccb */
3459 } else {
3460 UDELAY(25000);
3461 if ((poll_count > 100) && (poll_srb != NULL)) {
3462 break;
3463 }
3464 if (acb->srboutstandingcount == 0) {
3465 break;
3466 }
3467 goto polling_ccb_retry;
3468 }
3469 }
3470 flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
3471 /* check if command done with no error*/
3472 srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
3473 error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
3474 if (poll_srb != NULL)
3475 poll_srb_done = (srb == poll_srb) ? 1:0;
3476 if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
3477 if(srb->srb_state == ARCMSR_SRB_ABORTED) {
3478 kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p'poll command abort successfully \n"
3479 , acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
3480 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3481 arcmsr_srb_complete(srb, 1);
3482 continue;
3483 }
3484 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n"
3485 , acb->pci_unit, srb, acb->srboutstandingcount);
3486 continue;
3487 }
3488 arcmsr_report_srb_state(acb, srb, error);
3489 } /*drain reply FIFO*/
3490 }
3491 /*
3492 **********************************************************************
3493 **
3494 **********************************************************************
3495 */
3496 static void arcmsr_polling_hbd_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
3497 {
3498 struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
3499 struct CommandControlBlock *srb;
3500 u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
3501 u_int32_t outbound_write_pointer;
3502 u_int16_t error, doneq_index;
3503
3504 polling_ccb_retry:
3505 poll_count++;
3506 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3507 while(1) {
3508 outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow;
3509 doneq_index = phbdmu->doneq_index;
3510 if ((outbound_write_pointer & 0xFF) == (doneq_index & 0xFF)) {
3511 if(poll_srb_done) {
3512 break; /* chip FIFO has no more completed ccb */
3513 } else {
3514 UDELAY(25000);
3515 if ((poll_count > 100) && (poll_srb != NULL)) {
3516 break;
3517 }
3518 if (acb->srboutstandingcount == 0) {
3519 break;
3520 }
3521 goto polling_ccb_retry;
3522 }
3523 }
3524 doneq_index = arcmsr_get_doneq_index(phbdmu);
3525 flag_srb = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow;
3526 /* check if command done with no error*/
3527 srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
3528 error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
3529 CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index);
3530 if (poll_srb != NULL)
3531 poll_srb_done = (srb == poll_srb) ? 1:0;
3532 if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
3533 if(srb->srb_state == ARCMSR_SRB_ABORTED) {
3534 kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p'poll command abort successfully \n"
3535 , acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
3536 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3537 arcmsr_srb_complete(srb, 1);
3538 continue;
3539 }
3540 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n"
3541 , acb->pci_unit, srb, acb->srboutstandingcount);
3542 continue;
3543 }
3544 arcmsr_report_srb_state(acb, srb, error);
3545 } /*drain reply FIFO*/
3546 }
3547 /*
3548 **********************************************************************
3549 **
3550 **********************************************************************
3551 */
3552 static void arcmsr_polling_hbe_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
3553 {
3554 struct CommandControlBlock *srb;
3555 u_int32_t poll_srb_done=0, poll_count=0, doneq_index;
3556 u_int16_t error, cmdSMID;
3557
3558 polling_ccb_retry:
3559 poll_count++;
3560 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3561 while(1) {
3562 doneq_index = acb->doneq_index;
3563 if((CHIP_REG_READ32(HBE_MessageUnit, 0, reply_post_producer_index) & 0xFFFF) == doneq_index) {
3564 if(poll_srb_done) {
3565 break; /* chip FIFO has no more completed ccb */
3566 } else {
3567 UDELAY(25000);
3568 if ((poll_count > 100) && (poll_srb != NULL)) {
3569 break;
3570 }
3571 if (acb->srboutstandingcount == 0) {
3572 break;
3573 }
3574 goto polling_ccb_retry;
3575 }
3576 }
3577 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
3578 doneq_index++;
3579 if (doneq_index >= acb->completionQ_entry)
3580 doneq_index = 0;
3581 acb->doneq_index = doneq_index;
3582 srb = acb->psrb_pool[cmdSMID];
3583 error = (acb->pCompletionQ[doneq_index].cmdFlag & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
3584 if (poll_srb != NULL)
3585 poll_srb_done = (srb == poll_srb) ? 1:0;
3586 if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
3587 if(srb->srb_state == ARCMSR_SRB_ABORTED) {
3588 kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p'poll command abort successfully \n"
3589 , acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
3590 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3591 arcmsr_srb_complete(srb, 1);
3592 continue;
3593 }
3594 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n"
3595 , acb->pci_unit, srb, acb->srboutstandingcount);
3596 continue;
3597 }
3598 arcmsr_report_srb_state(acb, srb, error);
3599 } /*drain reply FIFO*/
3600 CHIP_REG_WRITE32(HBE_MessageUnit, 0, reply_post_producer_index, doneq_index);
3601 }
3602 /*
3603 **********************************************************************
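** arcmsr_polling_srbdone: poll for srb completions, dispatching on the adapter type.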
3604 **********************************************************************
3605 */
3606 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
3607 {
3608 switch (acb->adapter_type) {
3609 case ACB_ADAPTER_TYPE_A: {
3610 arcmsr_polling_hba_srbdone(acb, poll_srb);
3611 }
3612 break;
3613 case ACB_ADAPTER_TYPE_B: {
3614 arcmsr_polling_hbb_srbdone(acb, poll_srb);
3615 }
3616 break;
3617 case ACB_ADAPTER_TYPE_C: {
3618 arcmsr_polling_hbc_srbdone(acb, poll_srb);
3619 }
3620 break;
3621 case ACB_ADAPTER_TYPE_D: {
3622 arcmsr_polling_hbd_srbdone(acb, poll_srb);
3623 }
3624 break;
3625 case ACB_ADAPTER_TYPE_E: {
3626 arcmsr_polling_hbe_srbdone(acb, poll_srb);
3627 }
3628 break;
3629 }
3630 }
3631 /*
3632 **********************************************************************
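** arcmsr_get_hba_config: issue GET_CONFIG to a type A controller, copy the firmware model,
** version and device map out of the message unit, and derive maxOutstanding from the
** firmware queue depth.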
3633 **********************************************************************
3634 */
3635 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
3636 {
3637 char *acb_firm_model = acb->firm_model;
3638 char *acb_firm_version = acb->firm_version;
3639 char *acb_device_map = acb->device_map;
3640 size_t iop_firm_model = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/
3641 size_t iop_firm_version = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
3642 size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
3643 int i;
3644
3645 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
3646 if(!arcmsr_hba_wait_msgint_ready(acb)) {
3647 kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
3648 }
3649 i = 0;
3650 while(i < 8) {
3651 *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
3652 /* 8 bytes firm_model, 15, 60-67*/
3653 acb_firm_model++;
3654 i++;
3655 }
3656 i=0;
3657 while(i < 16) {
3658 *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
3659 /* 16 bytes firm_version, 17, 68-83*/
3660 acb_firm_version++;
3661 i++;
3662 }
3663 i=0;
3664 while(i < 16) {
3665 *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
3666 acb_device_map++;
3667 i++;
3668 }
3669 kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
3670 acb->firm_request_len = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/
3671 acb->firm_numbers_queue = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
3672 acb->firm_sdram_size = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/
3673 acb->firm_ide_channels = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/
3674 acb->firm_cfg_version = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */
3675 if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
3676 acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1;
3677 else
3678 acb->maxOutstanding = acb->firm_numbers_queue - 1;
3679 }
3680 /*
3681 **********************************************************************
3682 **********************************************************************
3683 */
3684 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
3685 {
3686 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
3687 char *acb_firm_model = acb->firm_model;
3688 char *acb_firm_version = acb->firm_version;
3689 char *acb_device_map = acb->device_map;
3690 size_t iop_firm_model = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/
3691 size_t iop_firm_version = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
3692 size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
3693 int i;
3694
3695 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
3696 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3697 kprintf( "arcmsr%d: wait" "'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
3698 }
3699 i = 0;
3700 while(i < 8) {
3701 *acb_firm_model = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i);
3702 /* 8 bytes firm_model, 15, 60-67*/
3703 acb_firm_model++;
3704 i++;
3705 }
3706 i = 0;
3707 while(i < 16) {
3708 *acb_firm_version = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i);
3709 /* 16 bytes firm_version, 17, 68-83*/
3710 acb_firm_version++;
3711 i++;
3712 }
3713 i = 0;
3714 while(i < 16) {
3715 *acb_device_map = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i);
3716 acb_device_map++;
3717 i++;
3718 }
3719 kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
3720 acb->firm_request_len = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/
3721 acb->firm_numbers_queue = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
3722 acb->firm_sdram_size = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/
3723 acb->firm_ide_channels = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/
3724 acb->firm_cfg_version = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */
3725 if(acb->firm_numbers_queue > ARCMSR_MAX_HBB_POSTQUEUE)
3726 acb->maxOutstanding = ARCMSR_MAX_HBB_POSTQUEUE - 1;
3727 else
3728 acb->maxOutstanding = acb->firm_numbers_queue - 1;
3729 }
3730 /*
3731 **********************************************************************
3732 **********************************************************************
3733 */
3734 static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb)
3735 {
3736 char *acb_firm_model = acb->firm_model;
3737 char *acb_firm_version = acb->firm_version;
3738 char *acb_device_map = acb->device_map;
3739 size_t iop_firm_model = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/
3740 size_t iop_firm_version = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
3741 size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
3742 int i;
3743
3744 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
3745 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3746 if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3747 kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
3748 }
3749 i = 0;
3750 while(i < 8) {
3751 *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
3752 /* 8 bytes firm_model, 15, 60-67*/
3753 acb_firm_model++;
3754 i++;
3755 }
3756 i = 0;
3757 while(i < 16) {
3758 *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
3759 /* 16 bytes firm_version, 17, 68-83*/
3760 acb_firm_version++;
3761 i++;
3762 }
3763 i = 0;
3764 while(i < 16) {
3765 *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
3766 acb_device_map++;
3767 i++;
3768 }
3769 kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
3770 acb->firm_request_len = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/
3771 acb->firm_numbers_queue = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
3772 acb->firm_sdram_size = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/
3773 acb->firm_ide_channels = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/
3774 acb->firm_cfg_version = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */
3775 if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
3776 acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1;
3777 else
3778 acb->maxOutstanding = acb->firm_numbers_queue - 1;
3779 }
3780 /*
3781 **********************************************************************
3782 **********************************************************************
3783 */
3784 static void arcmsr_get_hbd_config(struct AdapterControlBlock *acb)
3785 {
3786 char *acb_firm_model = acb->firm_model;
3787 char *acb_firm_version = acb->firm_version;
3788 char *acb_device_map = acb->device_map;
3789 size_t iop_firm_model = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/
3790 size_t iop_firm_version = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
3791 size_t iop_device_map = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
3792 int i;
3793
3794 if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE)
3795 CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);
3796 CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
3797 if(!arcmsr_hbd_wait_msgint_ready(acb)) {
3798 kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
3799 }
3800 i = 0;
3801 while(i < 8) {
3802 *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
3803 /* 8 bytes firm_model, 15, 60-67*/
3804 acb_firm_model++;
3805 i++;
3806 }
3807 i = 0;
3808 while(i < 16) {
3809 *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
3810 /* 16 bytes firm_version, 17, 68-83*/
3811 acb_firm_version++;
3812 i++;
3813 }
3814 i = 0;
3815 while(i < 16) {
3816 *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
3817 acb_device_map++;
3818 i++;
3819 }
3820 kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
3821 acb->firm_request_len = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/
3822 acb->firm_numbers_queue = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
3823 acb->firm_sdram_size = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/
3824 acb->firm_ide_channels = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/
3825 acb->firm_cfg_version = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */
3826 if(acb->firm_numbers_queue > ARCMSR_MAX_HBD_POSTQUEUE)
3827 acb->maxOutstanding = ARCMSR_MAX_HBD_POSTQUEUE - 1;
3828 else
3829 acb->maxOutstanding = acb->firm_numbers_queue - 1;
3830 }
3831 /*
3832 **********************************************************************
3833 **********************************************************************
3834 */
3835 static void arcmsr_get_hbe_config(struct AdapterControlBlock *acb)
3836 {
3837 char *acb_firm_model = acb->firm_model;
3838 char *acb_firm_version = acb->firm_version;
3839 char *acb_device_map = acb->device_map;
3840 size_t iop_firm_model = offsetof(struct HBE_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/
3841 size_t iop_firm_version = offsetof(struct HBE_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
3842 size_t iop_device_map = offsetof(struct HBE_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
3843 int i;
3844
3845 CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
3846 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3847 CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell);
3848 if(!arcmsr_hbe_wait_msgint_ready(acb)) {
3849 kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
3850 }
3851
3852 i = 0;
3853 while(i < 8) {
3854 *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
3855 /* 8 bytes firm_model, 15, 60-67*/
3856 acb_firm_model++;
3857 i++;
3858 }
3859 i = 0;
3860 while(i < 16) {
3861 *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
3862 /* 16 bytes firm_version, 17, 68-83*/
3863 acb_firm_version++;
3864 i++;
3865 }
3866 i = 0;
3867 while(i < 16) {
3868 *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
3869 acb_device_map++;
3870 i++;
3871 }
3872 kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
3873 acb->firm_request_len = CHIP_REG_READ32(HBE_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/
3874 acb->firm_numbers_queue = CHIP_REG_READ32(HBE_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
3875 acb->firm_sdram_size = CHIP_REG_READ32(HBE_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/
3876 acb->firm_ide_channels = CHIP_REG_READ32(HBE_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/
3877 acb->firm_cfg_version = CHIP_REG_READ32(HBE_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */
3878 if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
3879 acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1;
3880 else
3881 acb->maxOutstanding = acb->firm_numbers_queue - 1;
3882 }
3883 /*
3884 **********************************************************************
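** arcmsr_get_firmware_spec: read the firmware configuration, dispatching on the adapter type.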
3885 **********************************************************************
3886 */
3887 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
3888 {
3889 switch (acb->adapter_type) {
3890 case ACB_ADAPTER_TYPE_A: {
3891 arcmsr_get_hba_config(acb);
3892 }
3893 break;
3894 case ACB_ADAPTER_TYPE_B: {
3895 arcmsr_get_hbb_config(acb);
3896 }
3897 break;
3898 case ACB_ADAPTER_TYPE_C: {
3899 arcmsr_get_hbc_config(acb);
3900 }
3901 break;
3902 case ACB_ADAPTER_TYPE_D: {
3903 arcmsr_get_hbd_config(acb);
3904 }
3905 break;
3906 case ACB_ADAPTER_TYPE_E: {
3907 arcmsr_get_hbe_config(acb);
3908 }
3909 break;
3910 }
3911 }
3912 /*
3913 **********************************************************************
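** arcmsr_wait_firmware_ready: spin until the controller reports FIRMWARE_OK, giving up after
** 30 seconds (60 seconds for type E).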
3914 **********************************************************************
3915 */
3916 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
3917 {
3918 int timeout=0;
3919
3920 switch (acb->adapter_type) {
3921 case ACB_ADAPTER_TYPE_A: {
3922 while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0)
3923 {
3924 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3925 {
3926 kprintf( "arcmsr%d:timed out waiting for firmware \n", acb->pci_unit);
3927 return;
3928 }
3929 UDELAY(15000); /* wait 15 milli-seconds */
3930 }
3931 }
3932 break;
3933 case ACB_ADAPTER_TYPE_B: {
3934 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
3935 while ((READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0)
3936 {
3937 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3938 {
3939 kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3940 return;
3941 }
3942 UDELAY(15000); /* wait 15 milli-seconds */
3943 }
3944 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3945 }
3946 break;
3947 case ACB_ADAPTER_TYPE_C: {
3948 while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0)
3949 {
3950 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3951 {
3952 kprintf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit);
3953 return;
3954 }
3955 UDELAY(15000); /* wait 15 milli-seconds */
3956 }
3957 }
3958 break;
3959 case ACB_ADAPTER_TYPE_D: {
3960 while ((CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBDMU_MESSAGE_FIRMWARE_OK) == 0)
3961 {
3962 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3963 {
3964 kprintf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit);
3965 return;
3966 }
3967 UDELAY(15000); /* wait 15 milli-seconds */
3968 }
3969 }
3970 break;
3971 case ACB_ADAPTER_TYPE_E: {
3972 while ((CHIP_REG_READ32(HBE_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0)
3973 {
3974 if (timeout++ > 4000) /* (4000*15)/1000 = 60 sec */
3975 {
3976 kprintf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit);
3977 return;
3978 }
3979 UDELAY(15000); /* wait 15 milli-seconds */
3980 }
3981 }
3982 break;
3983 }
3984 }
3985 /*
3986 **********************************************************************
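** arcmsr_clear_doorbell_queue_buffer: acknowledge any pending doorbell interrupt and tell the
** IOP its outbound data has been read, so the doorbell queue buffer starts out empty.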
3987 **********************************************************************
3988 */
3989 static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
3990 {
3991 u_int32_t outbound_doorbell;
3992
3993 switch (acb->adapter_type) {
3994 case ACB_ADAPTER_TYPE_A: {
3995 /* empty doorbell Qbuffer if door bell ringed */
3996 outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
3997 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */
3998 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
3999 }
4000 break;
4001 case ACB_ADAPTER_TYPE_B: {
4002 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
4003 WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN);/*clear interrupt and message state*/
4004 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
4005 /* let IOP know data has been read */
4006 }
4007 break;
4008 case ACB_ADAPTER_TYPE_C: {
4009 /* empty doorbell Qbuffer if door bell ringed */
4010 outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
4011 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /*clear doorbell interrupt */
4012 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
4013 CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell_clear); /* Dummy read to force pci flush */
4014 CHIP_REG_READ32(HBC_MessageUnit, 0, inbound_doorbell); /* Dummy read to force pci flush */
4015 }
4016 break;
4017 case ACB_ADAPTER_TYPE_D: {
4018 /* empty doorbell Qbuffer if door bell ringed */
4019 outbound_doorbell = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell);
4020 CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */
4021 CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ);
4022 }
4023 break;
4024 case ACB_ADAPTER_TYPE_E: {
4025 /* empty doorbell Qbuffer if door bell ringed */
4026 acb->in_doorbell = CHIP_REG_READ32(HBE_MessageUnit, 0, iobound_doorbell);
4027 CHIP_REG_WRITE32(HBE_MessageUnit, 0, host_int_status, 0); /*clear doorbell interrupt */
4028 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
4029 CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell);
4030 }
4031 break;
4032 }
4033 }
4034 /*
4035 ************************************************************************
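** arcmsr_iop_confirm: hand the IOP the physical layout of the srb pool: the high 32 bits of the
** srb base address and, for type B/D/E, the post/done or completion queue addresses.
** Returns FALSE if any message handshake times out.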
4036 ************************************************************************
4037 */
4038 static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb)
4039 {
4040 unsigned long srb_phyaddr;
4041 u_int32_t srb_phyaddr_hi32;
4042 u_int32_t srb_phyaddr_lo32;
4043
4044 /*
4045 ********************************************************************
4046 ** Here we need to tell the iop 331 our freesrb.HighPart,
4047 ** if freesrb.HighPart is not zero.
4048 ********************************************************************
4049 */
4050 srb_phyaddr = (unsigned long) acb->srb_phyaddr.phyaddr;
4051 srb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
4052 srb_phyaddr_lo32 = acb->srb_phyaddr.B.phyadd_low;
4053 switch (acb->adapter_type) {
4054 case ACB_ADAPTER_TYPE_A: {
4055 if(srb_phyaddr_hi32 != 0) {
4056 CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
4057 CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
4058 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
4059 if(!arcmsr_hba_wait_msgint_ready(acb)) {
4060 kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
4061 return FALSE;
4062 }
4063 }
4064 }
4065 break;
4066 /*
4067 ***********************************************************************
4068 ** if adapter type B, set window of "post command Q"
4069 ***********************************************************************
4070 */
4071 case ACB_ADAPTER_TYPE_B: {
4072 u_int32_t post_queue_phyaddr;
4073 struct HBB_MessageUnit *phbbmu;
4074
4075 phbbmu = (struct HBB_MessageUnit *)acb->pmu;
4076 phbbmu->postq_index = 0;
4077 phbbmu->doneq_index = 0;
4078 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW);
4079 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
4080 kprintf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit);
4081 return FALSE;
4082 }
4083 post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE
4084 + offsetof(struct HBB_MessageUnit, post_qbuffer);
4085 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */
4086 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normal should be zero */
4087 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ size (256+8)*4 */
4088 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ size (256+8)*4 */
4089 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */
4090 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG);
4091 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
4092 kprintf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit);
4093 return FALSE;
4094 }
4095 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE);
4096 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
4097 kprintf( "arcmsr%d: 'start driver mode' timeout \n", acb->pci_unit);
4098 return FALSE;
4099 }
4100 }
4101 break;
4102 case ACB_ADAPTER_TYPE_C: {
4103 if(srb_phyaddr_hi32 != 0) {
4104 CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
4105 CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
4106 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
4107 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4108 if(!arcmsr_hbc_wait_msgint_ready(acb)) {
4109 kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
4110 return FALSE;
4111 }
4112 }
4113 }
4114 break;
4115 case ACB_ADAPTER_TYPE_D: {
4116 u_int32_t post_queue_phyaddr, done_queue_phyaddr;
4117 struct HBD_MessageUnit0 *phbdmu;
4118
4119 phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
4120 phbdmu->postq_index = 0;
4121 phbdmu->doneq_index = 0x40FF;
4122 post_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE
4123 + offsetof(struct HBD_MessageUnit0, post_qbuffer);
4124 done_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE
4125 + offsetof(struct HBD_MessageUnit0, done_qbuffer);
4126 CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */
4127 CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
4128 CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ base */
4129 CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[3], done_queue_phyaddr); /* doneQ base */
4130 CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[4], 0x100);
4131 CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
4132 if(!arcmsr_hbd_wait_msgint_ready(acb)) {
4133 kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
4134 return FALSE;
4135 }
4136 }
4137 break;
4138 case ACB_ADAPTER_TYPE_E: {
4139 u_int32_t cdb_phyaddr_lo32;
4140 cdb_phyaddr_lo32 = srb_phyaddr_lo32 + offsetof(struct CommandControlBlock, arcmsr_cdb);
4141 CHIP_REG_WRITE32(HBE_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
4142 CHIP_REG_WRITE32(HBE_MessageUnit, 0, msgcode_rwbuffer[1], ARCMSR_SIGNATURE_1884);
4143 CHIP_REG_WRITE32(HBE_MessageUnit, 0, msgcode_rwbuffer[2], cdb_phyaddr_lo32);
4144 CHIP_REG_WRITE32(HBE_MessageUnit, 0, msgcode_rwbuffer[3], srb_phyaddr_hi32);
4145 CHIP_REG_WRITE32(HBE_MessageUnit, 0, msgcode_rwbuffer[4], SRB_SIZE);
4146 cdb_phyaddr_lo32 = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE;
4147 CHIP_REG_WRITE32(HBE_MessageUnit, 0, msgcode_rwbuffer[5], cdb_phyaddr_lo32);
4148 CHIP_REG_WRITE32(HBE_MessageUnit, 0, msgcode_rwbuffer[6], srb_phyaddr_hi32);
4149 CHIP_REG_WRITE32(HBE_MessageUnit, 0, msgcode_rwbuffer[7], COMPLETION_Q_POOL_SIZE);
4150 CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
4151 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
4152 CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell);
4153 if(!arcmsr_hbe_wait_msgint_ready(acb)) {
4154 kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
4155 return FALSE;
4156 }
4157 }
4158 break;
4159 }
4160 return (TRUE);
4161 }
4162 /*
4163 ************************************************************************
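** arcmsr_enable_eoi_mode: type B controllers only; ask the IOP to switch to end-of-interrupt mode.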
4164 ************************************************************************
4165 */
4166 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
4167 {
4168 if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
4169 {
4170 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
4171 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
4172 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
4173 kprintf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit);
4174 return;
4175 }
4176 }
4177 }
4178 /*
4179 **********************************************************************
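** arcmsr_iop_init: controller bring-up: mask interrupts, wait for firmware, confirm the srb pool
** layout, read the firmware configuration, start background rebuild, drain the doorbell,
** enable EOI mode, then re-enable interrupts and mark the IOP initialized.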
4180 **********************************************************************
4181 */
4182 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
4183 {
4184 u_int32_t intmask_org;
4185
4186 /* disable all outbound interrupt */
4187 intmask_org = arcmsr_disable_allintr(acb);
4188 arcmsr_wait_firmware_ready(acb);
4189 arcmsr_iop_confirm(acb);
4190 arcmsr_get_firmware_spec(acb);
4191 /*start background rebuild*/
4192 arcmsr_start_adapter_bgrb(acb);
4193 /* empty doorbell Qbuffer if door bell ringed */
4194 arcmsr_clear_doorbell_queue_buffer(acb);
4195 arcmsr_enable_eoi_mode(acb);
4196 /* enable outbound Post Queue, outbound doorbell Interrupt */
4197 arcmsr_enable_allintr(acb, intmask_org);
4198 acb->acb_flags |= ACB_F_IOP_INITED;
4199 }
4200 /*
4201 **********************************************************************
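** arcmsr_map_free_srb: bus_dmamap_load() callback for the srb pool. Creates a per-srb DMA map,
** records each srb's physical address (shifted right by 5 for pre-type-C controllers), links the
** srbs into the working queue, and computes the virtual-to-physical offset used to decode completions.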
4202 **********************************************************************
4203 */
4204 static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
4205 {
4206 struct AdapterControlBlock *acb = arg;
4207 struct CommandControlBlock *srb_tmp;
4208 u_int32_t i;
4209 unsigned long srb_phyaddr = (unsigned long)segs->ds_addr;
4210
4211 acb->srb_phyaddr.phyaddr = srb_phyaddr;
4212 srb_tmp = (struct CommandControlBlock *)acb->uncacheptr;
4213 for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
4214 if(bus_dmamap_create(acb->dm_segs_dmat,
4215 /*flags*/0, &srb_tmp->dm_segs_dmamap) != 0) {
4216 acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
4217 kprintf("arcmsr%d:"
4218 " srb dmamap bus_dmamap_create error\n", acb->pci_unit);
4219 return;
4220 }
4221 if((acb->adapter_type == ACB_ADAPTER_TYPE_C) || (acb->adapter_type == ACB_ADAPTER_TYPE_D)
4222 || (acb->adapter_type == ACB_ADAPTER_TYPE_E))
4223 {
4224 srb_tmp->cdb_phyaddr_low = srb_phyaddr;
4225 srb_tmp->cdb_phyaddr_high = (u_int32_t)((srb_phyaddr >> 16) >> 16);
4226 }
4227 else
4228 srb_tmp->cdb_phyaddr_low = srb_phyaddr >> 5; /* older adapters take the SRB address in 32-byte units; the 0x20 SRB alignment presumably leaves the low bits free for post flags */
4229 srb_tmp->acb = acb;
4230 srb_tmp->smid = i << 16;
4231 acb->srbworkingQ[i] = acb->psrb_pool[i] = srb_tmp;
4232 srb_phyaddr = srb_phyaddr + SRB_SIZE;
4233 srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp + SRB_SIZE);
4234 }
4235 acb->pCompletionQ = (pCompletion_Q)srb_tmp;
4236 acb->vir2phy_offset = (unsigned long)srb_tmp - (unsigned long)srb_phyaddr;
4237 }
4238 /*
4239 ************************************************************************
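** arcmsr_free_resource: undo what arcmsr_initialize() set up --
** destroy the control device node, unload and destroy the SRB DMA
** map, and destroy the three DMA tags.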
4240 ************************************************************************
4241 */
4242 static void arcmsr_free_resource(struct AdapterControlBlock *acb)
4243 {
4244 /* remove the control device */
4245 if(acb->ioctl_dev != NULL) {
4246 destroy_dev(acb->ioctl_dev);
4247 }
4248 bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap);
4249 bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap);
4250 bus_dma_tag_destroy(acb->srb_dmat);
4251 bus_dma_tag_destroy(acb->dm_segs_dmat);
4252 bus_dma_tag_destroy(acb->parent_dmat);
4253 }
4254 /*
4255 ************************************************************************
4256 ************************************************************************
4257 */
4258 static void arcmsr_mutex_init(struct AdapterControlBlock *acb)
4259 {
4260 ARCMSR_LOCK_INIT(&acb->isr_lock, "arcmsr isr lock");
4261 ARCMSR_LOCK_INIT(&acb->srb_lock, "arcmsr srb lock");
4262 ARCMSR_LOCK_INIT(&acb->postDone_lock, "arcmsr postQ lock");
4263 ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr RW buffer lock");
4264 ARCMSR_LOCK_INIT(&acb->io_lock, "arcmsr io lock");
4265 ARCMSR_LOCK_INIT(&acb->sim_lock, "arcmsr sim lock");
4266 }
4267 /*
4268 ************************************************************************
4269 ************************************************************************
4270 */
4271 static void arcmsr_mutex_destroy(struct AdapterControlBlock *acb)
4272 {
4273 ARCMSR_LOCK_DESTROY(&acb->sim_lock);
4274 ARCMSR_LOCK_DESTROY(&acb->io_lock);
4275 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
4276 ARCMSR_LOCK_DESTROY(&acb->postDone_lock);
4277 ARCMSR_LOCK_DESTROY(&acb->srb_lock);
4278 ARCMSR_LOCK_DESTROY(&acb->isr_lock);
4279 }
4280 /*
4281 ************************************************************************
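** arcmsr_initialize: classify the adapter by PCI device/sub-device
** ID, size the coherent area (SRB pool plus any per-type message
** unit or completion queue), create the parent/data/SRB DMA tags,
** allocate and load the SRB pool, enable bus mastering, and map the
** BAR(s) that hold the adapter's message unit registers.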
4282 ************************************************************************
4283 */
4284 static u_int32_t arcmsr_initialize(device_t dev)
4285 {
4286 struct AdapterControlBlock *acb = device_get_softc(dev);
4287 u_int16_t pci_command;
4288 int i, j, max_coherent_size;
4289 u_int32_t vendor_dev_id;
4290
4291 vendor_dev_id = pci_get_devid(dev);
4292 acb->vendor_device_id = vendor_dev_id;
4293 acb->sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
4294 switch (vendor_dev_id) {
4295 case PCIDevVenIDARC1880:
4296 case PCIDevVenIDARC1882:
4297 case PCIDevVenIDARC1213:
4298 case PCIDevVenIDARC1223: {
4299 acb->adapter_type = ACB_ADAPTER_TYPE_C;
4300 if ((acb->sub_device_id == ARECA_SUB_DEV_ID_1883) ||
4301 (acb->sub_device_id == ARECA_SUB_DEV_ID_1216) ||
4302 (acb->sub_device_id == ARECA_SUB_DEV_ID_1226))
4303 acb->adapter_bus_speed = ACB_BUS_SPEED_12G;
4304 else
4305 acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
4306 max_coherent_size = ARCMSR_SRBS_POOL_SIZE;
4307 }
4308 break;
4309 case PCIDevVenIDARC1884:
4310 acb->adapter_type = ACB_ADAPTER_TYPE_E;
4311 acb->adapter_bus_speed = ACB_BUS_SPEED_12G;
4312 max_coherent_size = ARCMSR_SRBS_POOL_SIZE + COMPLETION_Q_POOL_SIZE;
4313 acb->completionQ_entry = COMPLETION_Q_POOL_SIZE / sizeof(struct deliver_completeQ);
4314 break;
4315 case PCIDevVenIDARC1214: {
4316 acb->adapter_type = ACB_ADAPTER_TYPE_D;
4317 acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
4318 max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBD_MessageUnit0));
4319 }
4320 break;
4321 case PCIDevVenIDARC1200:
4322 case PCIDevVenIDARC1201: {
4323 acb->adapter_type = ACB_ADAPTER_TYPE_B;
4324 acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
4325 max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit));
4326 }
4327 break;
4328 case PCIDevVenIDARC1203: {
4329 acb->adapter_type = ACB_ADAPTER_TYPE_B;
4330 acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
4331 max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit));
4332 }
4333 break;
4334 case PCIDevVenIDARC1110:
4335 case PCIDevVenIDARC1120:
4336 case PCIDevVenIDARC1130:
4337 case PCIDevVenIDARC1160:
4338 case PCIDevVenIDARC1170:
4339 case PCIDevVenIDARC1210:
4340 case PCIDevVenIDARC1220:
4341 case PCIDevVenIDARC1230:
4342 case PCIDevVenIDARC1231:
4343 case PCIDevVenIDARC1260:
4344 case PCIDevVenIDARC1261:
4345 case PCIDevVenIDARC1270:
4346 case PCIDevVenIDARC1280:
4347 case PCIDevVenIDARC1212:
4348 case PCIDevVenIDARC1222:
4349 case PCIDevVenIDARC1380:
4350 case PCIDevVenIDARC1381:
4351 case PCIDevVenIDARC1680:
4352 case PCIDevVenIDARC1681: {
4353 acb->adapter_type = ACB_ADAPTER_TYPE_A;
4354 acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
4355 max_coherent_size = ARCMSR_SRBS_POOL_SIZE;
4356 }
4357 break;
4358 default: {
4359 kprintf("arcmsr%d:"
4360 " unknown RAID adapter type \n", device_get_unit(dev));
4361 return ENOMEM;
4362 }
4363 }
4364 if(bus_dma_tag_create( /*PCI parent*/ bus_get_dma_tag(dev),
4365 /*alignment*/ 1,
4366 /*boundary*/ 0,
4367 /*lowaddr*/ BUS_SPACE_MAXADDR,
4368 /*highaddr*/ BUS_SPACE_MAXADDR,
4369 /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT,
4370 /*nsegments*/ BUS_SPACE_UNRESTRICTED,
4371 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
4372 /*flags*/ 0,
4373 &acb->parent_dmat) != 0)
4374 {
4375 kprintf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
4376 return ENOMEM;
4377 }
4378 /* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
4379 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
4380 /*alignment*/ 1,
4381 /*boundary*/ 0,
4382 /*lowaddr*/ BUS_SPACE_MAXADDR,
4383 /*highaddr*/ BUS_SPACE_MAXADDR,
4384 /*maxsize*/ ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM,
4385 /*nsegments*/ ARCMSR_MAX_SG_ENTRIES,
4386 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
4387 /*flags*/ 0,
4388 &acb->dm_segs_dmat) != 0)
4389 {
4390 bus_dma_tag_destroy(acb->parent_dmat);
4391 kprintf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
4392 return ENOMEM;
4393 }
4394
4395 /* DMA tag for our srb structures.... Allocate the freesrb memory */
4396 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
4397 /*alignment*/ 0x20,
4398 /*boundary*/ 0,
4399 /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT,
4400 /*highaddr*/ BUS_SPACE_MAXADDR,
4401 /*maxsize*/ max_coherent_size,
4402 /*nsegments*/ 1,
4403 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
4404 /*flags*/ 0,
4405 &acb->srb_dmat) != 0)
4406 {
4407 bus_dma_tag_destroy(acb->dm_segs_dmat);
4408 bus_dma_tag_destroy(acb->parent_dmat);
4409 kprintf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
4410 return ENXIO;
4411 }
4412 /* Allocation for our srbs */
4413 if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) {
4414 bus_dma_tag_destroy(acb->srb_dmat);
4415 bus_dma_tag_destroy(acb->dm_segs_dmat);
4416 bus_dma_tag_destroy(acb->parent_dmat);
4417 kprintf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev));
4418 return ENXIO;
4419 }
4420 /* And permanently map them */
4421 if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) {
4422 bus_dma_tag_destroy(acb->srb_dmat);
4423 bus_dma_tag_destroy(acb->dm_segs_dmat);
4424 bus_dma_tag_destroy(acb->parent_dmat);
4425 kprintf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev));
4426 return ENXIO;
4427 }
4428 pci_command = pci_read_config(dev, PCIR_COMMAND, 2);
4429 pci_command |= PCIM_CMD_BUSMASTEREN;
4430 pci_command |= PCIM_CMD_PERRESPEN;
4431 pci_command |= PCIM_CMD_MWRICEN;
4432 /* Enable Busmaster */
4433 pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
4434 switch(acb->adapter_type) {
4435 case ACB_ADAPTER_TYPE_A: {
4436 u_int32_t rid0 = PCIR_BAR(0);
4437 vm_offset_t mem_base0;
4438
4439 acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE);
4440 if(acb->sys_res_arcmsr[0] == NULL) {
4441 arcmsr_free_resource(acb);
4442 kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
4443 return ENOMEM;
4444 }
4445 if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
4446 arcmsr_free_resource(acb);
4447 kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
4448 return ENXIO;
4449 }
4450 mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
4451 if(mem_base0 == 0) {
4452 arcmsr_free_resource(acb);
4453 kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
4454 return ENXIO;
4455 }
4456 acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
4457 acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
4458 acb->pmu = (struct MessageUnit_UNION *)mem_base0;
4459 acb->rid[0] = rid0;
4460 }
4461 break;
4462 case ACB_ADAPTER_TYPE_B: {
4463 struct HBB_MessageUnit *phbbmu;
4464 struct CommandControlBlock *freesrb;
4465 u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) };
4466 vm_offset_t mem_base[]={0,0};
4467 u_long size;
4468 if (vendor_dev_id == PCIDevVenIDARC1203)
4469 size = sizeof(struct HBB_DOORBELL_1203);
4470 else
4471 size = sizeof(struct HBB_DOORBELL);
4472 for(i=0; i < 2; i++) {
4473 acb->sys_res_arcmsr[i] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid[i], RF_ACTIVE);
4474 if(acb->sys_res_arcmsr[i] == NULL) {
4475 arcmsr_free_resource(acb);
4476 kprintf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i);
4477 return ENOMEM;
4478 }
4479 if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) {
4480 arcmsr_free_resource(acb);
4481 kprintf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i);
4482 return ENXIO;
4483 }
4484 mem_base[i] = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]);
4485 if(mem_base[i] == 0) {
4486 arcmsr_free_resource(acb);
4487 kprintf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i);
4488 return ENXIO;
4489 }
4490 acb->btag[i] = rman_get_bustag(acb->sys_res_arcmsr[i]);
4491 acb->bhandle[i] = rman_get_bushandle(acb->sys_res_arcmsr[i]);
4492 }
4493 freesrb = (struct CommandControlBlock *)acb->uncacheptr;
4494 acb->pmu = (struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE);
4495 phbbmu = (struct HBB_MessageUnit *)acb->pmu;
4496 phbbmu->hbb_doorbell = (struct HBB_DOORBELL *)mem_base[0];
4497 phbbmu->hbb_rwbuffer = (struct HBB_RWBUFFER *)mem_base[1];
4498 if (vendor_dev_id == PCIDevVenIDARC1203) {
4499 phbbmu->drv2iop_doorbell = offsetof(struct HBB_DOORBELL_1203, drv2iop_doorbell);
4500 phbbmu->drv2iop_doorbell_mask = offsetof(struct HBB_DOORBELL_1203, drv2iop_doorbell_mask);
4501 phbbmu->iop2drv_doorbell = offsetof(struct HBB_DOORBELL_1203, iop2drv_doorbell);
4502 phbbmu->iop2drv_doorbell_mask = offsetof(struct HBB_DOORBELL_1203, iop2drv_doorbell_mask);
4503 } else {
4504 phbbmu->drv2iop_doorbell = offsetof(struct HBB_DOORBELL, drv2iop_doorbell);
4505 phbbmu->drv2iop_doorbell_mask = offsetof(struct HBB_DOORBELL, drv2iop_doorbell_mask);
4506 phbbmu->iop2drv_doorbell = offsetof(struct HBB_DOORBELL, iop2drv_doorbell);
4507 phbbmu->iop2drv_doorbell_mask = offsetof(struct HBB_DOORBELL, iop2drv_doorbell_mask);
4508 }
4509 acb->rid[0] = rid[0];
4510 acb->rid[1] = rid[1];
4511 }
4512 break;
4513 case ACB_ADAPTER_TYPE_C: {
4514 u_int32_t rid0 = PCIR_BAR(1);
4515 vm_offset_t mem_base0;
4516
4517 acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE);
4518 if(acb->sys_res_arcmsr[0] == NULL) {
4519 arcmsr_free_resource(acb);
4520 kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
4521 return ENOMEM;
4522 }
4523 if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
4524 arcmsr_free_resource(acb);
4525 kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
4526 return ENXIO;
4527 }
4528 mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
4529 if(mem_base0 == 0) {
4530 arcmsr_free_resource(acb);
4531 kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
4532 return ENXIO;
4533 }
4534 acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
4535 acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
4536 acb->pmu = (struct MessageUnit_UNION *)mem_base0;
4537 acb->rid[0] = rid0;
4538 }
4539 break;
4540 case ACB_ADAPTER_TYPE_D: {
4541 struct HBD_MessageUnit0 *phbdmu;
4542 u_int32_t rid0 = PCIR_BAR(0);
4543 vm_offset_t mem_base0;
4544
4545 acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE);
4546 if(acb->sys_res_arcmsr[0] == NULL) {
4547 arcmsr_free_resource(acb);
4548 kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
4549 return ENOMEM;
4550 }
4551 if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
4552 arcmsr_free_resource(acb);
4553 kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
4554 return ENXIO;
4555 }
4556 mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
4557 if(mem_base0 == 0) {
4558 arcmsr_free_resource(acb);
4559 kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
4560 return ENXIO;
4561 }
4562 acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
4563 acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
4564 acb->pmu = (struct MessageUnit_UNION *)((unsigned long)acb->uncacheptr+ARCMSR_SRBS_POOL_SIZE);
4565 phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
4566 phbdmu->phbdmu = (struct HBD_MessageUnit *)mem_base0;
4567 acb->rid[0] = rid0;
4568 }
4569 break;
4570 case ACB_ADAPTER_TYPE_E: {
4571 u_int32_t rid0 = PCIR_BAR(1);
4572 vm_offset_t mem_base0;
4573
4574 acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBE_MessageUnit), RF_ACTIVE);
4575 if(acb->sys_res_arcmsr[0] == NULL) {
4576 arcmsr_free_resource(acb);
4577 kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
4578 return ENOMEM;
4579 }
4580 if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
4581 arcmsr_free_resource(acb);
4582 kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
4583 return ENXIO;
4584 }
4585 mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
4586 if(mem_base0 == 0) {
4587 arcmsr_free_resource(acb);
4588 kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
4589 return ENXIO;
4590 }
4591 acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
4592 acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
4593 acb->pmu = (struct MessageUnit_UNION *)mem_base0;
4594 acb->doneq_index = 0;
4595 acb->in_doorbell = 0;
4596 acb->out_doorbell = 0;
4597 acb->rid[0] = rid0;
4598 CHIP_REG_WRITE32(HBE_MessageUnit, 0, host_int_status, 0); /*clear interrupt*/
4599 CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, ARCMSR_HBEMU_DOORBELL_SYNC); /* synchronize doorbell to 0 */
4600 }
4601 break;
4602 }
4603 if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) {
4604 arcmsr_free_resource(acb);
4605 kprintf("arcmsr%d: map free srb failure!\n", device_get_unit(dev));
4606 return ENXIO;
4607 }
4608 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
4609 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
4610 /*
4611 ********************************************************************
4612 ** init raid volume state
4613 ********************************************************************
4614 */
4615 for(i=0; i < ARCMSR_MAX_TARGETID; i++) {
4616 for(j=0; j < ARCMSR_MAX_TARGETLUN; j++) {
4617 acb->devstate[i][j] = ARECA_RAID_GONE;
4618 }
4619 }
4620 arcmsr_iop_init(acb);
4621 return(0);
4622 }
4623
4624 /*
4625 ************************************************************************
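** arcmsr_teardown_intr: release interrupt resources. With MSI-X,
** each vector's handler is torn down and the MSI allocation is
** released; otherwise the single legacy/MSI interrupt handler is
** torn down and its IRQ resource freed.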
4626 ************************************************************************
4627 */
4628 static void arcmsr_teardown_intr(device_t dev, struct AdapterControlBlock *acb)
4629 {
4630 int i;
4631
4632 if (acb->acb_flags & ACB_F_MSIX_ENABLED) {
4633 for (i = 0; i < acb->msix_vectors; i++) {
4634 if (acb->ih[i])
4635 bus_teardown_intr(dev, acb->irqres[i], acb->ih[i]);
4636 // if (acb->irqres[i] != NULL)
4637 // bus_release_resource(dev, SYS_RES_IRQ,
4638 // acb->irq_id[i], acb->irqres[i]);
4639
4640 acb->ih[i] = NULL;
4641 }
4642 pci_release_msi(dev);
4643 } else {
4644 if ((acb->ih[0] != NULL) && (acb->irqres[0] != NULL))
4645 bus_teardown_intr(dev, acb->irqres[0], acb->ih[0]);
4646 if (acb->irqres[0] != NULL)
4647 bus_release_resource(dev, SYS_RES_IRQ,
4648 acb->irq_id[0], acb->irqres[0]);
4649 if (acb->irq_type == PCI_INTR_TYPE_MSI)
4650 pci_release_msi(dev);
4651 acb->ih[0] = NULL;
4652 acb->irqres[0] = NULL;
4653 acb->irq_type = 0;
4654 }
4655
4656 }
4657 /*
4658 ************************************************************************
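** arcmsr_attach: standard newbus attach -- initialize the hardware
** via arcmsr_initialize(), hook up the interrupt, register a CAM SIM
** and kick off an initial bus scan, create the /dev/arcmsrN control
** node for the ioctl interface, and start the 60-second device-map
** poll. On any failure the labels at the end unwind in reverse order.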
4659 ************************************************************************
4660 */
4661 static int arcmsr_attach(device_t dev)
4662 {
4663 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
4664 u_int32_t unit=device_get_unit(dev);
4665 union ccb *ccb;
4666 struct cam_devq *devq; /* Device Queue to use for this SIM */
4667 struct resource *irqres;
4668 u_int irq_flags;
4669
4670 if(acb == NULL) {
4671 kprintf("arcmsr%d: cannot allocate softc\n", unit);
4672 return (ENOMEM);
4673 }
4674 arcmsr_mutex_init(acb);
4675 acb->pci_dev = dev;
4676 acb->pci_unit = unit;
4677 if(arcmsr_initialize(dev)) {
4678 kprintf("arcmsr%d: initialize failure!\n", unit);
4679 goto initialize_failed;
4680 }
4681 /* After setting up the adapter, map our interrupt */
4682 acb->irq_id[0] = 0;
4683 acb->irq_type = pci_alloc_1intr(dev, arcmsr_msi_enable, &acb->irq_id[0], &irq_flags);
4684 irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &acb->irq_id[0], irq_flags);
4685 if(irqres == NULL ) {
4686 kprintf("arcmsr%d: unable to alloc interrupt resource!\n", unit);
4687 goto alloc_intr_failed;
4688 }
4689 if(bus_setup_intr(dev, irqres, INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih[0], NULL)) {
4690 kprintf("arcmsr%d: unable to setup interrupt handler!\n", unit);
4691 goto setup_intr_failed;
4692 }
4693 acb->irqres[0] = irqres;
4694 /*
4695 * Now let the CAM generic SCSI layer find the SCSI devices on
4696 * the bus and start queueing commands to them.
4697 * Create the device queue for our SIM(s); (MAX_START_JOB - 1)
4698 * is the number of simultaneous transactions (max_sim_transactions).
4699 */
4700 devq = cam_simq_alloc(acb->maxOutstanding);
4701 if(devq == NULL) {
4702 kprintf("arcmsr%d: cam_simq_alloc failure!\n", unit);
4703 goto simq_alloc_failed;
4704 }
4705 acb->psim = cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->isr_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
4706 cam_simq_release(devq);
4707 if(acb->psim == NULL) {
4708 kprintf("arcmsr%d: cam_sim_alloc failure!\n", unit);
4709 goto sim_alloc_failed;
4710 }
4711 ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
4712 if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
4713 kprintf("arcmsr%d: xpt_bus_register failure!\n", unit);
4714 goto xpt_bus_failed;
4715 }
4716 if ((ccb = xpt_alloc_ccb()) == NULL) {
4717 kprintf("arcmsr%d: xpt_alloc_ccb failure!\n", unit);
4718 goto xpt_ccb_failed;
4719 }
4720 if(xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
4721 kprintf("arcmsr%d: xpt_create_path failure!\n", unit);
4722 goto xpt_path_failed;
4723 }
4724 /*
4725 ****************************************************
4726 */
4727 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, /*priority*/5);
4728 ccb->ccb_h.func_code = XPT_SCAN_BUS;
4729 ccb->ccb_h.cbfcnp = arcmsr_rescanLun_cb;
4730 ccb->crcn.flags = CAM_FLAG_NONE;
4731 xpt_action(ccb);
4732 ARCMSR_LOCK_RELEASE(&acb->isr_lock);
4733
4734 /* Create the control device. */
4735 acb->ioctl_dev = make_dev(&arcmsr_ops, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit);
4736 acb->ioctl_dev->si_drv1 = acb;
4737 (void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
4738 arcmsr_callout_init(&acb->devmap_callout);
4739 callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb);
4740 return (0);
4741
4742 xpt_path_failed:
4743 xpt_free_ccb(&ccb->ccb_h);
4744 xpt_ccb_failed:
4745 xpt_bus_deregister(cam_sim_path(acb->psim));
4746 xpt_bus_failed:
4747 ARCMSR_LOCK_RELEASE(&acb->isr_lock);
4748 cam_sim_free(acb->psim);
4749 sim_alloc_failed:
4750 cam_simq_release(devq);
4751 simq_alloc_failed:
4752 arcmsr_teardown_intr(dev, acb);
4753 setup_intr_failed:
4754 arcmsr_free_resource(acb);
4755 bus_release_resource(dev, SYS_RES_IRQ, acb->irq_id[0], irqres);
4756 alloc_intr_failed:
4757 if (acb->irq_type == PCI_INTR_TYPE_MSI)
4758 pci_release_msi(dev);
4759 initialize_failed:
4760 arcmsr_mutex_destroy(acb);
4761 return ENXIO;
4762 }
4763
4764 /*
4765 ************************************************************************
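** arcmsr_probe: match any Areca PCI ID this driver knows, build a
** human-readable controller description (bus type, RAID6 capability,
** driver version) and hand it to the bus; unknown devices are
** rejected with ENXIO.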
4766 ************************************************************************
4767 */
4768 static int arcmsr_probe(device_t dev)
4769 {
4770 u_int32_t id;
4771 u_int16_t sub_device_id;
4772 static char buf[256];
4773 char x_type[]={"unknown"};
4774 char *type;
4775 int raid6 = 1;
4776
4777 if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
4778 return (ENXIO);
4779 }
4780 sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
4781 switch(id = pci_get_devid(dev)) {
4782 case PCIDevVenIDARC1110:
4783 case PCIDevVenIDARC1200:
4784 case PCIDevVenIDARC1201:
4785 case PCIDevVenIDARC1210:
4786 raid6 = 0;
4787 /*FALLTHRU*/
4788 case PCIDevVenIDARC1120:
4789 case PCIDevVenIDARC1130:
4790 case PCIDevVenIDARC1160:
4791 case PCIDevVenIDARC1170:
4792 case PCIDevVenIDARC1220:
4793 case PCIDevVenIDARC1230:
4794 case PCIDevVenIDARC1231:
4795 case PCIDevVenIDARC1260:
4796 case PCIDevVenIDARC1261:
4797 case PCIDevVenIDARC1270:
4798 case PCIDevVenIDARC1280:
4799 type = "SATA 3G";
4800 break;
4801 case PCIDevVenIDARC1212:
4802 case PCIDevVenIDARC1222:
4803 case PCIDevVenIDARC1380:
4804 case PCIDevVenIDARC1381:
4805 case PCIDevVenIDARC1680:
4806 case PCIDevVenIDARC1681:
4807 type = "SAS 3G";
4808 break;
4809 case PCIDevVenIDARC1880:
4810 case PCIDevVenIDARC1882:
4811 case PCIDevVenIDARC1213:
4812 case PCIDevVenIDARC1223:
4813 if ((sub_device_id == ARECA_SUB_DEV_ID_1883) ||
4814 (sub_device_id == ARECA_SUB_DEV_ID_1216) ||
4815 (sub_device_id == ARECA_SUB_DEV_ID_1226))
4816 type = "SAS 12G";
4817 else
4818 type = "SAS 6G";
4819 arcmsr_msi_enable = 0;
4820 break;
4821 case PCIDevVenIDARC1884:
4822 type = "SAS 12G";
4823 arcmsr_msi_enable = 0;
4824 break;
4825 case PCIDevVenIDARC1214:
4826 arcmsr_msi_enable = 0; /* FALLTHROUGH: the ARC1214 is also reported as SATA 6G */
4827 case PCIDevVenIDARC1203:
4828 type = "SATA 6G";
4829 break;
4830 default:
4831 type = x_type;
4832 raid6 = 0;
4833 break;
4834 }
4835 if(type == x_type)
4836 return(ENXIO);
4837 ksprintf(buf, "Areca %s Host Adapter RAID Controller %s\n%s\n",
4838 type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);
4839 device_set_desc_copy(dev, buf);
4840 return (BUS_PROBE_DEFAULT);
4841 }
4842 /*
4843 ************************************************************************
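** arcmsr_shutdown: quiesce the adapter -- stop background rebuild,
** flush the adapter cache, and abort any SRBs still outstanding so
** nothing is left in flight across a reboot.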
4844 ************************************************************************
4845 */
4846 static int arcmsr_shutdown(device_t dev)
4847 {
4848 u_int32_t i;
4849 struct CommandControlBlock *srb;
4850 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
4851
4852 /* stop adapter background rebuild */
4853 ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
4854 /* disable all outbound interrupt */
4855 arcmsr_disable_allintr(acb);
4856 arcmsr_stop_adapter_bgrb(acb);
4857 arcmsr_flush_adapter_cache(acb);
4858 /* abort all outstanding command */
4859 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
4860 acb->acb_flags &= ~ACB_F_IOP_INITED;
4861 if(acb->srboutstandingcount != 0) {
4862 /*clear and abort all outbound posted Q*/
4863 arcmsr_done4abort_postqueue(acb);
4864 /* talk to iop 331 outstanding command aborted*/
4865 arcmsr_abort_allcmd(acb);
4866 for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
4867 srb = acb->psrb_pool[i];
4868 if(srb->srb_state == ARCMSR_SRB_START) {
4869 srb->srb_state = ARCMSR_SRB_ABORTED;
4870 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
4871 arcmsr_srb_complete(srb, 1);
4872 }
4873 }
4874 }
4875 acb->srboutstandingcount = 0;
4876 acb->workingsrb_doneindex = 0;
4877 acb->workingsrb_startindex = 0;
4878 acb->pktRequestCount = 0;
4879 acb->pktReturnCount = 0;
4880 ARCMSR_LOCK_RELEASE(&acb->isr_lock);
4881 return (0);
4882 }
4883 /*
4884 ************************************************************************
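** arcmsr_detach: stop the device-map callout, tear down the
** interrupt, shut the adapter down, release DMA and register
** resources, then deregister the CAM bus and free the SIM.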
4885 ************************************************************************
4886 */
4887 static int arcmsr_detach(device_t dev)
4888 {
4889 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
4890 int i;
4891
4892 callout_stop(&acb->devmap_callout);
4893 arcmsr_teardown_intr(dev, acb);
4894 arcmsr_shutdown(dev);
4895 arcmsr_free_resource(acb);
4896 for(i=0; (i < 2) && (acb->sys_res_arcmsr[i]!=NULL); i++) {
4897 bus_release_resource(dev, SYS_RES_MEMORY, acb->rid[i], acb->sys_res_arcmsr[i]);
4898 }
4899 ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
4900 xpt_bus_deregister(cam_sim_path(acb->psim));
4901 cam_sim_free(acb->psim);
4902 ARCMSR_LOCK_RELEASE(&acb->isr_lock);
4903 arcmsr_mutex_destroy(acb);
4904 return (0);
4905 }
4906
4907 #ifdef ARCMSR_DEBUG1
4908 static void arcmsr_dump_data(struct AdapterControlBlock *acb)
4909 {
4910 if((acb->pktRequestCount - acb->pktReturnCount) == 0)
4911 return;
4912 kprintf("Command Request Count =0x%x\n",acb->pktRequestCount);
4913 kprintf("Command Return Count =0x%x\n",acb->pktReturnCount);
4914 kprintf("Command (Req-Rtn) Count =0x%x\n",(acb->pktRequestCount - acb->pktReturnCount));
4915 kprintf("Queued Command Count =0x%x\n",acb->srboutstandingcount);
4916 }
4917 #endif
4918