1 /* 2 ******************************************************************************** 3 ** OS : FreeBSD 4 ** FILE NAME : arcmsr.c 5 ** BY : Erich Chen, Ching Huang 6 ** Description: SCSI RAID Device Driver for 7 ** ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x) 8 ** SATA/SAS RAID HOST Adapter 9 ******************************************************************************** 10 ******************************************************************************** 11 ** 12 ** Copyright (C) 2002 - 2012, Areca Technology Corporation All rights reserved. 13 ** 14 ** Redistribution and use in source and binary forms, with or without 15 ** modification, are permitted provided that the following conditions 16 ** are met: 17 ** 1. Redistributions of source code must retain the above copyright 18 ** notice, this list of conditions and the following disclaimer. 19 ** 2. Redistributions in binary form must reproduce the above copyright 20 ** notice, this list of conditions and the following disclaimer in the 21 ** documentation and/or other materials provided with the distribution. 22 ** 3. The name of the author may not be used to endorse or promote products 23 ** derived from this software without specific prior written permission. 24 ** 25 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 26 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 27 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
28 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 29 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT 30 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 31 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY 32 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 33 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF 34 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 ******************************************************************************** 36 ** History 37 ** 38 ** REV# DATE NAME DESCRIPTION 39 ** 1.00.00.00 03/31/2004 Erich Chen First release 40 ** 1.20.00.02 11/29/2004 Erich Chen bug fix with arcmsr_bus_reset when PHY error 41 ** 1.20.00.03 04/19/2005 Erich Chen add SATA 24 Ports adapter type support 42 ** clean unused function 43 ** 1.20.00.12 09/12/2005 Erich Chen bug fix with abort command handling, 44 ** firmware version check 45 ** and firmware update notify for hardware bug fix 46 ** handling if none zero high part physical address 47 ** of srb resource 48 ** 1.20.00.13 08/18/2006 Erich Chen remove pending srb and report busy 49 ** add iop message xfer 50 ** with scsi pass-through command 51 ** add new device id of sas raid adapters 52 ** code fit for SPARC64 & PPC 53 ** 1.20.00.14 02/05/2007 Erich Chen bug fix for incorrect ccb_h.status report 54 ** and cause g_vfs_done() read write error 55 ** 1.20.00.15 10/10/2007 Erich Chen support new RAID adapter type ARC120x 56 ** 1.20.00.16 10/10/2009 Erich Chen Bug fix for RAID adapter type ARC120x 57 ** bus_dmamem_alloc() with BUS_DMA_ZERO 58 ** 1.20.00.17 07/15/2010 Ching Huang Added support ARC1880 59 ** report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed, 60 ** prevent cam_periph_error removing all LUN devices of one Target id 61 ** for any one LUN device failed 62 ** 1.20.00.18 10/14/2010 Ching Huang Fixed "inquiry data fails 
comparion at DV1 step" 63 ** 10/25/2010 Ching Huang Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B 64 ** 1.20.00.19 11/11/2010 Ching Huang Fixed arcmsr driver prevent arcsas support for Areca SAS HBA ARC13x0 65 ** 1.20.00.20 12/08/2010 Ching Huang Avoid calling atomic_set_int function 66 ** 1.20.00.21 02/08/2011 Ching Huang Implement I/O request timeout 67 ** 02/14/2011 Ching Huang Modified pktRequestCount 68 ** 1.20.00.21 03/03/2011 Ching Huang if a command timeout, then wait its ccb back before free it 69 ** 1.20.00.22 07/04/2011 Ching Huang Fixed multiple MTX panic 70 ** 1.20.00.23 10/28/2011 Ching Huang Added TIMEOUT_DELAY in case of too many HDDs need to start 71 ** 1.20.00.23 11/08/2011 Ching Huang Added report device transfer speed 72 ** 1.20.00.23 01/30/2012 Ching Huang Fixed Request requeued and Retrying command 73 ** 1.20.00.24 06/11/2012 Ching Huang Fixed return sense data condition 74 ** 1.20.00.25 08/17/2012 Ching Huang Fixed hotplug device no function on type A adapter 75 ** 1.20.00.26 12/14/2012 Ching Huang Added support ARC1214,1224,1264,1284 76 ** 1.20.00.27 05/06/2013 Ching Huang Fixed out standing cmd full on ARC-12x4 77 ** 1.20.00.28 09/13/2013 Ching Huang Removed recursive mutex in arcmsr_abort_dr_ccbs 78 ** 1.20.00.29 12/18/2013 Ching Huang Change simq allocation number, support ARC1883 79 ** 1.30.00.00 11/30/2015 Ching Huang Added support ARC1203 80 ** 1.40.00.00 10/26/2017 Ching Huang Added support ARC1884 81 ****************************************************************************************** 82 * $FreeBSD: head/sys/dev/arcmsr/arcmsr.c 259565 2013-12-18 19:25:40Z delphij $ 83 */ 84 #if 0 85 #define ARCMSR_DEBUG1 1 86 #endif 87 #include <sys/param.h> 88 #include <sys/systm.h> 89 #include <sys/malloc.h> 90 #include <sys/kernel.h> 91 #include <sys/bus.h> 92 #include <sys/queue.h> 93 #include <sys/stat.h> 94 #include <sys/kthread.h> 95 #include <sys/module.h> 96 #include <sys/proc.h> 97 #include <sys/lock.h> 98 #include 
<sys/sysctl.h> 99 #include <sys/thread2.h> 100 #include <sys/poll.h> 101 #include <sys/device.h> 102 #include <vm/vm.h> 103 #include <vm/vm_param.h> 104 #include <vm/pmap.h> 105 106 #include <machine/atomic.h> 107 #include <sys/conf.h> 108 #include <sys/rman.h> 109 110 #include <bus/cam/cam.h> 111 #include <bus/cam/cam_ccb.h> 112 #include <bus/cam/cam_sim.h> 113 #include <bus/cam/cam_periph.h> 114 #include <bus/cam/cam_xpt_periph.h> 115 #include <bus/cam/cam_xpt_sim.h> 116 #include <bus/cam/cam_debug.h> 117 #include <bus/cam/scsi/scsi_all.h> 118 #include <bus/cam/scsi/scsi_message.h> 119 /* 120 ************************************************************************** 121 ************************************************************************** 122 */ 123 #include <sys/endian.h> 124 #include <bus/pci/pcivar.h> 125 #include <bus/pci/pcireg.h> 126 127 #define arcmsr_callout_init(a) callout_init_mp(a); 128 129 #define ARCMSR_DRIVER_VERSION "arcmsr version 1.40.00.00 2017-10-26" 130 #include <dev/raid/arcmsr/arcmsr.h> 131 /* 132 ************************************************************************** 133 ************************************************************************** 134 */ 135 static void arcmsr_free_srb(struct CommandControlBlock *srb); 136 static struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb); 137 static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb); 138 static int arcmsr_probe(device_t dev); 139 static int arcmsr_attach(device_t dev); 140 static int arcmsr_detach(device_t dev); 141 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg); 142 static void arcmsr_iop_parking(struct AdapterControlBlock *acb); 143 static int arcmsr_shutdown(device_t dev); 144 static void arcmsr_interrupt(struct AdapterControlBlock *acb); 145 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb); 146 static void 
arcmsr_free_resource(struct AdapterControlBlock *acb);
static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, struct QBUFFER *prbuffer);
static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb);
static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg);
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb);
static int arcmsr_resume(device_t dev);
static int arcmsr_suspend(device_t dev);
static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
static void arcmsr_polling_devmap(void *arg);
static void arcmsr_srb_timeout(void *arg);
static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb);
static void arcmsr_hbe_postqueue_isr(struct AdapterControlBlock *acb);
static void arcmsr_teardown_intr(device_t dev, struct AdapterControlBlock *acb);
#ifdef ARCMSR_DEBUG1
static void arcmsr_dump_data(struct AdapterControlBlock *acb);
#endif
/*
**************************************************************************
**************************************************************************
*/
/* Busy-wait for 'us' microseconds; thin wrapper over the kernel DELAY(). */
static void UDELAY(u_int32_t us) { DELAY(us); }
/*
**************************************************************************
**************************************************************************
*/
/* bus_dma callbacks used to map the SRB pool and to post a built SRB */
static bus_dmamap_callback_t arcmsr_map_free_srb;
static bus_dmamap_callback_t arcmsr_execute_srb;
/*
**************************************************************************
**************************************************************************
*/
/* character-device entry points for the /dev management node */
static d_open_t arcmsr_open;
static d_close_t arcmsr_close;
static d_ioctl_t arcmsr_ioctl;

/* newbus method table: hooks this driver into the PCI probe/attach framework */
static device_method_t arcmsr_methods[]={
	DEVMETHOD(device_probe, arcmsr_probe),
	DEVMETHOD(device_attach, arcmsr_attach),
	DEVMETHOD(device_detach, arcmsr_detach),
	DEVMETHOD(device_shutdown, arcmsr_shutdown),
	DEVMETHOD(device_suspend, arcmsr_suspend),
	DEVMETHOD(device_resume, arcmsr_resume),
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	DEVMETHOD_END
};

/* softc is the per-adapter AdapterControlBlock */
static driver_t arcmsr_driver={
	"arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
};

static devclass_t arcmsr_devclass;
DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, NULL, NULL);
MODULE_VERSION(arcmsr, 1);
MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
MODULE_DEPEND(arcmsr, cam, 1, 1, 1);
#ifndef BUS_DMA_COHERENT
#define BUS_DMA_COHERENT 0x04 /* hint: map memory in a coherent way */
#endif
/* /dev node operations; D_MPSAFE: handlers run without the per-device token */
static struct dev_ops arcmsr_ops = {
	{ "arcmsr", 0, D_MPSAFE },
	.d_open = arcmsr_open, /* open */
	.d_close = arcmsr_close, /* close */
	.d_ioctl = arcmsr_ioctl, /* ioctl */
};

/*
 * Tunable hw.arcmsr.msi.enable: non-zero by default; presumably gates
 * MSI interrupt allocation in attach -- confirm against arcmsr_attach.
 */
static int arcmsr_msi_enable = 1;
TUNABLE_INT("hw.arcmsr.msi.enable", &arcmsr_msi_enable);

/*
**************************************************************************
**************************************************************************
*/
static int
arcmsr_open(struct dev_open_args 
*ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct AdapterControlBlock *acb = dev->si_drv1;

	/* reject opens until attach has hung the softc off the cdev */
	if(acb == NULL) {
		return ENXIO;
	}
	return (0);
}
/*
**************************************************************************
**************************************************************************
*/
static int
arcmsr_close(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct AdapterControlBlock *acb = dev->si_drv1;

	/* no per-open state to release; only validate the device is attached */
	if(acb == NULL) {
		return ENXIO;
	}
	return 0;
}
/*
**************************************************************************
**************************************************************************
*/
/* Management ioctl entry: forward the request to arcmsr_iop_ioctlcmd(). */
static int
arcmsr_ioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	u_long ioctl_cmd = ap->a_cmd;
	caddr_t arg = ap->a_data;
	struct AdapterControlBlock *acb = dev->si_drv1;

	if(acb == NULL) {
		return ENXIO;
	}
	return (arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
}
/*
**********************************************************************
**********************************************************************
*/
/*
** Mask every outbound interrupt source on the adapter and return the
** previous mask value so arcmsr_enable_allintr() can restore it later.
*/
static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb)
{
	u_int32_t intmask_org = 0;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		/* disable all outbound interrupt */
		intmask_org = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */
		CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
		/* disable all outbound interrupt */
		intmask_org = READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask)
			& (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */
		WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask, 0); /* disable all interrupt */
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		/* disable all outbound interrupt */
		intmask_org = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask) ; /* disable outbound message0 int */
		CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		/* disable all outbound interrupt */
		intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable) ; /* disable outbound message0 int */
		CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE);
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
		/* disable all outbound interrupt */
		/*
		 * NOTE(review): the read uses HBC_MessageUnit while the write uses
		 * HBE_MessageUnit.  This is harmless only if host_int_mask sits at
		 * the same offset in both register layouts -- confirm in arcmsr.h.
		 */
		intmask_org = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask) ; /* disable outbound message0 int */
		CHIP_REG_WRITE32(HBE_MessageUnit, 0, host_int_mask, intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE);
		}
		break;
	}
	return (intmask_org);
}
/*
**********************************************************************
**********************************************************************
*/
/*
** Re-enable the interrupt sources the driver services, starting from the
** mask value previously returned by arcmsr_disable_allintr(); also caches
** the active-source mask in acb->outbound_int_enable.
*/
static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org)
{
	u_int32_t mask;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		/* enable outbound Post Queue, outbound doorbell Interrupt */
		mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
		CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
		/* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
		mask = 
(ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		/* enable outbound Post Queue, outbound doorbell Interrupt */
		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
		CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		/* enable outbound Post Queue, outbound doorbell Interrupt */
		mask = ARCMSR_HBDMU_ALL_INT_ENABLE;
		CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | mask);
		/* dummy read-back; presumably flushes the posted write -- confirm */
		CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable);
		acb->outbound_int_enable = mask;
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
		/* enable outbound Post Queue, outbound doorbell Interrupt */
		mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
		CHIP_REG_WRITE32(HBE_MessageUnit, 0, host_int_mask, intmask_org & mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
		}
		break;
	}
}
/*
**********************************************************************
**********************************************************************
*/
/*
** Poll the type A message unit until the firmware raises MESSAGE0_INT,
** then ack it.  Polls 100 x 10ms per pass, up to 20 passes (~20 s total).
** Returns TRUE on handshake, FALSE on timeout.
*/
static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	u_int32_t Index;
	u_int8_t Retries = 0x00;

	do {
		for(Index=0; Index < 100; Index++) {
			if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
				CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/
				return TRUE;
			}
			UDELAY(10000);
		}/*max 1 seconds*/
	}while(Retries++ < 20);/*max 20 sec*/
	return (FALSE);
}
/*
**********************************************************************
**********************************************************************
*/
/*
** Type B variant of the message handshake poll: acks via the iop2drv
** doorbell and signals END_OF_INTERRUPT back to the IOP.
*/
static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	u_int32_t Index;
	u_int8_t Retries = 0x00;
	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

	do {
		for(Index=0; Index < 100; Index++) {
			if(READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
				WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/
				WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
				return TRUE;
			}
			UDELAY(10000);
		}/*max 1 seconds*/
	}while(Retries++ < 20);/*max 20 sec*/
	return (FALSE);
}
/*
**********************************************************************
**********************************************************************
*/
/* Type C variant: acks through the dedicated doorbell-clear register. */
static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	u_int32_t Index;
	u_int8_t Retries = 0x00;

	do {
		for(Index=0; Index < 100; Index++) {
			if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
				CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/
				return TRUE;
			}
			UDELAY(10000);
		}/*max 1 seconds*/
	}while(Retries++ < 20);/*max 20 sec*/
	return (FALSE);
}
/*
**********************************************************************
**********************************************************************
*/
/* Type D variant: acks by writing the clear pattern back to the doorbell. */
static u_int8_t arcmsr_hbd_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	u_int32_t Index;
	u_int8_t Retries = 0x00;

	do {
		for(Index=0; Index < 100; Index++) {
			if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) {
				CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);/*clear interrupt*/
				return TRUE;
			}
			UDELAY(10000);
		}/*max 1 seconds*/
	}while(Retries++ < 20);/*max 20 sec*/
	return (FALSE);
}
/*
**********************************************************************
**********************************************************************
*/
/*
** Type E variant: completion is signalled by the MESSAGE_CMD_DONE bit
** toggling relative to the cached acb->in_doorbell value; the new
** doorbell state is latched on success.
*/
static u_int8_t arcmsr_hbe_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	u_int32_t Index, read_doorbell;
	u_int8_t Retries = 0x00;

	do {
		for(Index=0; Index < 100; Index++) {
			read_doorbell = CHIP_REG_READ32(HBE_MessageUnit, 0, iobound_doorbell);
			if((read_doorbell ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
				CHIP_REG_WRITE32(HBE_MessageUnit, 0, host_int_status, 0);/*clear interrupt*/
				acb->in_doorbell = read_doorbell;
				return TRUE;
			}
			UDELAY(10000);
		}/*max 1 seconds*/
	}while(Retries++ < 20);/*max 20 sec*/
	return (FALSE);
}
/*
************************************************************************
************************************************************************
*/
/* Ask the type A firmware to flush its cache; retries the handshake. */
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
{
	int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */

	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
	do {
		if(arcmsr_hba_wait_msgint_ready(acb)) {
			break;
		} else {
			retry_count--;
		}
	}while(retry_count != 0);
}
/*
************************************************************************
************************************************************************
*/
/* Type B flush-cache request via the drv2iop doorbell. */
static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
{
	int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

	WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
	do {
		if(arcmsr_hbb_wait_msgint_ready(acb)) {
			break;
		} else {
			retry_count--;
		}
	}while(retry_count != 0);
}
/*
************************************************************************
************************************************************************
*/
/* Type C flush-cache request; the doorbell write notifies the IOP. */
static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
{
	int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */

	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
	do {
		if(arcmsr_hbc_wait_msgint_ready(acb)) {
			break;
		} else {
			retry_count--;
		}
	}while(retry_count != 0);
}
/*
************************************************************************
************************************************************************
*/
/* Type D flush-cache request. */
static void arcmsr_flush_hbd_cache(struct AdapterControlBlock *acb)
{
	int retry_count = 30; /* enlarge wait flush adapter cache time: 10 minute */

	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
	do {
		if(arcmsr_hbd_wait_msgint_ready(acb)) {
			break;
		} else {
			retry_count--;
		}
	}while(retry_count != 0);
}
/*
************************************************************************
************************************************************************
*/
/* Type E flush-cache request; notifies the IOP by toggling out_doorbell. */
static void arcmsr_flush_hbe_cache(struct AdapterControlBlock *acb)
{
	int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */

	CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_msgaddr0, 
ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
	acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell);
	do {
		if(arcmsr_hbe_wait_msgint_ready(acb)) {
			break;
		} else {
			retry_count--;
		}
	}while(retry_count != 0);
}
/*
************************************************************************
************************************************************************
*/
/* Dispatch the 'flush cache' message to the chip-specific helper above. */
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		arcmsr_flush_hba_cache(acb);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		arcmsr_flush_hbb_cache(acb);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		arcmsr_flush_hbc_cache(acb);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		arcmsr_flush_hbd_cache(acb);
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
		arcmsr_flush_hbe_cache(acb);
		}
		break;
	}
}
/*
*******************************************************************************
*******************************************************************************
*/
/* device_suspend hook: park the IOP and mask all adapter interrupts. */
static int arcmsr_suspend(device_t dev)
{
	struct AdapterControlBlock *acb = device_get_softc(dev);

	/* flush controller */
	arcmsr_iop_parking(acb);
	/* disable all outbound interrupt */
	arcmsr_disable_allintr(acb);
	return(0);
}
/*
*******************************************************************************
*******************************************************************************
*/
/* device_resume hook: re-initialize the IOP after a suspend. */
static int arcmsr_resume(device_t dev)
{
	struct AdapterControlBlock *acb = device_get_softc(dev);

	arcmsr_iop_init(acb);
	return(0);
}
/*
**********************************************************************
**********************************************************************
*/
/*
** Copy the firmware-returned sense bytes into the CCB and mark the
** request CHECK CONDITION with valid autosense data for CAM.
*/
static void
arcmsr_report_sense_info(struct CommandControlBlock *srb)
{
	union ccb *pccb = srb->pccb;

	pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
	pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
	if(pccb->csio.sense_len) {
		memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data));
		/* never copy more than either the firmware or the CCB can hold */
		memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData,
			get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data)));
		((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */
		pccb->ccb_h.status |= CAM_AUTOSNS_VALID;
	}
}
/*
*********************************************************************
*********************************************************************
*/
/* Type A: post the 'abort all' message and wait for the handshake. */
static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
{
	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
	if(!arcmsr_hba_wait_msgint_ready(acb)) {
		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
	}
}
/*
*********************************************************************
*********************************************************************
*/
/* Type B: post the 'abort all' message via the drv2iop doorbell. */
static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
{
	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
	WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
	}
}
/*
*********************************************************************
*********************************************************************
*/
/* Type C: post the 'abort all' message, then ring the inbound doorbell. */
static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb)
{
	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
	}
}
/*
*********************************************************************
*********************************************************************
*/
/* Type D: post the 'abort all' message. */
static void arcmsr_abort_hbd_allcmd(struct AdapterControlBlock *acb)
{
	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
	if(!arcmsr_hbd_wait_msgint_ready(acb)) {
		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
	}
}
/*
*********************************************************************
*********************************************************************
*/
/* Type E: post the 'abort all' message and toggle the doorbell bit. */
static void arcmsr_abort_hbe_allcmd(struct AdapterControlBlock *acb)
{
	CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
	acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell);
	if(!arcmsr_hbe_wait_msgint_ready(acb)) {
		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
	}
}
/*
*********************************************************************
*********************************************************************
*/
/* Dispatch 'abort all outstanding commands' to the chip-specific helper. */
static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		arcmsr_abort_hba_allcmd(acb);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		arcmsr_abort_hbb_allcmd(acb);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		arcmsr_abort_hbc_allcmd(acb);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		arcmsr_abort_hbd_allcmd(acb);
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
		arcmsr_abort_hbe_allcmd(acb);
		}
		break;
	}
}
/*
**********************************************************************
**********************************************************************
*/
/*
** Complete an SRB back to CAM: stop its timeout callout, post-sync and
** unload its DMA map, drop the outstanding count (stand_flag == 1),
** release the frozen SIMQ when the queue has drained enough, free the
** SRB (unless a timeout handler still owns it), and call xpt_done().
*/
static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
{
	struct AdapterControlBlock *acb = srb->acb;
	union ccb *pccb = srb->pccb;

	if(srb->srb_flags & SRB_FLAG_TIMER_START)
		callout_stop(&srb->ccb_callout);
	if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			op = BUS_DMASYNC_POSTREAD;
		} else {
			op = BUS_DMASYNC_POSTWRITE;
		}
		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
		ARCMSR_LOCK_ACQUIRE(&acb->io_lock);
		bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
		ARCMSR_LOCK_RELEASE(&acb->io_lock);
	}
	if(stand_flag == 1) {
		atomic_subtract_int(&acb->srboutstandingcount, 1);
		/* un-freeze the SIMQ once we are 10 below the adapter limit */
		if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && (
		acb->srboutstandingcount < (acb->maxOutstanding -10))) {
			acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN;
			pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		}
	}
	/* a timed-out SRB is freed later, when the adapter returns it */
	if(srb->srb_state != ARCMSR_SRB_TIMEOUT)
		arcmsr_free_srb(srb);
	acb->pktReturnCount++;
	ARCMSR_LOCK_ACQUIRE(&acb->sim_lock);
	xpt_done(pccb);
	ARCMSR_LOCK_RELEASE(&acb->sim_lock);
}
/*
**************************************************************************
**************************************************************************
*/
/*
** Translate the firmware completion status of one SRB into a CAM ccb
** status, track per-target/LUN RAID volume state, and complete the SRB.
** error == FALSE means the command succeeded.
*/
static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error)
{
	int target, lun;

	target = srb->pccb->ccb_h.target_id;
	lun = srb->pccb->ccb_h.target_lun;
	if(error == FALSE) {
		if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
			acb->devstate[target][lun] = ARECA_RAID_GOOD;
		}
		srb->pccb->ccb_h.status |= CAM_REQ_CMP;
		arcmsr_srb_complete(srb, 1);
	} else {
		switch(srb->arcmsr_cdb.DeviceStatus) {
		case ARCMSR_DEV_SELECT_TIMEOUT: {
			/* log only on the GOOD->GONE transition */
			if(acb->devstate[target][lun] == ARECA_RAID_GOOD) {
				kprintf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun);
			}
			acb->devstate[target][lun] = ARECA_RAID_GONE;
			/* CAM_DEV_NOT_THERE (not CAM_SEL_TIMEOUT) so CAM drops only this LUN */
			srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
			arcmsr_srb_complete(srb, 1);
			}
			break;
		case ARCMSR_DEV_ABORTED:
		case ARCMSR_DEV_INIT_FAIL: {
			acb->devstate[target][lun] = ARECA_RAID_GONE;
			srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
			arcmsr_srb_complete(srb, 1);
			}
			break;
		case SCSISTAT_CHECK_CONDITION: {
			acb->devstate[target][lun] = ARECA_RAID_GOOD;
			arcmsr_report_sense_info(srb);
			arcmsr_srb_complete(srb, 1);
			}
			break;
		default:
			kprintf("arcmsr%d: scsi id=%d lun=%d isr got command error done,but got unknown DeviceStatus=0x%x \n"
				, acb->pci_unit, target, lun ,srb->arcmsr_cdb.DeviceStatus);
			acb->devstate[target][lun] = ARECA_RAID_GONE;
			srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
			/*unknown error or crc error just for retry*/
			arcmsr_srb_complete(srb, 1);
			break;
		}
	}
}
/*
**************************************************************************
**************************************************************************
*/
/*
** Map one completion-queue entry (flag_srb) back to its SRB -- the
** encoding is adapter-type specific -- validate ownership/state, and
** hand the SRB to arcmsr_report_srb_state().
*/
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error)
{
	struct CommandControlBlock *srb;

	/* check if command done with no error*/
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_C:
	case ACB_ADAPTER_TYPE_D:
		/* low 5 bits carry status; mask them off to recover the frame address */
		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0)); /*frame must be 32 bytes aligned*/
		break;
	case ACB_ADAPTER_TYPE_E:
		/* type E returns an index into the SRB pool, not an address */
		srb = acb->psrb_pool[flag_srb];
		break;
	case ACB_ADAPTER_TYPE_A:
	case ACB_ADAPTER_TYPE_B:
	default:
		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb
<< 5));/*frame must be 32 bytes aligned*/ 827 break; 828 } 829 if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { 830 if(srb->srb_state == ARCMSR_SRB_TIMEOUT) { 831 arcmsr_free_srb(srb); 832 kprintf("arcmsr%d: srb='%p' return srb has been timeouted\n", acb->pci_unit, srb); 833 return; 834 } 835 kprintf("arcmsr%d: return srb has been completed\n" 836 "srb='%p' srb_state=0x%x outstanding srb count=%d \n", 837 acb->pci_unit, srb, srb->srb_state, acb->srboutstandingcount); 838 return; 839 } 840 arcmsr_report_srb_state(acb, srb, error); 841 } 842 /* 843 ************************************************************************** 844 ************************************************************************** 845 */ 846 static void arcmsr_srb_timeout(void *arg) 847 { 848 struct CommandControlBlock *srb = (struct CommandControlBlock *)arg; 849 struct AdapterControlBlock *acb; 850 int target, lun; 851 u_int8_t cmd; 852 853 target = srb->pccb->ccb_h.target_id; 854 lun = srb->pccb->ccb_h.target_lun; 855 acb = srb->acb; 856 if(srb->srb_state == ARCMSR_SRB_START) 857 { 858 cmd = srb->pccb->csio.cdb_io.cdb_bytes[0]; 859 srb->srb_state = ARCMSR_SRB_TIMEOUT; 860 srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT; 861 arcmsr_srb_complete(srb, 1); 862 kprintf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command time out!\n", 863 acb->pci_unit, target, lun, cmd, srb); 864 } 865 #ifdef ARCMSR_DEBUG1 866 arcmsr_dump_data(acb); 867 #endif 868 } 869 870 /* 871 ********************************************************************** 872 ********************************************************************** 873 */ 874 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) 875 { 876 int i=0; 877 u_int32_t flag_srb; 878 u_int16_t error; 879 880 switch (acb->adapter_type) { 881 case ACB_ADAPTER_TYPE_A: { 882 u_int32_t outbound_intstatus; 883 884 /*clear and abort all outbound posted Q*/ 885 outbound_intstatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & 
				acb->outbound_int_enable;
			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
			/* 0xFFFFFFFF marks an empty queue; also bound the loop */
			while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
				error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
				arcmsr_drain_donequeue(acb, flag_srb, error);
			}
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;

			/*clear all outbound posted Q*/
			WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
			for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
				if((flag_srb = phbbmu->done_qbuffer[i]) != 0) {
					phbbmu->done_qbuffer[i] = 0;
					error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
					arcmsr_drain_donequeue(acb, flag_srb, error);
				}
				phbbmu->post_qbuffer[i] = 0;
			}/*drain reply FIFO*/
			phbbmu->doneq_index = 0;
			phbbmu->postq_index = 0;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {

			/* pop completions while the post-queue ISR bit stays asserted */
			while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
				flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
				error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
				arcmsr_drain_donequeue(acb, flag_srb, error);
			}
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
			/* types D/E reuse their normal post-queue ISR to drain */
			arcmsr_hbd_postqueue_isr(acb);
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
			arcmsr_hbe_postqueue_isr(acb);
		}
		break;
	}
}
/*
****************************************************************************
** arcmsr_iop_reset: abort everything outstanding on the adapter and
** return every started SRB to CAM with CAM_REQ_ABORTED, then reset the
** driver-side queue bookkeeping.
****************************************************************************
*/
static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
	struct CommandControlBlock *srb;
	u_int32_t intmask_org;
	u_int32_t i=0;

	if(acb->srboutstandingcount>0) {
		/* disable all outbound interrupt */
		intmask_org = arcmsr_disable_allintr(acb);
		/*clear and abort all outbound posted Q*/
		arcmsr_done4abort_postqueue(acb);
		/* talk to iop 331 outstanding command aborted*/
		arcmsr_abort_allcmd(acb);
		/* anything still marked started never came back; fail it to CAM */
		for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
			srb = acb->psrb_pool[i];
			if(srb->srb_state == ARCMSR_SRB_START) {
				srb->srb_state = ARCMSR_SRB_ABORTED;
				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
				arcmsr_srb_complete(srb, 1);
				kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p' aborted\n"
					, acb->pci_unit, srb->pccb->ccb_h.target_id
					, (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
			}
		}
		/* enable all outbound interrupt */
		arcmsr_enable_allintr(acb, intmask_org);
	}
	/* reset bookkeeping even if nothing was outstanding */
	acb->srboutstandingcount = 0;
	acb->workingsrb_doneindex = 0;
	acb->workingsrb_startindex = 0;
	acb->pktRequestCount = 0;
	acb->pktReturnCount = 0;
}
/*
**********************************************************************
** arcmsr_build_srb: translate a CAM SCSI I/O request into the Areca
** firmware CDB, building a 32-bit or 64-bit scatter/gather list from
** the bus_dma segment array.
**********************************************************************
*/
static void arcmsr_build_srb(struct CommandControlBlock *srb,
	bus_dma_segment_t *dm_segs, u_int32_t nseg)
{
	struct ARCMSR_CDB *arcmsr_cdb = &srb->arcmsr_cdb;
	u_int8_t *psge = (u_int8_t *)&arcmsr_cdb->u;
	u_int32_t address_lo, address_hi;

	union ccb *pccb = srb->pccb;
	struct ccb_scsiio *pcsio = &pccb->csio;
	u_int32_t arccdbsize = 0x30;	/* fixed CDB header size before the S/G list */

	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->Bus = 0;
	arcmsr_cdb->TargetID = pccb->ccb_h.target_id;
	arcmsr_cdb->LUN = pccb->ccb_h.target_lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->CdbLength = (u_int8_t)pcsio->cdb_len;
	bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
	if(nseg != 0) {
		struct AdapterControlBlock *acb = srb->acb;
		bus_dmasync_op_t op;
		u_int32_t length, i, cdb_sgcount = 0;

		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			op = BUS_DMASYNC_PREREAD;
		} else {
			op = BUS_DMASYNC_PREWRITE;
			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
			srb->srb_flags |= SRB_FLAG_WRITE;
		}
		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
		for(i=0; i < nseg; i++) {
			/* Get the physical address of the current data pointer */
			length = arcmsr_htole32(dm_segs[i].ds_len);
			address_lo = arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
			address_hi = arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
			if(address_hi == 0) {
				/* segment below 4G: compact 32-bit S/G entry */
				struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
				pdma_sg->address = address_lo;
				pdma_sg->length = length;
				psge += sizeof(struct SG32ENTRY);
				arccdbsize += sizeof(struct SG32ENTRY);
			} else {
				u_int32_t sg64s_size = 0, tmplength = length;

				/* 64-bit entries; a segment whose low part would wrap
				** past 4G is split at the boundary into two entries */
				while(1) {
					u_int64_t span4G, length0;
					struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

					span4G = (u_int64_t)address_lo + tmplength;
					pdma_sg->addresshigh = address_hi;
					pdma_sg->address = address_lo;
					if(span4G > 0x100000000) {
						/*see if cross 4G boundary*/
						length0 = 0x100000000-address_lo;
						pdma_sg->length = (u_int32_t)length0 | IS_SG64_ADDR;
						address_hi = address_hi+1;
						address_lo = 0;
						tmplength = tmplength - (u_int32_t)length0;
						sg64s_size += sizeof(struct SG64ENTRY);
						psge += sizeof(struct SG64ENTRY);
						cdb_sgcount++;
					} else {
						pdma_sg->length = tmplength | IS_SG64_ADDR;
						sg64s_size += sizeof(struct SG64ENTRY);
						psge += sizeof(struct SG64ENTRY);
						break;
					}
				}
				arccdbsize += sg64s_size;
			}
			cdb_sgcount++;
		}
		arcmsr_cdb->sgcount = (u_int8_t)cdb_sgcount;
		arcmsr_cdb->DataLength = pcsio->dxfer_len;
		/* S/G list spilled past the first 256-byte frame */
		if( arccdbsize > 256) {
			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
		}
	} else {
		arcmsr_cdb->DataLength = 0;
	}
	srb->arc_cdb_size = arccdbsize;
	/* number of 256-byte pages the firmware must fetch for this CDB */
	arcmsr_cdb->msgPages = (arccdbsize/256) + ((arccdbsize % 256) ? 1 : 0);
}
/*
**************************************************************************
** arcmsr_post_srb: hand a built SRB to the adapter, using the post
** mechanism appropriate for this message-unit type.
**************************************************************************
*/
static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
{
	u_int32_t cdb_phyaddr_low = (u_int32_t) srb->cdb_phyaddr_low;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&srb->arcmsr_cdb;

	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ?
		BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD);
	atomic_add_int(&acb->srboutstandingcount, 1);
	srb->srb_state = ARCMSR_SRB_START;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low|ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
			} else {
				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low);
			}
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
			int ending_index, index;

			index = phbbmu->postq_index;
			/* pre-clear the slot after ours as the queue terminator */
			ending_index = ((index+1) % ARCMSR_MAX_HBB_POSTQUEUE);
			phbbmu->post_qbuffer[ending_index] = 0;
			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
				phbbmu->post_qbuffer[index] = cdb_phyaddr_low | ARCMSR_SRBPOST_FLAG_SGL_BSIZE;
			} else {
				phbbmu->post_qbuffer[index] = cdb_phyaddr_low;
			}
			index++;
			index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
			phbbmu->postq_index = index;
			WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
			u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;

			arc_cdb_size = (srb->arc_cdb_size > 0x300) ? 0x300 : srb->arc_cdb_size;
			/* low address | encoded frame size | bit0 address-mode flag */
			ccb_post_stamp = (cdb_phyaddr_low | ((arc_cdb_size-1) >> 6) | 1);
			cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
			if(cdb_phyaddr_hi32)
			{
				/* write high dword first; the low write triggers the post */
				CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32);
				CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
			}
			else
			{
				CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
			}
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
			struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
			u_int16_t index_stripped;
			u_int16_t postq_index;
			struct InBound_SRB *pinbound_srb;

			/* the inbound list is shared; serialize the index update */
			ARCMSR_LOCK_ACQUIRE(&acb->postDone_lock);
			postq_index = phbdmu->postq_index;
			pinbound_srb = (struct InBound_SRB *)&phbdmu->post_qbuffer[postq_index & 0xFF];
			pinbound_srb->addressHigh = srb->cdb_phyaddr_high;
			pinbound_srb->addressLow = srb->cdb_phyaddr_low;
			pinbound_srb->length = srb->arc_cdb_size >> 2;
			arcmsr_cdb->Context = srb->cdb_phyaddr_low;
			/* bit 0x4000 is a wrap toggle folded into the queue index */
			if (postq_index & 0x4000) {
				index_stripped = postq_index & 0xFF;
				index_stripped += 1;
				index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
				phbdmu->postq_index = index_stripped ? (index_stripped | 0x4000) : index_stripped;
			} else {
				index_stripped = postq_index;
				index_stripped += 1;
				index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
				phbdmu->postq_index = index_stripped ? index_stripped : (index_stripped | 0x4000);
			}
			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inboundlist_write_pointer, postq_index);
			ARCMSR_LOCK_RELEASE(&acb->postDone_lock);
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
			u_int32_t ccb_post_stamp, arc_cdb_size;

			arc_cdb_size = (srb->arc_cdb_size > 0x300) ? 0x300 : srb->arc_cdb_size;
			/* type E posts by SMID, not by physical address */
			ccb_post_stamp = (srb->smid | ((arc_cdb_size-1) >> 6));
			CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_queueport_high, 0);
			CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_queueport_low, ccb_post_stamp);
		}
		break;
	}
}
/*
************************************************************************
** arcmsr_get_iop_rqbuffer: return the IOP->driver message buffer for
** the current message-unit type.
************************************************************************
*/
static struct QBUFFER *arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb)
{
	struct QBUFFER *qbuffer=NULL;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
			struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;

			qbuffer = (struct QBUFFER *)&phbamu->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

			/* type B keeps the rw buffer behind a separate pointer */
			qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
			struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;

			qbuffer = (struct QBUFFER *)&phbcmu->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
			struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;

			/* HBD_MessageUnit0 wraps the hardware unit via its phbdmu member */
			qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
			struct HBE_MessageUnit *phbcmu = (struct HBE_MessageUnit *)acb->pmu;

			qbuffer = (struct QBUFFER *)&phbcmu->message_rbuffer;
		}
		break;
	}
	return(qbuffer);
}
/*
************************************************************************
** arcmsr_get_iop_wqbuffer: return the driver->IOP message buffer for
** the current message-unit type.
************************************************************************
*/
static struct QBUFFER *arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb)
{
	struct QBUFFER *qbuffer = NULL;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
			struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;

			qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

			/* type B keeps the rw buffer behind a separate pointer */
			qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
			struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;

			qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
			struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;

			/* HBD_MessageUnit0 wraps the hardware unit via its phbdmu member */
			qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
			struct HBE_MessageUnit *phbcmu = (struct HBE_MessageUnit *)acb->pmu;

			qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
		}
		break;
	}
	return(qbuffer);
}
/*
**************************************************************************
** arcmsr_iop_message_read: ring the chip-specific doorbell telling the
** IOP that the driver has consumed its inbound message data.
**************************************************************************
*/
static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
			/* let IOP know data has been read */
			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
			/* let IOP know data has been read */
			WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
			/* let IOP know data has been read */
			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
			/* let IOP know data has been read */
			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ);
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
			/* let IOP know data has been read; type E doorbell bits toggle,
			** so XOR into the cached shadow before writing it out */
			acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
			CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell);
		}
		break;
	}
}
/*
**************************************************************************
** arcmsr_iop_message_wrote: ring the chip-specific doorbell telling the
** IOP that new driver->IOP message data is available in the wqbuffer.
**************************************************************************
*/
static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
			/*
			** push inbound doorbell tell iop, driver data write ok
			** and wait reply on next hwinterrupt for next Qbuffer post
			*/
			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
			/*
			** push inbound doorbell tell iop, driver data write ok
			** and wait reply on next hwinterrupt for next Qbuffer post
			*/
			WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
			/*
			** push inbound doorbell tell iop, driver data write ok
			** and wait reply on next hwinterrupt for next Qbuffer post
			*/
			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
			/*
			** push inbound doorbell tell iop, driver data write ok
			** and wait reply on next hwinterrupt for next Qbuffer post
			*/
			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_IN_READY);
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
			/*
			** push inbound doorbell tell iop, driver data write ok
			** and wait reply on
next hwinterrupt for next Qbuffer post 1313 */ 1314 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK; 1315 CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell); 1316 } 1317 break; 1318 } 1319 } 1320 /* 1321 ************************************************************************ 1322 ************************************************************************ 1323 */ 1324 static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb) 1325 { 1326 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 1327 CHIP_REG_WRITE32(HBA_MessageUnit, 1328 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); 1329 if(!arcmsr_hba_wait_msgint_ready(acb)) { 1330 kprintf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n" 1331 , acb->pci_unit); 1332 } 1333 } 1334 /* 1335 ************************************************************************ 1336 ************************************************************************ 1337 */ 1338 static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb) 1339 { 1340 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; 1341 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 1342 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB); 1343 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 1344 kprintf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n" 1345 , acb->pci_unit); 1346 } 1347 } 1348 /* 1349 ************************************************************************ 1350 ************************************************************************ 1351 */ 1352 static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb) 1353 { 1354 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 1355 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); 1356 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); 1357 if(!arcmsr_hbc_wait_msgint_ready(acb)) { 1358 kprintf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n", 
acb->pci_unit); 1359 } 1360 } 1361 /* 1362 ************************************************************************ 1363 ************************************************************************ 1364 */ 1365 static void arcmsr_stop_hbd_bgrb(struct AdapterControlBlock *acb) 1366 { 1367 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 1368 CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); 1369 if(!arcmsr_hbd_wait_msgint_ready(acb)) { 1370 kprintf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n", acb->pci_unit); 1371 } 1372 } 1373 /* 1374 ************************************************************************ 1375 ************************************************************************ 1376 */ 1377 static void arcmsr_stop_hbe_bgrb(struct AdapterControlBlock *acb) 1378 { 1379 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 1380 CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); 1381 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; 1382 CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell); 1383 if(!arcmsr_hbe_wait_msgint_ready(acb)) { 1384 kprintf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n", acb->pci_unit); 1385 } 1386 } 1387 /* 1388 ************************************************************************ 1389 ************************************************************************ 1390 */ 1391 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) 1392 { 1393 switch (acb->adapter_type) { 1394 case ACB_ADAPTER_TYPE_A: { 1395 arcmsr_stop_hba_bgrb(acb); 1396 } 1397 break; 1398 case ACB_ADAPTER_TYPE_B: { 1399 arcmsr_stop_hbb_bgrb(acb); 1400 } 1401 break; 1402 case ACB_ADAPTER_TYPE_C: { 1403 arcmsr_stop_hbc_bgrb(acb); 1404 } 1405 break; 1406 case ACB_ADAPTER_TYPE_D: { 1407 arcmsr_stop_hbd_bgrb(acb); 1408 } 1409 break; 1410 case ACB_ADAPTER_TYPE_E: { 1411 arcmsr_stop_hbe_bgrb(acb); 1412 } 1413 break; 1414 } 1415 } 1416 /* 1417 
************************************************************************
************************************************************************
*/
/*
** arcmsr_poll: CAM polled-mode callback — run the interrupt service
** routine, taking the ISR lock only if this thread does not already
** hold it (lockstatus() != 0 means curthread owns isr_lock).
*/
static void arcmsr_poll(struct cam_sim *psim)
{
	struct AdapterControlBlock *acb;
	int mutex;

	acb = (struct AdapterControlBlock *)cam_sim_softc(psim);
	mutex = lockstatus(&acb->isr_lock, curthread);
	if( mutex == 0 )
		ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
	arcmsr_interrupt(acb);
	if( mutex == 0 )
		ARCMSR_LOCK_RELEASE(&acb->isr_lock);
}
/*
**************************************************************************
** arcmsr_Read_iop_rqbuffer_data_D: copy the IOP's inbound message bytes
** into the driver's rqbuffer ring, staging through a kmalloc'd bounce
** buffer so the message-unit memory is only read 32 bits at a time.
** Returns 1 on success, 0 if the bounce buffer allocation failed (the
** caller then sets ACB_F_IOPDATA_OVERFLOW and retries later).
**************************************************************************
*/
static u_int32_t arcmsr_Read_iop_rqbuffer_data_D(struct AdapterControlBlock *acb,
	struct QBUFFER *prbuffer) {

	u_int8_t *pQbuffer;
	u_int8_t *buf1 = NULL;
	u_int32_t *iop_data, *buf2 = NULL;
	u_int32_t iop_len, data_len;

	iop_data = (u_int32_t *)prbuffer->data;
	iop_len = (u_int32_t)prbuffer->data_len;
	/* NOTE(review): assumes the firmware never reports data_len > 128,
	** otherwise the fixed 128-byte bounce buffer overflows — confirm
	** against the QBUFFER definition */
	if ( iop_len > 0 )
	{
		buf1 = kmalloc(128, M_DEVBUF, M_NOWAIT | M_ZERO);
		buf2 = (u_int32_t *)buf1;
		if( buf1 == NULL)
			return (0);
		data_len = iop_len;
		/* pull whole dwords out of the message unit */
		while(data_len >= 4)
		{
			*buf2++ = *iop_data++;
			data_len -= 4;
		}
		/* trailing 1-3 bytes: read one final full dword */
		if(data_len)
			*buf2 = *iop_data;
		buf2 = (u_int32_t *)buf1;	/* remember buffer start for kfree */
	}
	/* byte-copy the staged data into the circular rqbuffer */
	while (iop_len > 0) {
		pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex];
		*pQbuffer = *buf1;
		acb->rqbuf_lastindex++;
		/* if last, index number set it to 0 */
		acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
		buf1++;
		iop_len--;
	}
	if(buf2)
		kfree( (u_int8_t *)buf2, M_DEVBUF);
	/* let IOP know data has been read */
	arcmsr_iop_message_read(acb);
	return (1);
}
/*
**************************************************************************
**************************************************************************
*/
/*
** arcmsr_Read_iop_rqbuffer_data: byte-copy the IOP message bytes into
** the rqbuffer ring.  Type A message memory is byte-addressable; any
** newer unit type is delegated to the dword-access variant above.
** Returns 1 on success, 0 on failure (delegate's allocation failed).
*/
static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
	struct QBUFFER *prbuffer) {

	u_int8_t *pQbuffer;
	u_int8_t *iop_data;
	u_int32_t iop_len;

	if(acb->adapter_type >= ACB_ADAPTER_TYPE_B) {
		return(arcmsr_Read_iop_rqbuffer_data_D(acb, prbuffer));
	}
	iop_data = (u_int8_t *)prbuffer->data;
	iop_len = (u_int32_t)prbuffer->data_len;
	while (iop_len > 0) {
		pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex];
		*pQbuffer = *iop_data;
		acb->rqbuf_lastindex++;
		/* if last, index number set it to 0 */
		acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
		iop_data++;
		iop_len--;
	}
	/* let IOP know data has been read */
	arcmsr_iop_message_read(acb);
	return (1);
}
/*
**************************************************************************
** arcmsr_iop2drv_data_wrote_handle: doorbell handler — the IOP posted
** new message data; ingest it if the rqbuffer ring has room, otherwise
** flag an overflow and leave the data in the IOP until space frees up.
**************************************************************************
*/
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
	struct QBUFFER *prbuffer;
	int my_empty_len;

	/*check this iop data if overflow my rqbuffer*/
	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
	prbuffer = arcmsr_get_iop_rqbuffer(acb);
	/* free space in the power-of-two circular buffer */
	my_empty_len = (acb->rqbuf_lastindex - acb->rqbuf_firstindex - 1) &
		(ARCMSR_MAX_QBUFFER-1);
	if(my_empty_len >= prbuffer->data_len) {
		if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
			acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
	} else {
		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
	}
	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
}
/*
**********************************************************************
** arcmsr_Write_data_2iop_wqbuffer_D: push queued driver->IOP message
** bytes into the IOP write buffer using 32-bit accesses, staging the
** ring bytes through a kmalloc'd bounce buffer first.
**********************************************************************
*/
static void arcmsr_Write_data_2iop_wqbuffer_D(struct AdapterControlBlock *acb)
{
	u_int8_t *pQbuffer;
	struct QBUFFER *pwbuffer;
	u_int8_t *buf1 = NULL;
	u_int32_t *iop_data, *buf2 = NULL;
	u_int32_t allxfer_len = 0, data_len;

	/* only send when the IOP has acknowledged the previous payload */
	if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
		buf1 = kmalloc(128, M_DEVBUF, M_NOWAIT | M_ZERO);
		buf2 = (u_int32_t *)buf1;
		if( buf1 == NULL)
			return;

		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (u_int32_t *)pwbuffer->data;
		/* stage up to 124 bytes from the circular wqbuffer */
		while((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
			&& (allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
			*buf1 = *pQbuffer;
			acb->wqbuf_firstindex++;
			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			buf1++;
			allxfer_len++;
		}
		pwbuffer->data_len = allxfer_len;
		data_len = allxfer_len;
		buf1 = (u_int8_t *)buf2;	/* rewind to buffer start for the dword copy */
		while(data_len >= 4)
		{
			*iop_data++ = *buf2++;
			data_len -= 4;
		}
		/* trailing 1-3 bytes: write one final full dword */
		if(data_len)
			*iop_data = *buf2;
		kfree( buf1, M_DEVBUF);
		arcmsr_iop_message_wrote(acb);
	}
}
/*
**********************************************************************
** arcmsr_Write_data_2iop_wqbuffer: byte-wise variant for type A
** message units; any newer unit type is delegated to the dword-access
** variant above.
**********************************************************************
*/
static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb)
{
	u_int8_t *pQbuffer;
	struct QBUFFER *pwbuffer;
	u_int8_t *iop_data;
	int32_t allxfer_len=0;

	if(acb->adapter_type >= ACB_ADAPTER_TYPE_B) {
		arcmsr_Write_data_2iop_wqbuffer_D(acb);
		return;
	}
	if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (u_int8_t *)pwbuffer->data;
		/* copy up to 124 bytes from the circular wqbuffer */
		while((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
			&& (allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
			*iop_data = *pQbuffer;
			acb->wqbuf_firstindex++;
			acb->wqbuf_firstindex %=
				ARCMSR_MAX_QBUFFER;
			iop_data++;
			allxfer_len++;
		}
		pwbuffer->data_len = allxfer_len;
		arcmsr_iop_message_wrote(acb);
	}
}
/*
**************************************************************************
** arcmsr_iop2drv_data_read_handle: doorbell handler — the IOP consumed
** the last wqbuffer payload; push any bytes queued since, and mark the
** buffer cleared once the queue drains.
**************************************************************************
*/
static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
{
	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
	/*
	*****************************************************************
	** check if there are any mail packages from user space program
	** in my post bag, now is the time to send them into Areca's firmware
	*****************************************************************
	*/
	if(acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
		arcmsr_Write_data_2iop_wqbuffer(acb);
	}
	if(acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
	}
	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
}
/*
**************************************************************************
** arcmsr_rescanLun_cb: completion callback for the async LUN rescan;
** releases the path and ccb allocated by arcmsr_rescan_lun().
**************************************************************************
*/
static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb)
{
/*
	if (ccb->ccb_h.status != CAM_REQ_CMP)
		kprintf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x,"
			"failure status=%x\n", ccb->ccb_h.target_id,
			ccb->ccb_h.target_lun, ccb->ccb_h.status);
	else
		kprintf("arcmsr_rescanLun_cb: Rescan lun successfully!\n");
*/
	xpt_free_path(ccb->ccb_h.path);
	xpt_free_ccb(ccb);
}

/*
** arcmsr_rescan_lun: queue an asynchronous XPT_SCAN_LUN for one
** target/lun so CAM re-probes it after a hot plug/unplug event.
** Silently bails out if the ccb or path cannot be allocated.
*/
static void arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun)
{
	struct cam_path *path;
	union ccb *ccb;

	if ((ccb = (union ccb *)xpt_alloc_ccb()) == NULL)
		return;
	if (xpt_create_path(&path, xpt_periph, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP)
	{
		xpt_free_ccb(ccb);
		return;
	}
/*	kprintf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */
	bzero(ccb, sizeof(union ccb));
	xpt_setup_ccb(&ccb->ccb_h, path, 5);
	ccb->ccb_h.func_code = XPT_SCAN_LUN;
	ccb->ccb_h.cbfcnp = arcmsr_rescanLun_cb;	/* callback frees path + ccb */
	ccb->crcn.flags = CAM_FLAG_NONE;
	xpt_action(ccb);
}


/*
** arcmsr_abort_dr_ccbs: complete (CAM_REQ_ABORTED) every started SRB
** addressed to the given target/lun, with outbound interrupts masked
** for the duration of the sweep.
*/
static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun)
{
	struct CommandControlBlock *srb;
	u_int32_t intmask_org;
	int i;

	/* disable all outbound interrupts */
	intmask_org = arcmsr_disable_allintr(acb);
	for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++)
	{
		srb = acb->psrb_pool[i];
		if (srb->srb_state == ARCMSR_SRB_START)
		{
			if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun))
			{
				srb->srb_state = ARCMSR_SRB_ABORTED;
				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
				arcmsr_srb_complete(srb, 1);
				kprintf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb);
			}
		}
	}
	/* enable outbound Post Queue, outbound doorbell Interrupt */
	arcmsr_enable_allintr(acb, intmask_org);
}
/*
**************************************************************************
** arcmsr_dr_handle: device-rescan handler — read the firmware device
** map (4 dwords) from the message unit, diff it against the cached
** copy, and abort/rescan the LUNs that disappeared or appeared.
**************************************************************************
*/
static void arcmsr_dr_handle(struct AdapterControlBlock *acb) {
	u_int32_t devicemap;
	u_int32_t target, lun;
	u_int32_t deviceMapCurrent[4]={0};
	u_int8_t *pDevMap;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
		for (target = 0; target < 4; target++)
		{
			deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
			devicemap += 4;
		}
		break;

	case ACB_ADAPTER_TYPE_B:
		/* type B exposes the rwbuffer through the second BAR (btag[1]) */
		devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
		for (target = 0; target < 4; target++)
		{
			deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap);
			devicemap += 4;
		}
		break;

	case ACB_ADAPTER_TYPE_C:
		devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
		for (target = 0; target < 4; target++)
		{
			deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
			devicemap += 4;
		}
		break;
	case ACB_ADAPTER_TYPE_D:
		devicemap = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
		for (target = 0; target < 4; target++)
		{
			deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
			devicemap += 4;
		}
		break;
	case ACB_ADAPTER_TYPE_E:
		devicemap = offsetof(struct HBE_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
		for (target = 0; target < 4; target++)
		{
			deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
			devicemap += 4;
		}
		break;
	}

	/* a CONFIG message from the firmware means the bus is alive again */
	if(acb->acb_flags & ACB_F_BUS_HANG_ON)
	{
		acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
	}
	/*
	** adapter posted CONFIG message
	** copy the new map, note if there are differences with the current map
	*/
	pDevMap = (u_int8_t *)&deviceMapCurrent[0];
	for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++)
	{
		if (*pDevMap != acb->device_map[target])
		{
			u_int8_t difference, bit_check;

			/* each set bit in the XOR is a LUN whose presence changed */
			difference = *pDevMap ^ acb->device_map[target];
			for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++)
			{
				bit_check = (1 << lun);	/*check bit from 0....31*/
				if(difference & bit_check)
				{
					if(acb->device_map[target] & bit_check)
					{/* unit departed */
						kprintf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun);
						arcmsr_abort_dr_ccbs(acb, target, lun);
						arcmsr_rescan_lun(acb, target, lun);
						acb->devstate[target][lun] = ARECA_RAID_GONE;
					}
					else
					{/* unit arrived */
						kprintf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n",target,lun);
						arcmsr_rescan_lun(acb, target, lun);
						acb->devstate[target][lun] = ARECA_RAID_GOOD;
					}
				}
			}
/*			kprintf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */
			acb->device_map[target] = *pDevMap;
		}
		pDevMap++;
	}
}
/*
**************************************************************************
** arcmsr_hba_message_isr: type A message interrupt — acknowledge it,
** and run the rescan handler when the firmware posted the GET_CONFIG
** signature in msgcode_rwbuffer[0].
**************************************************************************
*/
static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) {
	u_int32_t outbound_message;

	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
	outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]);
	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
		arcmsr_dr_handle( acb );
}
/*
**************************************************************************
** arcmsr_hbb_message_isr: type B message interrupt — acknowledge via
** the iop2drv doorbell, then dispatch like the type A handler.
**************************************************************************
*/
static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) {
	u_int32_t outbound_message;
	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

	/* clear interrupts */
	WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
	outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]);
	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
		arcmsr_dr_handle( acb );
}
/*
**************************************************************************
**************************************************************************
** HBC (type C / ARC1880) message interrupt: ack via the doorbell-clear
** register, then dispatch GET_CONFIG replies to arcmsr_dr_handle().
**************************************************************************
*/
static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) {
	u_int32_t outbound_message;

	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
	outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]);
	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
		arcmsr_dr_handle( acb );
}
/*
**************************************************************************
** HBD (type D) message interrupt: ack by writing the CMD_DONE clear bit
** to the outbound doorbell, then dispatch GET_CONFIG replies.
**************************************************************************
*/
static void arcmsr_hbd_message_isr(struct AdapterControlBlock *acb) {
	u_int32_t outbound_message;

	CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);
	outbound_message = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[0]);
	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
		arcmsr_dr_handle( acb );
}
/*
**************************************************************************
** HBE (type E) message interrupt: ack by zeroing host_int_status, then
** dispatch GET_CONFIG replies.
**************************************************************************
*/
static void arcmsr_hbe_message_isr(struct AdapterControlBlock *acb) {
	u_int32_t outbound_message;

	CHIP_REG_WRITE32(HBE_MessageUnit, 0, host_int_status, 0);
	outbound_message = CHIP_REG_READ32(HBE_MessageUnit, 0, msgcode_rwbuffer[0]);
	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
		arcmsr_dr_handle( acb );
}
/*
**************************************************************************
** HBA (type A) doorbell interrupt: read-then-write-back clears the
** pending bits, then service the in/out ioctl data paths the firmware
** signalled.
**************************************************************************
*/
static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
{
	u_int32_t doorbell_status;

	/*
	*******************************************************************
	**  Maybe here we need to check wrqbuffer_lock is lock or not
	**  DOORBELL: din! don!
	**  check if there are any mail need to pack from firmware
	*******************************************************************
	*/
	doorbell_status = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
	if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
		arcmsr_iop2drv_data_wrote_handle(acb);
	}
	if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
		arcmsr_iop2drv_data_read_handle(acb);
	}
}
/*
**************************************************************************
** HBC (type C) doorbell interrupt: like the HBA version, but the clear
** goes to a dedicated outbound_doorbell_clear register, and a message
** CMD_DONE bit is also folded into this doorbell.
**************************************************************************
*/
static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
{
	u_int32_t doorbell_status;

	/*
	*******************************************************************
	**  Maybe here we need to check wrqbuffer_lock is lock or not
	**  DOORBELL: din! don!
	**  check if there are any mail need to pack from firmware
	*******************************************************************
	*/
	doorbell_status = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, doorbell_status); /* clear doorbell interrupt */
	if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
		arcmsr_iop2drv_data_wrote_handle(acb);
	}
	if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
		arcmsr_iop2drv_data_read_handle(acb);
	}
	if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
		arcmsr_hbc_message_isr(acb);	/* messenger of "driver to iop commands" */
	}
}
/*
**************************************************************************
** HBD (type D) doorbell interrupt: loop, re-reading the cause register
** after each pass, until no doorbell cause bits remain pending.
**************************************************************************
*/
static void arcmsr_hbd_doorbell_isr(struct AdapterControlBlock *acb)
{
	u_int32_t doorbell_status;

	/*
	*******************************************************************
	**  Maybe here we need to check wrqbuffer_lock is lock or not
	**  DOORBELL: din! don!
	**  check if there are any mail need to pack from firmware
	*******************************************************************
	*/
	doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE;
	if(doorbell_status)
		CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
	/* keep draining until the firmware stops raising cause bits */
	while( doorbell_status & ARCMSR_HBDMU_F0_DOORBELL_CAUSE ) {
		if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_WRITE_OK) {
			arcmsr_iop2drv_data_wrote_handle(acb);
		}
		if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_READ_OK) {
			arcmsr_iop2drv_data_read_handle(acb);
		}
		if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) {
			arcmsr_hbd_message_isr(acb);	/* messenger of "driver to iop commands" */
		}
		doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE;
		if(doorbell_status)
			CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
	}
}
/*
**************************************************************************
** HBE (type E) doorbell interrupt: this chip toggles bits rather than
** setting them, so the pending events are the XOR of the latched copy
** in acb->in_doorbell with the freshly read register value.
**************************************************************************
*/
static void arcmsr_hbe_doorbell_isr(struct AdapterControlBlock *acb)
{
	u_int32_t doorbell_status, in_doorbell;

	/*
	*******************************************************************
	**  Maybe here we need to check wrqbuffer_lock is lock or not
	**  DOORBELL: din! don!
	**  check if there are any mail need to pack from firmware
	*******************************************************************
	*/
	in_doorbell = CHIP_REG_READ32(HBE_MessageUnit, 0, iobound_doorbell);
	CHIP_REG_WRITE32(HBE_MessageUnit, 0, host_int_status, 0); /* clear doorbell interrupt */
	doorbell_status = in_doorbell ^ acb->in_doorbell;	/* changed bits = new events */
	if(doorbell_status & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
		arcmsr_iop2drv_data_wrote_handle(acb);
	}
	if(doorbell_status & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) {
		arcmsr_iop2drv_data_read_handle(acb);
	}
	if(doorbell_status & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
		arcmsr_hbe_message_isr(acb);	/* messenger of "driver to iop commands" */
	}
	acb->in_doorbell = in_doorbell;	/* remember level for the next XOR */
}
/*
**************************************************************************
** HBA (type A) post queue interrupt: pop completed command tokens from
** the outbound queue port until the 0xFFFFFFFF empty marker, handing
** each to arcmsr_drain_donequeue() with its MODE0 error flag.
**************************************************************************
*/
static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
{
	u_int32_t flag_srb;
	u_int16_t error;

	/*
	*****************************************************************************
	**               areca cdb command done
	*****************************************************************************
	*/
	/* make the DMA'd SRB completion data visible to the CPU first */
	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	while((flag_srb = CHIP_REG_READ32(HBA_MessageUnit,
		0, outbound_queueport)) != 0xFFFFFFFF) {
		/* check if command done with no error*/
		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ?
			TRUE : FALSE;
		arcmsr_drain_donequeue(acb, flag_srb, error);
	}	/*drain reply FIFO*/
}
/*
**************************************************************************
** HBB (type B) post queue interrupt: completions arrive in a circular
** done_qbuffer in host memory; consume entries (zeroing each slot)
** until an empty (0) slot is hit.
**************************************************************************
*/
static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
{
	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
	u_int32_t flag_srb;
	int index;
	u_int16_t error;

	/*
	*****************************************************************************
	**               areca cdb command done
	*****************************************************************************
	*/
	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	index = phbbmu->doneq_index;
	while((flag_srb = phbbmu->done_qbuffer[index]) != 0) {
		phbbmu->done_qbuffer[index] = 0;
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;	/*if last index number set it to 0 */
		phbbmu->doneq_index = index;
		/* check if command done with no error*/
		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
		arcmsr_drain_donequeue(acb, flag_srb, error);
	}	/*drain reply FIFO*/
}
/*
**************************************************************************
** HBC (type C) post queue interrupt: drain the low queue port, nudging
** the firmware every ARCMSR_HBC_ISR_THROTTLING_LEVEL completions via
** the inbound doorbell, while the post-queue ISR bit stays asserted.
**************************************************************************
*/
static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
{
	u_int32_t flag_srb,throttling = 0;
	u_int16_t error;

	/*
	*****************************************************************************
	**               areca cdb command done
	*****************************************************************************
	*/
	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	do {
		flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
		if (flag_srb == 0xFFFFFFFF)
			break;	/* queue empty marker */
		/* check if command done with no error*/
		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
		arcmsr_drain_donequeue(acb, flag_srb, error);
		throttling++;
		if(throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
			/* tell the firmware we are keeping up, so it keeps posting */
			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
			throttling = 0;
		}
	} while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR);
}
/*
**********************************************************************
** Advance and return the HBD done-queue index.  The index is kept as
** a low byte (ring position, modulo ARCMSR_MAX_HBD_POSTQUEUE) plus a
** 0x4000 wrap-toggle flag that flips each time the ring wraps to 0,
** letting producer and consumer distinguish full from empty.
**********************************************************************
*/
static uint16_t arcmsr_get_doneq_index(struct HBD_MessageUnit0 *phbdmu)
{
	uint16_t doneq_index, index_stripped;

	doneq_index = phbdmu->doneq_index;
	if (doneq_index & 0x4000) {
		index_stripped = doneq_index & 0xFF;
		index_stripped += 1;
		index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
		phbdmu->doneq_index = index_stripped ?
			(index_stripped | 0x4000) : index_stripped;
	} else {
		index_stripped = doneq_index;
		index_stripped += 1;
		index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
		phbdmu->doneq_index = index_stripped ?
			index_stripped : (index_stripped | 0x4000);
	}
	return (phbdmu->doneq_index);
}
/*
**************************************************************************
** HBD (type D) post queue interrupt: slot 0 of done_qbuffer holds the
** firmware's write pointer; consume entries until our read index
** catches up, acknowledging each via outboundlist_read_pointer.
**************************************************************************
*/
static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb)
{
	struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
	u_int32_t outbound_write_pointer;
	u_int32_t addressLow;
	uint16_t doneq_index;
	u_int16_t error;
	/*
	*****************************************************************************
	**               areca cdb command done
	*****************************************************************************
	*/
	if((CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause) &
		ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT) == 0)
		return;	/* not our interrupt */
	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
		BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow;
	doneq_index = phbdmu->doneq_index;
	while ((doneq_index & 0xFF) != (outbound_write_pointer & 0xFF)) {
		doneq_index = arcmsr_get_doneq_index(phbdmu);
		addressLow = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow;
		error = (addressLow & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ?
			TRUE : FALSE;
		arcmsr_drain_donequeue(acb, addressLow, error); /*Check if command done with no error */
		CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index);
		outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow;	/* re-read producer */
	}
	CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_interrupt_cause, ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT_CLEAR);
	CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause); /*Dummy ioread32 to force pci flush */
}
/*
**************************************************************************
** HBE (type E) post queue interrupt: completions live in the host-side
** pCompletionQ ring; consume until our index matches the producer index
** register, handing each SMID to arcmsr_drain_donequeue().
**************************************************************************
*/
static void arcmsr_hbe_postqueue_isr(struct AdapterControlBlock *acb)
{
	u_int16_t error;
	uint32_t doneq_index;
	uint16_t cmdSMID;

	/*
	*****************************************************************************
	**               areca cdb command done
	*****************************************************************************
	*/
	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	doneq_index = acb->doneq_index;
	while ((CHIP_REG_READ32(HBE_MessageUnit, 0, reply_post_producer_index) & 0xFFFF) != doneq_index) {
		cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
		error = (acb->pCompletionQ[doneq_index].cmdFlag & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ?
			TRUE : FALSE;
		arcmsr_drain_donequeue(acb, (u_int32_t)cmdSMID, error);
		doneq_index++;
		if (doneq_index >= acb->completionQ_entry)
			doneq_index = 0;	/* wrap the ring */
	}
	acb->doneq_index = doneq_index;
	/* tell the firmware how far we have consumed */
	CHIP_REG_WRITE32(HBE_MessageUnit, 0, reply_post_consumer_index, doneq_index);
}
/*
**********************************************************************
** Top-level ISR for type A adapters: read the masked outbound int
** status, ack it, then fan out to doorbell / post queue / message
** sub-handlers per pending bit.
**********************************************************************
*/
static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb)
{
	u_int32_t outbound_intStatus;
	/*
	*********************************************
	** check outbound intstatus
	*********************************************
	*/
	outbound_intStatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
	if(!outbound_intStatus) {
		/*it must be share irq*/
		return;
	}
	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intStatus); /*clear interrupt*/
	/* MU doorbell interrupts*/
	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
		arcmsr_hba_doorbell_isr(acb);
	}
	/* MU post queue interrupts*/
	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
		arcmsr_hba_postqueue_isr(acb);
	}
	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
		arcmsr_hba_message_isr(acb);
	}
}
/*
**********************************************************************
** Top-level ISR for type B adapters: all events arrive through the
** iop2drv doorbell; ack, issue END_OF_INTERRUPT, then fan out.
**********************************************************************
*/
static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb)
{
	u_int32_t outbound_doorbell;
	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
	/*
	*********************************************
	** check outbound intstatus
	*********************************************
	*/
	outbound_doorbell = READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & acb->outbound_int_enable;
	if(!outbound_doorbell) {
		/*it must be share irq*/
		return;
	}
	WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */
	READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell);	/* flush the write */
	WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
	/* MU ioctl transfer doorbell interrupts*/
	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
		arcmsr_iop2drv_data_wrote_handle(acb);
	}
	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
		arcmsr_iop2drv_data_read_handle(acb);
	}
	/* MU post queue interrupts*/
	if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
		arcmsr_hbb_postqueue_isr(acb);
	}
	if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
		arcmsr_hbb_message_isr(acb);
	}
}
/*
**********************************************************************
** Top-level ISR for type C adapters: loop on host_int_status until
** both doorbell and post-queue causes are quiescent.
**********************************************************************
*/
static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb)
{
	u_int32_t host_interrupt_status;
	/*
	*********************************************
	** check outbound intstatus
	*********************************************
	*/
	host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) &
		(ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
	if(!host_interrupt_status) {
		/*it must be share irq*/
		return;
	}
	do {
		/* MU doorbell interrupts*/
		if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
			arcmsr_hbc_doorbell_isr(acb);
		}
		/* MU post queue interrupts*/
		if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
			arcmsr_hbc_postqueue_isr(acb);
		}
		host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status);
	} while (host_interrupt_status &
		(ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
}
/*
**********************************************************************
** Top-level ISR for type D adapters: mask all outbound interrupts for
** the duration of the handler, fan out, then restore the saved mask.
**********************************************************************
*/
static void arcmsr_handle_hbd_isr( struct AdapterControlBlock *acb)
{
	u_int32_t host_interrupt_status;
	u_int32_t intmask_org;
	/*
	*********************************************
	** check outbound intstatus
	*********************************************
	*/
	host_interrupt_status = CHIP_REG_READ32(HBD_MessageUnit, 0, host_int_status) & acb->outbound_int_enable;
	if(!(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_INT)) {
		/*it must be share irq*/
		return;
	}
	/* disable outbound interrupt */
	intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable)	; /* disable outbound message0 int */
	CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE);
	/* MU doorbell interrupts*/
	if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_DOORBELL_INT) {
		arcmsr_hbd_doorbell_isr(acb);
	}
	/* MU post queue interrupts*/
	if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_POSTQUEUE_INT) {
		arcmsr_hbd_postqueue_isr(acb);
	}
	/* enable all outbound interrupt */
	CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | ARCMSR_HBDMU_ALL_INT_ENABLE);
//	CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable);
}
/*
**********************************************************************
** Top-level ISR for type E adapters: loop like the type C handler
** until doorbell and post-queue causes are quiescent.
**********************************************************************
*/
static void arcmsr_handle_hbe_isr( struct AdapterControlBlock *acb)
{
	u_int32_t host_interrupt_status;
	/*
	*********************************************
	** check outbound intstatus
	*********************************************
	*/
	host_interrupt_status = CHIP_REG_READ32(HBE_MessageUnit, 0, host_int_status) &
		(ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
	if(!host_interrupt_status) {
		/*it must be share irq*/
		return;
	}
	do {
		/* MU doorbell interrupts*/
		if(host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) {
			arcmsr_hbe_doorbell_isr(acb);
		}
		/* MU post queue interrupts*/
		if(host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) {
			arcmsr_hbe_postqueue_isr(acb);
		}
		host_interrupt_status = CHIP_REG_READ32(HBE_MessageUnit, 0, host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
}
/*
******************************************************************************
** Dispatch the interrupt to the adapter-family-specific handler.
******************************************************************************
*/
static void arcmsr_interrupt(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_handle_hba_isr(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_handle_hbb_isr(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_handle_hbc_isr(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		arcmsr_handle_hbd_isr(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
		arcmsr_handle_hbe_isr(acb);
		break;
	default:
		kprintf("arcmsr%d: interrupt service,"
			" unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type);
		break;
	}
}
/*
**********************************************************************
** Registered interrupt entry point; arg is the adapter control block.
**********************************************************************
*/
static void arcmsr_intr_handler(void *arg)
{
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;

	arcmsr_interrupt(acb);
}
/*
******************************************************************************
******************************************************************************
** Periodic (5 s) callout: ask the firmware for its current device map by
** posting a GET_CONFIG message in the adapter-family-specific way; the
** reply arrives through the message ISR and drives arcmsr_dr_handle().
** Re-arms itself unless the adapter is being stopped.
******************************************************************************
*/
static void arcmsr_polling_devmap(void *arg)
{
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
		WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
		}
		break;

	case ACB_ADAPTER_TYPE_C:
		CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
		CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
		break;

	case ACB_ADAPTER_TYPE_D:
		CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
		break;

	case ACB_ADAPTER_TYPE_E:
		CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
		/* type E doorbell bits toggle; flip our latched copy and write it */
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
		CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell);
		break;
	}

	if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)
	{
		callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb);	/* polling per 5 seconds */
	}
}

/*
*******************************************************************************
** Quiesce the adapter: stop background rebuild and flush the cache, with
** outbound interrupts masked while the stop messages are issued.
*******************************************************************************
*/
static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
{
	u_int32_t intmask_org;

	if(acb != NULL) {
		/* stop adapter background rebuild */
		if(acb->acb_flags & ACB_F_MSG_START_BGRB) {
			intmask_org = arcmsr_disable_allintr(acb);
			arcmsr_stop_adapter_bgrb(acb);
			arcmsr_flush_adapter_cache(acb);
			arcmsr_enable_allintr(acb, intmask_org);
		}
	}
}
/*
***********************************************************************
** Service an ARCMSR management ioctl: arg points at a CMD_MESSAGE_FIELD
** whose signature must be "ARCMSR".  Returns ARCMSR_MESSAGE_SUCCESS on
** handled commands, EINVAL on a bad signature/unknown command.  All
** queue-buffer manipulation happens under qbuffer_lock.
************************************************************************
*/
static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
{
	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
	u_int32_t retvalue = EINVAL;

	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) arg;
	if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
		return retvalue;
	}
	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
	switch(ioctl_cmd) {
	case ARCMSR_MESSAGE_READ_RQBUFFER: {
			u_int8_t *pQbuffer;
			u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer;
			u_int32_t allxfer_len=0;

			/* drain up to 1031 bytes of firmware->driver queue data
			** NOTE(review): 1031 presumably relates to the size of
			** messagedatabuffer — confirm against the header. */
			while((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
				&& (allxfer_len < 1031)) {
				/*copy READ QBUFFER to srb*/
				pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
				*ptmpQbuffer = *pQbuffer;
				acb->rqbuf_firstindex++;
				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
				/*if last index number set it to 0 */
				ptmpQbuffer++;
				allxfer_len++;
			}
			/* ring had overflowed: refill from the IOP now that space exists */
			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
				struct QBUFFER *prbuffer;

				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
				prbuffer = arcmsr_get_iop_rqbuffer(acb);
				if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
					acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
			}
			pcmdmessagefld->cmdmessage.Length = allxfer_len;
			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
			retvalue = ARCMSR_MESSAGE_SUCCESS;
		}
		break;
	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
			u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
			u_int8_t *pQbuffer;
			u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer;

			user_len = pcmdmessagefld->cmdmessage.Length;
			/*check if data xfer length of this request will overflow my array qbuffer */
			wqbuf_lastindex = acb->wqbuf_lastindex;
			wqbuf_firstindex = acb->wqbuf_firstindex;
			if(wqbuf_lastindex != wqbuf_firstindex) {
				/* previous data still pending: push it and report error */
				arcmsr_Write_data_2iop_wqbuffer(acb);
				pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR;
			} else {
				my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1) &
					(ARCMSR_MAX_QBUFFER - 1);
				if(my_empty_len >= user_len) {
					while(user_len > 0) {
						/*copy srb data to wqbuffer*/
						pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
						*pQbuffer = *ptmpuserbuffer;
						acb->wqbuf_lastindex++;
						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
						/*if last index number set it to 0 */
						ptmpuserbuffer++;
						user_len--;
					}
					/*post fist Qbuffer*/
					if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
						acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
						arcmsr_Write_data_2iop_wqbuffer(acb);
					}
					pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
				} else {
					pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR;
				}
			}
			retvalue = ARCMSR_MESSAGE_SUCCESS;
		}
		break;
	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
			u_int8_t *pQbuffer = acb->rqbuffer;

			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
				arcmsr_iop_message_read(acb);
				/*signature, let IOP know data has been readed */
			}
			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
			acb->rqbuf_firstindex = 0;
			acb->rqbuf_lastindex = 0;
			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
			retvalue = ARCMSR_MESSAGE_SUCCESS;
		}
		break;
	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
		{
			u_int8_t *pQbuffer = acb->wqbuffer;

			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2495 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 2496 arcmsr_iop_message_read(acb); 2497 /*signature, let IOP know data has been readed */ 2498 } 2499 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ); 2500 acb->wqbuf_firstindex = 0; 2501 acb->wqbuf_lastindex = 0; 2502 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 2503 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 2504 retvalue = ARCMSR_MESSAGE_SUCCESS; 2505 } 2506 break; 2507 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { 2508 u_int8_t *pQbuffer; 2509 2510 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 2511 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 2512 arcmsr_iop_message_read(acb); 2513 /*signature, let IOP know data has been readed */ 2514 } 2515 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED 2516 |ACB_F_MESSAGE_RQBUFFER_CLEARED 2517 |ACB_F_MESSAGE_WQBUFFER_READ); 2518 acb->rqbuf_firstindex = 0; 2519 acb->rqbuf_lastindex = 0; 2520 acb->wqbuf_firstindex = 0; 2521 acb->wqbuf_lastindex = 0; 2522 pQbuffer = acb->rqbuffer; 2523 memset(pQbuffer, 0, sizeof(struct QBUFFER)); 2524 pQbuffer = acb->wqbuffer; 2525 memset(pQbuffer, 0, sizeof(struct QBUFFER)); 2526 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 2527 retvalue = ARCMSR_MESSAGE_SUCCESS; 2528 } 2529 break; 2530 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { 2531 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; 2532 retvalue = ARCMSR_MESSAGE_SUCCESS; 2533 } 2534 break; 2535 case ARCMSR_MESSAGE_SAY_HELLO: { 2536 u_int8_t *hello_string = "Hello! 
I am ARCMSR"; 2537 u_int8_t *puserbuffer = (u_int8_t *)pcmdmessagefld->messagedatabuffer; 2538 2539 if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) { 2540 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR; 2541 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 2542 return ENOIOCTL; 2543 } 2544 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 2545 retvalue = ARCMSR_MESSAGE_SUCCESS; 2546 } 2547 break; 2548 case ARCMSR_MESSAGE_SAY_GOODBYE: { 2549 arcmsr_iop_parking(acb); 2550 retvalue = ARCMSR_MESSAGE_SUCCESS; 2551 } 2552 break; 2553 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: { 2554 arcmsr_flush_adapter_cache(acb); 2555 retvalue = ARCMSR_MESSAGE_SUCCESS; 2556 } 2557 break; 2558 } 2559 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 2560 return (retvalue); 2561 } 2562 /* 2563 ************************************************************************** 2564 ************************************************************************** 2565 */ 2566 static void arcmsr_free_srb(struct CommandControlBlock *srb) 2567 { 2568 struct AdapterControlBlock *acb; 2569 2570 acb = srb->acb; 2571 ARCMSR_LOCK_ACQUIRE(&acb->srb_lock); 2572 srb->srb_state = ARCMSR_SRB_DONE; 2573 srb->srb_flags = 0; 2574 acb->srbworkingQ[acb->workingsrb_doneindex] = srb; 2575 acb->workingsrb_doneindex++; 2576 acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM; 2577 ARCMSR_LOCK_RELEASE(&acb->srb_lock); 2578 } 2579 /* 2580 ************************************************************************** 2581 ************************************************************************** 2582 */ 2583 static struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb) 2584 { 2585 struct CommandControlBlock *srb = NULL; 2586 u_int32_t workingsrb_startindex, workingsrb_doneindex; 2587 2588 ARCMSR_LOCK_ACQUIRE(&acb->srb_lock); 2589 workingsrb_doneindex = acb->workingsrb_doneindex; 2590 workingsrb_startindex = acb->workingsrb_startindex; 2591 srb = 
acb->srbworkingQ[workingsrb_startindex];
	workingsrb_startindex++;
	workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM;
	/* only consume the slot if the ring was not empty */
	if(workingsrb_doneindex != workingsrb_startindex) {
		acb->workingsrb_startindex = workingsrb_startindex;
	} else {
		srb = NULL;
	}
	ARCMSR_LOCK_RELEASE(&acb->srb_lock);
	return(srb);
}
/*
**************************************************************************
** arcmsr_iop_message_xfer: service the vendor pass-through READ/WRITE
** BUFFER commands sent to the virtual device (see
** arcmsr_handle_virtual_command).  The 4-byte Areca control code is
** decoded from CDB bytes 5-8 and acted on against the driver<->IOP
** message queues.  Returns ARCMSR_MESSAGE_SUCCESS or ARCMSR_MESSAGE_FAIL.
**************************************************************************
*/
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb)
{
	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
	int retvalue = 0, transfer_len = 0;
	char *buffer;
	/* big-endian assembly of the Areca io control code from the CDB */
	u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 |
				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 |
				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 |
				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8];
	/* 4 bytes: Areca io control code */
	if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		buffer = pccb->csio.data_ptr;
		transfer_len = pccb->csio.dxfer_len;
	} else {
		/* scatter/gather buffers are not supported for pass-through */
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
	switch(controlcode) {
	case ARCMSR_MESSAGE_READ_RQBUFFER: {
		u_int8_t *pQbuffer;
		u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer;
		int32_t allxfer_len = 0;

		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
		/* drain up to 1031 bytes from the driver ring into the caller */
		while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
			&& (allxfer_len < 1031)) {
			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
			*ptmpQbuffer = *pQbuffer;
			acb->rqbuf_firstindex++;
			acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			ptmpQbuffer++;
			allxfer_len++;
		}
		if (acb->acb_flags &
ACB_F_IOPDATA_OVERFLOW) { 2645 struct QBUFFER *prbuffer; 2646 2647 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 2648 prbuffer = arcmsr_get_iop_rqbuffer(acb); 2649 if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) 2650 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; 2651 } 2652 pcmdmessagefld->cmdmessage.Length = allxfer_len; 2653 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 2654 retvalue = ARCMSR_MESSAGE_SUCCESS; 2655 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 2656 } 2657 break; 2658 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 2659 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 2660 u_int8_t *pQbuffer; 2661 u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer; 2662 2663 user_len = pcmdmessagefld->cmdmessage.Length; 2664 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); 2665 wqbuf_lastindex = acb->wqbuf_lastindex; 2666 wqbuf_firstindex = acb->wqbuf_firstindex; 2667 if (wqbuf_lastindex != wqbuf_firstindex) { 2668 arcmsr_Write_data_2iop_wqbuffer(acb); 2669 /* has error report sensedata */ 2670 if(pccb->csio.sense_len) { 2671 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); 2672 /* Valid,ErrorCode */ 2673 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; 2674 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ 2675 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; 2676 /* AdditionalSenseLength */ 2677 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; 2678 /* AdditionalSenseCode */ 2679 } 2680 retvalue = ARCMSR_MESSAGE_FAIL; 2681 } else { 2682 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) 2683 &(ARCMSR_MAX_QBUFFER - 1); 2684 if (my_empty_len >= user_len) { 2685 while (user_len > 0) { 2686 pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; 2687 *pQbuffer = *ptmpuserbuffer; 2688 acb->wqbuf_lastindex++; 2689 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 2690 ptmpuserbuffer++; 2691 user_len--; 2692 } 2693 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { 2694 acb->acb_flags &= 2695 ~ACB_F_MESSAGE_WQBUFFER_CLEARED; 2696 
					arcmsr_Write_data_2iop_wqbuffer(acb);
				}
			} else {
				/* not enough room: fail and report sense data */
				/* has error report sensedata */
				if(pccb->csio.sense_len) {
					((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
					/* Valid,ErrorCode */
					((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
					/* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
					((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
					/* AdditionalSenseLength */
					((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
					/* AdditionalSenseCode */
				}
				retvalue = ARCMSR_MESSAGE_FAIL;
			}
		}
		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
	}
	break;
	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
		u_int8_t *pQbuffer = acb->rqbuffer;

		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			/* discard whatever the IOP still has queued for us */
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_firstindex = 0;
		acb->rqbuf_lastindex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		pcmdmessagefld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;
		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
	}
	break;
	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
		u_int8_t *pQbuffer = acb->wqbuffer;

		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |=
		    (ACB_F_MESSAGE_WQBUFFER_CLEARED |
		    ACB_F_MESSAGE_WQBUFFER_READ);
		acb->wqbuf_firstindex = 0;
		acb->wqbuf_lastindex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		pcmdmessagefld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;
		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
	}
	break;
	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
		u_int8_t *pQbuffer;

		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
		if (acb->acb_flags &
ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		/* reset both rings and mark them cleared/readable */
		acb->acb_flags |=
		    (ACB_F_MESSAGE_WQBUFFER_CLEARED
		    | ACB_F_MESSAGE_RQBUFFER_CLEARED
		    | ACB_F_MESSAGE_WQBUFFER_READ);
		acb->rqbuf_firstindex = 0;
		acb->rqbuf_lastindex = 0;
		acb->wqbuf_firstindex = 0;
		acb->wqbuf_lastindex = 0;
		pQbuffer = acb->rqbuffer;
		memset(pQbuffer, 0, sizeof (struct QBUFFER));
		pQbuffer = acb->wqbuffer;
		memset(pQbuffer, 0, sizeof (struct QBUFFER));
		pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
	}
	break;
	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
		pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
	}
	break;
	case ARCMSR_MESSAGE_SAY_HELLO: {
		int8_t *hello_string = "Hello! I am ARCMSR";

		memcpy(pcmdmessagefld->messagedatabuffer, hello_string
			, (int16_t)strlen(hello_string));
		pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
	}
	break;
	case ARCMSR_MESSAGE_SAY_GOODBYE:
		arcmsr_iop_parking(acb);
		break;
	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
		arcmsr_flush_adapter_cache(acb);
		break;
	default:
		retvalue = ARCMSR_MESSAGE_FAIL;
	}
message_out:
	return (retvalue);
}
/*
*********************************************************************
** arcmsr_execute_srb: bus_dmamap_load() callback — validate the mapped
** request, then build and post the SRB to the adapter.
*********************************************************************
*/
static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)srb->acb;
	union ccb *pccb;
	int target, lun;

	pccb = srb->pccb;
	target = pccb->ccb_h.target_id;
	lun = pccb->ccb_h.target_lun;
	acb->pktRequestCount++;
	if(error != 0) {
		/* DMA mapping failed; EFBIG is expected/quiet, others logged */
		if(error != EFBIG) {
			kprintf("arcmsr%d: unexpected error %x"
				" returned from 'bus_dmamap_load' \n"
				, acb->pci_unit, error);
		}
		if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
		}
		arcmsr_srb_complete(srb, 0);
		return;
	}
	if(nseg > ARCMSR_MAX_SG_ENTRIES) {
		pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
		arcmsr_srb_complete(srb, 0);
		return;
	}
	if(acb->acb_flags & ACB_F_BUS_RESET) {
		kprintf("arcmsr%d: bus reset and return busy \n", acb->pci_unit);
		pccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		arcmsr_srb_complete(srb, 0);
		return;
	}
	if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
		u_int8_t block_cmd, cmd;

		/* low CDB nibble 0x8/0xa == READ/WRITE family: fail fast */
		cmd = pccb->csio.cdb_io.cdb_bytes[0];
		block_cmd = cmd & 0x0f;
		if(block_cmd == 0x08 || block_cmd == 0x0a) {
			kprintf("arcmsr%d:block 'read/write' command "
				"with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n"
				, acb->pci_unit, cmd, target, lun);
			pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
			arcmsr_srb_complete(srb, 0);
			return;
		}
	}
	if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		/* CCB already carries a status: unload the map and bail */
		if(nseg != 0) {
			ARCMSR_LOCK_ACQUIRE(&acb->io_lock);
			bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
			ARCMSR_LOCK_RELEASE(&acb->io_lock);
		}
		arcmsr_srb_complete(srb, 0);
		return;
	}
	if(acb->srboutstandingcount >= acb->maxOutstanding) {
		/* adapter full: freeze the SIM queue and ask CAM to requeue */
		if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) == 0)
		{
			xpt_freeze_simq(acb->psim, 1);
			acb->acb_flags |= ACB_F_CAM_DEV_QFRZN;
		}
		pccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		pccb->ccb_h.status |= CAM_REQUEUE_REQ;
		arcmsr_srb_complete(srb, 0);
		return;
	}
	pccb->ccb_h.status |= CAM_SIM_QUEUED;
	arcmsr_build_srb(srb, dm_segs, nseg);
	arcmsr_post_srb(acb, srb);
	if (pccb->ccb_h.timeout != CAM_TIME_INFINITY)
	{
		/* arm the per-command timeout (CCB timeout + driver grace) */
		callout_init_lk(&srb->ccb_callout, &srb->acb->isr_lock);
		callout_reset(&srb->ccb_callout, ((pccb->ccb_h.timeout + (ARCMSR_TIMEOUT_DELAY * 1000)) * hz) / 1000, arcmsr_srb_timeout, srb);
		srb->srb_flags |= SRB_FLAG_TIMER_START;
	}
}
/*
*****************************************************************************************
** arcmsr_seek_cmd2abort: try to abort one outstanding CCB.
** Returns TRUE when the CCB was found and reaped, FALSE otherwise.
*****************************************************************************************
*/
static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb)
{
	struct CommandControlBlock *srb;
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr;
	u_int32_t intmask_org;
	int i = 0;

	acb->num_aborts++;
	/*
	***************************************************************************
	** It is the upper layer do abort command this lock just prior to calling us.
	** First determine if we currently own this command.
	** Start by searching the device queue. If not found
	** at all, and the system wanted us to just abort the
	** command return success.
2901 *************************************************************************** 2902 */ 2903 if(acb->srboutstandingcount != 0) { 2904 /* disable all outbound interrupt */ 2905 intmask_org = arcmsr_disable_allintr(acb); 2906 for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) { 2907 srb = acb->psrb_pool[i]; 2908 if(srb->srb_state == ARCMSR_SRB_START) { 2909 if(srb->pccb == abortccb) { 2910 srb->srb_state = ARCMSR_SRB_ABORTED; 2911 kprintf("arcmsr%d:scsi id=%d lun=%jx abort srb '%p'" 2912 "outstanding command \n" 2913 , acb->pci_unit, abortccb->ccb_h.target_id 2914 , (uintmax_t)abortccb->ccb_h.target_lun, srb); 2915 arcmsr_polling_srbdone(acb, srb); 2916 /* enable outbound Post Queue, outbound doorbell Interrupt */ 2917 arcmsr_enable_allintr(acb, intmask_org); 2918 return (TRUE); 2919 } 2920 } 2921 } 2922 /* enable outbound Post Queue, outbound doorbell Interrupt */ 2923 arcmsr_enable_allintr(acb, intmask_org); 2924 } 2925 return(FALSE); 2926 } 2927 /* 2928 **************************************************************************** 2929 **************************************************************************** 2930 */ 2931 static void arcmsr_bus_reset(struct AdapterControlBlock *acb) 2932 { 2933 int retry = 0; 2934 2935 acb->num_resets++; 2936 acb->acb_flags |= ACB_F_BUS_RESET; 2937 while(acb->srboutstandingcount != 0 && retry < 400) { 2938 arcmsr_interrupt(acb); 2939 UDELAY(25000); 2940 retry++; 2941 } 2942 arcmsr_iop_reset(acb); 2943 acb->acb_flags &= ~ACB_F_BUS_RESET; 2944 } 2945 /* 2946 ************************************************************************** 2947 ************************************************************************** 2948 */ 2949 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, 2950 union ccb *pccb) 2951 { 2952 if (pccb->ccb_h.target_lun) { 2953 pccb->ccb_h.status |= CAM_DEV_NOT_THERE; 2954 xpt_done(pccb); 2955 return; 2956 } 2957 pccb->ccb_h.status |= CAM_REQ_CMP; 2958 switch (pccb->csio.cdb_io.cdb_bytes[0]) { 2959 case 
 INQUIRY: {
		unsigned char inqdata[36];
		char *buffer = pccb->csio.data_ptr;

		/* hand-built standard INQUIRY data for the processor device */
		inqdata[0] = T_PROCESSOR; /* Periph Qualifier & Periph Dev Type */
		inqdata[1] = 0; /* rem media bit & Dev Type Modifier */
		inqdata[2] = 0; /* ISO, ECMA, & ANSI versions */
		inqdata[3] = 0;
		inqdata[4] = 31; /* length of additional data */
		inqdata[5] = 0;
		inqdata[6] = 0;
		inqdata[7] = 0;
		strncpy(&inqdata[8], "Areca ", 8); /* Vendor Identification */
		strncpy(&inqdata[16], "RAID controller ", 16); /* Product Identification */
		strncpy(&inqdata[32], "R001", 4); /* Product Revision */
		memcpy(buffer, inqdata, sizeof(inqdata));
		xpt_done(pccb);
	}
	break;
	case WRITE_BUFFER:
	case READ_BUFFER: {
		/* vendor pass-through; CHECK CONDITION on failure */
		if (arcmsr_iop_message_xfer(acb, pccb)) {
			pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
		}
		xpt_done(pccb);
	}
	break;
	default:
		xpt_done(pccb);
	}
}
/*
*********************************************************************
** arcmsr_action: CAM SIM action entry point — dispatch incoming CCBs.
*********************************************************************
*/
static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
{
	struct AdapterControlBlock *acb;

	acb = (struct AdapterControlBlock *) cam_sim_softc(psim);
	if(acb == NULL) {
		pccb->ccb_h.status |= CAM_REQ_INVALID;
		xpt_done(pccb);
		return;
	}
	switch (pccb->ccb_h.func_code) {
	case XPT_SCSI_IO: {
		struct CommandControlBlock *srb;
		int target = pccb->ccb_h.target_id;

		if(target == 16) {
			/* virtual device for iop message transfer */
			arcmsr_handle_virtual_command(acb, pccb);
			return;
		}
		if((srb = arcmsr_get_freesrb(acb)) == NULL) {
			/* SRB pool exhausted: ask CAM to retry later */
			pccb->ccb_h.status |= CAM_RESRC_UNAVAIL;
			xpt_done(pccb);
			return;
		}
		pccb->ccb_h.arcmsr_ccbsrb_ptr = srb;
		pccb->ccb_h.arcmsr_ccbacb_ptr = acb;
		srb->pccb = pccb;
3023 if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 3024 if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) { 3025 /* Single buffer */ 3026 if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) { 3027 /* Buffer is virtual */ 3028 u_int32_t error; 3029 3030 crit_enter(); 3031 ARCMSR_LOCK_ACQUIRE(&acb->io_lock); 3032 error = bus_dmamap_load(acb->dm_segs_dmat 3033 , srb->dm_segs_dmamap 3034 , pccb->csio.data_ptr 3035 , pccb->csio.dxfer_len 3036 , arcmsr_execute_srb, srb, /*flags*/0); 3037 ARCMSR_LOCK_RELEASE(&acb->io_lock); 3038 if(error == EINPROGRESS) { 3039 xpt_freeze_simq(acb->psim, 1); 3040 pccb->ccb_h.status |= CAM_RELEASE_SIMQ; 3041 } 3042 crit_exit(); 3043 } 3044 else { /* Buffer is physical */ 3045 struct bus_dma_segment seg; 3046 3047 seg.ds_addr = (bus_addr_t)pccb->csio.data_ptr; 3048 seg.ds_len = pccb->csio.dxfer_len; 3049 arcmsr_execute_srb(srb, &seg, 1, 0); 3050 } 3051 } else { 3052 /* Scatter/gather list */ 3053 struct bus_dma_segment *segs; 3054 3055 if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 3056 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) { 3057 pccb->ccb_h.status |= CAM_PROVIDE_FAIL; 3058 xpt_done(pccb); 3059 kfree(srb, M_DEVBUF); 3060 return; 3061 } 3062 segs=(struct bus_dma_segment *)pccb->csio.data_ptr; 3063 arcmsr_execute_srb(srb, segs, pccb->csio.sglist_cnt, 0); 3064 } 3065 } else { 3066 arcmsr_execute_srb(srb, NULL, 0, 0); 3067 } 3068 break; 3069 } 3070 case XPT_TARGET_IO: { 3071 /* target mode not yet support vendor specific commands. 
		*/
		pccb->ccb_h.status |= CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_PATH_INQ: {
		struct ccb_pathinq *cpi = &pccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = ARCMSR_MAX_TARGETID; /* 0-16 */
		cpi->max_lun = ARCMSR_MAX_TARGETLUN; /* 0-7 */
		cpi->initiator_id = ARCMSR_SCSI_INITIATOR_ID; /* 255 */
		cpi->bus_id = cam_sim_bus(psim);
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(psim);
		/* report link speed by adapter generation */
		if(acb->adapter_bus_speed == ACB_BUS_SPEED_12G)
			cpi->base_transfer_speed = 1200000;
		else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
			cpi->base_transfer_speed = 600000;
		else
			cpi->base_transfer_speed = 300000;
		/* SAS-capable controllers report XPORT_SAS, the rest SPI */
		if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
		   (acb->vendor_device_id == PCIDevVenIDARC1884) ||
		   (acb->vendor_device_id == PCIDevVenIDARC1680) ||
		   (acb->vendor_device_id == PCIDevVenIDARC1214))
		{
			cpi->transport = XPORT_SAS;
			cpi->transport_version = 0;
			cpi->protocol_version = SCSI_REV_SPC2;
		}
		else
		{
			cpi->transport = XPORT_SPI;
			cpi->transport_version = 2;
			cpi->protocol_version = SCSI_REV_2;
		}
		cpi->protocol = PROTO_SCSI;
		cpi->ccb_h.status |= CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_ABORT: {
		union ccb *pabort_ccb;

		pabort_ccb = pccb->cab.abort_ccb;
		switch (pabort_ccb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMED_NOTIFY:
		case XPT_CONT_TARGET_IO:
			if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) {
				pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED;
				xpt_done(pabort_ccb);
				pccb->ccb_h.status |= CAM_REQ_CMP;
			} else {
				xpt_print_path(pabort_ccb->ccb_h.path);
				kprintf("Not found\n");
				pccb->ccb_h.status |= CAM_PATH_INVALID;
			}
			break;
		case XPT_SCSI_IO:
			pccb->ccb_h.status |= CAM_UA_ABORT;
			break;
		default:
			pccb->ccb_h.status |= CAM_REQ_INVALID;
			break;
		}
		xpt_done(pccb);
		break;
	}
	case XPT_RESET_BUS:
	case XPT_RESET_DEV: {
		u_int32_t i;

		arcmsr_bus_reset(acb);
		/* settle for 500ms after the reset before completing the CCB */
		for (i=0; i < 500; i++) {
			DELAY(1000);
		}
		pccb->ccb_h.status |= CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_TERM_IO: {
		pccb->ccb_h.status |= CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS: {
		struct ccb_trans_settings *cts;

		if(pccb->ccb_h.target_id == 16) {
			/* no transfer settings for the virtual device */
			pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
			xpt_done(pccb);
			break;
		}
		cts = &pccb->cts;
		{
			struct ccb_trans_settings_scsi *scsi;
			struct ccb_trans_settings_spi *spi;
			struct ccb_trans_settings_sas *sas;

			scsi = &cts->proto_specific.scsi;
			scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			scsi->valid = CTS_SCSI_VALID_TQ;
			cts->protocol = PROTO_SCSI;

			/* same SAS-vs-SPI split as XPT_PATH_INQ above */
			if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
			   (acb->vendor_device_id == PCIDevVenIDARC1884) ||
			   (acb->vendor_device_id == PCIDevVenIDARC1680) ||
			   (acb->vendor_device_id == PCIDevVenIDARC1214))
			{
				cts->protocol_version = SCSI_REV_SPC2;
				cts->transport_version = 0;
				cts->transport = XPORT_SAS;
				sas = &cts->xport_specific.sas;
				sas->valid = CTS_SAS_VALID_SPEED;
				if (acb->adapter_bus_speed == ACB_BUS_SPEED_12G)
					sas->bitrate = 1200000;
				else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
					sas->bitrate = 600000;
				else if(acb->adapter_bus_speed == ACB_BUS_SPEED_3G)
					sas->bitrate = 300000;
			}
			else
			{
				cts->protocol_version = SCSI_REV_2;
				cts->transport_version = 2;
				cts->transport = XPORT_SPI;
				spi =
 &cts->xport_specific.spi;
				spi->flags = CTS_SPI_FLAGS_DISC_ENB;
				if (acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
					spi->sync_period = 1;
				else
					spi->sync_period = 2;
				spi->sync_offset = 32;
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
				spi->valid = CTS_SPI_VALID_DISC
					| CTS_SPI_VALID_SYNC_RATE
					| CTS_SPI_VALID_SYNC_OFFSET
					| CTS_SPI_VALID_BUS_WIDTH;
			}
		}
		pccb->ccb_h.status |= CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS: {
		/* transfer settings are fixed by the adapter firmware */
		pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
		xpt_done(pccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
		if(pccb->ccb_h.target_id == 16) {
			pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
			xpt_done(pccb);
			break;
		}
		cam_calc_geometry(&pccb->ccg, 1);
		xpt_done(pccb);
		break;
	default:
		pccb->ccb_h.status |= CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	}
}
/*
**********************************************************************
** arcmsr_start_hba_bgrb: ask a type-A IOP to start background rebuild.
**********************************************************************
*/
static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
{
	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
	if(!arcmsr_hba_wait_msgint_ready(acb)) {
		kprintf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
	}
}
/*
**********************************************************************
** arcmsr_start_hbb_bgrb: ask a type-B IOP to start background rebuild.
**********************************************************************
*/
static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
{
	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_START_BGRB);
	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
		kprintf( "arcmsr%d: wait
 'start adapter background rebulid' timeout \n", acb->pci_unit);
	}
}
/*
**********************************************************************
** arcmsr_start_hbc_bgrb: ask a type-C IOP to start background rebuild.
**********************************************************************
*/
static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb)
{
	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
		kprintf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
	}
}
/*
**********************************************************************
** arcmsr_start_hbd_bgrb: ask a type-D IOP to start background rebuild.
**********************************************************************
*/
static void arcmsr_start_hbd_bgrb(struct AdapterControlBlock *acb)
{
	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
	if(!arcmsr_hbd_wait_msgint_ready(acb)) {
		kprintf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
	}
}
/*
**********************************************************************
** arcmsr_start_hbe_bgrb: ask a type-E IOP to start background rebuild.
**********************************************************************
*/
static void arcmsr_start_hbe_bgrb(struct AdapterControlBlock *acb)
{
	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
	/* type E signals "message posted" by toggling the doorbell bit */
	acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell);
	if(!arcmsr_hbe_wait_msgint_ready(acb)) {
		kprintf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
	}
}
/*
**********************************************************************
**********************************************************************
*/
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
{
	/* dispatch to the chip-specific "start background rebuild" message */
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_start_hba_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_start_hbb_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_start_hbc_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		arcmsr_start_hbd_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
		arcmsr_start_hbe_bgrb(acb);
		break;
	}
}
/*
**********************************************************************
** arcmsr_polling_hba_srbdone: poll the type-A outbound queue until the
** given SRB (or everything pending) completes; used with interrupts
** masked, e.g. from the abort path.
**********************************************************************
*/
static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
{
	struct CommandControlBlock *srb;
	u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0;
	u_int16_t error;

polling_ccb_retry:
	poll_count++;
	outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus); /*clear interrupt*/
	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	while(1) {
		if((flag_srb = CHIP_REG_READ32(HBA_MessageUnit,
			0, outbound_queueport)) == 0xFFFFFFFF) {
			if(poll_srb_done) {
				break;/*chip FIFO no ccb for completion already*/
			} else {
				UDELAY(25000);
				if ((poll_count > 100) && (poll_srb != NULL)) {
					break;
				}
				goto polling_ccb_retry;
			}
		}
		/* check if command done with no error*/
		srb = (struct CommandControlBlock *)
			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
		error = (flag_srb &
ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
		poll_srb_done = (srb == poll_srb) ? 1:0;
		if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
			if(srb->srb_state == ARCMSR_SRB_ABORTED) {
				kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p'"
					"poll command abort successfully \n"
					, acb->pci_unit
					, srb->pccb->ccb_h.target_id
					, (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
				arcmsr_srb_complete(srb, 1);
				continue;
			}
			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
				"srboutstandingcount=%d \n"
				, acb->pci_unit
				, srb, acb->srboutstandingcount);
			continue;
		}
		arcmsr_report_srb_state(acb, srb, error);
	} /*drain reply FIFO*/
}
/*
**********************************************************************
** arcmsr_polling_hbb_srbdone: type-B variant of the polled completion
** reaper (done queue lives in host memory).
**********************************************************************
*/
static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
{
	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
	struct CommandControlBlock *srb;
	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
	int index;
	u_int16_t error;

polling_ccb_retry:
	poll_count++;
	WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	while(1) {
		index = phbbmu->doneq_index;
		if((flag_srb = phbbmu->done_qbuffer[index]) == 0) {
			if(poll_srb_done) {
				break;/*chip FIFO no ccb for completion already*/
			} else {
				UDELAY(25000);
				if ((poll_count > 100) && (poll_srb != NULL)) {
					break;
				}
				goto polling_ccb_retry;
			}
		}
		phbbmu->done_qbuffer[index] = 0;
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index
number set it to 0 */
		phbbmu->doneq_index = index;
		/* check if command done with no error*/
		srb = (struct CommandControlBlock *)
			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
		poll_srb_done = (srb == poll_srb) ? 1:0;
		if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
			if(srb->srb_state == ARCMSR_SRB_ABORTED) {
				kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p'"
					"poll command abort successfully \n"
					, acb->pci_unit
					, srb->pccb->ccb_h.target_id
					, (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
				arcmsr_srb_complete(srb, 1);
				continue;
			}
			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
				"srboutstandingcount=%d \n"
				, acb->pci_unit
				, srb, acb->srboutstandingcount);
			continue;
		}
		arcmsr_report_srb_state(acb, srb, error);
	} /*drain reply FIFO*/
}
/*
**********************************************************************
** arcmsr_polling_hbc_srbdone: type-C variant of the polled completion
** reaper.
**********************************************************************
*/
static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
{
	struct CommandControlBlock *srb;
	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
	u_int16_t error;

polling_ccb_retry:
	poll_count++;
	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	while(1) {
		if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
			if(poll_srb_done) {
				break;/*chip FIFO no ccb for completion already*/
			} else {
				UDELAY(25000);
				if ((poll_count > 100) && (poll_srb != NULL)) {
					break;
				}
				if (acb->srboutstandingcount == 0) {
					break;
				}
				goto polling_ccb_retry;
			}
3471 } 3472 flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); 3473 /* check if command done with no error*/ 3474 srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/ 3475 error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; 3476 if (poll_srb != NULL) 3477 poll_srb_done = (srb == poll_srb) ? 1:0; 3478 if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { 3479 if(srb->srb_state == ARCMSR_SRB_ABORTED) { 3480 kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p'poll command abort successfully \n" 3481 , acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb); 3482 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 3483 arcmsr_srb_complete(srb, 1); 3484 continue; 3485 } 3486 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n" 3487 , acb->pci_unit, srb, acb->srboutstandingcount); 3488 continue; 3489 } 3490 arcmsr_report_srb_state(acb, srb, error); 3491 } /*drain reply FIFO*/ 3492 } 3493 /* 3494 ********************************************************************** 3495 ** 3496 ********************************************************************** 3497 */ 3498 static void arcmsr_polling_hbd_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) 3499 { 3500 struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; 3501 struct CommandControlBlock *srb; 3502 u_int32_t flag_srb, poll_srb_done=0, poll_count=0; 3503 u_int32_t outbound_write_pointer; 3504 u_int16_t error, doneq_index; 3505 3506 polling_ccb_retry: 3507 poll_count++; 3508 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3509 while(1) { 3510 outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow; 3511 doneq_index = phbdmu->doneq_index; 3512 if ((outbound_write_pointer & 0xFF) == (doneq_index & 0xFF)) { 3513 if(poll_srb_done) { 3514 break;/*chip FIFO no ccb for 
completion already*/ 3515 } else { 3516 UDELAY(25000); 3517 if ((poll_count > 100) && (poll_srb != NULL)) { 3518 break; 3519 } 3520 if (acb->srboutstandingcount == 0) { 3521 break; 3522 } 3523 goto polling_ccb_retry; 3524 } 3525 } 3526 doneq_index = arcmsr_get_doneq_index(phbdmu); 3527 flag_srb = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow; 3528 /* check if command done with no error*/ 3529 srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/ 3530 error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE; 3531 CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index); 3532 if (poll_srb != NULL) 3533 poll_srb_done = (srb == poll_srb) ? 1:0; 3534 if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { 3535 if(srb->srb_state == ARCMSR_SRB_ABORTED) { 3536 kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p'poll command abort successfully \n" 3537 , acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb); 3538 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 3539 arcmsr_srb_complete(srb, 1); 3540 continue; 3541 } 3542 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n" 3543 , acb->pci_unit, srb, acb->srboutstandingcount); 3544 continue; 3545 } 3546 arcmsr_report_srb_state(acb, srb, error); 3547 } /*drain reply FIFO*/ 3548 } 3549 /* 3550 ********************************************************************** 3551 ** 3552 ********************************************************************** 3553 */ 3554 static void arcmsr_polling_hbe_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) 3555 { 3556 struct CommandControlBlock *srb; 3557 u_int32_t poll_srb_done=0, poll_count=0, doneq_index; 3558 u_int16_t error, cmdSMID; 3559 3560 polling_ccb_retry: 3561 poll_count++; 3562 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3563 
while(1) { 3564 doneq_index = acb->doneq_index; 3565 if((CHIP_REG_READ32(HBE_MessageUnit, 0, reply_post_producer_index) & 0xFFFF) == doneq_index) { 3566 if(poll_srb_done) { 3567 break;/*chip FIFO no ccb for completion already*/ 3568 } else { 3569 UDELAY(25000); 3570 if ((poll_count > 100) && (poll_srb != NULL)) { 3571 break; 3572 } 3573 if (acb->srboutstandingcount == 0) { 3574 break; 3575 } 3576 goto polling_ccb_retry; 3577 } 3578 } 3579 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID; 3580 doneq_index++; 3581 if (doneq_index >= acb->completionQ_entry) 3582 doneq_index = 0; 3583 acb->doneq_index = doneq_index; 3584 srb = acb->psrb_pool[cmdSMID]; 3585 error = (acb->pCompletionQ[doneq_index].cmdFlag & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE; 3586 if (poll_srb != NULL) 3587 poll_srb_done = (srb == poll_srb) ? 1:0; 3588 if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { 3589 if(srb->srb_state == ARCMSR_SRB_ABORTED) { 3590 kprintf("arcmsr%d: scsi id=%d lun=%jx srb='%p'poll command abort successfully \n" 3591 , acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb); 3592 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 3593 arcmsr_srb_complete(srb, 1); 3594 continue; 3595 } 3596 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n" 3597 , acb->pci_unit, srb, acb->srboutstandingcount); 3598 continue; 3599 } 3600 arcmsr_report_srb_state(acb, srb, error); 3601 } /*drain reply FIFO*/ 3602 CHIP_REG_WRITE32(HBE_MessageUnit, 0, reply_post_producer_index, doneq_index); 3603 } 3604 /* 3605 ********************************************************************** 3606 ********************************************************************** 3607 */ 3608 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) 3609 { 3610 switch (acb->adapter_type) { 3611 case ACB_ADAPTER_TYPE_A: { 3612 arcmsr_polling_hba_srbdone(acb, poll_srb); 3613 } 3614 break; 
3615 case ACB_ADAPTER_TYPE_B: { 3616 arcmsr_polling_hbb_srbdone(acb, poll_srb); 3617 } 3618 break; 3619 case ACB_ADAPTER_TYPE_C: { 3620 arcmsr_polling_hbc_srbdone(acb, poll_srb); 3621 } 3622 break; 3623 case ACB_ADAPTER_TYPE_D: { 3624 arcmsr_polling_hbd_srbdone(acb, poll_srb); 3625 } 3626 break; 3627 case ACB_ADAPTER_TYPE_E: { 3628 arcmsr_polling_hbe_srbdone(acb, poll_srb); 3629 } 3630 break; 3631 } 3632 } 3633 /* 3634 ********************************************************************** 3635 ********************************************************************** 3636 */ 3637 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) 3638 { 3639 char *acb_firm_model = acb->firm_model; 3640 char *acb_firm_version = acb->firm_version; 3641 char *acb_device_map = acb->device_map; 3642 size_t iop_firm_model = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ 3643 size_t iop_firm_version = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ 3644 size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); 3645 int i; 3646 3647 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); 3648 if(!arcmsr_hba_wait_msgint_ready(acb)) { 3649 kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); 3650 } 3651 i = 0; 3652 while(i < 8) { 3653 *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); 3654 /* 8 bytes firm_model, 15, 60-67*/ 3655 acb_firm_model++; 3656 i++; 3657 } 3658 i=0; 3659 while(i < 16) { 3660 *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); 3661 /* 16 bytes firm_version, 17, 68-83*/ 3662 acb_firm_version++; 3663 i++; 3664 } 3665 i=0; 3666 while(i < 16) { 3667 *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); 3668 acb_device_map++; 3669 i++; 3670 } 
3671 kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); 3672 acb->firm_request_len = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ 3673 acb->firm_numbers_queue = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ 3674 acb->firm_sdram_size = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ 3675 acb->firm_ide_channels = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ 3676 acb->firm_cfg_version = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ 3677 if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD) 3678 acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1; 3679 else 3680 acb->maxOutstanding = acb->firm_numbers_queue - 1; 3681 } 3682 /* 3683 ********************************************************************** 3684 ********************************************************************** 3685 */ 3686 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) 3687 { 3688 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; 3689 char *acb_firm_model = acb->firm_model; 3690 char *acb_firm_version = acb->firm_version; 3691 char *acb_device_map = acb->device_map; 3692 size_t iop_firm_model = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ 3693 size_t iop_firm_version = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ 3694 size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); 3695 int i; 3696 3697 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG); 3698 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 3699 kprintf( "arcmsr%d: wait" "'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); 3700 } 3701 i = 0; 
3702 while(i < 8) { 3703 *acb_firm_model = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i); 3704 /* 8 bytes firm_model, 15, 60-67*/ 3705 acb_firm_model++; 3706 i++; 3707 } 3708 i = 0; 3709 while(i < 16) { 3710 *acb_firm_version = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i); 3711 /* 16 bytes firm_version, 17, 68-83*/ 3712 acb_firm_version++; 3713 i++; 3714 } 3715 i = 0; 3716 while(i < 16) { 3717 *acb_device_map = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i); 3718 acb_device_map++; 3719 i++; 3720 } 3721 kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); 3722 acb->firm_request_len = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ 3723 acb->firm_numbers_queue = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ 3724 acb->firm_sdram_size = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ 3725 acb->firm_ide_channels = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ 3726 acb->firm_cfg_version = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ 3727 if(acb->firm_numbers_queue > ARCMSR_MAX_HBB_POSTQUEUE) 3728 acb->maxOutstanding = ARCMSR_MAX_HBB_POSTQUEUE - 1; 3729 else 3730 acb->maxOutstanding = acb->firm_numbers_queue - 1; 3731 } 3732 /* 3733 ********************************************************************** 3734 ********************************************************************** 3735 */ 3736 static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb) 3737 { 3738 char *acb_firm_model = acb->firm_model; 3739 char *acb_firm_version = acb->firm_version; 3740 char *acb_device_map = acb->device_map; 3741 size_t iop_firm_model = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ 3742 size_t 
iop_firm_version = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ 3743 size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); 3744 int i; 3745 3746 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); 3747 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); 3748 if(!arcmsr_hbc_wait_msgint_ready(acb)) { 3749 kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); 3750 } 3751 i = 0; 3752 while(i < 8) { 3753 *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); 3754 /* 8 bytes firm_model, 15, 60-67*/ 3755 acb_firm_model++; 3756 i++; 3757 } 3758 i = 0; 3759 while(i < 16) { 3760 *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); 3761 /* 16 bytes firm_version, 17, 68-83*/ 3762 acb_firm_version++; 3763 i++; 3764 } 3765 i = 0; 3766 while(i < 16) { 3767 *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); 3768 acb_device_map++; 3769 i++; 3770 } 3771 kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); 3772 acb->firm_request_len = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ 3773 acb->firm_numbers_queue = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ 3774 acb->firm_sdram_size = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ 3775 acb->firm_ide_channels = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ 3776 acb->firm_cfg_version = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ 3777 if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD) 3778 acb->maxOutstanding = 
ARCMSR_MAX_OUTSTANDING_CMD - 1; 3779 else 3780 acb->maxOutstanding = acb->firm_numbers_queue - 1; 3781 } 3782 /* 3783 ********************************************************************** 3784 ********************************************************************** 3785 */ 3786 static void arcmsr_get_hbd_config(struct AdapterControlBlock *acb) 3787 { 3788 char *acb_firm_model = acb->firm_model; 3789 char *acb_firm_version = acb->firm_version; 3790 char *acb_device_map = acb->device_map; 3791 size_t iop_firm_model = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ 3792 size_t iop_firm_version = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ 3793 size_t iop_device_map = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); 3794 int i; 3795 3796 if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) 3797 CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR); 3798 CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); 3799 if(!arcmsr_hbd_wait_msgint_ready(acb)) { 3800 kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); 3801 } 3802 i = 0; 3803 while(i < 8) { 3804 *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); 3805 /* 8 bytes firm_model, 15, 60-67*/ 3806 acb_firm_model++; 3807 i++; 3808 } 3809 i = 0; 3810 while(i < 16) { 3811 *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); 3812 /* 16 bytes firm_version, 17, 68-83*/ 3813 acb_firm_version++; 3814 i++; 3815 } 3816 i = 0; 3817 while(i < 16) { 3818 *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); 3819 acb_device_map++; 3820 i++; 3821 } 3822 kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, 
acb->firm_model, acb->firm_version); 3823 acb->firm_request_len = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ 3824 acb->firm_numbers_queue = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ 3825 acb->firm_sdram_size = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ 3826 acb->firm_ide_channels = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ 3827 acb->firm_cfg_version = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ 3828 if(acb->firm_numbers_queue > ARCMSR_MAX_HBD_POSTQUEUE) 3829 acb->maxOutstanding = ARCMSR_MAX_HBD_POSTQUEUE - 1; 3830 else 3831 acb->maxOutstanding = acb->firm_numbers_queue - 1; 3832 } 3833 /* 3834 ********************************************************************** 3835 ********************************************************************** 3836 */ 3837 static void arcmsr_get_hbe_config(struct AdapterControlBlock *acb) 3838 { 3839 char *acb_firm_model = acb->firm_model; 3840 char *acb_firm_version = acb->firm_version; 3841 char *acb_device_map = acb->device_map; 3842 size_t iop_firm_model = offsetof(struct HBE_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ 3843 size_t iop_firm_version = offsetof(struct HBE_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ 3844 size_t iop_device_map = offsetof(struct HBE_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); 3845 int i; 3846 3847 CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); 3848 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; 3849 CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell); 3850 if(!arcmsr_hbe_wait_msgint_ready(acb)) { 3851 kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); 3852 } 
3853 3854 i = 0; 3855 while(i < 8) { 3856 *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); 3857 /* 8 bytes firm_model, 15, 60-67*/ 3858 acb_firm_model++; 3859 i++; 3860 } 3861 i = 0; 3862 while(i < 16) { 3863 *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); 3864 /* 16 bytes firm_version, 17, 68-83*/ 3865 acb_firm_version++; 3866 i++; 3867 } 3868 i = 0; 3869 while(i < 16) { 3870 *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); 3871 acb_device_map++; 3872 i++; 3873 } 3874 kprintf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); 3875 acb->firm_request_len = CHIP_REG_READ32(HBE_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ 3876 acb->firm_numbers_queue = CHIP_REG_READ32(HBE_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ 3877 acb->firm_sdram_size = CHIP_REG_READ32(HBE_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ 3878 acb->firm_ide_channels = CHIP_REG_READ32(HBE_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ 3879 acb->firm_cfg_version = CHIP_REG_READ32(HBE_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ 3880 if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD) 3881 acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1; 3882 else 3883 acb->maxOutstanding = acb->firm_numbers_queue - 1; 3884 } 3885 /* 3886 ********************************************************************** 3887 ********************************************************************** 3888 */ 3889 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) 3890 { 3891 switch (acb->adapter_type) { 3892 case ACB_ADAPTER_TYPE_A: { 3893 arcmsr_get_hba_config(acb); 3894 } 3895 break; 3896 case ACB_ADAPTER_TYPE_B: { 3897 arcmsr_get_hbb_config(acb); 3898 } 3899 break; 3900 case ACB_ADAPTER_TYPE_C: { 3901 
arcmsr_get_hbc_config(acb); 3902 } 3903 break; 3904 case ACB_ADAPTER_TYPE_D: { 3905 arcmsr_get_hbd_config(acb); 3906 } 3907 break; 3908 case ACB_ADAPTER_TYPE_E: { 3909 arcmsr_get_hbe_config(acb); 3910 } 3911 break; 3912 } 3913 } 3914 /* 3915 ********************************************************************** 3916 ********************************************************************** 3917 */ 3918 static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb) 3919 { 3920 int timeout=0; 3921 3922 switch (acb->adapter_type) { 3923 case ACB_ADAPTER_TYPE_A: { 3924 while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) 3925 { 3926 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ 3927 { 3928 kprintf( "arcmsr%d:timed out waiting for firmware \n", acb->pci_unit); 3929 return; 3930 } 3931 UDELAY(15000); /* wait 15 milli-seconds */ 3932 } 3933 } 3934 break; 3935 case ACB_ADAPTER_TYPE_B: { 3936 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; 3937 while ((READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0) 3938 { 3939 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ 3940 { 3941 kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit); 3942 return; 3943 } 3944 UDELAY(15000); /* wait 15 milli-seconds */ 3945 } 3946 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); 3947 } 3948 break; 3949 case ACB_ADAPTER_TYPE_C: { 3950 while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0) 3951 { 3952 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ 3953 { 3954 kprintf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit); 3955 return; 3956 } 3957 UDELAY(15000); /* wait 15 milli-seconds */ 3958 } 3959 } 3960 break; 3961 case ACB_ADAPTER_TYPE_D: { 3962 while ((CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBDMU_MESSAGE_FIRMWARE_OK) == 0) 3963 { 3964 if 
(timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ 3965 { 3966 kprintf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit); 3967 return; 3968 } 3969 UDELAY(15000); /* wait 15 milli-seconds */ 3970 } 3971 } 3972 break; 3973 case ACB_ADAPTER_TYPE_E: { 3974 while ((CHIP_REG_READ32(HBE_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0) 3975 { 3976 if (timeout++ > 4000) /* (4000*15)/1000 = 60 sec */ 3977 { 3978 kprintf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit); 3979 return; 3980 } 3981 UDELAY(15000); /* wait 15 milli-seconds */ 3982 } 3983 } 3984 break; 3985 } 3986 } 3987 /* 3988 ********************************************************************** 3989 ********************************************************************** 3990 */ 3991 static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb) 3992 { 3993 u_int32_t outbound_doorbell; 3994 3995 switch (acb->adapter_type) { 3996 case ACB_ADAPTER_TYPE_A: { 3997 /* empty doorbell Qbuffer if door bell ringed */ 3998 outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell); 3999 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */ 4000 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 4001 } 4002 break; 4003 case ACB_ADAPTER_TYPE_B: { 4004 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; 4005 WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN);/*clear interrupt and message state*/ 4006 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK); 4007 /* let IOP know data has been read */ 4008 } 4009 break; 4010 case ACB_ADAPTER_TYPE_C: { 4011 /* empty doorbell Qbuffer if door bell ringed */ 4012 outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell); 4013 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /*clear 
doorbell interrupt */ 4014 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK); 4015 CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell_clear); /* Dummy read to force pci flush */ 4016 CHIP_REG_READ32(HBC_MessageUnit, 0, inbound_doorbell); /* Dummy read to force pci flush */ 4017 } 4018 break; 4019 case ACB_ADAPTER_TYPE_D: { 4020 /* empty doorbell Qbuffer if door bell ringed */ 4021 outbound_doorbell = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell); 4022 CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */ 4023 CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ); 4024 } 4025 break; 4026 case ACB_ADAPTER_TYPE_E: { 4027 /* empty doorbell Qbuffer if door bell ringed */ 4028 acb->in_doorbell = CHIP_REG_READ32(HBE_MessageUnit, 0, iobound_doorbell); 4029 CHIP_REG_WRITE32(HBE_MessageUnit, 0, host_int_status, 0); /*clear doorbell interrupt */ 4030 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK; 4031 CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell); 4032 } 4033 break; 4034 } 4035 } 4036 /* 4037 ************************************************************************ 4038 ************************************************************************ 4039 */ 4040 static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb) 4041 { 4042 unsigned long srb_phyaddr; 4043 u_int32_t srb_phyaddr_hi32; 4044 u_int32_t srb_phyaddr_lo32; 4045 4046 /* 4047 ******************************************************************** 4048 ** here we need to tell iop 331 our freesrb.HighPart 4049 ** if freesrb.HighPart is not zero 4050 ******************************************************************** 4051 */ 4052 srb_phyaddr = (unsigned long) acb->srb_phyaddr.phyaddr; 4053 srb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high; 4054 srb_phyaddr_lo32 = acb->srb_phyaddr.B.phyadd_low; 4055 switch (acb->adapter_type) { 4056 case 
ACB_ADAPTER_TYPE_A: { 4057 if(srb_phyaddr_hi32 != 0) { 4058 CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); 4059 CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); 4060 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); 4061 if(!arcmsr_hba_wait_msgint_ready(acb)) { 4062 kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); 4063 return FALSE; 4064 } 4065 } 4066 } 4067 break; 4068 /* 4069 *********************************************************************** 4070 ** if adapter type B, set window of "post command Q" 4071 *********************************************************************** 4072 */ 4073 case ACB_ADAPTER_TYPE_B: { 4074 u_int32_t post_queue_phyaddr; 4075 struct HBB_MessageUnit *phbbmu; 4076 4077 phbbmu = (struct HBB_MessageUnit *)acb->pmu; 4078 phbbmu->postq_index = 0; 4079 phbbmu->doneq_index = 0; 4080 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW); 4081 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 4082 kprintf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit); 4083 return FALSE; 4084 } 4085 post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE 4086 + offsetof(struct HBB_MessageUnit, post_qbuffer); 4087 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */ 4088 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normal should be zero */ 4089 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ size (256+8)*4 */ 4090 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ size (256+8)*4 */ 4091 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */ 4092 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG); 4093 
if(!arcmsr_hbb_wait_msgint_ready(acb)) { 4094 kprintf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit); 4095 return FALSE; 4096 } 4097 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE); 4098 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 4099 kprintf( "arcmsr%d: 'start diver mode' timeout \n", acb->pci_unit); 4100 return FALSE; 4101 } 4102 } 4103 break; 4104 case ACB_ADAPTER_TYPE_C: { 4105 if(srb_phyaddr_hi32 != 0) { 4106 CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); 4107 CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); 4108 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); 4109 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); 4110 if(!arcmsr_hbc_wait_msgint_ready(acb)) { 4111 kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); 4112 return FALSE; 4113 } 4114 } 4115 } 4116 break; 4117 case ACB_ADAPTER_TYPE_D: { 4118 u_int32_t post_queue_phyaddr, done_queue_phyaddr; 4119 struct HBD_MessageUnit0 *phbdmu; 4120 4121 phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; 4122 phbdmu->postq_index = 0; 4123 phbdmu->doneq_index = 0x40FF; 4124 post_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE 4125 + offsetof(struct HBD_MessageUnit0, post_qbuffer); 4126 done_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE 4127 + offsetof(struct HBD_MessageUnit0, done_qbuffer); 4128 CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */ 4129 CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); 4130 CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ base */ 4131 CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[3], done_queue_phyaddr); /* doneQ base */ 4132 CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[4], 0x100); 4133 
CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); 4134 if(!arcmsr_hbd_wait_msgint_ready(acb)) { 4135 kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); 4136 return FALSE; 4137 } 4138 } 4139 break; 4140 case ACB_ADAPTER_TYPE_E: { 4141 u_int32_t cdb_phyaddr_lo32; 4142 cdb_phyaddr_lo32 = srb_phyaddr_lo32 + offsetof(struct CommandControlBlock, arcmsr_cdb); 4143 CHIP_REG_WRITE32(HBE_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); 4144 CHIP_REG_WRITE32(HBE_MessageUnit, 0, msgcode_rwbuffer[1], ARCMSR_SIGNATURE_1884); 4145 CHIP_REG_WRITE32(HBE_MessageUnit, 0, msgcode_rwbuffer[2], cdb_phyaddr_lo32); 4146 CHIP_REG_WRITE32(HBE_MessageUnit, 0, msgcode_rwbuffer[3], srb_phyaddr_hi32); 4147 CHIP_REG_WRITE32(HBE_MessageUnit, 0, msgcode_rwbuffer[4], SRB_SIZE); 4148 cdb_phyaddr_lo32 = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE; 4149 CHIP_REG_WRITE32(HBE_MessageUnit, 0, msgcode_rwbuffer[5], cdb_phyaddr_lo32); 4150 CHIP_REG_WRITE32(HBE_MessageUnit, 0, msgcode_rwbuffer[6], srb_phyaddr_hi32); 4151 CHIP_REG_WRITE32(HBE_MessageUnit, 0, msgcode_rwbuffer[7], COMPLETION_Q_POOL_SIZE); 4152 CHIP_REG_WRITE32(HBE_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); 4153 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; 4154 CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, acb->out_doorbell); 4155 if(!arcmsr_hbe_wait_msgint_ready(acb)) { 4156 kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); 4157 return FALSE; 4158 } 4159 } 4160 break; 4161 } 4162 return (TRUE); 4163 } 4164 /* 4165 ************************************************************************ 4166 ************************************************************************ 4167 */ 4168 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) 4169 { 4170 if (acb->adapter_type == ACB_ADAPTER_TYPE_B) 4171 { 4172 struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit 
*)acb->pmu; 4173 WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_ACTIVE_EOI_MODE); 4174 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 4175 kprintf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit); 4176 return; 4177 } 4178 } 4179 } 4180 /* 4181 ********************************************************************** 4182 ********************************************************************** 4183 */ 4184 static void arcmsr_iop_init(struct AdapterControlBlock *acb) 4185 { 4186 u_int32_t intmask_org; 4187 4188 /* disable all outbound interrupt */ 4189 intmask_org = arcmsr_disable_allintr(acb); 4190 arcmsr_wait_firmware_ready(acb); 4191 arcmsr_iop_confirm(acb); 4192 arcmsr_get_firmware_spec(acb); 4193 /*start background rebuild*/ 4194 arcmsr_start_adapter_bgrb(acb); 4195 /* empty doorbell Qbuffer if door bell ringed */ 4196 arcmsr_clear_doorbell_queue_buffer(acb); 4197 arcmsr_enable_eoi_mode(acb); 4198 /* enable outbound Post Queue, outbound doorbell Interrupt */ 4199 arcmsr_enable_allintr(acb, intmask_org); 4200 acb->acb_flags |= ACB_F_IOP_INITED; 4201 } 4202 /* 4203 ********************************************************************** 4204 ********************************************************************** 4205 */ 4206 static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 4207 { 4208 struct AdapterControlBlock *acb = arg; 4209 struct CommandControlBlock *srb_tmp; 4210 u_int32_t i; 4211 unsigned long srb_phyaddr = (unsigned long)segs->ds_addr; 4212 4213 acb->srb_phyaddr.phyaddr = srb_phyaddr; 4214 srb_tmp = (struct CommandControlBlock *)acb->uncacheptr; 4215 for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) { 4216 if(bus_dmamap_create(acb->dm_segs_dmat, 4217 /*flags*/0, &srb_tmp->dm_segs_dmamap) != 0) { 4218 acb->acb_flags |= ACB_F_MAPFREESRB_FAILD; 4219 kprintf("arcmsr%d:" 4220 " srb dmamap bus_dmamap_create error\n", acb->pci_unit); 4221 return; 4222 } 4223 if((acb->adapter_type == ACB_ADAPTER_TYPE_C) || 
(acb->adapter_type == ACB_ADAPTER_TYPE_D) 4224 || (acb->adapter_type == ACB_ADAPTER_TYPE_E)) 4225 { 4226 srb_tmp->cdb_phyaddr_low = srb_phyaddr; 4227 srb_tmp->cdb_phyaddr_high = (u_int32_t)((srb_phyaddr >> 16) >> 16); 4228 } 4229 else 4230 srb_tmp->cdb_phyaddr_low = srb_phyaddr >> 5; 4231 srb_tmp->acb = acb; 4232 srb_tmp->smid = i << 16; 4233 acb->srbworkingQ[i] = acb->psrb_pool[i] = srb_tmp; 4234 srb_phyaddr = srb_phyaddr + SRB_SIZE; 4235 srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp + SRB_SIZE); 4236 } 4237 acb->pCompletionQ = (pCompletion_Q)srb_tmp; 4238 acb->vir2phy_offset = (unsigned long)srb_tmp - (unsigned long)srb_phyaddr; 4239 } 4240 /* 4241 ************************************************************************ 4242 ************************************************************************ 4243 */ 4244 static void arcmsr_free_resource(struct AdapterControlBlock *acb) 4245 { 4246 /* remove the control device */ 4247 if(acb->ioctl_dev != NULL) { 4248 destroy_dev(acb->ioctl_dev); 4249 } 4250 bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap); 4251 bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap); 4252 bus_dma_tag_destroy(acb->srb_dmat); 4253 bus_dma_tag_destroy(acb->dm_segs_dmat); 4254 bus_dma_tag_destroy(acb->parent_dmat); 4255 } 4256 /* 4257 ************************************************************************ 4258 ************************************************************************ 4259 */ 4260 static void arcmsr_mutex_init(struct AdapterControlBlock *acb) 4261 { 4262 ARCMSR_LOCK_INIT(&acb->isr_lock, "arcmsr isr lock"); 4263 ARCMSR_LOCK_INIT(&acb->srb_lock, "arcmsr srb lock"); 4264 ARCMSR_LOCK_INIT(&acb->postDone_lock, "arcmsr postQ lock"); 4265 ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr RW buffer lock"); 4266 ARCMSR_LOCK_INIT(&acb->io_lock, "arcmsr io lock"); 4267 ARCMSR_LOCK_INIT(&acb->sim_lock, "arcmsr sim lock"); 4268 } 4269 /* 4270 ************************************************************************ 4271 
************************************************************************
*/
/*
** arcmsr_mutex_destroy - destroy all locks created by
** arcmsr_mutex_init(), in reverse order of creation.
*/
static void arcmsr_mutex_destroy(struct AdapterControlBlock *acb)
{
	ARCMSR_LOCK_DESTROY(&acb->sim_lock);
	ARCMSR_LOCK_DESTROY(&acb->io_lock);
	ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
	ARCMSR_LOCK_DESTROY(&acb->postDone_lock);
	ARCMSR_LOCK_DESTROY(&acb->srb_lock);
	ARCMSR_LOCK_DESTROY(&acb->isr_lock);
}
/*
************************************************************************
************************************************************************
*/
/*
** arcmsr_initialize - per-device hardware setup, called from attach.
**
** Steps:
**   1. Classify the adapter (type A/B/C/D/E) and bus speed from the
**      PCI device/sub-device id; size the coherent pool accordingly.
**   2. Create the parent DMA tag, the S/G-list tag, and the SRB-pool
**      tag; allocate the zeroed coherent SRB pool and load it
**      (arcmsr_map_free_srb fills in per-SRB bus addresses).
**   3. Enable bus mastering / parity response / MWI in PCI_COMMAND.
**   4. Map the memory BAR(s) for the adapter type and point acb->pmu
**      at the message unit (in BAR space for A/C/E, in host memory
**      just past the SRB pool for B/D).
**   5. Initialize the per-target/LUN device-state table and run the
**      IOP start-up handshake.
**
** Returns 0 on success, ENOMEM/ENXIO on failure (resources acquired
** so far are torn down before each error return).
*/
static u_int32_t arcmsr_initialize(device_t dev)
{
	struct AdapterControlBlock *acb = device_get_softc(dev);
	u_int16_t pci_command;
	int i, j,max_coherent_size;
	u_int32_t vendor_dev_id;

	vendor_dev_id = pci_get_devid(dev);
	acb->vendor_device_id = vendor_dev_id;
	acb->sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
	switch (vendor_dev_id) {
	case PCIDevVenIDARC1880:
	case PCIDevVenIDARC1882:
	case PCIDevVenIDARC1213:
	case PCIDevVenIDARC1223: {
			acb->adapter_type = ACB_ADAPTER_TYPE_C;
			/* 1883/1216/1226 sub-ids are the 12G variants of these boards. */
			if ((acb->sub_device_id == ARECA_SUB_DEV_ID_1883) ||
			    (acb->sub_device_id == ARECA_SUB_DEV_ID_1216) ||
			    (acb->sub_device_id == ARECA_SUB_DEV_ID_1226))
				acb->adapter_bus_speed = ACB_BUS_SPEED_12G;
			else
				acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
			max_coherent_size = ARCMSR_SRBS_POOL_SIZE;
		}
		break;
	case PCIDevVenIDARC1884:
		acb->adapter_type = ACB_ADAPTER_TYPE_E;
		acb->adapter_bus_speed = ACB_BUS_SPEED_12G;
		/* Type E keeps its completion queue in host memory after the SRB pool. */
		max_coherent_size = ARCMSR_SRBS_POOL_SIZE + COMPLETION_Q_POOL_SIZE;
		acb->completionQ_entry = COMPLETION_Q_POOL_SIZE / sizeof(struct deliver_completeQ);
		break;
	case PCIDevVenIDARC1214: {
			acb->adapter_type = ACB_ADAPTER_TYPE_D;
			acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
			/* Type D message unit lives in host memory after the SRB pool. */
			max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBD_MessageUnit0));
		}
		break;
	case PCIDevVenIDARC1200:
	case PCIDevVenIDARC1201: {
			acb->adapter_type = ACB_ADAPTER_TYPE_B;
			acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
			max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit));
		}
		break;
	case PCIDevVenIDARC1203: {
			acb->adapter_type = ACB_ADAPTER_TYPE_B;
			acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
			max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit));
		}
		break;
	case PCIDevVenIDARC1110:
	case PCIDevVenIDARC1120:
	case PCIDevVenIDARC1130:
	case PCIDevVenIDARC1160:
	case PCIDevVenIDARC1170:
	case PCIDevVenIDARC1210:
	case PCIDevVenIDARC1220:
	case PCIDevVenIDARC1230:
	case PCIDevVenIDARC1231:
	case PCIDevVenIDARC1260:
	case PCIDevVenIDARC1261:
	case PCIDevVenIDARC1270:
	case PCIDevVenIDARC1280:
	case PCIDevVenIDARC1212:
	case PCIDevVenIDARC1222:
	case PCIDevVenIDARC1380:
	case PCIDevVenIDARC1381:
	case PCIDevVenIDARC1680:
	case PCIDevVenIDARC1681: {
			acb->adapter_type = ACB_ADAPTER_TYPE_A;
			acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
			max_coherent_size = ARCMSR_SRBS_POOL_SIZE;
		}
		break;
	default: {
			/* NOTE(review): ENOMEM for an unknown device id is odd;
			 * ENXIO would be conventional.  Kept as-is. */
			kprintf("arcmsr%d:"
			" unknown RAID adapter type \n", device_get_unit(dev));
			return ENOMEM;
		}
	}
	/* Parent tag: no restrictions; the two child tags below refine it. */
	if(bus_dma_tag_create( /*PCI parent*/ bus_get_dma_tag(dev),
		/*alignment*/ 1,
		/*boundary*/ 0,
		/*lowaddr*/ BUS_SPACE_MAXADDR,
		/*highaddr*/ BUS_SPACE_MAXADDR,
		/*filter*/ NULL,
		/*filterarg*/ NULL,
		/*maxsize*/ BUS_SPACE_MAXSIZE_32BIT,
		/*nsegments*/ BUS_SPACE_UNRESTRICTED,
		/*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
		/*flags*/ 0,
		&acb->parent_dmat) != 0)
	{
		kprintf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
		return ENOMEM;
	}
	/* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
	if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
		/*alignment*/ 1,
		/*boundary*/ 0,
		/*lowaddr*/ BUS_SPACE_MAXADDR,
		/*highaddr*/ BUS_SPACE_MAXADDR,
		/*filter*/ NULL,
		/*filterarg*/ NULL,
		/*maxsize*/ ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM,
		/*nsegments*/ ARCMSR_MAX_SG_ENTRIES,
		/*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
		/*flags*/ 0,
		&acb->dm_segs_dmat) != 0)
	{
		bus_dma_tag_destroy(acb->parent_dmat);
		kprintf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
		return ENOMEM;
	}

	/* DMA tag for our srb structures.... Allocate the freesrb memory.
	 * 32-byte alignment matters: older adapters receive the SRB address
	 * as (addr >> 5) — see arcmsr_map_free_srb().  Single segment,
	 * below 4G, so the pool has one contiguous bus address. */
	if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
		/*alignment*/ 0x20,
		/*boundary*/ 0,
		/*lowaddr*/ BUS_SPACE_MAXADDR_32BIT,
		/*highaddr*/ BUS_SPACE_MAXADDR,
		/*filter*/ NULL,
		/*filterarg*/ NULL,
		/*maxsize*/ max_coherent_size,
		/*nsegments*/ 1,
		/*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
		/*flags*/ 0,
		&acb->srb_dmat) != 0)
	{
		bus_dma_tag_destroy(acb->dm_segs_dmat);
		bus_dma_tag_destroy(acb->parent_dmat);
		kprintf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
		return ENXIO;
	}
	/* Allocation for our srbs */
	if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) {
		bus_dma_tag_destroy(acb->srb_dmat);
		bus_dma_tag_destroy(acb->dm_segs_dmat);
		bus_dma_tag_destroy(acb->parent_dmat);
		kprintf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev));
		return ENXIO;
	}
	/* And permanently map them (callback runs synchronously for coherent memory). */
	if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) {
		bus_dma_tag_destroy(acb->srb_dmat);
		bus_dma_tag_destroy(acb->dm_segs_dmat);
		bus_dma_tag_destroy(acb->parent_dmat);
		kprintf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev));
		return ENXIO;
	}
	pci_command = pci_read_config(dev, PCIR_COMMAND, 2);
	pci_command |= PCIM_CMD_BUSMASTEREN;
	pci_command |= PCIM_CMD_PERRESPEN;
	pci_command |= PCIM_CMD_MWRICEN;
	/* Enable Busmaster */
	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
	/* Map the register BAR(s); which BAR holds the message unit varies by type. */
	switch(acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
			u_int32_t rid0 = PCIR_BAR(0);
			vm_offset_t mem_base0;

			acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE);
			if(acb->sys_res_arcmsr[0] == NULL) {
				arcmsr_free_resource(acb);
				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
				return ENOMEM;
			}
			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
				arcmsr_free_resource(acb);
				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
				return ENXIO;
			}
			mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
			if(mem_base0 == 0) {
				arcmsr_free_resource(acb);
				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
				return ENXIO;
			}
			acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
			acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
			acb->pmu = (struct MessageUnit_UNION *)mem_base0;
			acb->rid[0] = rid0;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
			/* Type B splits the message unit across two BARs:
			 * BAR0 = doorbells, BAR2 = RW buffer. */
			struct HBB_MessageUnit *phbbmu;
			struct CommandControlBlock *freesrb;
			u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) };
			vm_offset_t mem_base[]={0,0};
			u_long size;
			/* NOTE(review): 'size' is computed here but not used below
			 * (bus_alloc_resource_any ignores it) — verify whether a
			 * sized bus_alloc_resource was intended, as on FreeBSD. */
			if (vendor_dev_id == PCIDevVenIDARC1203)
				size = sizeof(struct HBB_DOORBELL_1203);
			else
				size = sizeof(struct HBB_DOORBELL);
			for(i=0; i < 2; i++) {
				acb->sys_res_arcmsr[i] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid[i], RF_ACTIVE);
				if(acb->sys_res_arcmsr[i] == NULL) {
					arcmsr_free_resource(acb);
					kprintf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i);
					return ENOMEM;
				}
				if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) {
					arcmsr_free_resource(acb);
					kprintf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i);
					return ENXIO;
				}
				mem_base[i] = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]);
				if(mem_base[i] == 0) {
					arcmsr_free_resource(acb);
					kprintf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i);
					return ENXIO;
				}
				acb->btag[i] = rman_get_bustag(acb->sys_res_arcmsr[i]);
				acb->bhandle[i] = rman_get_bushandle(acb->sys_res_arcmsr[i]);
			}
			freesrb = (struct CommandControlBlock *)acb->uncacheptr;
			/* Message unit sits in host memory immediately after the SRB pool. */
			acb->pmu = (struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE);
			phbbmu = (struct HBB_MessageUnit *)acb->pmu;
			phbbmu->hbb_doorbell = (struct HBB_DOORBELL *)mem_base[0];
			phbbmu->hbb_rwbuffer = (struct HBB_RWBUFFER *)mem_base[1];
			/* The 1203 uses a different doorbell register layout. */
			if (vendor_dev_id == PCIDevVenIDARC1203) {
				phbbmu->drv2iop_doorbell = offsetof(struct HBB_DOORBELL_1203, drv2iop_doorbell);
				phbbmu->drv2iop_doorbell_mask = offsetof(struct HBB_DOORBELL_1203, drv2iop_doorbell_mask);
				phbbmu->iop2drv_doorbell = offsetof(struct HBB_DOORBELL_1203, iop2drv_doorbell);
				phbbmu->iop2drv_doorbell_mask = offsetof(struct HBB_DOORBELL_1203, iop2drv_doorbell_mask);
			} else {
				phbbmu->drv2iop_doorbell = offsetof(struct HBB_DOORBELL, drv2iop_doorbell);
				phbbmu->drv2iop_doorbell_mask = offsetof(struct HBB_DOORBELL, drv2iop_doorbell_mask);
				phbbmu->iop2drv_doorbell = offsetof(struct HBB_DOORBELL, iop2drv_doorbell);
				phbbmu->iop2drv_doorbell_mask = offsetof(struct HBB_DOORBELL, iop2drv_doorbell_mask);
			}
			acb->rid[0] = rid[0];
			acb->rid[1] = rid[1];
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
			u_int32_t rid0 = PCIR_BAR(1);
			vm_offset_t mem_base0;

			acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE);
			if(acb->sys_res_arcmsr[0] == NULL) {
				arcmsr_free_resource(acb);
				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
				return ENOMEM;
			}
			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
				arcmsr_free_resource(acb);
				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
				return ENXIO;
			}
			mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
			if(mem_base0 == 0) {
				arcmsr_free_resource(acb);
				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
				return ENXIO;
			}
			acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
			acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
			acb->pmu = (struct MessageUnit_UNION *)mem_base0;
			acb->rid[0] = rid0;
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
			struct HBD_MessageUnit0 *phbdmu;
			u_int32_t rid0 = PCIR_BAR(0);
			vm_offset_t mem_base0;

			acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE);
			if(acb->sys_res_arcmsr[0] == NULL) {
				arcmsr_free_resource(acb);
				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
				return ENOMEM;
			}
			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
				arcmsr_free_resource(acb);
				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
				return ENXIO;
			}
			mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
			if(mem_base0 == 0) {
				arcmsr_free_resource(acb);
				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
				return ENXIO;
			}
			acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
			acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
			/* Host-memory message unit after the SRB pool; the embedded
			 * pointer references the BAR-mapped registers. */
			acb->pmu = (struct MessageUnit_UNION *)((unsigned long)acb->uncacheptr+ARCMSR_SRBS_POOL_SIZE);
			phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
			phbdmu->phbdmu = (struct HBD_MessageUnit *)mem_base0;
			acb->rid[0] = rid0;
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
			u_int32_t rid0 = PCIR_BAR(1);
			vm_offset_t mem_base0;

			acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBE_MessageUnit), RF_ACTIVE);
			if(acb->sys_res_arcmsr[0] == NULL) {
				arcmsr_free_resource(acb);
				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
				return ENOMEM;
			}
			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
				arcmsr_free_resource(acb);
				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
				return ENXIO;
			}
			mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
			if(mem_base0 == 0) {
				arcmsr_free_resource(acb);
				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
				return ENXIO;
			}
			acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
			acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
			acb->pmu = (struct MessageUnit_UNION *)mem_base0;
			acb->doneq_index = 0;
			acb->in_doorbell = 0;
			acb->out_doorbell = 0;
			acb->rid[0] = rid0;
			CHIP_REG_WRITE32(HBE_MessageUnit, 0, host_int_status, 0); /*clear interrupt*/
			CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, ARCMSR_HBEMU_DOORBELL_SYNC); /* synchronize doorbell to 0 */
		}
		break;
	}
	/* Set by arcmsr_map_free_srb() if any per-SRB dmamap_create failed. */
	if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) {
		arcmsr_free_resource(acb);
		kprintf("arcmsr%d: map free srb failure!\n", device_get_unit(dev));
		return ENXIO;
	}
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	/*
	********************************************************************
	** init raid volume state
	********************************************************************
	*/
	for(i=0; i < ARCMSR_MAX_TARGETID; i++) {
		for(j=0; j < ARCMSR_MAX_TARGETLUN; j++) {
			acb->devstate[i][j] = ARECA_RAID_GONE;
		}
	}
	arcmsr_iop_init(acb);
	return(0);
}

/*
************************************************************************
************************************************************************
*/
/*
** arcmsr_teardown_intr - disconnect and release interrupt resources.
**
** MSI-X: tear down each vector's handler and release the MSI
** allocation (the IRQ resource release for MSI-X is intentionally
** commented out here).  Legacy/MSI: tear down the single handler,
** release its IRQ resource, and release MSI if that is what was
** allocated.  Idempotent for the non-MSI-X path (pointers are
** NULL-checked and cleared).
*/
static void arcmsr_teardown_intr(device_t dev, struct AdapterControlBlock *acb)
{
	int i;

	if (acb->acb_flags & ACB_F_MSIX_ENABLED) {
		for (i = 0; i < acb->msix_vectors; i++) {
			if (acb->ih[i])
				bus_teardown_intr(dev, acb->irqres[i], acb->ih[i]);
		//	if (acb->irqres[i] != NULL)
		//		bus_release_resource(dev, SYS_RES_IRQ,
		//		    acb->irq_id[i], acb->irqres[i]);

			acb->ih[i] = NULL;
		}
		pci_release_msi(dev);
	} else {
		if ((acb->ih[0] != NULL) && (acb->irqres[0] != NULL))
			bus_teardown_intr(dev, acb->irqres[0], acb->ih[0]);
		if (acb->irqres[0] != NULL)
			bus_release_resource(dev, SYS_RES_IRQ,
			    acb->irq_id[0], acb->irqres[0]);
		if (acb->irq_type == PCI_INTR_TYPE_MSI)
			pci_release_msi(dev);
		acb->ih[0] = NULL;
		acb->irqres[0] = NULL;
		acb->irq_type = 0;
	}

}
/*
************************************************************************
************************************************************************
*/
/*
** arcmsr_attach - newbus attach entry point.
**
** Initializes locks and hardware (arcmsr_initialize), hooks the
** interrupt, registers a CAM SIM/bus, kicks off an async bus scan,
** creates the /dev/arcmsrN control node, and starts the periodic
** device-map polling callout.  On failure, unwinds through the goto
** ladder in reverse acquisition order and returns ENXIO.
*/
static int arcmsr_attach(device_t dev)
{
	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
	u_int32_t unit=device_get_unit(dev);
	union ccb *ccb;
	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
	struct resource	*irqres;
	u_int irq_flags;

	if(acb == NULL) {
		kprintf("arcmsr%d: cannot allocate softc\n", unit);
		return (ENOMEM);
	}
	arcmsr_mutex_init(acb);
	acb->pci_dev = dev;
	acb->pci_unit = unit;
	if(arcmsr_initialize(dev)) {
		kprintf("arcmsr%d: initialize failure!\n", unit);
		goto initialize_failed;
	}
	/* After setting up the adapter, map our interrupt */
	acb->irq_id[0] = 0;
	acb->irq_type = pci_alloc_1intr(dev, arcmsr_msi_enable, &acb->irq_id[0], &irq_flags);
	irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &acb->irq_id[0], irq_flags);
	if(irqres == NULL ) {
		kprintf("arcmsr%d: unable to alloc interrupt resource!\n", unit);
		goto alloc_intr_failed;
	}
	if(bus_setup_intr(dev, irqres, INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih[0], NULL)) {
		kprintf("arcmsr%d: unable to setup interrupt handler!\n", unit);
		goto setup_intr_failed;
	}
	acb->irqres[0] = irqres;
	/*
	 * Now let the CAM generic SCSI layer find the SCSI devices on
	 * the bus *	start queue to reset to the idle loop. *
	 * Create device queue of SIM(s) *	(MAX_START_JOB - 1) :
	 * max_sim_transactions
	 */
	devq = cam_simq_alloc(acb->maxOutstanding);
	if(devq == NULL) {
		kprintf("arcmsr%d: cam_simq_alloc failure!\n", unit);
		goto simq_alloc_failed;
	}
	/* cam_sim_alloc takes its own reference on devq, so ours is
	 * dropped immediately afterwards (DragonFly CAM convention). */
	acb->psim = cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->isr_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
	cam_simq_release(devq);
	if(acb->psim == NULL) {
		kprintf("arcmsr%d: cam_sim_alloc failure!\n", unit);
		goto sim_alloc_failed;
	}
	ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
	if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
		kprintf("arcmsr%d: xpt_bus_register failure!\n", unit);
		goto xpt_bus_failed;
	}
	if ((ccb = xpt_alloc_ccb()) == NULL) {
		kprintf("arcmsr%d: xpt_alloc_ccb failure!\n", unit);
		goto xpt_ccb_failed;
	}
	if(xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		kprintf("arcmsr%d: xpt_create_path failure!\n", unit);
		goto xpt_path_failed;
	}
	/*
	****************************************************
	*/
	/* Fire an asynchronous full-bus rescan; completion is handled
	 * by arcmsr_rescanLun_cb. */
	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, /*priority*/5);
	ccb->ccb_h.func_code = XPT_SCAN_BUS;
	ccb->ccb_h.cbfcnp = arcmsr_rescanLun_cb;
	ccb->crcn.flags = CAM_FLAG_NONE;
	xpt_action(ccb);
	ARCMSR_LOCK_RELEASE(&acb->isr_lock);

	/* Create the control device.  */
	acb->ioctl_dev = make_dev(&arcmsr_ops, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit);
	acb->ioctl_dev->si_drv1 = acb;
	(void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
	arcmsr_callout_init(&acb->devmap_callout);
	callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb);
	return (0);

xpt_path_failed:
	xpt_free_ccb(ccb);
xpt_ccb_failed:
	xpt_bus_deregister(cam_sim_path(acb->psim));
xpt_bus_failed:
	ARCMSR_LOCK_RELEASE(&acb->isr_lock);
	cam_sim_free(acb->psim);
sim_alloc_failed:
	/* NOTE(review): devq was already released right after cam_sim_alloc()
	 * above, so reaching this label (directly or by fall-through from
	 * xpt_bus_failed) releases it a second time — verify against the
	 * DragonFly CAM devq refcounting rules. */
	cam_simq_release(devq);
simq_alloc_failed:
	arcmsr_teardown_intr(dev, acb);
setup_intr_failed:
	arcmsr_free_resource(acb);
	/* NOTE(review): when falling through from simq_alloc_failed,
	 * arcmsr_teardown_intr() has already released irqres[0]; this
	 * bus_release_resource on the same resource looks like a double
	 * release — confirm. */
	bus_release_resource(dev, SYS_RES_IRQ, acb->irq_id[0], irqres);
alloc_intr_failed:
	if (acb->irq_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);
initialize_failed:
	arcmsr_mutex_destroy(acb);
	return ENXIO;
}

/*
************************************************************************
************************************************************************
*/
/*
** arcmsr_probe - newbus probe entry point.
**
** Matches the Areca vendor id, maps the device id to a human-readable
** link-speed string, and sets the device description.  Also disables
** MSI for adapter families known not to support it here.  Returns
** ENXIO for non-Areca or unrecognized devices.
*/
static int arcmsr_probe(device_t dev)
{
	u_int32_t id;
	u_int16_t sub_device_id;
	/* NOTE(review): 'buf' is static — shared across concurrent probes
	 * of multiple adapters; device_set_desc_copy copies it, so this is
	 * only a (small) race window, but worth confirming. */
	static char buf[256];
	char x_type[]={"unknown"};
	char *type;
	int raid6 = 1;

	if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
		return (ENXIO);
	}
	sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
	switch(id = pci_get_devid(dev)) {
	case PCIDevVenIDARC1110:
	case PCIDevVenIDARC1200:
	case PCIDevVenIDARC1201:
	case PCIDevVenIDARC1210:
		raid6 = 0;
		/*FALLTHRU*/
	case PCIDevVenIDARC1120:
	case PCIDevVenIDARC1130:
	case PCIDevVenIDARC1160:
	case PCIDevVenIDARC1170:
	case PCIDevVenIDARC1220:
	case PCIDevVenIDARC1230:
	case PCIDevVenIDARC1231:
	case PCIDevVenIDARC1260:
	case PCIDevVenIDARC1261:
	case PCIDevVenIDARC1270:
	case PCIDevVenIDARC1280:
		type = "SATA 3G";
		break;
	case PCIDevVenIDARC1212:
	case PCIDevVenIDARC1222:
	case PCIDevVenIDARC1380:
	case PCIDevVenIDARC1381:
	case PCIDevVenIDARC1680:
	case PCIDevVenIDARC1681:
		type = "SAS 3G";
		break;
	case PCIDevVenIDARC1880:
	case PCIDevVenIDARC1882:
	case PCIDevVenIDARC1213:
	case PCIDevVenIDARC1223:
		if ((sub_device_id == ARECA_SUB_DEV_ID_1883) ||
		    (sub_device_id == ARECA_SUB_DEV_ID_1216) ||
		    (sub_device_id == ARECA_SUB_DEV_ID_1226))
			type = "SAS 12G";
		else
			type = "SAS 6G";
		arcmsr_msi_enable = 0;
		break;
	case PCIDevVenIDARC1884:
		type = "SAS 12G";
		arcmsr_msi_enable = 0;
		break;
	case PCIDevVenIDARC1214:
		arcmsr_msi_enable = 0;
		/* FALLTHROUGH: the 1214 shares the 1203's "SATA 6G" label. */
	case PCIDevVenIDARC1203:
		type = "SATA 6G";
		break;
	default:
		type = x_type;
		raid6 = 0;
		break;
	}
	/* Pointer comparison is intentional: 'type' aliases x_type only in
	 * the default (unknown device) case. */
	if(type == x_type)
		return(ENXIO);
	ksprintf(buf, "Areca %s Host Adapter RAID Controller %s\n%s\n",
		type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);
	device_set_desc_copy(dev, buf);
	return (BUS_PROBE_DEFAULT);
}
/*
************************************************************************
************************************************************************
*/
/*
** arcmsr_shutdown - quiesce the adapter for system shutdown/detach.
**
** With the ISR lock held: masks interrupts, stops background rebuild,
** flushes the adapter cache, then aborts every outstanding SRB
** (drains the outbound post queue, tells the IOP to abort, and
** completes any SRB still in ARCMSR_SRB_START with CAM_REQ_ABORTED)
** before zeroing the queue bookkeeping.  Always returns 0.
*/
static int arcmsr_shutdown(device_t dev)
{
	u_int32_t  i;
	struct CommandControlBlock *srb;
	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);

	/* stop adapter background rebuild */
	ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
	/* disable all outbound interrupt */
	arcmsr_disable_allintr(acb);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	/* abort all outstanding command */
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;
	if(acb->srboutstandingcount != 0) {
		/*clear and abort all outbound posted Q*/
		arcmsr_done4abort_postqueue(acb);
		/* talk to iop 331 outstanding command aborted*/
		arcmsr_abort_allcmd(acb);
		for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
			srb = acb->psrb_pool[i];
			if(srb->srb_state == ARCMSR_SRB_START) {
				srb->srb_state = ARCMSR_SRB_ABORTED;
				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
				arcmsr_srb_complete(srb, 1);
			}
		}
	}
	acb->srboutstandingcount = 0;
	acb->workingsrb_doneindex = 0;
	acb->workingsrb_startindex = 0;
	acb->pktRequestCount = 0;
	acb->pktReturnCount = 0;
	ARCMSR_LOCK_RELEASE(&acb->isr_lock);
	return (0);
}
/*
************************************************************************
************************************************************************
*/
/*
** arcmsr_detach - newbus detach entry point.
**
** Stops the devmap callout, tears down interrupts, quiesces the
** adapter via arcmsr_shutdown(), then releases DMA resources, memory
** BARs, the CAM bus/SIM, and finally the locks.  Always returns 0.
*/
static int arcmsr_detach(device_t dev)
{
	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
	int i;

	callout_stop(&acb->devmap_callout);
	arcmsr_teardown_intr(dev, acb);
	arcmsr_shutdown(dev);
	arcmsr_free_resource(acb);
	/* Release up to two memory BARs (type B uses two; the loop stops
	 * at the first NULL slot). */
	for(i=0; (i < 2) && (acb->sys_res_arcmsr[i]!=NULL); i++) {
		bus_release_resource(dev, SYS_RES_MEMORY, acb->rid[i], acb->sys_res_arcmsr[i]);
	}
	ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
	xpt_bus_deregister(cam_sim_path(acb->psim));
	cam_sim_free(acb->psim);
	ARCMSR_LOCK_RELEASE(&acb->isr_lock);
	arcmsr_mutex_destroy(acb);
	return (0);
}

#ifdef ARCMSR_DEBUG1
/*
** arcmsr_dump_data - debug-only dump of the command request/return
** counters and the outstanding queue depth; silent when no commands
** are in flight.
*/
static void arcmsr_dump_data(struct AdapterControlBlock *acb)
{
	if((acb->pktRequestCount - acb->pktReturnCount) == 0)
		return;
	kprintf("Command Request Count   =0x%x\n",acb->pktRequestCount);
	kprintf("Command Return Count    =0x%x\n",acb->pktReturnCount);
	kprintf("Command (Req-Rtn) Count =0x%x\n",(acb->pktRequestCount - acb->pktReturnCount));
	kprintf("Queued Command Count    =0x%x\n",acb->srboutstandingcount);
}
#endif