1 /* $FreeBSD: src/sys/dev/asr/asr.c,v 1.3.2.2 2001/08/23 05:21:29 scottl Exp $ */ 2 /* $DragonFly: src/sys/dev/raid/asr/asr.c,v 1.28 2006/12/22 23:26:23 swildner Exp $ */ 3 /* 4 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation 5 * Copyright (c) 2000-2001 Adaptec Corporation 6 * All rights reserved. 7 * 8 * TERMS AND CONDITIONS OF USE 9 * 10 * Redistribution and use in source form, with or without modification, are 11 * permitted provided that redistributions of source code must retain the 12 * above copyright notice, this list of conditions and the following disclaimer. 13 * 14 * This software is provided `as is' by Adaptec and any express or implied 15 * warranties, including, but not limited to, the implied warranties of 16 * merchantability and fitness for a particular purpose, are disclaimed. In no 17 * event shall Adaptec be liable for any direct, indirect, incidental, special, 18 * exemplary or consequential damages (including, but not limited to, 19 * procurement of substitute goods or services; loss of use, data, or profits; 20 * or business interruptions) however caused and on any theory of liability, 21 * whether in contract, strict liability, or tort (including negligence or 22 * otherwise) arising in any way out of the use of this driver software, even 23 * if advised of the possibility of such damage. 24 * 25 * SCSI I2O host adapter driver 26 * 27 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com 28 * - The 2000S and 2005S do not initialize on some machines, 29 * increased timeout to 255ms from 50ms for the StatusGet 30 * command. 31 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com 32 * - I knew this one was too good to be true. The error return 33 * on ioctl commands needs to be compared to CAM_REQ_CMP, not 34 * to the bit masked status. 35 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com 36 * - The 2005S that was supported is affectionately called the 37 * Conjoined BAR Firmware. 
In order to support RAID-5 in a 38 * 16MB low-cost configuration, Firmware was forced to go 39 * to a Split BAR Firmware. This requires a separate IOP and 40 * Messaging base address. 41 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com 42 * - Handle support for 2005S Zero Channel RAID solution. 43 * - System locked up if the Adapter locked up. Do not try 44 * to send other commands if the resetIOP command fails. The 45 * fail outstanding command discovery loop was flawed as the 46 * removal of the command from the list prevented discovering 47 * all the commands. 48 * - Comment changes to clarify driver. 49 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM. 50 * - We do not use the AC_FOUND_DEV event because of I2O. 51 * Removed asr_async. 52 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org, 53 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com. 54 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0 55 * mode as this is confused with competitor adapters in run 56 * mode. 57 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove 58 * to prevent operating system panic. 59 * - moved default major number to 154 from 97. 60 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com 61 * - The controller is not actually an ASR (Adaptec SCSI RAID) 62 * series that is visible, it's more of an internal code name. 63 * remove any visible references within reason for now. 64 * - bus_ptr->LUN was not correctly zeroed when initially 65 * allocated causing a possible panic of the operating system 66 * during boot. 67 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com 68 * - Code always fails for ASR_getTid affecting performance. 69 * - initiated a set of changes that resulted from a formal 70 * code inspection by Mark_Salyzyn@adaptec.com, 71 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com, 72 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com. 
73 * Their findings were focussed on the LCT & TID handler, and 74 * all resulting changes were to improve code readability, 75 * consistency or have a positive effect on performance. 76 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com 77 * - Passthrough returned an incorrect error. 78 * - Passthrough did not migrate the intrinsic scsi layer wakeup 79 * on command completion. 80 * - generate control device nodes using make_dev and delete_dev. 81 * - Performance affected by TID caching reallocing. 82 * - Made suggested changes by Justin_Gibbs@adaptec.com 83 * - use splcam instead of splbio. 84 * - use u_int8_t instead of u_char. 85 * - use u_int16_t instead of u_short. 86 * - use u_int32_t instead of u_long where appropriate. 87 * - use 64 bit context handler instead of 32 bit. 88 * - create_ccb should only allocate the worst case 89 * requirements for the driver since CAM may evolve 90 * making union ccb much larger than needed here. 91 * renamed create_ccb to asr_alloc_ccb. 92 * - go nutz justifying all debug prints as macros 93 * defined at the top and remove unsightly ifdefs. 94 * - INLINE STATIC viewed as confusing. Historically 95 * utilized to affect code performance and debug 96 * issues in OS, Compiler or OEM specific situations. 97 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com 98 * - Ported from FreeBSD 2.2.X DPT I2O driver. 99 * changed struct scsi_xfer to union ccb/struct ccb_hdr 100 * changed variable name xs to ccb 101 * changed struct scsi_link to struct cam_path 102 * changed struct scsibus_data to struct cam_sim 103 * stopped using fordriver for holding on to the TID 104 * use proprietary packet creation instead of scsi_inquire 105 * CAM layer sends synchronize commands. 
106 */ 107 108 #define ASR_VERSION 1 109 #define ASR_REVISION '0' 110 #define ASR_SUBREVISION '8' 111 #define ASR_MONTH 8 112 #define ASR_DAY 21 113 #define ASR_YEAR 2001 - 1980 114 115 /* 116 * Debug macros to reduce the unsightly ifdefs 117 */ 118 #if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD)) 119 # define debug_asr_message(message) \ 120 { \ 121 u_int32_t * pointer = (u_int32_t *)message; \ 122 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\ 123 u_int32_t counter = 0; \ 124 \ 125 while (length--) { \ 126 kprintf ("%08lx%c", (u_long)*(pointer++), \ 127 (((++counter & 7) == 0) || (length == 0)) \ 128 ? '\n' \ 129 : ' '); \ 130 } \ 131 } 132 #endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */ 133 134 #if (defined(DEBUG_ASR)) 135 /* Breaks on none STDC based compilers :-( */ 136 # define debug_asr_printf(fmt,args...) kprintf(fmt, ##args) 137 # define debug_asr_dump_message(message) debug_asr_message(message) 138 # define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path); 139 /* None fatal version of the ASSERT macro */ 140 # if (defined(__STDC__)) 141 # define ASSERT(phrase) if(!(phrase))kprintf(#phrase " at line %d file %s\n",__LINE__,__FILE__) 142 # else 143 # define ASSERT(phrase) if(!(phrase))kprintf("phrase" " at line %d file %s\n",__LINE__,__FILE__) 144 # endif 145 #else /* DEBUG_ASR */ 146 # define debug_asr_printf(fmt,args...) 147 # define debug_asr_dump_message(message) 148 # define debug_asr_print_path(ccb) 149 # define ASSERT(x) 150 #endif /* DEBUG_ASR */ 151 152 /* 153 * If DEBUG_ASR_CMD is defined: 154 * 0 - Display incoming SCSI commands 155 * 1 - add in a quick character before queueing. 156 * 2 - add in outgoing message frames. 157 */ 158 #if (defined(DEBUG_ASR_CMD)) 159 # define debug_asr_cmd_printf(fmt,args...) 
kprintf(fmt,##args) 160 # define debug_asr_dump_ccb(ccb) \ 161 { \ 162 u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io); \ 163 int len = ccb->csio.cdb_len; \ 164 \ 165 while (len) { \ 166 debug_asr_cmd_printf (" %02x", *(cp++)); \ 167 --len; \ 168 } \ 169 } 170 # if (DEBUG_ASR_CMD > 0) 171 # define debug_asr_cmd1_printf debug_asr_cmd_printf 172 # else 173 # define debug_asr_cmd1_printf(fmt,args...) 174 # endif 175 # if (DEBUG_ASR_CMD > 1) 176 # define debug_asr_cmd2_printf debug_asr_cmd_printf 177 # define debug_asr_cmd2_dump_message(message) debug_asr_message(message) 178 # else 179 # define debug_asr_cmd2_printf(fmt,args...) 180 # define debug_asr_cmd2_dump_message(message) 181 # endif 182 #else /* DEBUG_ASR_CMD */ 183 # define debug_asr_cmd_printf(fmt,args...) 184 # define debug_asr_cmd_dump_ccb(ccb) 185 # define debug_asr_cmd1_printf(fmt,args...) 186 # define debug_asr_cmd2_printf(fmt,args...) 187 # define debug_asr_cmd2_dump_message(message) 188 #endif /* DEBUG_ASR_CMD */ 189 190 #if (defined(DEBUG_ASR_USR_CMD)) 191 # define debug_usr_cmd_printf(fmt,args...) kprintf(fmt,##args) 192 # define debug_usr_cmd_dump_message(message) debug_usr_message(message) 193 #else /* DEBUG_ASR_USR_CMD */ 194 # define debug_usr_cmd_printf(fmt,args...) 
195 # define debug_usr_cmd_dump_message(message) 196 #endif /* DEBUG_ASR_USR_CMD */ 197 198 #define dsDescription_size 46 /* Snug as a bug in a rug */ 199 #include "dptsig.h" 200 201 static dpt_sig_S ASR_sig = { 202 { 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL, 203 PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0, 204 OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, 205 ADF_ALL_SC5, 206 0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION, 207 ASR_MONTH, ASR_DAY, ASR_YEAR, 208 /* 01234567890123456789012345678901234567890123456789 < 50 chars */ 209 "Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver" 210 /* ^^^^^ asr_attach alters these to match OS */ 211 }; 212 213 #include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */ 214 #include <sys/kernel.h> 215 #include <sys/systm.h> 216 #include <sys/malloc.h> 217 #include <sys/proc.h> 218 #include <sys/conf.h> 219 #include <sys/disklabel.h> 220 #include <sys/bus.h> 221 #include <sys/rman.h> 222 #include <sys/stat.h> 223 #include <sys/device.h> 224 #include <sys/thread2.h> 225 226 #include <bus/cam/cam.h> 227 #include <bus/cam/cam_ccb.h> 228 #include <bus/cam/cam_sim.h> 229 #include <bus/cam/cam_xpt_sim.h> 230 #include <bus/cam/cam_xpt_periph.h> 231 232 #include <bus/cam/scsi/scsi_all.h> 233 #include <bus/cam/scsi/scsi_message.h> 234 235 #include <vm/vm.h> 236 #include <vm/pmap.h> 237 #include <machine/cputypes.h> 238 #include <machine/clock.h> 239 #include <machine/vmparam.h> 240 241 #include <bus/pci/pcivar.h> 242 #include <bus/pci/pcireg.h> 243 244 #define STATIC static 245 #define INLINE 246 247 #if (defined(DEBUG_ASR) && (DEBUG_ASR > 0)) 248 # undef STATIC 249 # define STATIC 250 # undef INLINE 251 # define INLINE 252 #endif 253 #define IN 254 #define OUT 255 #define INOUT 256 257 #define osdSwap4(x) ((u_long)ntohl((u_long)(x))) 258 #define KVTOPHYS(x) vtophys(x) 259 #include "dptalign.h" 260 #include "i2oexec.h" 261 #include "i2obscsi.h" 262 #include "i2odpt.h" 263 #include "i2oadptr.h" 264 
#include "sys_info.h"

/* Configuration Definitions */

#define	SG_SIZE		 58	/* Scatter Gather list Size		 */
#define	MAX_TARGET_ID	 126	/* Maximum Target ID supported		 */
#define	MAX_LUN		 255	/* Maximum LUN Supported		 */
#define	MAX_CHANNEL	 7	/* Maximum Channel # Supported by driver */
#define	MAX_INBOUND	 2000	/* Max CCBs, Also Max Queue Size	 */
#define	MAX_OUTBOUND	 256	/* Maximum outbound frames/adapter	 */
#define	MAX_INBOUND_SIZE 512	/* Maximum inbound frame size		 */
#define	MAX_MAP		 4194304L /* Maximum mapping size of IOP	 */
				/* Also serves as the minimum map for	 */
				/* the 2005S zero channel RAID product	 */

/**************************************************************************
** ASR Host Adapter structure - One Structure For Each Host Adapter That **
** Is Configured Into The System.  The Structure Supplies Configuration  **
** Information, Status Info, Queue Info And An Active CCB List Pointer.  **
***************************************************************************/

/*
 * I2O register set.  Memory-mapped hardware layout: offsets must not be
 * changed.  Status/Mask/FIFO members are volatile because the adapter
 * updates them asynchronously.
 */
typedef struct {
	U8	     Address[0x30];
	volatile U32 Status;
	volatile U32 Mask;	/* interrupt mask register */
#	define Mask_InterruptsDisabled 0x08
	U32	     x[2];	/* reserved/unused register space */
	volatile U32 ToFIFO;	/* In Bound FIFO */
	volatile U32 FromFIFO;	/* Out Bound FIFO */
} i2oRegs_t;

/*
 * A MIX of performance and space considerations for TID lookups
 */
typedef u_int16_t tid_t;

/*
 * Growable lookup tables: per-target LUN->TID and per-bus target->LUN
 * maps.  The [1] tails are over-allocated at kmalloc time; `size' holds
 * the usable element count (see ASR_getTidAddress).
 */
typedef struct {
	u_int32_t size;		/* up to MAX_LUN    */
	tid_t	  TID[1];
} lun2tid_t;

typedef struct {
	u_int32_t   size;	/* up to MAX_TARGET */
	lun2tid_t * LUN[1];
} target2lun_t;

/*
 *	To ensure that we only allocate and use the worst case ccb here, lets
 *	make our own local ccb union. If asr_alloc_ccb is utilized for another
 *	ccb type, ensure that you add the additional structures into our local
 *	ccb union. To ensure strict type checking, we will utilize the local
 *	ccb definition wherever possible.
 */
union asr_ccb {
	struct ccb_hdr	    ccb_h;  /* For convenience */
	struct ccb_scsiio   csio;
	struct ccb_setasync csa;
};

/* Per-adapter soft state; one instance per configured HBA. */
typedef struct Asr_softc {
	u_int16_t		ha_irq;
	void		      * ha_Base;       /* base port for each board */
	u_int8_t * volatile	ha_blinkLED;   /* mapped blink-LED bytes, or NULL */
	i2oRegs_t	      * ha_Virt;       /* Base address of IOP */
	U8		      * ha_Fvirt;      /* Base address of Frames */
	I2O_IOP_ENTRY		ha_SystemTable;
	LIST_HEAD(,ccb_hdr)	ha_ccb;	       /* ccbs in use		   */
	struct cam_path	      * ha_path[MAX_CHANNEL+1];
	struct cam_sim	      * ha_sim[MAX_CHANNEL+1];
	struct resource	      * ha_mem_res;
	struct resource	      * ha_mes_res;
	struct resource	      * ha_irq_res;
	void		      * ha_intr;
	PI2O_LCT		ha_LCT;	       /* Complete list of devices */
	/* Shorthand accessors for the vendor-specific IdentityTag bytes */
#	define le_type	  IdentityTag[0]
#		define I2O_BSA	   0x20
#		define I2O_FCA	   0x40
#		define I2O_SCSI	   0x00
#		define I2O_PORT	   0x80
#		define I2O_UNKNOWN 0x7F
#	define le_bus	  IdentityTag[1]
#	define le_target  IdentityTag[2]
#	define le_lun	  IdentityTag[3]
	target2lun_t	      * ha_targets[MAX_CHANNEL+1];
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
	u_long			ha_Msgs_Phys;  /* physical addr of ha_Msgs */

	u_int8_t		ha_in_reset;   /* adapter reset state machine */
#	define HA_OPERATIONAL	    0
#	define HA_IN_RESET	    1
#	define HA_OFF_LINE	    2
#	define HA_OFF_LINE_RECOVERY 3
	/* Configuration information */
	/* The target id maximums we take */
	u_int8_t		ha_MaxBus;     /* Maximum bus */
	u_int8_t		ha_MaxId;      /* Maximum target ID */
	u_int8_t		ha_MaxLun;     /* Maximum target LUN */
	u_int8_t		ha_SgSize;     /* Max SG elements */
	u_int8_t		ha_pciBusNum;
	u_int8_t		ha_pciDeviceNum;
	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
	u_int16_t		ha_QueueSize;  /* Max outstanding commands */
	u_int16_t		ha_Msgs_Count;

	/* Links into other parents and HBAs */
	struct Asr_softc      * ha_next;       /* HBA list */
} Asr_softc_t;

/* Head of the global singly-linked list of attached adapters. */
STATIC Asr_softc_t * Asr_softc;

/*
 *	Prototypes of the routines we have in this object.
 */

/* Externally callable routines */
/*
 * PROBE_*/ATTACH_* macros abstract the newbus probe/attach calling
 * convention so the same routine bodies port across OS versions.
 */
#define PROBE_ARGS  IN device_t tag
#define PROBE_RET   int
#define PROBE_SET() u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag)
#define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);}
#define ATTACH_ARGS IN device_t tag
#define ATTACH_RET  int
#define ATTACH_SET() int unit = device_get_unit(tag)
#define ATTACH_RETURN(retval) return(retval)
/* I2O HDM interface */
STATIC PROBE_RET      asr_probe (PROBE_ARGS);
STATIC ATTACH_RET     asr_attach (ATTACH_ARGS);
/* DOMINO placeholder */
STATIC PROBE_RET      domino_probe (PROBE_ARGS);
STATIC ATTACH_RET     domino_attach (ATTACH_ARGS);
/* MODE0 adapter placeholder */
STATIC PROBE_RET      mode0_probe (PROBE_ARGS);
STATIC ATTACH_RET     mode0_attach (ATTACH_ARGS);

STATIC Asr_softc_t  * ASR_get_sc (cdev_t dev);
STATIC d_ioctl_t      asr_ioctl;
STATIC d_open_t       asr_open;
STATIC d_close_t      asr_close;
STATIC int	      asr_intr (IN Asr_softc_t *sc);
STATIC void	      asr_timeout (INOUT void *arg);
STATIC int	      ASR_init (IN Asr_softc_t *sc);
STATIC INLINE int     ASR_acquireLct (INOUT Asr_softc_t *sc);
STATIC INLINE int     ASR_acquireHrt (INOUT Asr_softc_t *sc);
STATIC void	      asr_action (IN struct cam_sim *sim,
				  IN union ccb *ccb);
STATIC void	      asr_poll (IN struct cam_sim * sim);

/*
 * Here is the auto-probe structure used to nest our tests appropriately
 * during the startup phase of the operating system.
 */
STATIC device_method_t asr_methods[] = {
	DEVMETHOD(device_probe,	 asr_probe),
	DEVMETHOD(device_attach, asr_attach),
	{ 0, 0 }
};

STATIC driver_t asr_driver = {
	"asr",
	asr_methods,
	sizeof(Asr_softc_t)
};

STATIC devclass_t asr_devclass;

DECLARE_DUMMY_MODULE(asr);
DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);

/*
 * Placeholder drivers for the DOMINO memory controller and Mode0
 * (processor-disabled) adapters: they claim the PCI IDs so no other
 * driver attaches, but do nothing at attach time.
 */
STATIC device_method_t domino_methods[] = {
	DEVMETHOD(device_probe,	 domino_probe),
	DEVMETHOD(device_attach, domino_attach),
	{ 0, 0 }
};

STATIC driver_t domino_driver = {
	"domino",
	domino_methods,
	0
};

STATIC devclass_t domino_devclass;

DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, 0, 0);

STATIC device_method_t mode0_methods[] = {
	DEVMETHOD(device_probe,	 mode0_probe),
	DEVMETHOD(device_attach, mode0_attach),
	{ 0, 0 }
};

STATIC driver_t mode0_driver = {
	"mode0",
	mode0_methods,
	0
};

STATIC devclass_t mode0_devclass;

DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, 0, 0);

/*
 * devsw for asr hba driver
 *
 * only ioctl is used. the sd driver provides all other access.
 */
#define CDEV_MAJOR 154	 /* prefered default character major */
STATIC struct dev_ops asr_ops = {
	{ "asr", CDEV_MAJOR, 0 },
	.d_open =	asr_open,
	.d_close =	asr_close,
	.d_ioctl =	asr_ioctl,
};

/*
 * Initialize the dynamic dev_ops hooks.
 * Registers asr_ops exactly once, searching for a free character-device
 * major starting at CDEV_MAJOR and wrapping to 0 if none is free above it.
 */
STATIC void
asr_drvinit (void * unused)
{
	static int asr_devsw_installed = 0;

	if (asr_devsw_installed) {
		return;
	}
	asr_devsw_installed++;
	/*
	 * Find a free spot (the report during driver load used by
	 * osd layer in engine to generate the controlling nodes).
	 *
	 * XXX this is garbage code, store a unit number in asr_ops
	 * and iterate through that instead?
	 */
	while (asr_ops.head.maj < NUMCDEVSW &&
	       dev_ops_get(asr_ops.head.maj, -1) != NULL
	) {
		++asr_ops.head.maj;
	}
	if (asr_ops.head.maj >= NUMCDEVSW) {
		/* Nothing free above CDEV_MAJOR; retry from 0 upward. */
		asr_ops.head.maj = 0;
		while (asr_ops.head.maj < CDEV_MAJOR &&
		       dev_ops_get(asr_ops.head.maj, -1) != NULL
		) {
			++asr_ops.head.maj;
		}
	}

	/*
	 * Come to papa
	 */
	dev_ops_add(&asr_ops, 0, 0);
} /* asr_drvinit */

/* Must initialize before CAM layer picks up our HBA driver */
SYSINIT(asrdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,asr_drvinit,NULL)

/* I2O support routines */
/*
 * Stack-allocate a properly sized raw buffer for a message frame and
 * reinterpret it as the frame type (alignment handled by dptalign.h
 * conventions elsewhere in the driver).
 */
#define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)]
#define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME))

/*
 *	Fill message with default.
 *	Zeroes `size' bytes of the raw buffer, stamps the I2O version,
 *	the frame size in 32-bit words (rounded up) and the initiator
 *	address, and returns the buffer as a message-frame pointer.
 */
STATIC PI2O_MESSAGE_FRAME
ASR_fillMessage (
	IN char		    * Message,
	IN u_int16_t	      size)
{
	OUT PI2O_MESSAGE_FRAME Message_Ptr;

	Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message);
	bzero ((void *)Message_Ptr, size);
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (size + sizeof(U32) - 1) >> 2);
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	return (Message_Ptr);
} /* ASR_fillMessage */

#define	EMPTY_QUEUE ((U32)-1L)

/*
 *	Pop a free inbound message-frame offset from the ToFIFO.
 *	Reads the register a second time on EMPTY_QUEUE before giving up
 *	(the first read may race the adapter refilling the FIFO).
 */
STATIC INLINE U32
ASR_getMessage(
	IN i2oRegs_t * virt)
{
	OUT U32	MessageOffset;

	if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) {
		MessageOffset = virt->ToFIFO;
	}
	return (MessageOffset);
} /* ASR_getMessage */

/*
 * Issue a polled command.
 * Copies the frame into the adapter's frame window, disables interrupts
 * and posts the frame.  Returns the previous interrupt Mask value so the
 * caller can restore it, or (U32)-1L if no frame became available within
 * ~15s of polling.
 */
STATIC U32
ASR_initiateCp (
	INOUT i2oRegs_t	    * virt,
	INOUT U8	    * fvirt,
	IN PI2O_MESSAGE_FRAME Message)
{
	OUT U32	      Mask = -1L;
	U32	      MessageOffset;
	u_int	      Delay = 1500;	/* 1500 * 10ms = 15s worst case */

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resiliant to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		bcopy (Message, fvirt + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		/*
		 * Disable the Interrupts
		 */
		virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled;
		virt->ToFIFO = MessageOffset;
	}
	return (Mask);
} /* ASR_initiateCp */

/*
 *	Reset the adapter.
 *	Sends I2O_EXEC_IOP_RESET and polls (up to 2s) for the status word
 *	the adapter DMAs back into the reply slot appended to the message
 *	buffer.  Returns that status, or 0 if the post itself failed.
 */
STATIC U32
ASR_resetIOP (
	INOUT i2oRegs_t		      * virt,
	INOUT U8		      * fvirt)
{
	/* Message followed by an in-line U32 reply/status slot. */
	struct resetMessage {
		I2O_EXEC_IOP_RESET_MESSAGE M;
		U32			   R;
	};
	defAlignLong(struct resetMessage,Message);
	PI2O_EXEC_IOP_RESET_MESSAGE	Message_Ptr;
	OUT U32			      * volatile Reply_Ptr;
	U32				Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 *  Reset the Reply Status
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
	/* Adapter writes its completion status to this physical address. */
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	  KVTOPHYS((void *)Reply_Ptr));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s.
		 */
		u_int8_t Delay = 200;

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 *	Re-enable the interrupts.
		 */
		virt->Mask = Old;
		ASSERT (*Reply_Ptr);
		return (*Reply_Ptr);
	}
	ASSERT (Old != (U32)-1L);
	return (0);
} /* ASR_resetIOP */

/*
 *	Get the curent state of the adapter
 *	Issues I2O_EXEC_STATUS_GET and polls SyncByte for completion.
 *	Returns `buffer' (filled in by the adapter) on success, NULL on
 *	timeout or if the message could not be posted.
 */
STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus (
	INOUT i2oRegs_t *	      virt,
	INOUT U8 *		      fvirt,
	OUT PI2O_EXEC_STATUS_GET_REPLY buffer)
{
	defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message);
	PI2O_EXEC_STATUS_GET_MESSAGE	Message_Ptr;
	U32				Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	  I2O_EXEC_STATUS_GET);
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	  KVTOPHYS((void *)buffer));
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	  sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *  Reset the Reply Status
	 */
	bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive.  We allow up to 255ms
		 * (raised from 50ms in V1.08 because some 2000S/2005S
		 * boards failed to initialize within the old limit).
		 */
		u_int8_t Delay = 255;

		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL;
				break;
			}
			DELAY (1000);
		}
		/*
		 *	Re-enable the interrupts.
		 */
		virt->Mask = Old;
		return (buffer);
	}
	return ((PI2O_EXEC_STATUS_GET_REPLY)NULL);
} /* ASR_getStatus */

/*
 * Check if the device is a SCSI I2O HBA, and add it to the list.
 */

/*
 * Probe for ASR controller.  If we find it, we will use it.
 * virtual adapters.
 */
STATIC PROBE_RET
asr_probe(PROBE_ARGS)
{
	PROBE_SET();
	/* Match the Adaptec caching SCSI RAID PCI device/vendor IDs. */
	if ((id == 0xA5011044) || (id == 0xA5111044)) {
		PROBE_RETURN ("Adaptec Caching SCSI RAID");
	}
	PROBE_RETURN (NULL);
} /* asr_probe */

/*
 * Probe/Attach for DOMINO chipset.
 */
STATIC PROBE_RET
domino_probe(PROBE_ARGS)
{
	PROBE_SET();
	if (id == 0x10121044) {
		PROBE_RETURN ("Adaptec Caching Memory Controller");
	}
	PROBE_RETURN (NULL);
} /* domino_probe */

/* Claim-only attach: no resources are acquired for DOMINO devices. */
STATIC ATTACH_RET
domino_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* domino_attach */

/*
 * Probe/Attach for MODE0 adapters.
 */
STATIC PROBE_RET
mode0_probe(PROBE_ARGS)
{
	PROBE_SET();

	/*
	 * If/When we can get a business case to commit to a
	 * Mode0 driver here, we can make all these tests more
	 * specific and robust. Mode0 adapters have their processors
	 * turned off, this the chips are in a raw state.
	 */

	/* This is a PLX9054 */
	if (id == 0x905410B5) {
		PROBE_RETURN ("Adaptec Mode0 PM3757");
	}
	/* This is a PLX9080 */
	if (id == 0x908010B5) {
		PROBE_RETURN ("Adaptec Mode0 PM3754/PM3755");
	}
	/* This is a ZION 80303 */
	if (id == 0x53098086) {
		PROBE_RETURN ("Adaptec Mode0 3010S");
	}
	/* This is an i960RS */
	if (id == 0x39628086) {
		PROBE_RETURN ("Adaptec Mode0 2100S");
	}
	/* This is an i960RN */
	if (id == 0x19648086) {
		PROBE_RETURN ("Adaptec Mode0 PM2865/2400A/3200S/3400S");
	}
#if 0 /* this would match any generic i960 -- mjs */
	/* This is an i960RP (typically also on Motherboards) */
	if (id == 0x19608086) {
		PROBE_RETURN ("Adaptec Mode0 PM2554/PM1554/PM2654");
	}
#endif
	PROBE_RETURN (NULL);
} /* mode0_probe */

/* Claim-only attach: no resources are acquired for Mode0 devices. */
STATIC ATTACH_RET
mode0_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* mode0_attach */

/*
 * Allocate and zero a worst-case local ccb, priming the header fields
 * (priority, unqueued index) and stashing the owning softc in
 * spriv_ptr0 so completion paths can find the adapter.
 */
STATIC INLINE union asr_ccb *
asr_alloc_ccb (
	IN Asr_softc_t	  * sc)
{
	OUT union asr_ccb * new_ccb;

	if ((new_ccb = (union asr_ccb *)kmalloc(sizeof(*new_ccb),
	  M_DEVBUF, M_WAITOK)) != (union asr_ccb *)NULL) {
		bzero (new_ccb, sizeof(*new_ccb));
		new_ccb->ccb_h.pinfo.priority = 1;
		new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
		new_ccb->ccb_h.spriv_ptr0 = sc;
	}
	return (new_ccb);
} /* asr_alloc_ccb */

/* Release a ccb obtained from asr_alloc_ccb. */
STATIC INLINE void
asr_free_ccb (
	IN union asr_ccb * free_ccb)
{
	kfree(free_ccb, M_DEVBUF);
} /* asr_free_ccb */

/*
 *	Print inquiry data `carefully'
 *	Stops at the field width, a NUL, a space or a dash.
 */
STATIC void
ASR_prstring (
	u_int8_t * s,
	int	   len)
{
	while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
		kprintf ("%c", *(s++));
	}
} /* ASR_prstring */

/*
 * Prototypes
 */
STATIC INLINE int ASR_queue (
	IN Asr_softc_t	    * sc,
	IN PI2O_MESSAGE_FRAME Message);
/*
 *	Send a message synchronously and without Interrupt to a ccb.
 *	Interrupts are masked and completions are reaped by polling
 *	asr_intr() until the ccb leaves CAM_REQ_INPROG.  Returns the
 *	final ccb status.
 */
STATIC int
ASR_queue_s (
	INOUT union asr_ccb * ccb,
	IN PI2O_MESSAGE_FRAME Message)
{
	U32		Mask;
	Asr_softc_t   * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	crit_enter();
	sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask)
	  | Mask_InterruptsDisabled;

	if (ASR_queue (sc, Message) == EMPTY_QUEUE) {
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.
	 */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);
	}

	/* Re-enable Interrupts */
	sc->ha_Virt->Mask = Mask;
	crit_exit();

	return (ccb->ccb_h.status);
} /* ASR_queue_s */

/*
 *	Send a message synchronously to a Asr_softc_t
 *	Convenience wrapper: allocates a throwaway ccb, runs the message
 *	through ASR_queue_s, and frees the ccb again.
 */
STATIC int
ASR_queue_c (
	IN Asr_softc_t	    * sc,
	IN PI2O_MESSAGE_FRAME Message)
{
	union asr_ccb * ccb;
	OUT int	        status;

	if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
		return (CAM_REQUEUE_REQ);
	}

	status = ASR_queue_s (ccb, Message);

	asr_free_ccb(ccb);

	return (status);
} /* ASR_queue_c */

/*
 *	Add the specified ccb to the active queue
 *	Runs inside a critical section (changelog V1.04: locking here is
 *	required to avoid a panic).  Arms the per-ccb timeout callout
 *	unless the caller asked for an infinite timeout.
 */
STATIC INLINE void
ASR_ccbAdd (
	IN Asr_softc_t	    * sc,
	INOUT union asr_ccb * ccb)
{
	crit_enter();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flashes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;	/* 6 minutes */
		}
		callout_reset(&ccb->ccb_h.timeout_ch,
		    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
	}
	crit_exit();
} /* ASR_ccbAdd */

/*
 *	Remove the specified ccb from the active queue.
 *	Cancels any pending timeout before unlinking.
 */
STATIC INLINE void
ASR_ccbRemove (
	IN Asr_softc_t	    * sc,
	INOUT union asr_ccb * ccb)
{
	crit_enter();
	callout_stop(&ccb->ccb_h.timeout_ch);
	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
	crit_exit();
} /* ASR_ccbRemove */

/*
 *	Fail all the active commands, so they get re-issued by the operating
 *	system.
 */
STATIC INLINE void
ASR_failActiveCommands (
	IN Asr_softc_t	      * sc)
{
	struct ccb_hdr	      * ccb;

#if 0 /* Currently handled by callers, unnecessary paranoia currently */
      /* Left in for historical perspective. */
	defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message);
	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;

	/* Send a blind LCT command to wait for the enableSys to complete */
	Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
#endif

	crit_enter();
	/*
	 * We do not need to inform the CAM layer that we had a bus
	 * reset since we manage it on our own, this also prevents the
	 * SCSI_DELAY settling that would be required on other systems.
	 * The `SCSI_DELAY' has already been handled by the card via the
	 * acquisition of the LCT table while we are at CAM priority level.
	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
	 *  }
	 */
	/*
	 * Re-read LIST_FIRST each iteration rather than walking a saved
	 * next pointer: ASR_ccbRemove unlinks the entry, which would
	 * otherwise break the traversal (changelog V1.05 fixed exactly
	 * that flaw).  Each command is marked CAM_REQUEUE_REQ so the OS
	 * re-issues it.
	 */
	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != (struct ccb_hdr *)NULL) {
		ASR_ccbRemove (sc, (union asr_ccb *)ccb);

		ccb->status &= ~CAM_STATUS_MASK;
		ccb->status |= CAM_REQUEUE_REQ;
		/* Nothing Transfered */
		((struct ccb_scsiio *)ccb)->resid
		  = ((struct ccb_scsiio *)ccb)->dxfer_len;

		/* ccbs without a CAM path are internal/synchronous waiters. */
		if (ccb->path) {
			xpt_done ((union ccb *)ccb);
		} else {
			wakeup ((caddr_t)ccb);
		}
	}
	crit_exit();
} /* ASR_failActiveCommands */

/*
 *	The following command causes the HBA to reset the specific bus
 *	Scans the LCT for the bus-port entry matching `bus' and posts an
 *	asynchronous I2O_HBA_BUS_RESET to its TID; silently does nothing
 *	if no matching port entry exists.
 */
STATIC INLINE void
ASR_resetBus(
	IN Asr_softc_t	      * sc,
	IN int			bus)
{
	defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message);
	I2O_HBA_BUS_RESET_MESSAGE * Message_Ptr;
	PI2O_LCT_ENTRY		    Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	  I2O_HBA_BUS_RESET);
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			  &Message_Ptr->StdMessageFrame,
			  I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */

/*
 *	Return the adapter's blink-LED fault code, or 0 when the adapter
 *	is not flashing a code (the 0xBC marker byte at ha_blinkLED[1]
 *	indicates a valid code is present in ha_blinkLED[0]).
 */
STATIC INLINE int
ASR_getBlinkLedCode (
	IN Asr_softc_t * sc)
{
	if ((sc != (Asr_softc_t *)NULL)
	 && (sc->ha_blinkLED != (u_int8_t *)NULL)
	 && (sc->ha_blinkLED[1] == 0xBC)) {
		return (sc->ha_blinkLED[0]);
	}
	return (0);
} /* ASR_getBlinkCode */

/*
 * Determine the address of an TID lookup.
 * Must be done at high priority
 * since the address can be changed by other threads of execution.
 *
 * Returns NULL pointer if not indexible (but will attempt to generate
 * an index if `new_entry' flag is set to TRUE).
 *
 * All addressible entries are to be guaranteed zero if never initialized.
 *
 * The lookup is a two-level table: sc->ha_targets[bus] points at a
 * target2lun_t (sized per-target in BUS_CHUNK steps), whose LUN[target]
 * points at a lun2tid_t (sized per-lun in TARGET_CHUNK steps) holding
 * the cached TIDs.  Both levels grow on demand when new_entry is TRUE.
 */
STATIC INLINE tid_t *
ASR_getTidAddress(
	INOUT Asr_softc_t * sc,
	IN int		    bus,
	IN int		    target,
	IN int		    lun,
	IN int		    new_entry)
{
	target2lun_t	  * bus_ptr;
	lun2tid_t	  * target_ptr;
	unsigned	    new_size;

	/*
	 * Validity checking of incoming parameters. More of a bound
	 * expansion limit than an issue with the code dealing with the
	 * values.
	 *
	 * sc must be valid before it gets here, so that check could be
	 * dropped if speed a critical issue.
	 */
	if ((sc == (Asr_softc_t *)NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		  (u_long)sc, bus, target, lun);
		return ((tid_t *)NULL);
	}
	/*
	 * See if there is an associated bus list.
	 *
	 * for performance, allocate in size of BUS_CHUNK chunks.
	 * BUS_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
# define BUS_CHUNK 8
	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
	if ((bus_ptr = sc->ha_targets[bus]) == (target2lun_t *)NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 * NOTE(review): kmalloc(M_WAITOK) cannot return NULL here,
		 * so the NULL comparison is defensive only.
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)kmalloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return ((tid_t *)NULL);
		}
		bzero (bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)kmalloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		kfree (bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 * We now have the bus list, lets get to the target list.
	 * Since most systems have only *one* lun, we do not allocate
	 * in chunks as above, here we allow one, then in chunk sizes.
	 * TARGET_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
# define TARGET_CHUNK 8
	if ((new_size = lun) != 0) {
		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
	}
	if ((target_ptr = bus_ptr->LUN[target]) == (lun2tid_t *)NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)kmalloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return ((tid_t *)NULL);
		}
		bzero (target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)kmalloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		bcopy (target_ptr, new_target_ptr,
		  sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		kfree (target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 * Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */

/*
 * Get a pre-existing TID relationship.
 *
 * If the TID was never set, return (tid_t)-1.
 *
 * should use mutex rather than spl.
 */
STATIC INLINE tid_t
ASR_getTid (
	IN Asr_softc_t	  * sc,
	IN int		    bus,
	IN int		    target,
	IN int		    lun)
{
	tid_t		  * tid_ptr;
	OUT tid_t	    retval;

	crit_enter();
	if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE))
	  == (tid_t *)NULL)
	/* (tid_t)0 or (tid_t)-1 indicate no TID */
	 || (*tid_ptr == (tid_t)0)) {
		crit_exit();
		return ((tid_t)-1);
	}
	retval = *tid_ptr;
	crit_exit();
	return (retval);
} /* ASR_getTid */

/*
 * Set a TID relationship.
 *
 * If the TID was not set, return (tid_t)-1.
 *
 * should use mutex rather than spl.
 */
STATIC INLINE tid_t
ASR_setTid (
	INOUT Asr_softc_t * sc,
	IN int		    bus,
	IN int		    target,
	IN int		    lun,
	INOUT tid_t	    TID)
{
	tid_t		  * tid_ptr;

	if (TID != (tid_t)-1) {
		/* (tid_t)0 is reserved to mean `no TID cached' */
		if (TID == 0) {
			return ((tid_t)-1);
		}
		crit_enter();
		if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE))
		  == (tid_t *)NULL) {
			crit_exit();
			return ((tid_t)-1);
		}
		*tid_ptr = TID;
		crit_exit();
	}
	return (TID);
} /* ASR_setTid */

/*-------------------------------------------------------------------------*/
/*			    Function ASR_rescan				   */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :				   */
/*	Asr_softc_t *	    : HBA miniport driver's adapter data storage.  */
/*									   */
/* This Function Will rescan the adapter and resynchronize any data	   */
/*									   */
/* Return : 0 For OK, Error Code Otherwise				   */
/*-------------------------------------------------------------------------*/

STATIC INLINE int
ASR_rescan(
	IN Asr_softc_t	  * sc)
{
	int bus;
	OUT int error;

	/*
	 * Re-acquire the LCT table and synchronize us to the adapter.
	 */
	if ((error = ASR_acquireLct(sc)) == 0) {
		error = ASR_acquireHrt(sc);
	}

	if (error != 0) {
		return error;
	}

	bus = sc->ha_MaxBus;
	/* Reset all existing cached TID lookups */
	do {
		int target, event = 0;

		/*
		 * Scan for all targets on this bus to see if they
		 * got affected by the rescan.
		 */
		for (target = 0; target <= sc->ha_MaxId; ++target) {
			int lun;

			/* Stay away from the controller ID */
			if (target == sc->ha_adapter_target[bus]) {
				continue;
			}
			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
				PI2O_LCT_ENTRY Device;
				tid_t TID = (tid_t)-1;
				tid_t LastTID;

				/*
				 * See if the cached TID changed. Search for
				 * the device in our new LCT.
				 */
				for (Device = sc->ha_LCT->LCTEntry;
				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
				   + I2O_LCT_getTableSize(sc->ha_LCT));
				  ++Device) {
					if ((Device->le_type != I2O_UNKNOWN)
					 && (Device->le_bus == bus)
					 && (Device->le_target == target)
					 && (Device->le_lun == lun)
					 && (I2O_LCT_ENTRY_getUserTID(Device)
					   == 0xFFF)) {
						TID = I2O_LCT_ENTRY_getLocalTID(
						  Device);
						break;
					}
				}
				/*
				 * Indicate to the OS that the label needs
				 * to be recalculated, or that the specific
				 * open device is no longer valid (Merde)
				 * because the cached TID changed.
				 */
				LastTID = ASR_getTid (sc, bus, target, lun);
				if (LastTID != TID) {
					struct cam_path * path;

					/*
					 * If we cannot build a device path,
					 * fall back to a bus-wide event,
					 * accumulated in `event' below.
					 */
					if (xpt_create_path(&path,
					  /*periph*/NULL,
					  cam_sim_path(sc->ha_sim[bus]),
					  target, lun) != CAM_REQ_CMP) {
						if (TID == (tid_t)-1) {
							event |= AC_LOST_DEVICE;
						} else {
							event |= AC_INQ_CHANGED
							       | AC_GETDEV_CHANGED;
						}
					} else {
						if (TID == (tid_t)-1) {
							/* Device disappeared */
							xpt_async(
							  AC_LOST_DEVICE,
							  path, NULL);
						} else if (LastTID == (tid_t)-1) {
							/* Device newly appeared */
							struct ccb_getdev ccb;

							xpt_setup_ccb(
							  &(ccb.ccb_h),
							  path, /*priority*/5);
							xpt_async(
							  AC_FOUND_DEVICE,
							  path,
							  &ccb);
						} else {
							/* Device changed TIDs */
							xpt_async(
							  AC_INQ_CHANGED,
							  path, NULL);
							xpt_async(
							  AC_GETDEV_CHANGED,
							  path, NULL);
						}
					}
				}
				/*
				 * We have the option of clearing the
				 * cached TID for it to be rescanned, or to
				 * set it now even if the device never got
				 * accessed. We chose the later since we
				 * currently do not use the condition that
				 * the TID ever got cached.
				 */
				ASR_setTid (sc, bus, target, lun, TID);
			}
		}
		/*
		 * The xpt layer can not handle multiple events at the
		 * same call.
		 */
		if (event & AC_LOST_DEVICE) {
			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
		}
		if (event & AC_INQ_CHANGED) {
			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
		}
		if (event & AC_GETDEV_CHANGED) {
			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
		}
	} while (--bus >= 0);
	return (error);
} /* ASR_rescan */

/*-------------------------------------------------------------------------*/
/*			    Function ASR_reset				   */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :				   */
/*	Asr_softc_t *	    : HBA miniport driver's adapter data storage.
*/ 1391 /* */ 1392 /* This Function Will reset the adapter and resynchronize any data */ 1393 /* */ 1394 /* Return : None */ 1395 /*-------------------------------------------------------------------------*/ 1396 1397 STATIC INLINE int 1398 ASR_reset( 1399 IN Asr_softc_t * sc) 1400 { 1401 int retVal; 1402 1403 crit_enter(); 1404 if ((sc->ha_in_reset == HA_IN_RESET) 1405 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) { 1406 crit_exit(); 1407 return (EBUSY); 1408 } 1409 /* 1410 * Promotes HA_OPERATIONAL to HA_IN_RESET, 1411 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY. 1412 */ 1413 ++(sc->ha_in_reset); 1414 if (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0) { 1415 debug_asr_printf ("ASR_resetIOP failed\n"); 1416 /* 1417 * We really need to take this card off-line, easier said 1418 * than make sense. Better to keep retrying for now since if a 1419 * UART cable is connected the blinkLEDs the adapter is now in 1420 * a hard state requiring action from the monitor commands to 1421 * the HBA to continue. For debugging waiting forever is a 1422 * good thing. In a production system, however, one may wish 1423 * to instead take the card off-line ... 1424 */ 1425 # if 0 && (defined(HA_OFF_LINE)) 1426 /* 1427 * Take adapter off-line. 1428 */ 1429 kprintf ("asr%d: Taking adapter off-line\n", 1430 sc->ha_path[0] 1431 ? 
cam_sim_unit(xpt_path_sim(sc->ha_path[0])) 1432 : 0); 1433 sc->ha_in_reset = HA_OFF_LINE; 1434 crit_exit(); 1435 return (ENXIO); 1436 # else 1437 /* Wait Forever */ 1438 while (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0); 1439 # endif 1440 } 1441 retVal = ASR_init (sc); 1442 crit_exit(); 1443 if (retVal != 0) { 1444 debug_asr_printf ("ASR_init failed\n"); 1445 sc->ha_in_reset = HA_OFF_LINE; 1446 return (ENXIO); 1447 } 1448 if (ASR_rescan (sc) != 0) { 1449 debug_asr_printf ("ASR_rescan failed\n"); 1450 } 1451 ASR_failActiveCommands (sc); 1452 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) { 1453 kprintf ("asr%d: Brining adapter back on-line\n", 1454 sc->ha_path[0] 1455 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0])) 1456 : 0); 1457 } 1458 sc->ha_in_reset = HA_OPERATIONAL; 1459 return (0); 1460 } /* ASR_reset */ 1461 1462 /* 1463 * Device timeout handler. 1464 */ 1465 STATIC void 1466 asr_timeout( 1467 INOUT void * arg) 1468 { 1469 union asr_ccb * ccb = (union asr_ccb *)arg; 1470 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0); 1471 int s; 1472 1473 debug_asr_print_path(ccb); 1474 debug_asr_printf("timed out"); 1475 1476 /* 1477 * Check if the adapter has locked up? 1478 */ 1479 if ((s = ASR_getBlinkLedCode(sc)) != 0) { 1480 /* Reset Adapter */ 1481 kprintf ("asr%d: Blink LED 0x%x resetting adapter\n", 1482 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s); 1483 if (ASR_reset (sc) == ENXIO) { 1484 /* Try again later */ 1485 callout_reset(&ccb->ccb_h.timeout_ch, 1486 (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb); 1487 } 1488 return; 1489 } 1490 /* 1491 * Abort does not function on the ASR card!!! Walking away from 1492 * the SCSI command is also *very* dangerous. A SCSI BUS reset is 1493 * our best bet, followed by a complete adapter reset if that fails. 
1494 */ 1495 crit_enter(); 1496 /* Check if we already timed out once to raise the issue */ 1497 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) { 1498 debug_asr_printf (" AGAIN\nreinitializing adapter\n"); 1499 if (ASR_reset (sc) == ENXIO) { 1500 callout_reset(&ccb->ccb_h.timeout_ch, 1501 (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb); 1502 } 1503 crit_exit(); 1504 return; 1505 } 1506 debug_asr_printf ("\nresetting bus\n"); 1507 /* If the BUS reset does not take, then an adapter reset is next! */ 1508 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1509 ccb->ccb_h.status |= CAM_CMD_TIMEOUT; 1510 callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000, 1511 asr_timeout, ccb); 1512 ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path))); 1513 xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL); 1514 crit_exit(); 1515 } /* asr_timeout */ 1516 1517 /* 1518 * send a message asynchronously 1519 */ 1520 STATIC INLINE int 1521 ASR_queue( 1522 IN Asr_softc_t * sc, 1523 IN PI2O_MESSAGE_FRAME Message) 1524 { 1525 OUT U32 MessageOffset; 1526 union asr_ccb * ccb; 1527 1528 debug_asr_printf ("Host Command Dump:\n"); 1529 debug_asr_dump_message (Message); 1530 1531 ccb = (union asr_ccb *)(long) 1532 I2O_MESSAGE_FRAME_getInitiatorContext64(Message); 1533 1534 if ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE) { 1535 bcopy (Message, sc->ha_Fvirt + MessageOffset, 1536 I2O_MESSAGE_FRAME_getMessageSize(Message) << 2); 1537 if (ccb) { 1538 ASR_ccbAdd (sc, ccb); 1539 } 1540 /* Post the command */ 1541 sc->ha_Virt->ToFIFO = MessageOffset; 1542 } else { 1543 if (ASR_getBlinkLedCode(sc)) { 1544 /* 1545 * Unlikely we can do anything if we can't grab a 1546 * message frame :-(, but lets give it a try. 
1547 */ 1548 (void)ASR_reset (sc); 1549 } 1550 } 1551 return (MessageOffset); 1552 } /* ASR_queue */ 1553 1554 1555 /* Simple Scatter Gather elements */ 1556 #define SG(SGL,Index,Flags,Buffer,Size) \ 1557 I2O_FLAGS_COUNT_setCount( \ 1558 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \ 1559 Size); \ 1560 I2O_FLAGS_COUNT_setFlags( \ 1561 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \ 1562 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags)); \ 1563 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress( \ 1564 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]), \ 1565 (Buffer == NULL) ? NULL : KVTOPHYS(Buffer)) 1566 1567 /* 1568 * Retrieve Parameter Group. 1569 * Buffer must be allocated using defAlignLong macro. 1570 */ 1571 STATIC void * 1572 ASR_getParams( 1573 IN Asr_softc_t * sc, 1574 IN tid_t TID, 1575 IN int Group, 1576 OUT void * Buffer, 1577 IN unsigned BufferSize) 1578 { 1579 struct paramGetMessage { 1580 I2O_UTIL_PARAMS_GET_MESSAGE M; 1581 char F[ 1582 sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)]; 1583 struct Operations { 1584 I2O_PARAM_OPERATIONS_LIST_HEADER Header; 1585 I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1]; 1586 } O; 1587 }; 1588 defAlignLong(struct paramGetMessage, Message); 1589 struct Operations * Operations_Ptr; 1590 I2O_UTIL_PARAMS_GET_MESSAGE * Message_Ptr; 1591 struct ParamBuffer { 1592 I2O_PARAM_RESULTS_LIST_HEADER Header; 1593 I2O_PARAM_READ_OPERATION_RESULT Read; 1594 char Info[1]; 1595 } * Buffer_Ptr; 1596 1597 Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message, 1598 sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) 1599 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)); 1600 Operations_Ptr = (struct Operations *)((char *)Message_Ptr 1601 + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) 1602 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)); 1603 bzero ((void *)Operations_Ptr, sizeof(struct Operations)); 1604 I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount( 1605 &(Operations_Ptr->Header), 1); 1606 
I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation( 1607 &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET); 1608 I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount( 1609 &(Operations_Ptr->Template[0]), 0xFFFF); 1610 I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber( 1611 &(Operations_Ptr->Template[0]), Group); 1612 bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)), 1613 BufferSize); 1614 1615 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame), 1616 I2O_VERSION_11 1617 + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)) 1618 / sizeof(U32)) << 4)); 1619 I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame), 1620 TID); 1621 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame), 1622 I2O_UTIL_PARAMS_GET); 1623 /* 1624 * Set up the buffers as scatter gather elements. 1625 */ 1626 SG(&(Message_Ptr->SGL), 0, 1627 I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, 1628 Operations_Ptr, sizeof(struct Operations)); 1629 SG(&(Message_Ptr->SGL), 1, 1630 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, 1631 Buffer_Ptr, BufferSize); 1632 1633 if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP) 1634 && (Buffer_Ptr->Header.ResultCount)) { 1635 return ((void *)(Buffer_Ptr->Info)); 1636 } 1637 return ((void *)NULL); 1638 } /* ASR_getParams */ 1639 1640 /* 1641 * Acquire the LCT information. 
 */
/*
 * Fetch the adapter's Logical Configuration Table (LCT) into a freshly
 * allocated sc->ha_LCT, building a scatter/gather list over the (possibly
 * multi-page) buffer, then classify each entry (BSA/SCSI/FCA/port) and
 * fill in the per-entry bus/target/lun fields.  Returns 0 on success,
 * or ENOMEM/EINVAL/ENODEV.
 */
STATIC INLINE int
ASR_acquireLct (
	INOUT Asr_softc_t * sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT	     sg;
	int			     MessageSizeInBytes;
	caddr_t			     v;
	int			     len;
	I2O_LCT			     Table;
	PI2O_LCT_ENTRY		     Entry;

	/*
	 * sc value assumed valid
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)kmalloc (
	  MessageSizeInBytes, M_TEMP, M_WAITOK))
	  == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	/*
	 * Call the LCT table to determine the number of device entries
	 * to reserve space for.  The first, deliberately undersized query
	 * into `Table' only yields the required table size.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
	  sizeof(I2O_LCT));
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 * Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		kfree (sc->ha_LCT, M_TEMP);
	}
	/*
	 * kmalloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		kfree (Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)kmalloc (len, M_TEMP, M_WAITOK))
	  == (PI2O_LCT)NULL) {
		kfree (Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 * Convert the access to the LCT table into a SG list, one simple
	 * element per physically contiguous run of the buffer.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				/* Final element terminates the SG list */
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				    | I2O_SGL_FLAGS_LAST_ELEMENT
				    | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		  &(Message_Ptr->StdMessageFrame),
		  I2O_MESSAGE_FRAME_getMessageSize(
		    &(Message_Ptr->StdMessageFrame))
		  + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    kmalloc (MessageSizeInBytes, M_TEMP, M_WAITOK))
			    == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
				kfree (sc->ha_LCT, M_TEMP);
				sc->ha_LCT = (PI2O_LCT)NULL;
				kfree (Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			/* Copy the message built so far, then continue
			 * appending SG elements in the larger frame. */
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy ((caddr_t)Message_Ptr,
			  (caddr_t)NewMessage_Ptr, span);
			kfree (Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	{	int retval;

		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		kfree (Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
		{	struct ControllerInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	    Header;
				I2O_PARAM_READ_OPERATION_RESULT	    Read;
				I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
			};
			defAlignLong(struct ControllerInfo, Buffer);
			PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

			/* Port entries: bus/target/lun unknown until the
			 * controller info parameter group is fetched. */
			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
			    Buffer, sizeof(struct ControllerInfo)))
			  == (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) {
				continue;
			}
			Entry->le_target
			  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
			      Info);
			Entry->le_lun = 0;
		}	/* FALLTHRU */
		default:
			continue;
		}
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_DPT_DEVICE_INFO_SCALAR	Info;
			};
			defAlignLong (struct DeviceInfo, Buffer);
			PI2O_DPT_DEVICE_INFO_SCALAR Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_DPT_DEVICE_INFO_GROUP_NO,
			    Buffer, sizeof(struct DeviceInfo)))
			  == (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			/* Track the highest bus number actually present */
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 * A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */

/*
 * Initialize a message frame.
 * We assume that the CDB has already been set up, so all we do here is
 * generate the Scatter Gather list.
 * Returns the prepared frame, or NULL if no TID exists for the
 * addressed bus/target/lun.
 */
STATIC INLINE PI2O_MESSAGE_FRAME
ASR_init_message(
	IN union asr_ccb      * ccb,
	OUT PI2O_MESSAGE_FRAME	Message)
{
	int			next, span, base, rw;
	OUT PI2O_MESSAGE_FRAME	Message_Ptr;
	Asr_softc_t	      * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	PI2O_SGE_SIMPLE_ELEMENT sg;
	caddr_t			v;
	vm_size_t		size, len;
	U32			MessageSize;

	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
	bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message),
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT)));

	{
		int target = ccb->ccb_h.target_id;
		int lun = ccb->ccb_h.target_lun;
		int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
		tid_t TID;

		/* On a cache miss, search the LCT and cache the mapping */
		if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
			PI2O_LCT_ENTRY Device;

			TID = (tid_t)0;
			for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
			  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
			  ++Device) {
				if ((Device->le_type != I2O_UNKNOWN)
				 && (Device->le_bus == bus)
				 && (Device->le_target == target)
				 && (Device->le_lun == lun)
				 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
					TID = I2O_LCT_ENTRY_getLocalTID(Device);
					ASR_setTid (sc, Device->le_bus,
					  Device->le_target, Device->le_lun,
					  TID);
					break;
				}
			}
		}
		if (TID == (tid_t)0) {
			return ((PI2O_MESSAGE_FRAME)NULL);
		}
		I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
		  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
	}
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    I2O_SCB_FLAG_ENABLE_DISCONNECT
	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator & Transaction context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
	/*
	 * copy the cdb over
	 */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
	bcopy (&(ccb->csio.cdb_io),
	  ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len);

	/*
	 * Given a buffer describing a transfer, set up a scatter/gather map
	 * in a ccb to map that SCSI transfer.
	 */

	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;

	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	  (ccb->csio.dxfer_len)
	    ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
		    : (I2O_SCB_FLAG_XFER_FROM_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
	    : (I2O_SCB_FLAG_ENABLE_DISCONNECT
	     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

	/*
	 * Given a transfer described by a `data', fill in the SG list.
	 */
	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];

	len = ccb->csio.dxfer_len;
	v = ccb->csio.data_ptr;
	ASSERT (ccb->csio.dxfer_len >= 0);
	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		if (len == 0) {
			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
		}
		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
		++sg;
		MessageSize += sizeof(*sg) / sizeof(U32);
	}
	/* We always do the request sense ... */
	if ((span = ccb->csio.sense_len) == 0) {
		span = sizeof(ccb->csio.sense_data);
	}
	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &(ccb->csio.sense_data), span);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  MessageSize + (sizeof(*sg) / sizeof(U32)));
	return (Message_Ptr);
} /* ASR_init_message */

/*
 * Initialize the adapter's outbound (reply) FIFO: send an
 * ExecOutboundInit, poll for its completion, allocate the contiguous
 * reply-frame pool on first use and post every frame to the FIFO.
 * Returns the final init status, or 0 if the message could not be sent.
 */
STATIC INLINE U32
ASR_initOutBound (
	INOUT Asr_softc_t * sc)
{
	struct initOutBoundMessage {
		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
		U32			       R;
	};
	defAlignLong(struct initOutBoundMessage,Message);
	PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr;
	OUT U32 * volatile Reply_Ptr;
	U32 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_OUTBOUND_INIT);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
	/*
	 * Reset the Reply Status
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
	  sizeof(U32));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (sc->ha_Virt, sc->ha_Fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		u_long size, addr;

		/*
		 * Wait for a response (Poll).
		 */
		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
		/*
		 * Re-enable the interrupts.
		 */
		sc->ha_Virt->Mask = Old;
		/*
		 * Populate the outbound table.
		 */
		if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {

			/* Allocate the reply frames */
			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  * sc->ha_Msgs_Count;

			/*
			 * contigmalloc only works reliably at
			 * initialization time.
			 */
			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul))
			  != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
				(void)bzero ((char *)sc->ha_Msgs, size);
				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
			}
		}

		/* Initialize the outbound FIFO */
		if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL)
		for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
		  size; --size) {
			sc->ha_Virt->FromFIFO = addr;
			addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
		}
		return (*Reply_Ptr);
	}
	return (0);
} /* ASR_initOutBound */

/*
 * Set the system table
 */
STATIC INLINE int
ASR_setSysTab(
	IN Asr_softc_t * sc)
{
	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
	PI2O_SET_SYSTAB_HEADER	      SystemTable;
	Asr_softc_t		    * ha;
	PI2O_SGE_SIMPLE_ELEMENT	      sg;
	int			      retVal;

	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)kmalloc (
	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK))
	  == (PI2O_SET_SYSTAB_HEADER)NULL) {
		return (ENOMEM);
	}
	bzero (SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	/* One entry per adapter instance in the driver's softc list */
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		++SystemTable->NumberEntries;
	}
	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)kmalloc (
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
	  M_TEMP, M_WAITOK)) == (PI2O_EXEC_SYS_TAB_SET_MESSAGE)NULL) {
		kfree (SystemTable, M_TEMP);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((char *)Message_Ptr,
sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT) 2166 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT))); 2167 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame), 2168 (I2O_VERSION_11 + 2169 (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)) 2170 / sizeof(U32)) << 4))); 2171 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame), 2172 I2O_EXEC_SYS_TAB_SET); 2173 /* 2174 * Call the LCT table to determine the number of device entries 2175 * to reserve space for. 2176 * since this code is reused in several systems, code efficiency 2177 * is greater by using a shift operation rather than a divide by 2178 * sizeof(u_int32_t). 2179 */ 2180 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr 2181 + ((I2O_MESSAGE_FRAME_getVersionOffset( 2182 &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2)); 2183 SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER)); 2184 ++sg; 2185 for (ha = Asr_softc; ha; ha = ha->ha_next) { 2186 SG(sg, 0, 2187 ((ha->ha_next) 2188 ? 
(I2O_SGL_FLAGS_DIR) 2189 : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)), 2190 &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable)); 2191 ++sg; 2192 } 2193 SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0); 2194 SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT 2195 | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0); 2196 retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 2197 kfree (Message_Ptr, M_TEMP); 2198 kfree (SystemTable, M_TEMP); 2199 return (retVal); 2200 } /* ASR_setSysTab */ 2201 2202 STATIC INLINE int 2203 ASR_acquireHrt ( 2204 INOUT Asr_softc_t * sc) 2205 { 2206 defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message); 2207 I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr; 2208 struct { 2209 I2O_HRT Header; 2210 I2O_HRT_ENTRY Entry[MAX_CHANNEL]; 2211 } Hrt; 2212 u_int8_t NumberOfEntries; 2213 PI2O_HRT_ENTRY Entry; 2214 2215 bzero ((void *)&Hrt, sizeof (Hrt)); 2216 Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message, 2217 sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT) 2218 + sizeof(I2O_SGE_SIMPLE_ELEMENT)); 2219 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame), 2220 (I2O_VERSION_11 2221 + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)) 2222 / sizeof(U32)) << 4))); 2223 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame), 2224 I2O_EXEC_HRT_GET); 2225 2226 /* 2227 * Set up the buffers as scatter gather elements. 
2228 */ 2229 SG(&(Message_Ptr->SGL), 0, 2230 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, 2231 &Hrt, sizeof(Hrt)); 2232 if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) { 2233 return (ENODEV); 2234 } 2235 if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header)) 2236 > (MAX_CHANNEL + 1)) { 2237 NumberOfEntries = MAX_CHANNEL + 1; 2238 } 2239 for (Entry = Hrt.Header.HRTEntry; 2240 NumberOfEntries != 0; 2241 ++Entry, --NumberOfEntries) { 2242 PI2O_LCT_ENTRY Device; 2243 2244 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY) 2245 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); 2246 ++Device) { 2247 if (I2O_LCT_ENTRY_getLocalTID(Device) 2248 == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) { 2249 Device->le_bus = I2O_HRT_ENTRY_getAdapterID( 2250 Entry) >> 16; 2251 if ((Device->le_bus > sc->ha_MaxBus) 2252 && (Device->le_bus <= MAX_CHANNEL)) { 2253 sc->ha_MaxBus = Device->le_bus; 2254 } 2255 } 2256 } 2257 } 2258 return (0); 2259 } /* ASR_acquireHrt */ 2260 2261 /* 2262 * Enable the adapter. 2263 */ 2264 STATIC INLINE int 2265 ASR_enableSys ( 2266 IN Asr_softc_t * sc) 2267 { 2268 defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message); 2269 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr; 2270 2271 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message, 2272 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE)); 2273 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame), 2274 I2O_EXEC_SYS_ENABLE); 2275 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0); 2276 } /* ASR_enableSys */ 2277 2278 /* 2279 * Perform the stages necessary to initialize the adapter 2280 */ 2281 STATIC int 2282 ASR_init( 2283 IN Asr_softc_t * sc) 2284 { 2285 return ((ASR_initOutBound(sc) == 0) 2286 || (ASR_setSysTab(sc) != CAM_REQ_CMP) 2287 || (ASR_enableSys(sc) != CAM_REQ_CMP)); 2288 } /* ASR_init */ 2289 2290 /* 2291 * Send a Synchronize Cache command to the target device. 
2292 */ 2293 STATIC INLINE void 2294 ASR_sync ( 2295 IN Asr_softc_t * sc, 2296 IN int bus, 2297 IN int target, 2298 IN int lun) 2299 { 2300 tid_t TID; 2301 2302 /* 2303 * We will not synchronize the device when there are outstanding 2304 * commands issued by the OS (this is due to a locked up device, 2305 * as the OS normally would flush all outstanding commands before 2306 * issuing a shutdown or an adapter reset). 2307 */ 2308 if ((sc != (Asr_softc_t *)NULL) 2309 && (LIST_FIRST(&(sc->ha_ccb)) != (struct ccb_hdr *)NULL) 2310 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1) 2311 && (TID != (tid_t)0)) { 2312 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message); 2313 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr; 2314 2315 bzero (Message_Ptr 2316 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message), 2317 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2318 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)); 2319 2320 I2O_MESSAGE_FRAME_setVersionOffset( 2321 (PI2O_MESSAGE_FRAME)Message_Ptr, 2322 I2O_VERSION_11 2323 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2324 - sizeof(I2O_SG_ELEMENT)) 2325 / sizeof(U32)) << 4)); 2326 I2O_MESSAGE_FRAME_setMessageSize( 2327 (PI2O_MESSAGE_FRAME)Message_Ptr, 2328 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2329 - sizeof(I2O_SG_ELEMENT)) 2330 / sizeof(U32)); 2331 I2O_MESSAGE_FRAME_setInitiatorAddress ( 2332 (PI2O_MESSAGE_FRAME)Message_Ptr, 1); 2333 I2O_MESSAGE_FRAME_setFunction( 2334 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE); 2335 I2O_MESSAGE_FRAME_setTargetAddress( 2336 (PI2O_MESSAGE_FRAME)Message_Ptr, TID); 2337 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode ( 2338 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2339 I2O_SCSI_SCB_EXEC); 2340 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID); 2341 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2342 I2O_SCB_FLAG_ENABLE_DISCONNECT 2343 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2344 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER); 2345 
I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID( 2346 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2347 DPT_ORGANIZATION_ID); 2348 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6); 2349 Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE; 2350 Message_Ptr->CDB[1] = (lun << 5); 2351 2352 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2353 (I2O_SCB_FLAG_XFER_FROM_DEVICE 2354 | I2O_SCB_FLAG_ENABLE_DISCONNECT 2355 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2356 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)); 2357 2358 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 2359 2360 } 2361 } 2362 2363 STATIC INLINE void 2364 ASR_synchronize ( 2365 IN Asr_softc_t * sc) 2366 { 2367 int bus, target, lun; 2368 2369 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) { 2370 for (target = 0; target <= sc->ha_MaxId; ++target) { 2371 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) { 2372 ASR_sync(sc,bus,target,lun); 2373 } 2374 } 2375 } 2376 } 2377 2378 /* 2379 * Reset the HBA, targets and BUS. 2380 * Currently this resets *all* the SCSI busses. 2381 */ 2382 STATIC INLINE void 2383 asr_hbareset( 2384 IN Asr_softc_t * sc) 2385 { 2386 ASR_synchronize (sc); 2387 (void)ASR_reset (sc); 2388 } /* asr_hbareset */ 2389 2390 /* 2391 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP 2392 * limit and a reduction in error checking (in the pre 4.0 case). 2393 */ 2394 STATIC int 2395 asr_pci_map_mem ( 2396 IN device_t tag, 2397 IN Asr_softc_t * sc) 2398 { 2399 int rid; 2400 u_int32_t p, l, s; 2401 2402 /* 2403 * I2O specification says we must find first *memory* mapped BAR 2404 */ 2405 for (rid = PCIR_MAPS; 2406 rid < (PCIR_MAPS + 4 * sizeof(u_int32_t)); 2407 rid += sizeof(u_int32_t)) { 2408 p = pci_read_config(tag, rid, sizeof(p)); 2409 if ((p & 1) == 0) { 2410 break; 2411 } 2412 } 2413 /* 2414 * Give up? 
2415 */ 2416 if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) { 2417 rid = PCIR_MAPS; 2418 } 2419 p = pci_read_config(tag, rid, sizeof(p)); 2420 pci_write_config(tag, rid, -1, sizeof(p)); 2421 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15); 2422 pci_write_config(tag, rid, p, sizeof(p)); 2423 if (l > MAX_MAP) { 2424 l = MAX_MAP; 2425 } 2426 /* 2427 * The 2005S Zero Channel RAID solution is not a perfect PCI 2428 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once 2429 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to 2430 * BAR0+2MB and sets it's size to 2MB. The IOP registers are 2431 * accessible via BAR0, the messaging registers are accessible 2432 * via BAR1. If the subdevice code is 50 to 59 decimal. 2433 */ 2434 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s)); 2435 if (s != 0xA5111044) { 2436 s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s)); 2437 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0) 2438 && (ADPTDOMINATOR_SUB_ID_START <= s) 2439 && (s <= ADPTDOMINATOR_SUB_ID_END)) { 2440 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */ 2441 } 2442 } 2443 p &= ~15; 2444 sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid, 2445 p, p + l, l, RF_ACTIVE); 2446 if (sc->ha_mem_res == (struct resource *)NULL) { 2447 return (0); 2448 } 2449 sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res); 2450 if (sc->ha_Base == (void *)NULL) { 2451 return (0); 2452 } 2453 sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res); 2454 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */ 2455 if ((rid += sizeof(u_int32_t)) 2456 >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) { 2457 return (0); 2458 } 2459 p = pci_read_config(tag, rid, sizeof(p)); 2460 pci_write_config(tag, rid, -1, sizeof(p)); 2461 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15); 2462 pci_write_config(tag, rid, p, sizeof(p)); 2463 if (l > MAX_MAP) { 2464 l = MAX_MAP; 2465 } 2466 p &= ~15; 2467 sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid, 2468 p, p + l, l, 
RF_ACTIVE); 2469 if (sc->ha_mes_res == (struct resource *)NULL) { 2470 return (0); 2471 } 2472 if ((void *)rman_get_start(sc->ha_mes_res) == (void *)NULL) { 2473 return (0); 2474 } 2475 sc->ha_Fvirt = (U8 *) rman_get_virtual(sc->ha_mes_res); 2476 } else { 2477 sc->ha_Fvirt = (U8 *)(sc->ha_Virt); 2478 } 2479 return (1); 2480 } /* asr_pci_map_mem */ 2481 2482 /* 2483 * A simplified copy of the real pci_map_int with additional 2484 * registration requirements. 2485 */ 2486 STATIC int 2487 asr_pci_map_int ( 2488 IN device_t tag, 2489 IN Asr_softc_t * sc) 2490 { 2491 int rid = 0; 2492 int error; 2493 2494 sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid, 2495 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); 2496 if (sc->ha_irq_res == (struct resource *)NULL) { 2497 return (0); 2498 } 2499 error = bus_setup_intr(tag, sc->ha_irq_res, 0, 2500 (driver_intr_t *)asr_intr, (void *)sc, 2501 &(sc->ha_intr), NULL); 2502 if (error) { 2503 return (0); 2504 } 2505 sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char)); 2506 return (1); 2507 } /* asr_pci_map_int */ 2508 2509 /* 2510 * Attach the devices, and virtual devices to the driver list. 2511 */ 2512 STATIC ATTACH_RET 2513 asr_attach (ATTACH_ARGS) 2514 { 2515 Asr_softc_t * sc; 2516 struct scsi_inquiry_data * iq; 2517 ATTACH_SET(); 2518 2519 sc = kmalloc(sizeof(*sc), M_DEVBUF, M_INTWAIT); 2520 if (Asr_softc == (Asr_softc_t *)NULL) { 2521 /* 2522 * Fixup the OS revision as saved in the dptsig for the 2523 * engine (dptioctl.h) to pick up. 
2524 */ 2525 bcopy (osrelease, &ASR_sig.dsDescription[16], 5); 2526 kprintf ("asr%d: major=%d\n", unit, asr_ops.head.maj); 2527 } 2528 /* 2529 * Initialize the software structure 2530 */ 2531 bzero (sc, sizeof(*sc)); 2532 LIST_INIT(&(sc->ha_ccb)); 2533 /* Link us into the HA list */ 2534 { 2535 Asr_softc_t **ha; 2536 2537 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next)); 2538 *(ha) = sc; 2539 } 2540 { 2541 PI2O_EXEC_STATUS_GET_REPLY status; 2542 int size; 2543 2544 /* 2545 * This is the real McCoy! 2546 */ 2547 if (!asr_pci_map_mem(tag, sc)) { 2548 kprintf ("asr%d: could not map memory\n", unit); 2549 ATTACH_RETURN(ENXIO); 2550 } 2551 /* Enable if not formerly enabled */ 2552 pci_write_config (tag, PCIR_COMMAND, 2553 pci_read_config (tag, PCIR_COMMAND, sizeof(char)) 2554 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char)); 2555 /* Knowledge is power, responsibility is direct */ 2556 { 2557 struct pci_devinfo { 2558 STAILQ_ENTRY(pci_devinfo) pci_links; 2559 struct resource_list resources; 2560 pcicfgregs cfg; 2561 } * dinfo = device_get_ivars(tag); 2562 sc->ha_pciBusNum = dinfo->cfg.bus; 2563 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3) 2564 | dinfo->cfg.func; 2565 } 2566 /* Check if the device is there? 
*/ 2567 if ((ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt) == 0) 2568 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)kmalloc ( 2569 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK)) 2570 == (PI2O_EXEC_STATUS_GET_REPLY)NULL) 2571 || (ASR_getStatus(sc->ha_Virt, sc->ha_Fvirt, status) == NULL)) { 2572 kprintf ("asr%d: could not initialize hardware\n", unit); 2573 ATTACH_RETURN(ENODEV); /* Get next, maybe better luck */ 2574 } 2575 sc->ha_SystemTable.OrganizationID = status->OrganizationID; 2576 sc->ha_SystemTable.IOP_ID = status->IOP_ID; 2577 sc->ha_SystemTable.I2oVersion = status->I2oVersion; 2578 sc->ha_SystemTable.IopState = status->IopState; 2579 sc->ha_SystemTable.MessengerType = status->MessengerType; 2580 sc->ha_SystemTable.InboundMessageFrameSize 2581 = status->InboundMFrameSize; 2582 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow 2583 = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO)); 2584 2585 if (!asr_pci_map_int(tag, (void *)sc)) { 2586 kprintf ("asr%d: could not map interrupt\n", unit); 2587 ATTACH_RETURN(ENXIO); 2588 } 2589 2590 /* Adjust the maximim inbound count */ 2591 if (((sc->ha_QueueSize 2592 = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) 2593 > MAX_INBOUND) 2594 || (sc->ha_QueueSize == 0)) { 2595 sc->ha_QueueSize = MAX_INBOUND; 2596 } 2597 2598 /* Adjust the maximum outbound count */ 2599 if (((sc->ha_Msgs_Count 2600 = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) 2601 > MAX_OUTBOUND) 2602 || (sc->ha_Msgs_Count == 0)) { 2603 sc->ha_Msgs_Count = MAX_OUTBOUND; 2604 } 2605 if (sc->ha_Msgs_Count > sc->ha_QueueSize) { 2606 sc->ha_Msgs_Count = sc->ha_QueueSize; 2607 } 2608 2609 /* Adjust the maximum SG size to adapter */ 2610 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize( 2611 status) << 2)) > MAX_INBOUND_SIZE) { 2612 size = MAX_INBOUND_SIZE; 2613 } 2614 kfree (status, M_TEMP); 2615 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2616 + sizeof(I2O_SG_ELEMENT)) / 
sizeof(I2O_SGE_SIMPLE_ELEMENT); 2617 } 2618 2619 /* 2620 * Only do a bus/HBA reset on the first time through. On this 2621 * first time through, we do not send a flush to the devices. 2622 */ 2623 if (ASR_init(sc) == 0) { 2624 struct BufferInfo { 2625 I2O_PARAM_RESULTS_LIST_HEADER Header; 2626 I2O_PARAM_READ_OPERATION_RESULT Read; 2627 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info; 2628 }; 2629 defAlignLong (struct BufferInfo, Buffer); 2630 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info; 2631 # define FW_DEBUG_BLED_OFFSET 8 2632 2633 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR) 2634 ASR_getParams(sc, 0, 2635 I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO, 2636 Buffer, sizeof(struct BufferInfo))) 2637 != (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) { 2638 sc->ha_blinkLED = sc->ha_Fvirt 2639 + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info) 2640 + FW_DEBUG_BLED_OFFSET; 2641 } 2642 if (ASR_acquireLct(sc) == 0) { 2643 (void)ASR_acquireHrt(sc); 2644 } 2645 } else { 2646 kprintf ("asr%d: failed to initialize\n", unit); 2647 ATTACH_RETURN(ENXIO); 2648 } 2649 /* 2650 * Add in additional probe responses for more channels. We 2651 * are reusing the variable `target' for a channel loop counter. 2652 * Done here because of we need both the acquireLct and 2653 * acquireHrt data. 
2654 */ 2655 { PI2O_LCT_ENTRY Device; 2656 2657 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY) 2658 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); 2659 ++Device) { 2660 if (Device->le_type == I2O_UNKNOWN) { 2661 continue; 2662 } 2663 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) { 2664 if (Device->le_target > sc->ha_MaxId) { 2665 sc->ha_MaxId = Device->le_target; 2666 } 2667 if (Device->le_lun > sc->ha_MaxLun) { 2668 sc->ha_MaxLun = Device->le_lun; 2669 } 2670 } 2671 if (((Device->le_type & I2O_PORT) != 0) 2672 && (Device->le_bus <= MAX_CHANNEL)) { 2673 /* Do not increase MaxId for efficiency */ 2674 sc->ha_adapter_target[Device->le_bus] 2675 = Device->le_target; 2676 } 2677 } 2678 } 2679 2680 2681 /* 2682 * Print the HBA model number as inquired from the card. 2683 */ 2684 2685 kprintf ("asr%d:", unit); 2686 2687 if ((iq = (struct scsi_inquiry_data *)kmalloc ( 2688 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK)) 2689 != (struct scsi_inquiry_data *)NULL) { 2690 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message); 2691 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr; 2692 int posted = 0; 2693 2694 bzero (iq, sizeof(struct scsi_inquiry_data)); 2695 bzero (Message_Ptr 2696 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message), 2697 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2698 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)); 2699 2700 I2O_MESSAGE_FRAME_setVersionOffset( 2701 (PI2O_MESSAGE_FRAME)Message_Ptr, 2702 I2O_VERSION_11 2703 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2704 - sizeof(I2O_SG_ELEMENT)) 2705 / sizeof(U32)) << 4)); 2706 I2O_MESSAGE_FRAME_setMessageSize( 2707 (PI2O_MESSAGE_FRAME)Message_Ptr, 2708 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2709 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) 2710 / sizeof(U32)); 2711 I2O_MESSAGE_FRAME_setInitiatorAddress ( 2712 (PI2O_MESSAGE_FRAME)Message_Ptr, 1); 2713 I2O_MESSAGE_FRAME_setFunction( 2714 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE); 
2715 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode ( 2716 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2717 I2O_SCSI_SCB_EXEC); 2718 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2719 I2O_SCB_FLAG_ENABLE_DISCONNECT 2720 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2721 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER); 2722 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1); 2723 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID( 2724 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2725 DPT_ORGANIZATION_ID); 2726 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6); 2727 Message_Ptr->CDB[0] = INQUIRY; 2728 Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data); 2729 if (Message_Ptr->CDB[4] == 0) { 2730 Message_Ptr->CDB[4] = 255; 2731 } 2732 2733 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2734 (I2O_SCB_FLAG_XFER_FROM_DEVICE 2735 | I2O_SCB_FLAG_ENABLE_DISCONNECT 2736 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2737 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)); 2738 2739 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount( 2740 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, 2741 sizeof(struct scsi_inquiry_data)); 2742 SG(&(Message_Ptr->SGL), 0, 2743 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, 2744 iq, sizeof(struct scsi_inquiry_data)); 2745 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 2746 2747 if (iq->vendor[0] && (iq->vendor[0] != ' ')) { 2748 kprintf (" "); 2749 ASR_prstring (iq->vendor, 8); 2750 ++posted; 2751 } 2752 if (iq->product[0] && (iq->product[0] != ' ')) { 2753 kprintf (" "); 2754 ASR_prstring (iq->product, 16); 2755 ++posted; 2756 } 2757 if (iq->revision[0] && (iq->revision[0] != ' ')) { 2758 kprintf (" FW Rev. "); 2759 ASR_prstring (iq->revision, 4); 2760 ++posted; 2761 } 2762 kfree ((caddr_t)iq, M_TEMP); 2763 if (posted) { 2764 kprintf (","); 2765 } 2766 } 2767 kprintf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1, 2768 (sc->ha_QueueSize > MAX_INBOUND) ? 
MAX_INBOUND : sc->ha_QueueSize); 2769 2770 /* 2771 * fill in the prototype cam_path. 2772 */ 2773 { 2774 int bus; 2775 union asr_ccb * ccb; 2776 2777 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) { 2778 kprintf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit); 2779 ATTACH_RETURN(ENOMEM); 2780 } 2781 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) { 2782 int QueueSize = sc->ha_QueueSize; 2783 2784 if (QueueSize > MAX_INBOUND) { 2785 QueueSize = MAX_INBOUND; 2786 } 2787 2788 /* 2789 * Construct our first channel SIM entry 2790 */ 2791 sc->ha_sim[bus] = cam_sim_alloc( 2792 asr_action, asr_poll, "asr", sc, 2793 unit, 1, QueueSize, NULL); 2794 if (sc->ha_sim[bus] == NULL) 2795 continue; 2796 2797 if (xpt_bus_register(sc->ha_sim[bus], bus) 2798 != CAM_SUCCESS) { 2799 cam_sim_free(sc->ha_sim[bus]); 2800 sc->ha_sim[bus] = NULL; 2801 continue; 2802 } 2803 2804 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL, 2805 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD, 2806 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2807 xpt_bus_deregister( 2808 cam_sim_path(sc->ha_sim[bus])); 2809 cam_sim_free(sc->ha_sim[bus]); 2810 sc->ha_sim[bus] = NULL; 2811 continue; 2812 } 2813 } 2814 asr_free_ccb (ccb); 2815 } 2816 /* 2817 * Generate the device node information 2818 */ 2819 make_dev(&asr_ops, unit, 0, 0, S_IRWXU, "rasr%d", unit); 2820 ATTACH_RETURN(0); 2821 } /* asr_attach */ 2822 2823 STATIC void 2824 asr_poll( 2825 IN struct cam_sim *sim) 2826 { 2827 asr_intr(cam_sim_softc(sim)); 2828 } /* asr_poll */ 2829 2830 STATIC void 2831 asr_action( 2832 IN struct cam_sim * sim, 2833 IN union ccb * ccb) 2834 { 2835 struct Asr_softc * sc; 2836 2837 debug_asr_printf ("asr_action(%lx,%lx{%x})\n", 2838 (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code); 2839 2840 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n")); 2841 2842 ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim); 2843 2844 switch (ccb->ccb_h.func_code) { 2845 2846 /* Common 
cases first */ 2847 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2848 { 2849 struct Message { 2850 char M[MAX_INBOUND_SIZE]; 2851 }; 2852 defAlignLong(struct Message,Message); 2853 PI2O_MESSAGE_FRAME Message_Ptr; 2854 2855 /* Reject incoming commands while we are resetting the card */ 2856 if (sc->ha_in_reset != HA_OPERATIONAL) { 2857 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2858 if (sc->ha_in_reset >= HA_OFF_LINE) { 2859 /* HBA is now off-line */ 2860 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR; 2861 } else { 2862 /* HBA currently resetting, try again later. */ 2863 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 2864 } 2865 debug_asr_cmd_printf (" e\n"); 2866 xpt_done(ccb); 2867 debug_asr_cmd_printf (" q\n"); 2868 break; 2869 } 2870 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 2871 kprintf( 2872 "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n", 2873 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), 2874 ccb->csio.cdb_io.cdb_bytes[0], 2875 cam_sim_bus(sim), 2876 ccb->ccb_h.target_id, 2877 ccb->ccb_h.target_lun); 2878 } 2879 debug_asr_cmd_printf ("(%d,%d,%d,%d)", 2880 cam_sim_unit(sim), 2881 cam_sim_bus(sim), 2882 ccb->ccb_h.target_id, 2883 ccb->ccb_h.target_lun); 2884 debug_asr_cmd_dump_ccb(ccb); 2885 2886 if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb, 2887 (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) { 2888 debug_asr_cmd2_printf ("TID=%x:\n", 2889 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID( 2890 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)); 2891 debug_asr_cmd2_dump_message(Message_Ptr); 2892 debug_asr_cmd1_printf (" q"); 2893 2894 if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) { 2895 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2896 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 2897 debug_asr_cmd_printf (" E\n"); 2898 xpt_done(ccb); 2899 } 2900 debug_asr_cmd_printf (" Q\n"); 2901 break; 2902 } 2903 /* 2904 * We will get here if there is no valid TID for the device 2905 * referenced in the scsi command packet. 
2906 */ 2907 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2908 ccb->ccb_h.status |= CAM_SEL_TIMEOUT; 2909 debug_asr_cmd_printf (" B\n"); 2910 xpt_done(ccb); 2911 break; 2912 } 2913 2914 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 2915 /* Rese HBA device ... */ 2916 asr_hbareset (sc); 2917 ccb->ccb_h.status = CAM_REQ_CMP; 2918 xpt_done(ccb); 2919 break; 2920 2921 # if (defined(REPORT_LUNS)) 2922 case REPORT_LUNS: 2923 # endif 2924 case XPT_ABORT: /* Abort the specified CCB */ 2925 /* XXX Implement */ 2926 ccb->ccb_h.status = CAM_REQ_INVALID; 2927 xpt_done(ccb); 2928 break; 2929 2930 case XPT_SET_TRAN_SETTINGS: 2931 /* XXX Implement */ 2932 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 2933 xpt_done(ccb); 2934 break; 2935 2936 case XPT_GET_TRAN_SETTINGS: 2937 /* Get default/user set transfer settings for the target */ 2938 { 2939 struct ccb_trans_settings *cts; 2940 u_int target_mask; 2941 2942 cts = &(ccb->cts); 2943 target_mask = 0x01 << ccb->ccb_h.target_id; 2944 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { 2945 cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB; 2946 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2947 cts->sync_period = 6; /* 40MHz */ 2948 cts->sync_offset = 15; 2949 2950 cts->valid = CCB_TRANS_SYNC_RATE_VALID 2951 | CCB_TRANS_SYNC_OFFSET_VALID 2952 | CCB_TRANS_BUS_WIDTH_VALID 2953 | CCB_TRANS_DISC_VALID 2954 | CCB_TRANS_TQ_VALID; 2955 ccb->ccb_h.status = CAM_REQ_CMP; 2956 } else { 2957 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 2958 } 2959 xpt_done(ccb); 2960 break; 2961 } 2962 2963 case XPT_CALC_GEOMETRY: 2964 { 2965 struct ccb_calc_geometry *ccg; 2966 u_int32_t size_mb; 2967 u_int32_t secs_per_cylinder; 2968 2969 ccg = &(ccb->ccg); 2970 size_mb = ccg->volume_size 2971 / ((1024L * 1024L) / ccg->block_size); 2972 2973 if (size_mb > 4096) { 2974 ccg->heads = 255; 2975 ccg->secs_per_track = 63; 2976 } else if (size_mb > 2048) { 2977 ccg->heads = 128; 2978 ccg->secs_per_track = 63; 2979 } else if (size_mb > 1024) { 2980 ccg->heads = 65; 
2981 ccg->secs_per_track = 63; 2982 } else { 2983 ccg->heads = 64; 2984 ccg->secs_per_track = 32; 2985 } 2986 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 2987 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 2988 ccb->ccb_h.status = CAM_REQ_CMP; 2989 xpt_done(ccb); 2990 break; 2991 } 2992 2993 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 2994 ASR_resetBus (sc, cam_sim_bus(sim)); 2995 ccb->ccb_h.status = CAM_REQ_CMP; 2996 xpt_done(ccb); 2997 break; 2998 2999 case XPT_TERM_IO: /* Terminate the I/O process */ 3000 /* XXX Implement */ 3001 ccb->ccb_h.status = CAM_REQ_INVALID; 3002 xpt_done(ccb); 3003 break; 3004 3005 case XPT_PATH_INQ: /* Path routing inquiry */ 3006 { 3007 struct ccb_pathinq *cpi = &(ccb->cpi); 3008 3009 cpi->version_num = 1; /* XXX??? */ 3010 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3011 cpi->target_sprt = 0; 3012 /* Not necessary to reset bus, done by HDM initialization */ 3013 cpi->hba_misc = PIM_NOBUSRESET; 3014 cpi->hba_eng_cnt = 0; 3015 cpi->max_target = sc->ha_MaxId; 3016 cpi->max_lun = sc->ha_MaxLun; 3017 cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)]; 3018 cpi->bus_id = cam_sim_bus(sim); 3019 cpi->base_transfer_speed = 3300; 3020 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 3021 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); 3022 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 3023 cpi->unit_number = cam_sim_unit(sim); 3024 cpi->ccb_h.status = CAM_REQ_CMP; 3025 xpt_done(ccb); 3026 break; 3027 } 3028 default: 3029 ccb->ccb_h.status = CAM_REQ_INVALID; 3030 xpt_done(ccb); 3031 break; 3032 } 3033 } /* asr_action */ 3034 3035 3036 /* 3037 * Handle processing of current CCB as pointed to by the Status. 
 */
STATIC int
asr_intr (
	IN Asr_softc_t	  * sc)
{
	/* Set to 1 once at least one reply has been consumed */
	OUT int		    processed;

	/*
	 * Drain the adapter's outbound reply FIFO while the status
	 * register keeps the indication raised.
	 * NOTE(review): the Status & Mask_InterruptsDisabled test is
	 * preserved as-is -- confirm its polarity against the I2O
	 * register specification.
	 */
	for (processed = 0;
	  sc->ha_Virt->Status & Mask_InterruptsDisabled;
	  processed = 1) {
		union asr_ccb			    * ccb;
		U32				      ReplyOffset;
		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME   Reply;

		/*
		 * Pop a reply MFA from the outbound FIFO. An EMPTY_QUEUE
		 * result is re-read once before giving up (presumably to
		 * ride out a transient empty indication -- TODO confirm
		 * against the hardware errata).
		 */
		if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
		 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
			break;
		}
		/* Translate the bus-relative offset into our virtual copy */
		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
		/*
		 * We do not need any (optional byteswapping) method access to
		 * the Initiator context field.
		 */
		ccb = (union asr_ccb *)(long)
		  I2O_MESSAGE_FRAME_getInitiatorContext64(
		    &(Reply->StdReplyFrame.StdMessageFrame));
		if (I2O_MESSAGE_FRAME_getMsgFlags(
		  &(Reply->StdReplyFrame.StdMessageFrame))
		 & I2O_MESSAGE_FLAGS_FAIL) {
			/* Failed frame: recover and retire the original MFA */
			defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
			PI2O_UTIL_NOP_MESSAGE Message_Ptr;
			U32 MessageOffset;

			MessageOffset = (u_long)
			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
			/*
			 * Get the Original Message Frame's address, and get
			 * it's Transaction Context into our space. (Currently
			 * unused at original authorship, but better to be
			 * safe than sorry). Straight copy means that we
			 * need not concern ourselves with the (optional
			 * byteswapping) method access.
			 */
			Reply->StdReplyFrame.TransactionContext
			  = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
			    (sc->ha_Fvirt + MessageOffset))->TransactionContext;
			/*
			 * For 64 bit machines, we need to reconstruct the
			 * 64 bit context.
			 */
			ccb = (union asr_ccb *)(long)
			  I2O_MESSAGE_FRAME_getInitiatorContext64(
			    &(Reply->StdReplyFrame.StdMessageFrame));
			/*
			 * Unique error code for command failure.
			 */
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply->StdReplyFrame), (u_int16_t)-2);
			/*
			 * Modify the message frame to contain a NOP and
			 * re-issue it to the controller.
			 */
			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
			  Message, sizeof(I2O_UTIL_NOP_MESSAGE));
#			if (I2O_UTIL_NOP != 0)
				I2O_MESSAGE_FRAME_setFunction (
				  &(Message_Ptr->StdMessageFrame),
				  I2O_UTIL_NOP);
#			endif
			/*
			 * Copy the packet out to the Original Message
			 */
			bcopy ((caddr_t)Message_Ptr,
			  sc->ha_Fvirt + MessageOffset,
			  sizeof(I2O_UTIL_NOP_MESSAGE));
			/*
			 * Issue the NOP
			 */
			sc->ha_Virt->ToFIFO = MessageOffset;
		}

		/*
		 * Asynchronous command with no return requirements,
		 * and a generic handler for immunity against odd error
		 * returns from the adapter.
		 */
		if (ccb == (union asr_ccb *)NULL) {
			/*
			 * Return Reply so that it can be used for the
			 * next command
			 */
			sc->ha_Virt->FromFIFO = ReplyOffset;
			continue;
		}

		/* Welease Wadjah! (and stop timeouts) */
		ASR_ccbRemove (sc, ccb);

		/* Map the I2O detailed status code onto a CAM status */
		switch (
		  I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
		    &(Reply->StdReplyFrame))) {

		case I2O_SCSI_DSC_SUCCESS:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP;
			break;

		case I2O_SCSI_DSC_CHECK_CONDITION:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID;
			break;

		case I2O_SCSI_DSC_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_BUS_BUSY:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_SCSI_BUSY;
			break;

		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_LUN_INVALID:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
			break;

		default:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		}
		/* Residual = requested transfer length minus bytes moved */
		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
			ccb->csio.resid -=
			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
			    Reply);
		}

		/* Sense data in reply packet */
		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);

			if (size) {
				/* Clamp to the smallest of the three limits */
				if (size > sizeof(ccb->csio.sense_data)) {
					size = sizeof(ccb->csio.sense_data);
				}
				if (size > I2O_SCSI_SENSE_DATA_SZ) {
					size = I2O_SCSI_SENSE_DATA_SZ;
				}
				if ((ccb->csio.sense_len)
				 && (size > ccb->csio.sense_len)) {
					size = ccb->csio.sense_len;
				}
				bcopy ((caddr_t)Reply->SenseData,
				  (caddr_t)&(ccb->csio.sense_data), size);
			}
		}

		/*
		 * Return Reply so that it can be used for the next command
		 * since we have no more need for it now
		 */
		sc->ha_Virt->FromFIFO = ReplyOffset;

		if (ccb->ccb_h.path) {
			xpt_done ((union ccb *)ccb);
		} else {
			/* Internal command: the issuer sleeps on the ccb */
			wakeup ((caddr_t)ccb);
		}
	}
	return (processed);
} /* asr_intr */

#undef QueueSize	/* Grrrr */
#undef SG_Size		/* Grrrr */

/*
 * Meant to be included at the bottom of asr.c !!!
 */

/*
 * Included here as hard coded. Done because other necessary include
 * files utilize C++ comment structures which make them a nuisance to
 * included here just to pick up these three typedefs.
3243 */ 3244 typedef U32 DPT_TAG_T; 3245 typedef U32 DPT_MSG_T; 3246 typedef U32 DPT_RTN_T; 3247 3248 #undef SCSI_RESET /* Conflicts with "scsi/scsiconf.h" defintion */ 3249 #include "osd_unix.h" 3250 3251 #define asr_unit(dev) minor(dev) 3252 3253 STATIC INLINE Asr_softc_t * 3254 ASR_get_sc ( 3255 IN cdev_t dev) 3256 { 3257 int unit = asr_unit(dev); 3258 OUT Asr_softc_t * sc = Asr_softc; 3259 3260 while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) { 3261 sc = sc->ha_next; 3262 } 3263 return (sc); 3264 } /* ASR_get_sc */ 3265 3266 STATIC u_int8_t ASR_ctlr_held; 3267 #if (!defined(UNREFERENCED_PARAMETER)) 3268 # define UNREFERENCED_PARAMETER(x) (void)(x) 3269 #endif 3270 3271 STATIC int 3272 asr_open(struct dev_open_args *ap) 3273 { 3274 cdev_t dev = ap->a_head.a_dev; 3275 OUT int error; 3276 3277 if (ASR_get_sc (dev) == (Asr_softc_t *)NULL) { 3278 return (ENODEV); 3279 } 3280 crit_enter(); 3281 if (ASR_ctlr_held) { 3282 error = EBUSY; 3283 } else if ((error = suser_cred(ap->a_cred, 0)) == 0) { 3284 ++ASR_ctlr_held; 3285 } 3286 crit_exit(); 3287 return (error); 3288 } /* asr_open */ 3289 3290 STATIC int 3291 asr_close(struct dev_close_args *ap) 3292 { 3293 ASR_ctlr_held = 0; 3294 return (0); 3295 } /* asr_close */ 3296 3297 3298 /*-------------------------------------------------------------------------*/ 3299 /* Function ASR_queue_i */ 3300 /*-------------------------------------------------------------------------*/ 3301 /* The Parameters Passed To This Function Are : */ 3302 /* Asr_softc_t * : HBA miniport driver's adapter data storage. */ 3303 /* PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command */ 3304 /* I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure */ 3305 /* */ 3306 /* This Function Will Take The User Request Packet And Convert It To An */ 3307 /* I2O MSG And Send It Off To The Adapter. 
 */
/*									    */
/* Return : 0 For OK, Error Code Otherwise				    */
/*-------------------------------------------------------------------------*/
STATIC INLINE int
ASR_queue_i(
	IN Asr_softc_t	  * sc,
	INOUT PI2O_MESSAGE_FRAME	Packet)
{
	union asr_ccb		      * ccb;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
	PI2O_MESSAGE_FRAME		Message_Ptr;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
	int				MessageSizeInBytes;
	int				ReplySizeInBytes;
	int				error;
	int				s;
	/* Scatter Gather buffer list */
	struct ioctlSgList_S {
		SLIST_ENTRY(ioctlSgList_S) link;
		caddr_t		  UserSpace;
		I2O_FLAGS_COUNT	  FlagsCount;
		char		  KernelSpace[sizeof(long)];
	}			      * elm;
	/* Generates a `first' entry */
	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;

	/* Adapter in BlinkLed (fault) state: refuse user commands */
	if (ASR_getBlinkLedCode(sc)) {
		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
		  ASR_getBlinkLedCode(sc));
		return (EIO);
	}
	/*
	 * Copy in just the fixed message header first, to learn the
	 * function code and total message size before committing to a
	 * full-size copy.
	 * NOTE(review): with M_WAITOK, kmalloc presumably never returns
	 * NULL; the NULL checks are retained defensively from the original.
	 */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
	  == (PI2O_MESSAGE_FRAME)NULL) {
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		kfree (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
		return (error);
	}
	/* Acquire information to determine type of packet */
	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
	/* The offset of the reply information within the user packet */
	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
	  + MessageSizeInBytes);

	/* Check if the message is a synchronous initialization command */
	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
	kfree (Message_Ptr, M_TEMP);
	switch (s) {

	case I2O_EXEC_IOP_RESET:
	{	U32 status;

		status = ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("resetIOP done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_STATUS_GET:
	{	I2O_EXEC_STATUS_GET_REPLY status;

		if (ASR_getStatus (sc->ha_Virt, sc->ha_Fvirt, &status)
		  == (PI2O_EXEC_STATUS_GET_REPLY)NULL) {
			debug_usr_cmd_printf ("getStatus failed\n");
			return (ENXIO);
		}
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("getStatus done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_OUTBOUND_INIT:
	{	U32 status;

		status = ASR_initOutBound(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("intOutBound done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}
	}

	/* Determine if the message size is valid */
	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
		debug_usr_cmd_printf ("Packet size %d incorrect\n",
		  MessageSizeInBytes);
		return (EINVAL);
	}

	/* Now copy in the complete, size-validated message */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (MessageSizeInBytes,
	  M_TEMP, M_WAITOK)) == (PI2O_MESSAGE_FRAME)NULL) {
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  MessageSizeInBytes);
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  MessageSizeInBytes)) != 0) {
		kfree (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
		  MessageSizeInBytes, error);
		return (error);
	}

	/* Check the size of the reply frame, and start constructing */

	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
	  == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
		kfree (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		kfree (Reply_Ptr, M_TEMP);
		kfree (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame, errno=%d\n",
		  error);
		return (error);
	}
	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
	kfree (Reply_Ptr, M_TEMP);
	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
		kfree (Message_Ptr, M_TEMP);
		/*
		 * NOTE(review): error is necessarily 0 here (a non-zero
		 * value returned above), so the errno in this message is
		 * vestigial.
		 */
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame[%d], errno=%d\n",
		  ReplySizeInBytes, error);
		return (EINVAL);
	}

	/* Allocate the real reply frame, at least error-reply sized */
	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
	  ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
	    ? ReplySizeInBytes
	    : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
	  M_TEMP, M_WAITOK)) == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
		kfree (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  ReplySizeInBytes);
		return (ENOMEM);
	}
	(void)ASR_fillMessage ((char *)Reply_Ptr, ReplySizeInBytes);
	/* Mirror contexts into the reply; straight copies avoid the
	 * (optional byteswapping) method access. */
	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
	  = Message_Ptr->InitiatorContext;
	Reply_Ptr->StdReplyFrame.TransactionContext
	  = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
	I2O_MESSAGE_FRAME_setMsgFlags(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
	  I2O_MESSAGE_FRAME_getMsgFlags(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
	      | I2O_MESSAGE_FLAGS_REPLY);

	/* Check if the message is a special case command */
	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
		  Message_Ptr) & 0xF0) >> 2)) {
			kfree (Message_Ptr, M_TEMP);
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply_Ptr->StdReplyFrame),
			  (ASR_setSysTab(sc) != CAM_REQ_CMP));
			I2O_MESSAGE_FRAME_setMessageSize(
			  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
			  sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
			  ReplySizeInBytes);
			kfree (Reply_Ptr, M_TEMP);
			return (error);
		}
	}

	/* Deal in the general case */
	/* First allocate and optionally copy in each scatter gather element */
	SLIST_INIT(&sgList);
	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
		PI2O_SGE_SIMPLE_ELEMENT sg;

		/*
		 * since this code is reused in several systems, code
		 * efficiency is greater by using a shift operation rather
		 * than a divide by sizeof(u_int32_t).
		 */
		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
		    >> 2));
		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
		  + MessageSizeInBytes)) {
			caddr_t v;
			int len;

			/* Only simple-address SG elements are supported */
			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
				error = EINVAL;
				break;
			}
			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
			  sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
			  + ((I2O_MESSAGE_FRAME_getVersionOffset(
				Message_Ptr) & 0xF0) >> 2)),
			  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);

			/*
			 * Allocate a kernel bounce buffer per element;
			 * KernelSpace is a flexible tail sized by len.
			 */
			if ((elm = (struct ioctlSgList_S *)kmalloc (
			  sizeof(*elm) - sizeof(elm->KernelSpace) + len,
			  M_TEMP, M_WAITOK))
			  == (struct ioctlSgList_S *)NULL) {
				debug_usr_cmd_printf (
				  "Failed to allocate SG[%d]\n", len);
				error = ENOMEM;
				break;
			}
			SLIST_INSERT_HEAD(&sgList, elm, link);
			elm->FlagsCount = sg->FlagsCount;
			elm->UserSpace = (caddr_t)
			  (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
			v = elm->KernelSpace;
			/* Copy in outgoing data (DIR bit could be invalid) */
			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
			  != 0) {
				break;
			}
			/*
			 * If the buffer is not contiguous, lets
			 * break up the scatter/gather entries.
			 */
			while ((len > 0)
			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
				int next, base, span;

				span = 0;
				next = base = KVTOPHYS(v);
				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
				  base);

				/* How far can we go physically contiguously */
				while ((len > 0) && (base == next)) {
					int size;

					next = trunc_page(base) + PAGE_SIZE;
					size = next - base;
					if (size > len) {
						size = len;
					}
					span += size;
					v += size;
					len -= size;
					base = KVTOPHYS(v);
				}

				/* Construct the Flags */
				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
				  span);
				{
					int flags = I2O_FLAGS_COUNT_getFlags(
					  &(elm->FlagsCount));
					/* Any remaining length? */
					if (len > 0) {
						flags &=
						  ~(I2O_SGL_FLAGS_END_OF_BUFFER
						   | I2O_SGL_FLAGS_LAST_ELEMENT);
					}
					I2O_FLAGS_COUNT_setFlags(
					  &(sg->FlagsCount), flags);
				}

				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
				  sg - (PI2O_SGE_SIMPLE_ELEMENT)
				    ((char *)Message_Ptr
				  + ((I2O_MESSAGE_FRAME_getVersionOffset(
					Message_Ptr) & 0xF0) >> 2)),
				  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
				  span);
				if (len <= 0) {
					break;
				}

				/*
				 * Incrementing requires resizing of the
				 * packet, and moving up the existing SG
				 * elements.
				 */
				++sg;
				MessageSizeInBytes += sizeof(*sg);
				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
				  I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
				  + (sizeof(*sg) / sizeof(U32)));
				{
					PI2O_MESSAGE_FRAME NewMessage_Ptr;

					/*
					 * Grow the frame by one SG slot:
					 * copy the head, then duplicate the
					 * current element into the gap.
					 */
					if ((NewMessage_Ptr
					  = (PI2O_MESSAGE_FRAME)
					    kmalloc (MessageSizeInBytes,
					      M_TEMP, M_WAITOK))
					  == (PI2O_MESSAGE_FRAME)NULL) {
						debug_usr_cmd_printf (
						  "Failed to acquire frame[%d] memory\n",
						  MessageSizeInBytes);
						error = ENOMEM;
						break;
					}
					span = ((caddr_t)sg)
					     - (caddr_t)Message_Ptr;
					bcopy ((caddr_t)Message_Ptr,
					  (caddr_t)NewMessage_Ptr, span);
					bcopy ((caddr_t)(sg-1),
					  ((caddr_t)NewMessage_Ptr) + span,
					  MessageSizeInBytes - span);
					kfree (Message_Ptr, M_TEMP);
					sg = (PI2O_SGE_SIMPLE_ELEMENT)
					  (((caddr_t)NewMessage_Ptr) + span);
					Message_Ptr = NewMessage_Ptr;
				}
			}
			if ((error)
			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
				break;
			}
			++sg;
		}
		if (error) {
			/* Unwind: release all bounce buffers and frames */
			while ((elm = SLIST_FIRST(&sgList))
			  != (struct ioctlSgList_S *)NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				kfree (elm, M_TEMP);
			}
			kfree (Reply_Ptr, M_TEMP);
			kfree (Message_Ptr, M_TEMP);
			return (error);
		}
	}

	debug_usr_cmd_printf ("Inbound: ");
	debug_usr_cmd_dump_message(Message_Ptr);

	/* Send the command */
	if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
		/* Free up in-kernel buffers */
		while ((elm = SLIST_FIRST(&sgList))
		  != (struct ioctlSgList_S *)NULL) {
			SLIST_REMOVE_HEAD(&sgList, link);
			kfree (elm, M_TEMP);
		}
		kfree (Reply_Ptr, M_TEMP);
		kfree (Message_Ptr, M_TEMP);
		return (ENOMEM);
	}

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(
	  (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);

	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	kfree (Message_Ptr, M_TEMP);

	/*
	 * Wait for the board to report a finished instruction.
	 */
	crit_enter();
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		if (ASR_getBlinkLedCode(sc)) {
			/* Reset Adapter */
			kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ASR_getBlinkLedCode(sc));
			if (ASR_reset (sc) == ENXIO) {
				/* Command Cleanup */
				ASR_ccbRemove(sc, ccb);
			}
			crit_exit();
			/* Free up in-kernel buffers */
			while ((elm = SLIST_FIRST(&sgList))
			  != (struct ioctlSgList_S *)NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				kfree (elm, M_TEMP);
			}
			kfree (Reply_Ptr, M_TEMP);
			asr_free_ccb(ccb);
			return (EIO);
		}
		/* Check every second for BlinkLed */
		tsleep((caddr_t)ccb, 0, "asr", hz);
	}
	crit_exit();

	debug_usr_cmd_printf ("Outbound: ");
	debug_usr_cmd_dump_message(Reply_Ptr);

	/* 0 on success, 1 on anything but CAM_REQ_CMP */
	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
	  &(Reply_Ptr->StdReplyFrame),
	  (ccb->ccb_h.status != CAM_REQ_CMP));

	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
		  ccb->csio.dxfer_len - ccb->csio.resid);
	}
	/* Append autosense data if the caller left room for it */
	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
	 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	 - I2O_SCSI_SENSE_DATA_SZ))) {
		int size = ReplySizeInBytes
		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
		  - I2O_SCSI_SENSE_DATA_SZ;

		if (size > sizeof(ccb->csio.sense_data)) {
			size = sizeof(ccb->csio.sense_data);
		}
		bcopy ((caddr_t)&(ccb->csio.sense_data),
		  (caddr_t)Reply_Ptr->SenseData, size);
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
		  Reply_Ptr, size);
	}

	/* Free up in-kernel buffers */
	while ((elm = SLIST_FIRST(&sgList)) != (struct ioctlSgList_S *)NULL) {
		/* Copy out as necessary */
		if ((error == 0)
		/* DIR bit considered `valid', error due to ignorance works */
		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
		  & I2O_SGL_FLAGS_DIR) == 0)) {
			error = copyout ((caddr_t)(elm->KernelSpace),
			  elm->UserSpace,
			  I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
		}
		SLIST_REMOVE_HEAD(&sgList, link);
		kfree (elm, M_TEMP);
	}
	if (error == 0) {
		/* Copy reply frame to user space */
		error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
		  ReplySizeInBytes);
	}
	kfree (Reply_Ptr, M_TEMP);
	asr_free_ccb(ccb);

	return (error);
} /* ASR_queue_i */

/*----------------------------------------------------------------------*/
/*			    Function asr_ioctl				 */
/*----------------------------------------------------------------------*/
/* The parameters passed to this function are :				 */
/*     dev  : Device number.						 */
/*     cmd  : Ioctl Command						 */
/*     data : User Argument Passed In.
 */
/*     flag : Mode Parameter						 */
/*     proc : Process Parameter						 */
/*									 */
/* This function is the user interface into this adapter driver	 */
/*									 */
/* Return : zero if OK, error code if not				 */
/*----------------------------------------------------------------------*/

STATIC int
asr_ioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	caddr_t data = ap->a_data;
	int		  i, j;
	OUT int		  error = 0;
	Asr_softc_t	* sc = ASR_get_sc (dev);

	/* Falls through to EINVAL when no adapter matches the device */
	if (sc != (Asr_softc_t *)NULL)
	switch(ap->a_cmd) {

	case DPT_SIGNATURE:
#	if (dsDescription_size != 50)
	    case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
#	endif
		/* High word set: new-style ioctl, data is the buffer */
		if (ap->a_cmd & 0xFFFF0000) {
			(void)bcopy ((caddr_t)(&ASR_sig), data,
			    sizeof(dpt_sig_S));
			return (0);
		}
	/* Traditional version of the ioctl interface */
	case DPT_SIGNATURE & 0x0000FFFF:
		/* Old style: data holds a user pointer to copy out to */
		return (copyout ((caddr_t)(&ASR_sig), *((caddr_t *)data),
		    sizeof(dpt_sig_S)));

	/* Traditional version of the ioctl interface */
	case DPT_CTRLINFO & 0x0000FFFF:
	case DPT_CTRLINFO: {
		/* Controller information record returned to user space */
		struct {
			u_int16_t length;
			u_int16_t drvrHBAnum;
			u_int32_t baseAddr;
			u_int16_t blinkState;
			u_int8_t  pciBusNum;
			u_int8_t  pciDeviceNum;
			u_int16_t hbaFlags;
			u_int16_t Interrupt;
			u_int32_t reserved1;
			u_int32_t reserved2;
			u_int32_t reserved3;
		} CtlrInfo;

		bzero (&CtlrInfo, sizeof(CtlrInfo));
		CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
		CtlrInfo.drvrHBAnum = asr_unit(dev);
		CtlrInfo.baseAddr = (u_long)sc->ha_Base;
		/* -1 (no BlinkLed) is reported as 0 */
		i = ASR_getBlinkLedCode (sc);
		if (i == -1) {
			i = 0;
		}
		CtlrInfo.blinkState = i;
		CtlrInfo.pciBusNum = sc->ha_pciBusNum;
		CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
#define	FLG_OSD_PCI_VALID 0x0001
#define	FLG_OSD_DMA	  0x0002
#define	FLG_OSD_I2O	  0x0004
		CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		CtlrInfo.Interrupt = sc->ha_irq;
		if (ap->a_cmd & 0xFFFF0000) {
			bcopy (&CtlrInfo, data, sizeof(CtlrInfo));
		} else {
			error = copyout (&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
		}
	}	return (error);

	/* Traditional version of the ioctl interface */
	case DPT_SYSINFO & 0x0000FFFF:
	case DPT_SYSINFO: {
		sysInfo_S	Info;
		char	      * cp;
		/* Kernel Specific ptok `hack' */
#		define		ptok(a) ((char *)(a) + KERNBASE)

		bzero (&Info, sizeof(Info));

		/* Appears I am the only person in the Kernel doing this */
		/* Ports 0x70/0x71 are the CMOS RTC index/data pair */
		outb (0x70, 0x12);
		i = inb(0x71);
		j = i >> 4;
		/*
		 * NOTE(review): both drive tests compare i (the whole
		 * byte) to 0x0f rather than the extracted nibble j --
		 * looks suspicious; compare with the dpt driver before
		 * changing.
		 */
		if (i == 0x0f) {
			outb (0x70, 0x19);
			j = inb (0x71);
		}
		Info.drive0CMOS = j;

		j = i & 0x0f;
		if (i == 0x0f) {
			outb (0x70, 0x1a);
			j = inb (0x71);
		}
		Info.drive1CMOS = j;

		/* BIOS data area drive count at 0x475 */
		Info.numDrives = *((char *)ptok(0x475));

		Info.processorFamily = ASR_sig.dsProcessorFamily;
		switch (cpu) {
		case CPU_386SX: case CPU_386:
			Info.processorType = PROC_386; break;
		case CPU_486SX: case CPU_486:
			Info.processorType = PROC_486; break;
		case CPU_586:
			Info.processorType = PROC_PENTIUM; break;
		case CPU_686:
			Info.processorType = PROC_SEXIUM; break;
		}
		Info.osType = OS_BSDI_UNIX;
		Info.osMajorVersion = osrelease[0] - '0';
		Info.osMinorVersion = osrelease[2] - '0';
		/* Info.osRevision = 0; */
		/* Info.osSubRevision = 0; */
		Info.busType = SI_PCI_BUS;
		Info.flags = SI_CMOS_Valid | SI_NumDrivesValid
		       | SI_OSversionValid | SI_BusTypeValid | SI_NO_SmartROM;

		/* Go Out And Look For I2O SmartROM */
		/* Scan the option-ROM window for an 0xAA55 signature */
		for(j = 0xC8000; j < 0xE0000; j += 2048) {
			int k;

			cp = ptok(j);
			if (*((unsigned short *)cp) != 0xAA55) {
				continue;
			}
			j += (cp[2] * 512) - 2048;
			/* Match the "S  "/"I20" ROM identification words */
			if ((*((u_long *)(cp + 6))
			  != ('S' + (' ' * 256) + (' ' * 65536L)))
			 || (*((u_long *)(cp + 10))
			  != ('I' + ('2' * 256) + ('0' * 65536L)))) {
				continue;
			}
			cp += 0x24;
			/* Look for the " v" version marker */
			for (k = 0; k < 64; ++k) {
				if (*((unsigned short *)cp)
				 == (' ' + ('v' * 256))) {
					break;
				}
			}
			if (k < 64) {
				Info.smartROMMajorVersion
				    = *((unsigned char *)(cp += 4)) - '0';
				Info.smartROMMinorVersion
				    = *((unsigned char *)(cp += 2));
				Info.smartROMRevision
				    = *((unsigned char *)(++cp));
				Info.flags |= SI_SmartROMverValid;
				Info.flags &= ~SI_NO_SmartROM;
				break;
			}
		}
		/* Get The Conventional Memory Size From CMOS */
		outb (0x70, 0x16);
		j = inb (0x71);
		j <<= 8;
		outb (0x70, 0x15);
		j |= inb(0x71);
		Info.conventionalMemSize = j;

		/* Get The Extended Memory Found At Power On From CMOS */
		outb (0x70, 0x31);
		j = inb (0x71);
		j <<= 8;
		outb (0x70, 0x30);
		j |= inb(0x71);
		Info.extendedMemSize = j;
		Info.flags |= SI_MemorySizeValid;

#		if (defined(THIS_IS_BROKEN))
		/* If There Is 1 or 2 Drives Found, Set Up Drive Parameters */
		if (Info.numDrives > 0) {
			/*
			 *	Get The Pointer From Int 41 For The First
			 *	Drive Parameters
			 */
			j = ((unsigned)(*((unsigned short *)ptok(0x104+2))) << 4)
			  + (unsigned)(*((unsigned short *)ptok(0x104+0)));
			/*
			 * It appears that SmartROM's Int41/Int46 pointers
			 * use memory that gets stepped on by the kernel
			 * loading. We no longer have access to this
			 * geometry information but try anyways (!?)
			 */
			Info.drives[0].cylinders = *((unsigned char *)ptok(j));
			++j;
			Info.drives[0].cylinders += ((int)*((unsigned char *)
			  ptok(j))) << 8;
			++j;
			Info.drives[0].heads = *((unsigned char *)ptok(j));
			j += 12;
			Info.drives[0].sectors = *((unsigned char *)ptok(j));
			Info.flags |= SI_DriveParamsValid;
			if ((Info.drives[0].cylinders == 0)
			 || (Info.drives[0].heads == 0)
			 || (Info.drives[0].sectors == 0)) {
				Info.flags &= ~SI_DriveParamsValid;
			}
			if (Info.numDrives > 1) {
				/*
				 *	Get The Pointer From Int 46 For The
				 *	Second Drive Parameters
				 */
				j = ((unsigned)(*((unsigned short *)ptok(0x118+2))) << 4)
				  + (unsigned)(*((unsigned short *)ptok(0x118+0)));
				Info.drives[1].cylinders = *((unsigned char *)
				  ptok(j));
				++j;
				Info.drives[1].cylinders += ((int)
				  *((unsigned char *)ptok(j))) << 8;
				++j;
				Info.drives[1].heads = *((unsigned char *)
				  ptok(j));
				j += 12;
				Info.drives[1].sectors = *((unsigned char *)
				  ptok(j));
				if ((Info.drives[1].cylinders == 0)
				 || (Info.drives[1].heads == 0)
				 || (Info.drives[1].sectors == 0)) {
					Info.flags &= ~SI_DriveParamsValid;
				}
			}
		}
#		endif
		/* Copy Out The Info Structure To The User */
		if (ap->a_cmd & 0xFFFF0000) {
			bcopy (&Info, data, sizeof(Info));
		} else {
			error = copyout (&Info, *(caddr_t *)data, sizeof(Info));
		}
		return (error); }

	/* Get The BlinkLED State */
	case DPT_BLINKLED:
		i = ASR_getBlinkLedCode (sc);
		if (i == -1) {
			i = 0;
		}
		if (ap->a_cmd & 0xFFFF0000) {
			bcopy ((caddr_t)(&i), data, sizeof(i));
		} else {
			error = copyout (&i, *(caddr_t *)data, sizeof(i));
		}
		break;

	/* Send an I2O command */
	case I2OUSRCMD:
		return (ASR_queue_i (sc, *((PI2O_MESSAGE_FRAME *)data)));

	/* Reset and re-initialize the adapter */
	case I2ORESETCMD:
		return (ASR_reset (sc));

	/* Rescan the LCT table and resynchronize the information */
	case I2ORESCANCMD:
		return (ASR_rescan (sc));
	}
	return (EINVAL);
} /* asr_ioctl */