1 /* $FreeBSD: src/sys/dev/asr/asr.c,v 1.3.2.2 2001/08/23 05:21:29 scottl Exp $ */ 2 /* $DragonFly: src/sys/dev/raid/asr/asr.c,v 1.11 2003/11/20 22:07:33 dillon Exp $ */ 3 /* 4 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation 5 * Copyright (c) 2000-2001 Adaptec Corporation 6 * All rights reserved. 7 * 8 * TERMS AND CONDITIONS OF USE 9 * 10 * Redistribution and use in source form, with or without modification, are 11 * permitted provided that redistributions of source code must retain the 12 * above copyright notice, this list of conditions and the following disclaimer. 13 * 14 * This software is provided `as is' by Adaptec and any express or implied 15 * warranties, including, but not limited to, the implied warranties of 16 * merchantability and fitness for a particular purpose, are disclaimed. In no 17 * event shall Adaptec be liable for any direct, indirect, incidental, special, 18 * exemplary or consequential damages (including, but not limited to, 19 * procurement of substitute goods or services; loss of use, data, or profits; 20 * or business interruptions) however caused and on any theory of liability, 21 * whether in contract, strict liability, or tort (including negligence or 22 * otherwise) arising in any way out of the use of this driver software, even 23 * if advised of the possibility of such damage. 24 * 25 * SCSI I2O host adapter driver 26 * 27 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com 28 * - The 2000S and 2005S do not initialize on some machines, 29 * increased timeout to 255ms from 50ms for the StatusGet 30 * command. 31 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com 32 * - I knew this one was too good to be true. The error return 33 * on ioctl commands needs to be compared to CAM_REQ_CMP, not 34 * to the bit masked status. 35 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com 36 * - The 2005S that was supported is affectionately called the 37 * Conjoined BAR Firmware. 
In order to support RAID-5 in a 38 * 16MB low-cost configuration, Firmware was forced to go 39 * to a Split BAR Firmware. This requires a separate IOP and 40 * Messaging base address. 41 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com 42 * - Handle support for 2005S Zero Channel RAID solution. 43 * - System locked up if the Adapter locked up. Do not try 44 * to send other commands if the resetIOP command fails. The 45 * fail outstanding command discovery loop was flawed as the 46 * removal of the command from the list prevented discovering 47 * all the commands. 48 * - Comment changes to clarify driver. 49 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM. 50 * - We do not use the AC_FOUND_DEV event because of I2O. 51 * Removed asr_async. 52 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org, 53 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com. 54 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0 55 * mode as this is confused with competitor adapters in run 56 * mode. 57 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove 58 * to prevent operating system panic. 59 * - moved default major number to 154 from 97. 60 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com 61 * - The controller is not actually an ASR (Adaptec SCSI RAID) 62 * series that is visible, it's more of an internal code name. 63 * remove any visible references within reason for now. 64 * - bus_ptr->LUN was not correctly zeroed when initially 65 * allocated causing a possible panic of the operating system 66 * during boot. 67 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com 68 * - Code always fails for ASR_getTid affecting performance. 69 * - initiated a set of changes that resulted from a formal 70 * code inspection by Mark_Salyzyn@adaptec.com, 71 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com, 72 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com. 
73 * Their findings were focussed on the LCT & TID handler, and 74 * all resulting changes were to improve code readability, 75 * consistency or have a positive effect on performance. 76 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com 77 * - Passthrough returned an incorrect error. 78 * - Passthrough did not migrate the intrinsic scsi layer wakeup 79 * on command completion. 80 * - generate control device nodes using make_dev and delete_dev. 81 * - Performance affected by TID caching reallocing. 82 * - Made suggested changes by Justin_Gibbs@adaptec.com 83 * - use splcam instead of splbio. 84 * - use cam_imask instead of bio_imask. 85 * - use u_int8_t instead of u_char. 86 * - use u_int16_t instead of u_short. 87 * - use u_int32_t instead of u_long where appropriate. 88 * - use 64 bit context handler instead of 32 bit. 89 * - create_ccb should only allocate the worst case 90 * requirements for the driver since CAM may evolve 91 * making union ccb much larger than needed here. 92 * renamed create_ccb to asr_alloc_ccb. 93 * - go nutz justifying all debug prints as macros 94 * defined at the top and remove unsightly ifdefs. 95 * - INLINE STATIC viewed as confusing. Historically 96 * utilized to affect code performance and debug 97 * issues in OS, Compiler or OEM specific situations. 98 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com 99 * - Ported from FreeBSD 2.2.X DPT I2O driver. 100 * changed struct scsi_xfer to union ccb/struct ccb_hdr 101 * changed variable name xs to ccb 102 * changed struct scsi_link to struct cam_path 103 * changed struct scsibus_data to struct cam_sim 104 * stopped using fordriver for holding on to the TID 105 * use proprietary packet creation instead of scsi_inquire 106 * CAM layer sends synchronize commands. 
107 */ 108 109 #define ASR_VERSION 1 110 #define ASR_REVISION '0' 111 #define ASR_SUBREVISION '8' 112 #define ASR_MONTH 8 113 #define ASR_DAY 21 114 #define ASR_YEAR 2001 - 1980 115 116 /* 117 * Debug macros to reduce the unsightly ifdefs 118 */ 119 #if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD)) 120 # define debug_asr_message(message) \ 121 { \ 122 u_int32_t * pointer = (u_int32_t *)message; \ 123 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\ 124 u_int32_t counter = 0; \ 125 \ 126 while (length--) { \ 127 printf ("%08lx%c", (u_long)*(pointer++), \ 128 (((++counter & 7) == 0) || (length == 0)) \ 129 ? '\n' \ 130 : ' '); \ 131 } \ 132 } 133 #endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */ 134 135 #if (defined(DEBUG_ASR)) 136 /* Breaks on none STDC based compilers :-( */ 137 # define debug_asr_printf(fmt,args...) printf(fmt, ##args) 138 # define debug_asr_dump_message(message) debug_asr_message(message) 139 # define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path); 140 /* None fatal version of the ASSERT macro */ 141 # if (defined(__STDC__)) 142 # define ASSERT(phrase) if(!(phrase))printf(#phrase " at line %d file %s\n",__LINE__,__FILE__) 143 # else 144 # define ASSERT(phrase) if(!(phrase))printf("phrase" " at line %d file %s\n",__LINE__,__FILE__) 145 # endif 146 #else /* DEBUG_ASR */ 147 # define debug_asr_printf(fmt,args...) 148 # define debug_asr_dump_message(message) 149 # define debug_asr_print_path(ccb) 150 # define ASSERT(x) 151 #endif /* DEBUG_ASR */ 152 153 /* 154 * If DEBUG_ASR_CMD is defined: 155 * 0 - Display incoming SCSI commands 156 * 1 - add in a quick character before queueing. 157 * 2 - add in outgoing message frames. 158 */ 159 #if (defined(DEBUG_ASR_CMD)) 160 # define debug_asr_cmd_printf(fmt,args...) 
printf(fmt,##args) 161 # define debug_asr_dump_ccb(ccb) \ 162 { \ 163 u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io); \ 164 int len = ccb->csio.cdb_len; \ 165 \ 166 while (len) { \ 167 debug_asr_cmd_printf (" %02x", *(cp++)); \ 168 --len; \ 169 } \ 170 } 171 # if (DEBUG_ASR_CMD > 0) 172 # define debug_asr_cmd1_printf debug_asr_cmd_printf 173 # else 174 # define debug_asr_cmd1_printf(fmt,args...) 175 # endif 176 # if (DEBUG_ASR_CMD > 1) 177 # define debug_asr_cmd2_printf debug_asr_cmd_printf 178 # define debug_asr_cmd2_dump_message(message) debug_asr_message(message) 179 # else 180 # define debug_asr_cmd2_printf(fmt,args...) 181 # define debug_asr_cmd2_dump_message(message) 182 # endif 183 #else /* DEBUG_ASR_CMD */ 184 # define debug_asr_cmd_printf(fmt,args...) 185 # define debug_asr_cmd_dump_ccb(ccb) 186 # define debug_asr_cmd1_printf(fmt,args...) 187 # define debug_asr_cmd2_printf(fmt,args...) 188 # define debug_asr_cmd2_dump_message(message) 189 #endif /* DEBUG_ASR_CMD */ 190 191 #if (defined(DEBUG_ASR_USR_CMD)) 192 # define debug_usr_cmd_printf(fmt,args...) printf(fmt,##args) 193 # define debug_usr_cmd_dump_message(message) debug_usr_message(message) 194 #else /* DEBUG_ASR_USR_CMD */ 195 # define debug_usr_cmd_printf(fmt,args...) 
# define debug_usr_cmd_dump_message(message)
#endif /* DEBUG_ASR_USR_CMD */

#define dsDescription_size 46	/* Snug as a bug in a rug */
#include "dptsig.h"

/* Driver signature reported to the DPT/Adaptec management tools. */
static dpt_sig_S ASR_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL,
	ADF_ALL_SC5,
	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
	ASR_MONTH, ASR_DAY, ASR_YEAR,
	/* 01234567890123456789012345678901234567890123456789	< 50 chars */
	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
	/*		 ^^^^^ asr_attach alters these to match OS */
};

#include <sys/param.h>	/* TRUE=1 and FALSE=0 defined here */
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/disklabel.h>
#include <sys/bus.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/stat.h>
#include <sys/device.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_xpt_periph.h>

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/cputypes.h>
#include <machine/clock.h>
#include <i386/include/vmparam.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

/*
 * STATIC/INLINE collapse to nothing under DEBUG_ASR > 0 so every symbol
 * stays visible to the debugger.
 */
#define	STATIC	static
#define	INLINE

#if (defined(DEBUG_ASR) && (DEBUG_ASR > 0))
# undef STATIC
# define STATIC
# undef INLINE
# define INLINE
#endif
/* Parameter-direction annotations; purely documentary, expand to nothing. */
#define	IN
#define	OUT
#define	INOUT

#define	osdSwap4(x) ((u_long)ntohl((u_long)(x)))
#define	KVTOPHYS(x) vtophys(x)
#include "dptalign.h"
#include "i2oexec.h"
#include "i2obscsi.h"
#include "i2odpt.h"
#include "i2oadptr.h"
#include "opt_asr.h"

#include "sys_info.h"

/* Configuration Definitions */

#define	SG_SIZE		 58	/* Scatter Gather list Size		 */
#define	MAX_TARGET_ID	 126	/* Maximum Target ID supported		 */
#define	MAX_LUN		 255	/* Maximum LUN Supported		 */
#define	MAX_CHANNEL	 7	/* Maximum Channel # Supported by driver */
#define	MAX_INBOUND	 2000	/* Max CCBs, Also Max Queue Size	 */
#define	MAX_OUTBOUND	 256	/* Maximum outbound frames/adapter	 */
#define	MAX_INBOUND_SIZE 512	/* Maximum inbound frame size		 */
#define	MAX_MAP		 4194304L /* Maximum mapping size of IOP	 */
				/* Also serves as the minimum map for	 */
				/* the 2005S zero channel RAID product	 */

/**************************************************************************
** ASR Host Adapter structure - One Structure For Each Host Adapter That **
**  Is Configured Into The System. The Structure Supplies Configuration  **
**  Information, Status Info, Queue Info And An Active CCB List Pointer. **
***************************************************************************/

/* I2O register set (memory-mapped; layout fixed by the hardware) */
typedef struct {
	U8           Address[0x30];
	volatile U32 Status;
	volatile U32 Mask;
#	define Mask_InterruptsDisabled 0x08
	U32          x[2];
	volatile U32 ToFIFO;	/* In Bound FIFO  */
	volatile U32 FromFIFO;	/* Out Bound FIFO */
} i2oRegs_t;

/*
 * A MIX of performance and space considerations for TID lookups
 */
typedef u_int16_t tid_t;

typedef struct {
	u_int32_t size;		/* up to MAX_LUN    */
	tid_t	  TID[1];	/* variable-length tail: LUN -> TID */
} lun2tid_t;

typedef struct {
	u_int32_t   size;	/* up to MAX_TARGET */
	lun2tid_t * LUN[1];	/* variable-length tail: target -> LUN map */
} target2lun_t;

/*
 * To ensure that we only allocate and use the worst case ccb here, lets
 * make our own local ccb union.
 * If asr_alloc_ccb is utilized for another
 * ccb type, ensure that you add the additional structures into our local
 * ccb union. To ensure strict type checking, we will utilize the local
 * ccb definition wherever possible.
 */
union asr_ccb {
	struct ccb_hdr	    ccb_h;	/* For convenience */
	struct ccb_scsiio   csio;
	struct ccb_setasync csa;
};

/* Per-adapter soft state; one instance per configured controller. */
typedef struct Asr_softc {
	u_int16_t		ha_irq;
	void		      * ha_Base;	/* base port for each board */
	u_int8_t * volatile	ha_blinkLED;	/* BlinkLED status bytes (see
						 * ASR_getBlinkLedCode) */
	i2oRegs_t	      * ha_Virt;	/* Base address of IOP	    */
	U8		      * ha_Fvirt;	/* Base address of Frames   */
	I2O_IOP_ENTRY		ha_SystemTable;
	LIST_HEAD(,ccb_hdr)	ha_ccb;		/* ccbs in use		    */
	struct cam_path	      * ha_path[MAX_CHANNEL+1];
	struct cam_sim	      * ha_sim[MAX_CHANNEL+1];
#if __FreeBSD_version >= 400000
	struct resource	      * ha_mem_res;
	struct resource	      * ha_mes_res;
	struct resource	      * ha_irq_res;
	void		      * ha_intr;
#endif
	PI2O_LCT		ha_LCT;		/* Complete list of devices */
	/* Shorthand accessors into an LCT entry's IdentityTag bytes */
#	define le_type	 IdentityTag[0]
#	define I2O_BSA	   0x20
#	define I2O_FCA	   0x40
#	define I2O_SCSI	   0x00
#	define I2O_PORT	   0x80
#	define I2O_UNKNOWN 0x7F
#	define le_bus	 IdentityTag[1]
#	define le_target IdentityTag[2]
#	define le_lun	 IdentityTag[3]
	target2lun_t	      * ha_targets[MAX_CHANNEL+1];
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
	u_long			ha_Msgs_Phys;

	u_int8_t		ha_in_reset;	/* reset state machine */
#	define HA_OPERATIONAL	    0
#	define HA_IN_RESET	    1
#	define HA_OFF_LINE	    2
#	define HA_OFF_LINE_RECOVERY 3
	/* Configuration information */
	/* The target id maximums we take */
	u_int8_t		ha_MaxBus;	/* Maximum bus		    */
	u_int8_t		ha_MaxId;	/* Maximum target ID	    */
	u_int8_t		ha_MaxLun;	/* Maximum target LUN	    */
	u_int8_t		ha_SgSize;	/* Max SG elements	    */
	u_int8_t		ha_pciBusNum;
	u_int8_t		ha_pciDeviceNum;
	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
	u_int16_t		ha_QueueSize;	/* Max outstanding commands */
	u_int16_t		ha_Msgs_Count;

	/* Links into other parents and HBAs */
	struct Asr_softc      * ha_next;	/* HBA list */

#ifdef ASR_MEASURE_PERFORMANCE
#define MAX_TIMEQ_SIZE	256 // assumes MAX 256 scsi commands sent
	asr_perf_t		ha_performance;
	u_int32_t		ha_submitted_ccbs_count;

	// Queueing macros for a circular queue
#define TIMEQ_FREE_LIST_EMPTY(head, tail) (-1 == (head) && -1 == (tail))
#define TIMEQ_FREE_LIST_FULL(head, tail) ((((tail) + 1) % MAX_TIMEQ_SIZE) == (head))
#define ENQ_TIMEQ_FREE_LIST(item, Q, head, tail) \
	if (!TIMEQ_FREE_LIST_FULL((head), (tail))) { \
		if TIMEQ_FREE_LIST_EMPTY((head),(tail)) { \
			(head) = (tail) = 0; \
		} \
		else (tail) = ((tail) + 1) % MAX_TIMEQ_SIZE; \
		Q[(tail)] = (item); \
	} \
	else { \
		debug_asr_printf("asr: Enqueueing when TimeQ Free List is full... This should not happen!\n"); \
	}
#define DEQ_TIMEQ_FREE_LIST(item, Q, head, tail) \
	if (!TIMEQ_FREE_LIST_EMPTY((head), (tail))) { \
		item = Q[(head)]; \
		if ((head) == (tail)) { (head) = (tail) = -1; } \
		else (head) = ((head) + 1) % MAX_TIMEQ_SIZE; \
	} \
	else { \
		(item) = -1; \
		debug_asr_printf("asr: Dequeueing when TimeQ Free List is empty... This should not happen!\n"); \
	}

	// Circular queue of time stamps
	struct timeval		ha_timeQ[MAX_TIMEQ_SIZE];
	u_int32_t		ha_timeQFreeList[MAX_TIMEQ_SIZE];
	int			ha_timeQFreeHead;
	int			ha_timeQFreeTail;
#endif
} Asr_softc_t;

/* Head of the linked list (via ha_next) of all adapter soft states. */
STATIC Asr_softc_t	* Asr_softc;

/*
 * Prototypes of the routines we have in this object.
 */

/* Externally callable routines */
/*
 * Probe/attach glue: hide the FreeBSD 4.x new-bus calling convention
 * versus the older pci_device convention behind a common macro set.
 */
#if __FreeBSD_version >= 400000
#define PROBE_ARGS	IN device_t tag
#define PROBE_RET	int
#define PROBE_SET()	u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag)
#define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);}
#define ATTACH_ARGS	IN device_t tag
#define ATTACH_RET	int
#define ATTACH_SET()	int unit = device_get_unit(tag)
#define ATTACH_RETURN(retval) return(retval)
#else
#define PROBE_ARGS	IN pcici_t tag, IN pcidi_t id
#define PROBE_RET	const char *
#define PROBE_SET()
#define PROBE_RETURN(retval) return(retval)
#define ATTACH_ARGS	IN pcici_t tag, IN int unit
#define ATTACH_RET	void
#define ATTACH_SET()
#define ATTACH_RETURN(retval) return
#endif
/* I2O HDM interface */
STATIC PROBE_RET	asr_probe (PROBE_ARGS);
STATIC ATTACH_RET	asr_attach (ATTACH_ARGS);
/* DOMINO placeholder */
STATIC PROBE_RET	domino_probe (PROBE_ARGS);
STATIC ATTACH_RET	domino_attach (ATTACH_ARGS);
/* MODE0 adapter placeholder */
STATIC PROBE_RET	mode0_probe (PROBE_ARGS);
STATIC ATTACH_RET	mode0_attach (ATTACH_ARGS);

STATIC Asr_softc_t * ASR_get_sc (
			IN dev_t dev);
STATIC int asr_ioctl (
			IN dev_t	dev,
			IN u_long	cmd,
			INOUT caddr_t	data,
			int		flag,
			d_thread_t     *td);
STATIC int asr_open (
			IN dev_t	dev,
			int32_t		flags,
			int32_t		ifmt,
			IN d_thread_t  *td);
STATIC int asr_close (
			dev_t		dev,
			int		flags,
			int		ifmt,
			d_thread_t     *td);
STATIC int asr_intr (
			IN Asr_softc_t * sc);
STATIC void asr_timeout (
			INOUT void * arg);
STATIC int ASR_init (
			IN Asr_softc_t * sc);
STATIC INLINE int ASR_acquireLct (
			INOUT Asr_softc_t * sc);
STATIC INLINE int ASR_acquireHrt (
			INOUT Asr_softc_t * sc);
STATIC void asr_action (
			IN struct cam_sim * sim,
			IN union ccb	  * ccb);
STATIC void
asr_poll (
			IN struct cam_sim * sim);

/*
 * Here is the auto-probe structure used to nest our tests appropriately
 * during the startup phase of the operating system.
 */
#if __FreeBSD_version >= 400000
STATIC device_method_t asr_methods[] = {
	DEVMETHOD(device_probe,	 asr_probe),
	DEVMETHOD(device_attach, asr_attach),
	{ 0, 0 }
};

STATIC driver_t asr_driver = {
	"asr",
	asr_methods,
	sizeof(Asr_softc_t)
};

STATIC devclass_t asr_devclass;

DECLARE_DUMMY_MODULE(asr);
DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);

/* Placeholder driver for the DOMINO memory controller function. */
STATIC device_method_t domino_methods[] = {
	DEVMETHOD(device_probe,	 domino_probe),
	DEVMETHOD(device_attach, domino_attach),
	{ 0, 0 }
};

STATIC driver_t domino_driver = {
	"domino",
	domino_methods,
	0
};

STATIC devclass_t domino_devclass;

DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, 0, 0);

/* Placeholder driver claiming Mode0 (processor-disabled) adapters. */
STATIC device_method_t mode0_methods[] = {
	DEVMETHOD(device_probe,	 mode0_probe),
	DEVMETHOD(device_attach, mode0_attach),
	{ 0, 0 }
};

STATIC driver_t mode0_driver = {
	"mode0",
	mode0_methods,
	0
};

STATIC devclass_t mode0_devclass;

DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, 0, 0);
#else
STATIC u_long asr_pcicount = 0;
STATIC struct pci_device asr_pcidev = {
	"asr",
	asr_probe,
	asr_attach,
	&asr_pcicount,
	NULL
};
DATA_SET (asr_pciset, asr_pcidev);

STATIC u_long domino_pcicount = 0;
STATIC struct pci_device domino_pcidev = {
	"domino",
	domino_probe,
	domino_attach,
	&domino_pcicount,
	NULL
};
DATA_SET (domino_pciset, domino_pcidev);

STATIC u_long mode0_pcicount = 0;
STATIC struct pci_device mode0_pcidev = {
	"mode0",
	mode0_probe,
	mode0_attach,
	&mode0_pcicount,
	NULL
};
DATA_SET (mode0_pciset, mode0_pcidev);
#endif

/*
 * devsw for asr hba driver
 *
 * only ioctl is used. the sd driver provides all other access.
 */
#define CDEV_MAJOR 154	/* prefered default character major */
STATIC struct cdevsw asr_cdevsw = {
	"asr",		/* name	    */
	CDEV_MAJOR,	/* maj	    */
	0,		/* flags    */
	NULL,		/* port	    */
	0,		/* auto	    */

	asr_open,	/* open	    */
	asr_close,	/* close    */
	noread,		/* read	    */
	nowrite,	/* write    */
	asr_ioctl,	/* ioctl    */
	nopoll,		/* poll	    */
	nommap,		/* mmap	    */
	nostrategy,	/* strategy */
	nodump,		/* dump	    */
	nopsize		/* psize    */
};

#ifdef ASR_MEASURE_PERFORMANCE
STATIC u_int32_t asr_time_delta (IN struct timeval start,
				 IN struct timeval end);
#endif

/*
 * Initialize the dynamic cdevsw hooks.
 */
STATIC void
asr_drvinit (
	void * unused)
{
	static int asr_devsw_installed = 0;	/* one-shot guard */

	if (asr_devsw_installed) {
		return;
	}
	asr_devsw_installed++;
	/*
	 * Find a free spot (the report during driver load used by
	 * osd layer in engine to generate the controlling nodes).
	 */
	while ((asr_cdevsw.d_maj < NUMCDEVSW)
	 && (dev_dport(makedev(asr_cdevsw.d_maj,0)) != NULL)) {
		++asr_cdevsw.d_maj;
	}
	/* Ran off the end: retry from major 0 up to our preferred default */
	if (asr_cdevsw.d_maj >= NUMCDEVSW) for (
	  asr_cdevsw.d_maj = 0;
	  (asr_cdevsw.d_maj < CDEV_MAJOR)
	   && (dev_dport(makedev(asr_cdevsw.d_maj,0)) != NULL);
	  ++asr_cdevsw.d_maj);
	/*
	 * Come to papa
	 */
	cdevsw_add(&asr_cdevsw);
	/*
	 * delete any nodes that would attach to the primary adapter,
	 * let the adapter scans add them.
	 */
	destroy_dev(makedev(asr_cdevsw.d_maj,0));
} /* asr_drvinit */

/* Must initialize before CAM layer picks up our HBA driver */
SYSINIT(asrdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,asr_drvinit,NULL)

/* I2O support routines */
/*
 * Stack-buffer helpers for building message frames: declare a char
 * buffer big enough for STRUCT, then view it as a STRUCT pointer.
 */
#define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)]
#define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME))

/*
 * Fill message with default.
 */
STATIC PI2O_MESSAGE_FRAME
ASR_fillMessage (
	IN char		* Message,
	IN u_int16_t	  size)
{
	OUT PI2O_MESSAGE_FRAME Message_Ptr;

	Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message);
	bzero ((void *)Message_Ptr, size);
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
	/* Frame size is expressed in 32-bit words; round the byte count up */
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (size + sizeof(U32) - 1) >> 2);
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	return (Message_Ptr);
} /* ASR_fillMessage */

#define	EMPTY_QUEUE ((U32)-1L)

/*
 * Pop a free inbound message-frame offset from the ToFIFO register.
 * The read is retried once if the first attempt reports an empty queue;
 * EMPTY_QUEUE is returned if none is available.
 */
STATIC INLINE U32
ASR_getMessage(
	IN i2oRegs_t * virt)
{
	OUT U32	MessageOffset;

	if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) {
		MessageOffset = virt->ToFIFO;
	}
	return (MessageOffset);
} /* ASR_getMessage */

/* Issue a polled command */
STATIC U32
ASR_initiateCp (
	INOUT i2oRegs_t		* virt,
	INOUT U8		* fvirt,
	IN PI2O_MESSAGE_FRAME	  Message)
{
	OUT U32		Mask = -1L;	/* -1 means no frame was available */
	U32		MessageOffset;
	u_int		Delay = 1500;	/* up to ~15s of 10ms polls */

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resiliant to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		/* Copy the frame into the adapter's frame window */
		bcopy (Message, fvirt + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		/*
		 * Disable the Interrupts
		 */
		virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled;
		virt->ToFIFO = MessageOffset;
	}
	/* Caller restores this saved Mask to re-enable interrupts */
	return (Mask);
} /* ASR_initiateCp */

/*
 * Reset the adapter.
 */
STATIC U32
ASR_resetIOP (
	INOUT i2oRegs_t		* virt,
	INOUT U8		* fvirt)
{
	struct resetMessage {
		I2O_EXEC_IOP_RESET_MESSAGE M;
		U32			   R;	/* room for the status word */
	};
	defAlignLong(struct resetMessage,Message);
	PI2O_EXEC_IOP_RESET_MESSAGE	Message_Ptr;
	OUT U32		     * volatile Reply_Ptr;
	U32				Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 *  Reset the Reply Status (the status word lives right after the
	 *  message within our stack buffer; the card DMAs into it).
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	  KVTOPHYS((void *)Reply_Ptr));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s.
		 */
		u_int8_t Delay = 200;

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 *	Re-enable the interrupts.
		 */
		virt->Mask = Old;
		ASSERT (*Reply_Ptr);
		/* 0 on timeout; otherwise the reset status the card wrote */
		return (*Reply_Ptr);
	}
	ASSERT (Old != (U32)-1L);
	return (0);
} /* ASR_resetIOP */

/*
 * Get the curent state of the adapter
 */
STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus (
	INOUT i2oRegs_t *		virt,
	INOUT U8 *			fvirt,
	OUT PI2O_EXEC_STATUS_GET_REPLY	buffer)
{
	defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message);
	PI2O_EXEC_STATUS_GET_MESSAGE	Message_Ptr;
	U32				Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	  I2O_EXEC_STATUS_GET);
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	  KVTOPHYS((void *)buffer));
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	  sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *  Reset the Reply Status
	 */
	bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 255ms
		 * (raised from 50ms in V1.08 -- see the changelog -- because
		 * the 2000S/2005S can take longer to answer StatusGet).
		 */
		u_int8_t Delay = 255;

		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				/* Timed out: report failure to the caller */
				buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL;
				break;
			}
			DELAY (1000);
		}
		/*
		 *	Re-enable the interrupts.
		 */
		virt->Mask = Old;
		return (buffer);
	}
	return ((PI2O_EXEC_STATUS_GET_REPLY)NULL);
} /* ASR_getStatus */

/*
 * Check if the device is a SCSI I2O HBA, and add it to the list.
 */

/*
 * Probe for ASR controller.  If we find it, we will use it.
 * virtual adapters.
 */
STATIC PROBE_RET
asr_probe(PROBE_ARGS)
{
	PROBE_SET();
	/* id is (device<<16)|vendor, as packed by PROBE_SET() */
	if ((id == 0xA5011044) || (id == 0xA5111044)) {
		PROBE_RETURN ("Adaptec Caching SCSI RAID");
	}
	PROBE_RETURN (NULL);
} /* asr_probe */

/*
 * Probe/Attach for DOMINO chipset.
 */
STATIC PROBE_RET
domino_probe(PROBE_ARGS)
{
	PROBE_SET();
	if (id == 0x10121044) {
		PROBE_RETURN ("Adaptec Caching Memory Controller");
	}
	PROBE_RETURN (NULL);
} /* domino_probe */

/* Claim-only attach: no resources or state are set up. */
STATIC ATTACH_RET
domino_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* domino_attach */

/*
 * Probe/Attach for MODE0 adapters.
 */
STATIC PROBE_RET
mode0_probe(PROBE_ARGS)
{
	PROBE_SET();

	/*
	 * If/When we can get a business case to commit to a
	 * Mode0 driver here, we can make all these tests more
	 * specific and robust. Mode0 adapters have their processors
	 * turned off, thus the chips are in a raw state.
	 */

	/* This is a PLX9054 */
	if (id == 0x905410B5) {
		PROBE_RETURN ("Adaptec Mode0 PM3757");
	}
	/* This is a PLX9080 */
	if (id == 0x908010B5) {
		PROBE_RETURN ("Adaptec Mode0 PM3754/PM3755");
	}
	/* This is a ZION 80303 */
	if (id == 0x53098086) {
		PROBE_RETURN ("Adaptec Mode0 3010S");
	}
	/* This is an i960RS */
	if (id == 0x39628086) {
		PROBE_RETURN ("Adaptec Mode0 2100S");
	}
	/* This is an i960RN */
	if (id == 0x19648086) {
		PROBE_RETURN ("Adaptec Mode0 PM2865/2400A/3200S/3400S");
	}
#if 0 /* this would match any generic i960 -- mjs */
	/* This is an i960RP (typically also on Motherboards) */
	if (id == 0x19608086) {
		PROBE_RETURN ("Adaptec Mode0 PM2554/PM1554/PM2654");
	}
#endif
	PROBE_RETURN (NULL);
} /* mode0_probe */

/* Claim-only attach: no resources or state are set up. */
STATIC ATTACH_RET
mode0_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* mode0_attach */

/*
 * Allocate a zeroed local ccb, back-linked to the owning softc.
 */
STATIC INLINE union asr_ccb *
asr_alloc_ccb (
	IN
	   Asr_softc_t * sc)
{
	OUT union asr_ccb * new_ccb;

	if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
	  M_DEVBUF, M_WAITOK)) != (union asr_ccb *)NULL) {
		bzero (new_ccb, sizeof(*new_ccb));
		new_ccb->ccb_h.pinfo.priority = 1;
		new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
		new_ccb->ccb_h.spriv_ptr0 = sc;	/* back-link to our softc */
	}
	return (new_ccb);
} /* asr_alloc_ccb */

STATIC INLINE void
asr_free_ccb (
	IN union asr_ccb * free_ccb)
{
	free(free_ccb, M_DEVBUF);
} /* asr_free_ccb */

/*
 * Print inquiry data `carefully'
 */
STATIC void
ASR_prstring (
	u_int8_t * s,
	int	   len)
{
	/* Stop at the length limit, a NUL, a space, or a '-'. */
	while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
		printf ("%c", *(s++));
	}
} /* ASR_prstring */

/*
 * Prototypes
 */
STATIC INLINE int ASR_queue (
	IN Asr_softc_t	    * sc,
	IN PI2O_MESSAGE_FRAME Message);
/*
 * Send a message synchronously and without Interrupt to a ccb.
 */
STATIC int
ASR_queue_s (
	INOUT union asr_ccb	* ccb,
	IN PI2O_MESSAGE_FRAME	  Message)
{
	int		s;
	U32		Mask;
	Asr_softc_t	* sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	s = splcam ();
	sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask)
	  | Mask_InterruptsDisabled;

	if (ASR_queue (sc, Message) == EMPTY_QUEUE) {
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.
	 */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);	/* poll the interrupt handler */
	}

	/* Re-enable Interrupts */
	sc->ha_Virt->Mask = Mask;
	splx(s);

	return (ccb->ccb_h.status);
} /* ASR_queue_s */

/*
 * Send a message synchronously to a Asr_softc_t
 */
STATIC int
ASR_queue_c (
	IN Asr_softc_t	      * sc,
	IN PI2O_MESSAGE_FRAME	Message)
{
	union asr_ccb * ccb;
	OUT int		status;

	if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
		return (CAM_REQUEUE_REQ);
	}

	status = ASR_queue_s (ccb, Message);

	asr_free_ccb(ccb);

	return (status);
} /* ASR_queue_c */

/*
 * Add the specified ccb to the active queue
 */
STATIC INLINE void
ASR_ccbAdd (
	IN Asr_softc_t	    * sc,
	INOUT union asr_ccb * ccb)
{
	int s;

	/* Critical section: the list is also walked at interrupt time */
	s = splcam();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flashes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;	/* 6 minutes */
		}
		ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
		  (ccb->ccb_h.timeout * hz) / 1000);
	}
	splx(s);
} /* ASR_ccbAdd */

/*
 * Remove the specified ccb from the active queue.
 */
STATIC INLINE void
ASR_ccbRemove (
	IN Asr_softc_t	    * sc,
	INOUT union asr_ccb * ccb)
{
	int s;

	s = splcam();
	untimeout(asr_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
	splx(s);
} /* ASR_ccbRemove */

/*
 * Fail all the active commands, so they get re-issued by the operating
 * system.
 */
STATIC INLINE void
ASR_failActiveCommands (
	IN Asr_softc_t	  * sc)
{
	struct ccb_hdr	* ccb;
	int		  s;

#if 0 /* Currently handled by callers, unnecessary paranoia currently */
      /* Left in for historical perspective. */
	defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message);
	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;

	/* Send a blind LCT command to wait for the enableSys to complete */
	Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
#endif

	s = splcam();
	/*
	 * We do not need to inform the CAM layer that we had a bus
	 * reset since we manage it on our own, this also prevents the
	 * SCSI_DELAY settling that would be required on other systems.
	 * The `SCSI_DELAY' has already been handled by the card via the
	 * acquisition of the LCT table while we are at CAM priority level.
	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
	 *  }
	 */
	/*
	 * Always re-fetch the list head: per the V1.05 notes in the file
	 * header, iterating while removing entries missed commands.
	 */
	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != (struct ccb_hdr *)NULL) {
		ASR_ccbRemove (sc, (union asr_ccb *)ccb);

		/* Ask the OS to re-issue this command */
		ccb->status &= ~CAM_STATUS_MASK;
		ccb->status |= CAM_REQUEUE_REQ;
		/* Nothing Transfered */
		((struct ccb_scsiio *)ccb)->resid
		  = ((struct ccb_scsiio *)ccb)->dxfer_len;

		if (ccb->path) {
			xpt_done ((union ccb *)ccb);
		} else {
			/* Driver-internal ccb (no CAM path); wake the waiter */
			wakeup ((caddr_t)ccb);
		}
	}
	splx(s);
} /* ASR_failActiveCommands */

/*
 * The following command causes the HBA to reset the specific bus.
 *
 * The LCT is searched for the bus-adapter port entry matching `bus',
 * and an I2O_HBA_BUS_RESET is posted to that port's TID.  The reset
 * is fire-and-forget; no completion is awaited.
 */
STATIC INLINE void
ASR_resetBus(
	IN Asr_softc_t	  * sc,
	IN int		    bus)
{
	defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message);
	I2O_HBA_BUS_RESET_MESSAGE * Message_Ptr;
	PI2O_LCT_ENTRY		    Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	  I2O_HBA_BUS_RESET);
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			  &Message_Ptr->StdMessageFrame,
			  I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */

/*
 * Return the adapter blink LED fault code, or 0 if the adapter has not
 * faulted.  The 0xBC in ha_blinkLED[1] appears to act as the "code is
 * valid" signature -- NOTE(review): confirm against DPT/Adaptec
 * firmware documentation.
 */
STATIC INLINE int
ASR_getBlinkLedCode (
	IN Asr_softc_t * sc)
{
	if ((sc != (Asr_softc_t *)NULL)
	 && (sc->ha_blinkLED != (u_int8_t *)NULL)
	 && (sc->ha_blinkLED[1] == 0xBC)) {
		return (sc->ha_blinkLED[0]);
	}
	return (0);
} /* ASR_getBlinkCode */

/*
 * Determine the address of an TID
 * lookup.  Must be done at high priority
 * since the address can be changed by other threads of execution.
 *
 * Returns NULL pointer if not indexible (but will attempt to generate
 * an index if `new_entry' flag is set to TRUE).
 *
 * All addressible entries are to be guaranteed zero if never initialized.
 */
STATIC INLINE tid_t *
ASR_getTidAddress(
	INOUT Asr_softc_t * sc,
	IN int		    bus,
	IN int		    target,
	IN int		    lun,
	IN int		    new_entry)
{
	target2lun_t	* bus_ptr;	/* per-bus array of lun lists	   */
	lun2tid_t	* target_ptr;	/* per-target array of TIDs	   */
	unsigned	  new_size;	/* requested capacity, rounded up  */

	/*
	 * Validity checking of incoming parameters. More of a bound
	 * expansion limit than an issue with the code dealing with the
	 * values.
	 *
	 *	sc must be valid before it gets here, so that check could be
	 *	dropped if speed a critical issue.
	 */
	if ((sc == (Asr_softc_t *)NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		  (u_long)sc, bus, target, lun);
		return ((tid_t *)NULL);
	}
	/*
	 * See if there is an associated bus list.
	 *
	 * for performance, allocate in size of BUS_CHUNK chunks.
	 * BUS_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
#	define BUS_CHUNK 8
	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
	if ((bus_ptr = sc->ha_targets[bus]) == (target2lun_t *)NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return ((tid_t *)NULL);
		}
		bzero (bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		/* size records capacity in elements (one is built in) */
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		/* old capacity is size elements, hence the (size - 1) */
		bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		free (bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 * We now have the bus list, lets get to the target list.
	 * Since most systems have only *one* lun, we do not allocate
	 * in chunks as above, here we allow one, then in chunk sizes.
	 * TARGET_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
#	define TARGET_CHUNK 8
	if ((new_size = lun) != 0) {
		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
	}
	if ((target_ptr = bus_ptr->LUN[target]) == (lun2tid_t *)NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return ((tid_t *)NULL);
		}
		bzero (target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		bcopy (target_ptr, new_target_ptr,
		  sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		free (target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 * Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */

/*
 * Get a pre-existing TID relationship.
 *
 * If the TID was never set, return (tid_t)-1.
 *
 * should use mutex rather than spl.
 */
STATIC INLINE tid_t
ASR_getTid (
	IN Asr_softc_t	  * sc,
	IN int		    bus,
	IN int		    target,
	IN int		    lun)
{
	tid_t	* tid_ptr;
	int	  s;
	OUT tid_t retval;

	/* Snapshot under splcam; the lookup tables may be resized */
	s = splcam();
	if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE))
	  == (tid_t *)NULL)
	/* (tid_t)0 or (tid_t)-1 indicate no TID */
	 || (*tid_ptr == (tid_t)0)) {
		splx(s);
		return ((tid_t)-1);
	}
	retval = *tid_ptr;
	splx(s);
	return (retval);
} /* ASR_getTid */

/*
 * Set a TID relationship.
 *
 * If the TID was not set, return (tid_t)-1.
 *
 * should use mutex rather than spl.
 */
STATIC INLINE tid_t
ASR_setTid (
	INOUT Asr_softc_t * sc,
	IN int		    bus,
	IN int		    target,
	IN int		    lun,
	INOUT tid_t	    TID)
{
	tid_t	* tid_ptr;
	int	  s;

	if (TID != (tid_t)-1) {
		/* (tid_t)0 is a reserved "no TID" value; refuse to store it */
		if (TID == 0) {
			return ((tid_t)-1);
		}
		s = splcam();
		if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE))
		 == (tid_t *)NULL) {
			splx(s);
			return ((tid_t)-1);
		}
		*tid_ptr = TID;
		splx(s);
	}
	return (TID);
} /* ASR_setTid */

/*-------------------------------------------------------------------------*/
/*			 Function ASR_rescan				   */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :				   */
/*	Asr_softc_t *	 : HBA miniport driver's adapter data storage.	   */
/*									   */
/* This Function Will rescan the adapter and resynchronize any data	   */
/*									   */
/* Return : 0 For OK, Error Code Otherwise				   */
/*-------------------------------------------------------------------------*/

STATIC INLINE int
ASR_rescan(
	IN Asr_softc_t * sc)
{
	int	bus;
	OUT int error;

	/*
	 * Re-acquire the LCT table and synchronize us to the adapter.
	 */
	if ((error = ASR_acquireLct(sc)) == 0) {
		error = ASR_acquireHrt(sc);
	}

	if (error != 0) {
		return error;
	}

	/* Walk the buses from ha_MaxBus down to 0 */
	bus = sc->ha_MaxBus;
	/* Reset all existing cached TID lookups */
	do {
		int target, event = 0;

		/*
		 * Scan for all targets on this bus to see if they
		 * got affected by the rescan.
		 */
		for (target = 0; target <= sc->ha_MaxId; ++target) {
			int lun;

			/* Stay away from the controller ID */
			if (target == sc->ha_adapter_target[bus]) {
				continue;
			}
			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
				PI2O_LCT_ENTRY Device;
				tid_t	       TID = (tid_t)-1;
				tid_t	       LastTID;

				/*
				 * See if the cached TID changed. Search for
				 * the device in our new LCT.
				 */
				for (Device = sc->ha_LCT->LCTEntry;
				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
				   + I2O_LCT_getTableSize(sc->ha_LCT));
				  ++Device) {
					if ((Device->le_type != I2O_UNKNOWN)
					 && (Device->le_bus == bus)
					 && (Device->le_target == target)
					 && (Device->le_lun == lun)
					 && (I2O_LCT_ENTRY_getUserTID(Device)
					  == 0xFFF)) {
						TID = I2O_LCT_ENTRY_getLocalTID(
						  Device);
						break;
					}
				}
				/*
				 * Indicate to the OS that the label needs
				 * to be recalculated, or that the specific
				 * open device is no longer valid (Merde)
				 * because the cached TID changed.
				 */
				LastTID = ASR_getTid (sc, bus, target, lun);
				if (LastTID != TID) {
					struct cam_path * path;

					/*
					 * NOTE(review): `path' does not appear
					 * to be released after the xpt_async()
					 * calls below -- verify whether an
					 * xpt_free_path() is required here.
					 */
					if (xpt_create_path(&path,
					  /*periph*/NULL,
					  cam_sim_path(sc->ha_sim[bus]),
					  target, lun) != CAM_REQ_CMP) {
						/*
						 * No path; fall back to a
						 * bus-wide event, sent after
						 * the loop completes.
						 */
						if (TID == (tid_t)-1) {
							event |= AC_LOST_DEVICE;
						} else {
							event |= AC_INQ_CHANGED
							       | AC_GETDEV_CHANGED;
						}
					} else {
						if (TID == (tid_t)-1) {
							xpt_async(
							  AC_LOST_DEVICE,
							  path, NULL);
						} else if (LastTID == (tid_t)-1) {
							struct ccb_getdev ccb;

							xpt_setup_ccb(
							  &(ccb.ccb_h),
							  path, /*priority*/5);
							xpt_async(
							  AC_FOUND_DEVICE,
							  path,
							  &ccb);
						} else {
							xpt_async(
							  AC_INQ_CHANGED,
							  path, NULL);
							xpt_async(
							  AC_GETDEV_CHANGED,
							  path, NULL);
						}
					}
				}
				/*
				 * We have the option of clearing the
				 * cached TID for it to be rescanned, or to
				 * set it now even if the device never got
				 * accessed. We chose the later since we
				 * currently do not use the condition that
				 * the TID ever got cached.
				 */
				ASR_setTid (sc, bus, target, lun, TID);
			}
		}
		/*
		 * The xpt layer can not handle multiple events at the
		 * same call.
		 */
		if (event & AC_LOST_DEVICE) {
			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
		}
		if (event & AC_INQ_CHANGED) {
			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
		}
		if (event & AC_GETDEV_CHANGED) {
			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
		}
	} while (--bus >= 0);
	return (error);
} /* ASR_rescan */

/*-------------------------------------------------------------------------*/
/*			 Function ASR_reset				   */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :				   */
/*	Asr_softc_t *	 : HBA miniport driver's adapter data storage.
*/ 1521 /* */ 1522 /* This Function Will reset the adapter and resynchronize any data */ 1523 /* */ 1524 /* Return : None */ 1525 /*-------------------------------------------------------------------------*/ 1526 1527 STATIC INLINE int 1528 ASR_reset( 1529 IN Asr_softc_t * sc) 1530 { 1531 int s, retVal; 1532 1533 s = splcam(); 1534 if ((sc->ha_in_reset == HA_IN_RESET) 1535 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) { 1536 splx (s); 1537 return (EBUSY); 1538 } 1539 /* 1540 * Promotes HA_OPERATIONAL to HA_IN_RESET, 1541 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY. 1542 */ 1543 ++(sc->ha_in_reset); 1544 if (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0) { 1545 debug_asr_printf ("ASR_resetIOP failed\n"); 1546 /* 1547 * We really need to take this card off-line, easier said 1548 * than make sense. Better to keep retrying for now since if a 1549 * UART cable is connected the blinkLEDs the adapter is now in 1550 * a hard state requiring action from the monitor commands to 1551 * the HBA to continue. For debugging waiting forever is a 1552 * good thing. In a production system, however, one may wish 1553 * to instead take the card off-line ... 1554 */ 1555 # if 0 && (defined(HA_OFF_LINE)) 1556 /* 1557 * Take adapter off-line. 1558 */ 1559 printf ("asr%d: Taking adapter off-line\n", 1560 sc->ha_path[0] 1561 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0])) 1562 : 0); 1563 sc->ha_in_reset = HA_OFF_LINE; 1564 splx (s); 1565 return (ENXIO); 1566 # else 1567 /* Wait Forever */ 1568 while (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0); 1569 # endif 1570 } 1571 retVal = ASR_init (sc); 1572 splx (s); 1573 if (retVal != 0) { 1574 debug_asr_printf ("ASR_init failed\n"); 1575 sc->ha_in_reset = HA_OFF_LINE; 1576 return (ENXIO); 1577 } 1578 if (ASR_rescan (sc) != 0) { 1579 debug_asr_printf ("ASR_rescan failed\n"); 1580 } 1581 ASR_failActiveCommands (sc); 1582 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) { 1583 printf ("asr%d: Brining adapter back on-line\n", 1584 sc->ha_path[0] 1585 ? 
cam_sim_unit(xpt_path_sim(sc->ha_path[0])) 1586 : 0); 1587 } 1588 sc->ha_in_reset = HA_OPERATIONAL; 1589 return (0); 1590 } /* ASR_reset */ 1591 1592 /* 1593 * Device timeout handler. 1594 */ 1595 STATIC void 1596 asr_timeout( 1597 INOUT void * arg) 1598 { 1599 union asr_ccb * ccb = (union asr_ccb *)arg; 1600 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0); 1601 int s; 1602 1603 debug_asr_print_path(ccb); 1604 debug_asr_printf("timed out"); 1605 1606 /* 1607 * Check if the adapter has locked up? 1608 */ 1609 if ((s = ASR_getBlinkLedCode(sc)) != 0) { 1610 /* Reset Adapter */ 1611 printf ("asr%d: Blink LED 0x%x resetting adapter\n", 1612 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s); 1613 if (ASR_reset (sc) == ENXIO) { 1614 /* Try again later */ 1615 ccb->ccb_h.timeout_ch = timeout(asr_timeout, 1616 (caddr_t)ccb, 1617 (ccb->ccb_h.timeout * hz) / 1000); 1618 } 1619 return; 1620 } 1621 /* 1622 * Abort does not function on the ASR card!!! Walking away from 1623 * the SCSI command is also *very* dangerous. A SCSI BUS reset is 1624 * our best bet, followed by a complete adapter reset if that fails. 1625 */ 1626 s = splcam(); 1627 /* Check if we already timed out once to raise the issue */ 1628 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) { 1629 debug_asr_printf (" AGAIN\nreinitializing adapter\n"); 1630 if (ASR_reset (sc) == ENXIO) { 1631 ccb->ccb_h.timeout_ch = timeout(asr_timeout, 1632 (caddr_t)ccb, 1633 (ccb->ccb_h.timeout * hz) / 1000); 1634 } 1635 splx(s); 1636 return; 1637 } 1638 debug_asr_printf ("\nresetting bus\n"); 1639 /* If the BUS reset does not take, then an adapter reset is next! 
*/ 1640 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1641 ccb->ccb_h.status |= CAM_CMD_TIMEOUT; 1642 ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb, 1643 (ccb->ccb_h.timeout * hz) / 1000); 1644 ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path))); 1645 xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL); 1646 splx(s); 1647 } /* asr_timeout */ 1648 1649 /* 1650 * send a message asynchronously 1651 */ 1652 STATIC INLINE int 1653 ASR_queue( 1654 IN Asr_softc_t * sc, 1655 IN PI2O_MESSAGE_FRAME Message) 1656 { 1657 OUT U32 MessageOffset; 1658 union asr_ccb * ccb; 1659 1660 debug_asr_printf ("Host Command Dump:\n"); 1661 debug_asr_dump_message (Message); 1662 1663 ccb = (union asr_ccb *)(long) 1664 I2O_MESSAGE_FRAME_getInitiatorContext64(Message); 1665 1666 if ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE) { 1667 #ifdef ASR_MEASURE_PERFORMANCE 1668 int startTimeIndex; 1669 1670 if (ccb) { 1671 ++sc->ha_performance.command_count[ 1672 (int) ccb->csio.cdb_io.cdb_bytes[0]]; 1673 DEQ_TIMEQ_FREE_LIST(startTimeIndex, 1674 sc->ha_timeQFreeList, 1675 sc->ha_timeQFreeHead, 1676 sc->ha_timeQFreeTail); 1677 if (-1 != startTimeIndex) { 1678 microtime(&(sc->ha_timeQ[startTimeIndex])); 1679 } 1680 /* Time stamp the command before we send it out */ 1681 ((PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *) Message)-> 1682 PrivateMessageFrame.TransactionContext 1683 = (I2O_TRANSACTION_CONTEXT) startTimeIndex; 1684 1685 ++sc->ha_submitted_ccbs_count; 1686 if (sc->ha_performance.max_submit_count 1687 < sc->ha_submitted_ccbs_count) { 1688 sc->ha_performance.max_submit_count 1689 = sc->ha_submitted_ccbs_count; 1690 } 1691 } 1692 #endif 1693 bcopy (Message, sc->ha_Fvirt + MessageOffset, 1694 I2O_MESSAGE_FRAME_getMessageSize(Message) << 2); 1695 if (ccb) { 1696 ASR_ccbAdd (sc, ccb); 1697 } 1698 /* Post the command */ 1699 sc->ha_Virt->ToFIFO = MessageOffset; 1700 } else { 1701 if (ASR_getBlinkLedCode(sc)) { 1702 /* 1703 * Unlikely we can do anything if we can't grab a 1704 * message 
frame :-(, but lets give it a try. 1705 */ 1706 (void)ASR_reset (sc); 1707 } 1708 } 1709 return (MessageOffset); 1710 } /* ASR_queue */ 1711 1712 1713 /* Simple Scatter Gather elements */ 1714 #define SG(SGL,Index,Flags,Buffer,Size) \ 1715 I2O_FLAGS_COUNT_setCount( \ 1716 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \ 1717 Size); \ 1718 I2O_FLAGS_COUNT_setFlags( \ 1719 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \ 1720 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags)); \ 1721 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress( \ 1722 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]), \ 1723 (Buffer == NULL) ? NULL : KVTOPHYS(Buffer)) 1724 1725 /* 1726 * Retrieve Parameter Group. 1727 * Buffer must be allocated using defAlignLong macro. 1728 */ 1729 STATIC void * 1730 ASR_getParams( 1731 IN Asr_softc_t * sc, 1732 IN tid_t TID, 1733 IN int Group, 1734 OUT void * Buffer, 1735 IN unsigned BufferSize) 1736 { 1737 struct paramGetMessage { 1738 I2O_UTIL_PARAMS_GET_MESSAGE M; 1739 char F[ 1740 sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)]; 1741 struct Operations { 1742 I2O_PARAM_OPERATIONS_LIST_HEADER Header; 1743 I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1]; 1744 } O; 1745 }; 1746 defAlignLong(struct paramGetMessage, Message); 1747 struct Operations * Operations_Ptr; 1748 I2O_UTIL_PARAMS_GET_MESSAGE * Message_Ptr; 1749 struct ParamBuffer { 1750 I2O_PARAM_RESULTS_LIST_HEADER Header; 1751 I2O_PARAM_READ_OPERATION_RESULT Read; 1752 char Info[1]; 1753 } * Buffer_Ptr; 1754 1755 Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message, 1756 sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) 1757 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)); 1758 Operations_Ptr = (struct Operations *)((char *)Message_Ptr 1759 + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) 1760 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)); 1761 bzero ((void *)Operations_Ptr, sizeof(struct Operations)); 1762 I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount( 1763 
&(Operations_Ptr->Header), 1); 1764 I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation( 1765 &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET); 1766 I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount( 1767 &(Operations_Ptr->Template[0]), 0xFFFF); 1768 I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber( 1769 &(Operations_Ptr->Template[0]), Group); 1770 bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)), 1771 BufferSize); 1772 1773 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame), 1774 I2O_VERSION_11 1775 + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)) 1776 / sizeof(U32)) << 4)); 1777 I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame), 1778 TID); 1779 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame), 1780 I2O_UTIL_PARAMS_GET); 1781 /* 1782 * Set up the buffers as scatter gather elements. 1783 */ 1784 SG(&(Message_Ptr->SGL), 0, 1785 I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, 1786 Operations_Ptr, sizeof(struct Operations)); 1787 SG(&(Message_Ptr->SGL), 1, 1788 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, 1789 Buffer_Ptr, BufferSize); 1790 1791 if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP) 1792 && (Buffer_Ptr->Header.ResultCount)) { 1793 return ((void *)(Buffer_Ptr->Info)); 1794 } 1795 return ((void *)NULL); 1796 } /* ASR_getParams */ 1797 1798 /* 1799 * Acquire the LCT information. 
 */
STATIC INLINE int
ASR_acquireLct (
	INOUT Asr_softc_t	   * sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT	     sg;
	int			     MessageSizeInBytes;
	caddr_t			     v;
	int			     len;
	I2O_LCT			     Table;	/* header-only probe copy */
	PI2O_LCT_ENTRY		     Entry;

	/*
	 * sc value assumed valid
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc (
	  MessageSizeInBytes, M_TEMP, M_WAITOK))
	  == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	/*
	 * Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 */
	/* First pass: fetch just the LCT header into `Table' */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
	  sizeof(I2O_LCT));
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 * Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		free (sc->ha_LCT, M_TEMP);
	}
	/*
	 * malloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		free (Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK))
	  == (PI2O_LCT)NULL) {
		free (Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 * Convert the access to the LCT table into a SG list.
	 */
	/* Second pass: build one SG element per physically-contiguous run */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				  | I2O_SGL_FLAGS_LAST_ELEMENT
				  | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		  &(Message_Ptr->StdMessageFrame),
		  I2O_MESSAGE_FRAME_getMessageSize(
		    &(Message_Ptr->StdMessageFrame))
		  + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			/* Grow the frame; copy what we built so far */
			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    malloc (MessageSizeInBytes, M_TEMP, M_WAITOK))
			    == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
				free (sc->ha_LCT, M_TEMP);
				sc->ha_LCT = (PI2O_LCT)NULL;
				free (Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy ((caddr_t)Message_Ptr,
			  (caddr_t)NewMessage_Ptr, span);
			free (Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	{	int retval;

		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		free (Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	/* Classify each entry and resolve its bus/target/lun address */
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
		{	struct ControllerInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
			};
			defAlignLong(struct ControllerInfo, Buffer);
			PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
			    Buffer, sizeof(struct ControllerInfo)))
			== (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) {
				continue;
			}
			Entry->le_target
			  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
			      Info);
			Entry->le_lun = 0;
		}	/* FALLTHRU */
		default:
			continue;
		}
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_DPT_DEVICE_INFO_SCALAR	Info;
			};
			defAlignLong (struct DeviceInfo, Buffer);
			PI2O_DPT_DEVICE_INFO_SCALAR	Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_DPT_DEVICE_INFO_GROUP_NO,
			    Buffer, sizeof(struct DeviceInfo)))
			== (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			/* Track the highest bus number seen on the adapter */
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 * A zero return value indicates success.
2050 */ 2051 return (0); 2052 } /* ASR_acquireLct */ 2053 2054 /* 2055 * Initialize a message frame. 2056 * We assume that the CDB has already been set up, so all we do here is 2057 * generate the Scatter Gather list. 2058 */ 2059 STATIC INLINE PI2O_MESSAGE_FRAME 2060 ASR_init_message( 2061 IN union asr_ccb * ccb, 2062 OUT PI2O_MESSAGE_FRAME Message) 2063 { 2064 int next, span, base, rw; 2065 OUT PI2O_MESSAGE_FRAME Message_Ptr; 2066 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0); 2067 PI2O_SGE_SIMPLE_ELEMENT sg; 2068 caddr_t v; 2069 vm_size_t size, len; 2070 U32 MessageSize; 2071 2072 /* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */ 2073 bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message), 2074 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))); 2075 2076 { 2077 int target = ccb->ccb_h.target_id; 2078 int lun = ccb->ccb_h.target_lun; 2079 int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 2080 tid_t TID; 2081 2082 if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) { 2083 PI2O_LCT_ENTRY Device; 2084 2085 TID = (tid_t)0; 2086 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY) 2087 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); 2088 ++Device) { 2089 if ((Device->le_type != I2O_UNKNOWN) 2090 && (Device->le_bus == bus) 2091 && (Device->le_target == target) 2092 && (Device->le_lun == lun) 2093 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) { 2094 TID = I2O_LCT_ENTRY_getLocalTID(Device); 2095 ASR_setTid (sc, Device->le_bus, 2096 Device->le_target, Device->le_lun, 2097 TID); 2098 break; 2099 } 2100 } 2101 } 2102 if (TID == (tid_t)0) { 2103 return ((PI2O_MESSAGE_FRAME)NULL); 2104 } 2105 I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID); 2106 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID( 2107 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID); 2108 } 2109 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 | 2110 (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - 
sizeof(I2O_SG_ELEMENT)) 2111 / sizeof(U32)) << 4)); 2112 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr, 2113 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2114 - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)); 2115 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1); 2116 I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE); 2117 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode ( 2118 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC); 2119 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags ( 2120 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, 2121 I2O_SCB_FLAG_ENABLE_DISCONNECT 2122 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2123 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER); 2124 /* 2125 * We do not need any (optional byteswapping) method access to 2126 * the Initiator & Transaction context field. 2127 */ 2128 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb); 2129 2130 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID( 2131 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID); 2132 /* 2133 * copy the cdb over 2134 */ 2135 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength( 2136 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len); 2137 bcopy (&(ccb->csio.cdb_io), 2138 ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len); 2139 2140 /* 2141 * Given a buffer describing a transfer, set up a scatter/gather map 2142 * in a ccb to map that SCSI transfer. 2143 */ 2144 2145 rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR; 2146 2147 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags ( 2148 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, 2149 (ccb->csio.dxfer_len) 2150 ? ((rw) ? 
(I2O_SCB_FLAG_XFER_TO_DEVICE 2151 | I2O_SCB_FLAG_ENABLE_DISCONNECT 2152 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2153 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER) 2154 : (I2O_SCB_FLAG_XFER_FROM_DEVICE 2155 | I2O_SCB_FLAG_ENABLE_DISCONNECT 2156 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2157 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)) 2158 : (I2O_SCB_FLAG_ENABLE_DISCONNECT 2159 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2160 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)); 2161 2162 /* 2163 * Given a transfer described by a `data', fill in the SG list. 2164 */ 2165 sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0]; 2166 2167 len = ccb->csio.dxfer_len; 2168 v = ccb->csio.data_ptr; 2169 ASSERT (ccb->csio.dxfer_len >= 0); 2170 MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr); 2171 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount( 2172 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len); 2173 while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2174 Message_Ptr)->SGL.u.Simple[SG_SIZE])) { 2175 span = 0; 2176 next = base = KVTOPHYS(v); 2177 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base); 2178 2179 /* How far can we go contiguously */ 2180 while ((len > 0) && (base == next)) { 2181 next = trunc_page(base) + PAGE_SIZE; 2182 size = next - base; 2183 if (size > len) { 2184 size = len; 2185 } 2186 span += size; 2187 v += size; 2188 len -= size; 2189 base = KVTOPHYS(v); 2190 } 2191 2192 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span); 2193 if (len == 0) { 2194 rw |= I2O_SGL_FLAGS_LAST_ELEMENT; 2195 } 2196 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), 2197 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw); 2198 ++sg; 2199 MessageSize += sizeof(*sg) / sizeof(U32); 2200 } 2201 /* We always do the request sense ... 
*/ 2202 if ((span = ccb->csio.sense_len) == 0) { 2203 span = sizeof(ccb->csio.sense_data); 2204 } 2205 SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, 2206 &(ccb->csio.sense_data), span); 2207 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr, 2208 MessageSize + (sizeof(*sg) / sizeof(U32))); 2209 return (Message_Ptr); 2210 } /* ASR_init_message */ 2211 2212 /* 2213 * Reset the adapter. 2214 */ 2215 STATIC INLINE U32 2216 ASR_initOutBound ( 2217 INOUT Asr_softc_t * sc) 2218 { 2219 struct initOutBoundMessage { 2220 I2O_EXEC_OUTBOUND_INIT_MESSAGE M; 2221 U32 R; 2222 }; 2223 defAlignLong(struct initOutBoundMessage,Message); 2224 PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr; 2225 OUT U32 * volatile Reply_Ptr; 2226 U32 Old; 2227 2228 /* 2229 * Build up our copy of the Message. 2230 */ 2231 Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message, 2232 sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE)); 2233 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame), 2234 I2O_EXEC_OUTBOUND_INIT); 2235 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE); 2236 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr, 2237 sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)); 2238 /* 2239 * Reset the Reply Status 2240 */ 2241 *(Reply_Ptr = (U32 *)((char *)Message_Ptr 2242 + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0; 2243 SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr, 2244 sizeof(U32)); 2245 /* 2246 * Send the Message out 2247 */ 2248 if ((Old = ASR_initiateCp (sc->ha_Virt, sc->ha_Fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) { 2249 u_long size, addr; 2250 2251 /* 2252 * Wait for a response (Poll). 2253 */ 2254 while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED); 2255 /* 2256 * Re-enable the interrupts. 2257 */ 2258 sc->ha_Virt->Mask = Old; 2259 /* 2260 * Populate the outbound table. 
2261 */ 2262 if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) { 2263 2264 /* Allocate the reply frames */ 2265 size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME) 2266 * sc->ha_Msgs_Count; 2267 2268 /* 2269 * contigmalloc only works reliably at 2270 * initialization time. 2271 */ 2272 if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME) 2273 contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul, 2274 0xFFFFFFFFul, (u_long)sizeof(U32), 0ul)) 2275 != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) { 2276 (void)bzero ((char *)sc->ha_Msgs, size); 2277 sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs); 2278 } 2279 } 2280 2281 /* Initialize the outbound FIFO */ 2282 if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) 2283 for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys; 2284 size; --size) { 2285 sc->ha_Virt->FromFIFO = addr; 2286 addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME); 2287 } 2288 return (*Reply_Ptr); 2289 } 2290 return (0); 2291 } /* ASR_initOutBound */ 2292 2293 /* 2294 * Set the system table 2295 */ 2296 STATIC INLINE int 2297 ASR_setSysTab( 2298 IN Asr_softc_t * sc) 2299 { 2300 PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr; 2301 PI2O_SET_SYSTAB_HEADER SystemTable; 2302 Asr_softc_t * ha; 2303 PI2O_SGE_SIMPLE_ELEMENT sg; 2304 int retVal; 2305 2306 if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc ( 2307 sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK)) 2308 == (PI2O_SET_SYSTAB_HEADER)NULL) { 2309 return (ENOMEM); 2310 } 2311 bzero (SystemTable, sizeof(I2O_SET_SYSTAB_HEADER)); 2312 for (ha = Asr_softc; ha; ha = ha->ha_next) { 2313 ++SystemTable->NumberEntries; 2314 } 2315 if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc ( 2316 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT) 2317 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)), 2318 M_TEMP, M_WAITOK)) == (PI2O_EXEC_SYS_TAB_SET_MESSAGE)NULL) { 2319 free (SystemTable, M_TEMP); 2320 return (ENOMEM); 2321 } 2322 (void)ASR_fillMessage((char *)Message_Ptr, 2323 
sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT) 2324 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT))); 2325 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame), 2326 (I2O_VERSION_11 + 2327 (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)) 2328 / sizeof(U32)) << 4))); 2329 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame), 2330 I2O_EXEC_SYS_TAB_SET); 2331 /* 2332 * Call the LCT table to determine the number of device entries 2333 * to reserve space for. 2334 * since this code is reused in several systems, code efficiency 2335 * is greater by using a shift operation rather than a divide by 2336 * sizeof(u_int32_t). 2337 */ 2338 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr 2339 + ((I2O_MESSAGE_FRAME_getVersionOffset( 2340 &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2)); 2341 SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER)); 2342 ++sg; 2343 for (ha = Asr_softc; ha; ha = ha->ha_next) { 2344 SG(sg, 0, 2345 ((ha->ha_next) 2346 ? 
(I2O_SGL_FLAGS_DIR) 2347 : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)), 2348 &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable)); 2349 ++sg; 2350 } 2351 SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0); 2352 SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT 2353 | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0); 2354 retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 2355 free (Message_Ptr, M_TEMP); 2356 free (SystemTable, M_TEMP); 2357 return (retVal); 2358 } /* ASR_setSysTab */ 2359 2360 STATIC INLINE int 2361 ASR_acquireHrt ( 2362 INOUT Asr_softc_t * sc) 2363 { 2364 defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message); 2365 I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr; 2366 struct { 2367 I2O_HRT Header; 2368 I2O_HRT_ENTRY Entry[MAX_CHANNEL]; 2369 } Hrt; 2370 u_int8_t NumberOfEntries; 2371 PI2O_HRT_ENTRY Entry; 2372 2373 bzero ((void *)&Hrt, sizeof (Hrt)); 2374 Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message, 2375 sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT) 2376 + sizeof(I2O_SGE_SIMPLE_ELEMENT)); 2377 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame), 2378 (I2O_VERSION_11 2379 + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)) 2380 / sizeof(U32)) << 4))); 2381 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame), 2382 I2O_EXEC_HRT_GET); 2383 2384 /* 2385 * Set up the buffers as scatter gather elements. 
2386 */ 2387 SG(&(Message_Ptr->SGL), 0, 2388 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, 2389 &Hrt, sizeof(Hrt)); 2390 if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) { 2391 return (ENODEV); 2392 } 2393 if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header)) 2394 > (MAX_CHANNEL + 1)) { 2395 NumberOfEntries = MAX_CHANNEL + 1; 2396 } 2397 for (Entry = Hrt.Header.HRTEntry; 2398 NumberOfEntries != 0; 2399 ++Entry, --NumberOfEntries) { 2400 PI2O_LCT_ENTRY Device; 2401 2402 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY) 2403 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); 2404 ++Device) { 2405 if (I2O_LCT_ENTRY_getLocalTID(Device) 2406 == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) { 2407 Device->le_bus = I2O_HRT_ENTRY_getAdapterID( 2408 Entry) >> 16; 2409 if ((Device->le_bus > sc->ha_MaxBus) 2410 && (Device->le_bus <= MAX_CHANNEL)) { 2411 sc->ha_MaxBus = Device->le_bus; 2412 } 2413 } 2414 } 2415 } 2416 return (0); 2417 } /* ASR_acquireHrt */ 2418 2419 /* 2420 * Enable the adapter. 2421 */ 2422 STATIC INLINE int 2423 ASR_enableSys ( 2424 IN Asr_softc_t * sc) 2425 { 2426 defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message); 2427 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr; 2428 2429 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message, 2430 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE)); 2431 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame), 2432 I2O_EXEC_SYS_ENABLE); 2433 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0); 2434 } /* ASR_enableSys */ 2435 2436 /* 2437 * Perform the stages necessary to initialize the adapter 2438 */ 2439 STATIC int 2440 ASR_init( 2441 IN Asr_softc_t * sc) 2442 { 2443 return ((ASR_initOutBound(sc) == 0) 2444 || (ASR_setSysTab(sc) != CAM_REQ_CMP) 2445 || (ASR_enableSys(sc) != CAM_REQ_CMP)); 2446 } /* ASR_init */ 2447 2448 /* 2449 * Send a Synchronize Cache command to the target device. 
2450 */ 2451 STATIC INLINE void 2452 ASR_sync ( 2453 IN Asr_softc_t * sc, 2454 IN int bus, 2455 IN int target, 2456 IN int lun) 2457 { 2458 tid_t TID; 2459 2460 /* 2461 * We will not synchronize the device when there are outstanding 2462 * commands issued by the OS (this is due to a locked up device, 2463 * as the OS normally would flush all outstanding commands before 2464 * issuing a shutdown or an adapter reset). 2465 */ 2466 if ((sc != (Asr_softc_t *)NULL) 2467 && (LIST_FIRST(&(sc->ha_ccb)) != (struct ccb_hdr *)NULL) 2468 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1) 2469 && (TID != (tid_t)0)) { 2470 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message); 2471 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr; 2472 2473 bzero (Message_Ptr 2474 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message), 2475 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2476 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)); 2477 2478 I2O_MESSAGE_FRAME_setVersionOffset( 2479 (PI2O_MESSAGE_FRAME)Message_Ptr, 2480 I2O_VERSION_11 2481 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2482 - sizeof(I2O_SG_ELEMENT)) 2483 / sizeof(U32)) << 4)); 2484 I2O_MESSAGE_FRAME_setMessageSize( 2485 (PI2O_MESSAGE_FRAME)Message_Ptr, 2486 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2487 - sizeof(I2O_SG_ELEMENT)) 2488 / sizeof(U32)); 2489 I2O_MESSAGE_FRAME_setInitiatorAddress ( 2490 (PI2O_MESSAGE_FRAME)Message_Ptr, 1); 2491 I2O_MESSAGE_FRAME_setFunction( 2492 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE); 2493 I2O_MESSAGE_FRAME_setTargetAddress( 2494 (PI2O_MESSAGE_FRAME)Message_Ptr, TID); 2495 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode ( 2496 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2497 I2O_SCSI_SCB_EXEC); 2498 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID); 2499 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2500 I2O_SCB_FLAG_ENABLE_DISCONNECT 2501 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2502 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER); 2503 
I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID( 2504 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2505 DPT_ORGANIZATION_ID); 2506 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6); 2507 Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE; 2508 Message_Ptr->CDB[1] = (lun << 5); 2509 2510 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2511 (I2O_SCB_FLAG_XFER_FROM_DEVICE 2512 | I2O_SCB_FLAG_ENABLE_DISCONNECT 2513 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2514 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)); 2515 2516 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 2517 2518 } 2519 } 2520 2521 STATIC INLINE void 2522 ASR_synchronize ( 2523 IN Asr_softc_t * sc) 2524 { 2525 int bus, target, lun; 2526 2527 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) { 2528 for (target = 0; target <= sc->ha_MaxId; ++target) { 2529 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) { 2530 ASR_sync(sc,bus,target,lun); 2531 } 2532 } 2533 } 2534 } 2535 2536 /* 2537 * Reset the HBA, targets and BUS. 2538 * Currently this resets *all* the SCSI busses. 2539 */ 2540 STATIC INLINE void 2541 asr_hbareset( 2542 IN Asr_softc_t * sc) 2543 { 2544 ASR_synchronize (sc); 2545 (void)ASR_reset (sc); 2546 } /* asr_hbareset */ 2547 2548 /* 2549 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP 2550 * limit and a reduction in error checking (in the pre 4.0 case). 2551 */ 2552 STATIC int 2553 asr_pci_map_mem ( 2554 #if __FreeBSD_version >= 400000 2555 IN device_t tag, 2556 #else 2557 IN pcici_t tag, 2558 #endif 2559 IN Asr_softc_t * sc) 2560 { 2561 int rid; 2562 u_int32_t p, l, s; 2563 2564 #if __FreeBSD_version >= 400000 2565 /* 2566 * I2O specification says we must find first *memory* mapped BAR 2567 */ 2568 for (rid = PCIR_MAPS; 2569 rid < (PCIR_MAPS + 4 * sizeof(u_int32_t)); 2570 rid += sizeof(u_int32_t)) { 2571 p = pci_read_config(tag, rid, sizeof(p)); 2572 if ((p & 1) == 0) { 2573 break; 2574 } 2575 } 2576 /* 2577 * Give up? 
2578 */ 2579 if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) { 2580 rid = PCIR_MAPS; 2581 } 2582 p = pci_read_config(tag, rid, sizeof(p)); 2583 pci_write_config(tag, rid, -1, sizeof(p)); 2584 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15); 2585 pci_write_config(tag, rid, p, sizeof(p)); 2586 if (l > MAX_MAP) { 2587 l = MAX_MAP; 2588 } 2589 /* 2590 * The 2005S Zero Channel RAID solution is not a perfect PCI 2591 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once 2592 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to 2593 * BAR0+2MB and sets it's size to 2MB. The IOP registers are 2594 * accessible via BAR0, the messaging registers are accessible 2595 * via BAR1. If the subdevice code is 50 to 59 decimal. 2596 */ 2597 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s)); 2598 if (s != 0xA5111044) { 2599 s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s)); 2600 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0) 2601 && (ADPTDOMINATOR_SUB_ID_START <= s) 2602 && (s <= ADPTDOMINATOR_SUB_ID_END)) { 2603 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */ 2604 } 2605 } 2606 p &= ~15; 2607 sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid, 2608 p, p + l, l, RF_ACTIVE); 2609 if (sc->ha_mem_res == (struct resource *)NULL) { 2610 return (0); 2611 } 2612 sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res); 2613 if (sc->ha_Base == (void *)NULL) { 2614 return (0); 2615 } 2616 sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res); 2617 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */ 2618 if ((rid += sizeof(u_int32_t)) 2619 >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) { 2620 return (0); 2621 } 2622 p = pci_read_config(tag, rid, sizeof(p)); 2623 pci_write_config(tag, rid, -1, sizeof(p)); 2624 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15); 2625 pci_write_config(tag, rid, p, sizeof(p)); 2626 if (l > MAX_MAP) { 2627 l = MAX_MAP; 2628 } 2629 p &= ~15; 2630 sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid, 2631 p, p + l, l, 
RF_ACTIVE); 2632 if (sc->ha_mes_res == (struct resource *)NULL) { 2633 return (0); 2634 } 2635 if ((void *)rman_get_start(sc->ha_mes_res) == (void *)NULL) { 2636 return (0); 2637 } 2638 sc->ha_Fvirt = (U8 *) rman_get_virtual(sc->ha_mes_res); 2639 } else { 2640 sc->ha_Fvirt = (U8 *)(sc->ha_Virt); 2641 } 2642 #else 2643 vm_size_t psize, poffs; 2644 2645 /* 2646 * I2O specification says we must find first *memory* mapped BAR 2647 */ 2648 for (rid = PCI_MAP_REG_START; 2649 rid < (PCI_MAP_REG_START + 4 * sizeof(u_int32_t)); 2650 rid += sizeof(u_int32_t)) { 2651 p = pci_conf_read (tag, rid); 2652 if ((p & 1) == 0) { 2653 break; 2654 } 2655 } 2656 if (rid >= (PCI_MAP_REG_START + 4 * sizeof(u_int32_t))) { 2657 rid = PCI_MAP_REG_START; 2658 } 2659 /* 2660 ** save old mapping, get size and type of memory 2661 ** 2662 ** type is in the lowest four bits. 2663 ** If device requires 2^n bytes, the next 2664 ** n-4 bits are read as 0. 2665 */ 2666 2667 sc->ha_Base = (void *)((p = pci_conf_read (tag, rid)) 2668 & PCI_MAP_MEMORY_ADDRESS_MASK); 2669 pci_conf_write (tag, rid, 0xfffffffful); 2670 l = pci_conf_read (tag, rid); 2671 pci_conf_write (tag, rid, p); 2672 2673 /* 2674 ** check the type 2675 */ 2676 2677 if (!((l & PCI_MAP_MEMORY_TYPE_MASK) == PCI_MAP_MEMORY_TYPE_32BIT_1M 2678 && ((u_long)sc->ha_Base & ~0xfffff) == 0) 2679 && ((l & PCI_MAP_MEMORY_TYPE_MASK) != PCI_MAP_MEMORY_TYPE_32BIT)) { 2680 debug_asr_printf ( 2681 "asr_pci_map_mem failed: bad memory type=0x%x\n", 2682 (unsigned) l); 2683 return (0); 2684 }; 2685 2686 /* 2687 ** get the size. 2688 */ 2689 2690 psize = -(l & PCI_MAP_MEMORY_ADDRESS_MASK); 2691 if (psize > MAX_MAP) { 2692 psize = MAX_MAP; 2693 } 2694 /* 2695 * The 2005S Zero Channel RAID solution is not a perfect PCI 2696 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once 2697 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to 2698 * BAR0+2MB and sets it's size to 2MB. 
The IOP registers are 2699 * accessible via BAR0, the messaging registers are accessible 2700 * via BAR1. If the subdevice code is 50 to 59 decimal. 2701 */ 2702 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s)); 2703 if (s != 0xA5111044) { 2704 s = pci_conf_read (tag, PCIR_SUBVEND_0) 2705 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0) 2706 && (ADPTDOMINATOR_SUB_ID_START <= s) 2707 && (s <= ADPTDOMINATOR_SUB_ID_END)) { 2708 psize = MAX_MAP; 2709 } 2710 } 2711 2712 if ((sc->ha_Base == (void *)NULL) 2713 || (sc->ha_Base == (void *)PCI_MAP_MEMORY_ADDRESS_MASK)) { 2714 debug_asr_printf ("asr_pci_map_mem: not configured by bios.\n"); 2715 return (0); 2716 }; 2717 2718 /* 2719 ** Truncate sc->ha_Base to page boundary. 2720 ** (Or does pmap_mapdev the job?) 2721 */ 2722 2723 poffs = (u_long)sc->ha_Base - trunc_page ((u_long)sc->ha_Base); 2724 sc->ha_Virt = (i2oRegs_t *)pmap_mapdev ((u_long)sc->ha_Base - poffs, 2725 psize + poffs); 2726 2727 if (sc->ha_Virt == (i2oRegs_t *)NULL) { 2728 return (0); 2729 } 2730 2731 sc->ha_Virt = (i2oRegs_t *)((u_long)sc->ha_Virt + poffs); 2732 if (s == 0xA5111044) { 2733 if ((rid += sizeof(u_int32_t)) 2734 >= (PCI_MAP_REG_START + 4 * sizeof(u_int32_t))) { 2735 return (0); 2736 } 2737 2738 /* 2739 ** save old mapping, get size and type of memory 2740 ** 2741 ** type is in the lowest four bits. 2742 ** If device requires 2^n bytes, the next 2743 ** n-4 bits are read as 0. 
2744 */ 2745 2746 if ((((p = pci_conf_read (tag, rid)) 2747 & PCI_MAP_MEMORY_ADDRESS_MASK) == 0L) 2748 || ((p & PCI_MAP_MEMORY_ADDRESS_MASK) 2749 == PCI_MAP_MEMORY_ADDRESS_MASK)) { 2750 debug_asr_printf ("asr_pci_map_mem: not configured by bios.\n"); 2751 } 2752 pci_conf_write (tag, rid, 0xfffffffful); 2753 l = pci_conf_read (tag, rid); 2754 pci_conf_write (tag, rid, p); 2755 p &= PCI_MAP_MEMORY_TYPE_MASK; 2756 2757 /* 2758 ** check the type 2759 */ 2760 2761 if (!((l & PCI_MAP_MEMORY_TYPE_MASK) 2762 == PCI_MAP_MEMORY_TYPE_32BIT_1M 2763 && (p & ~0xfffff) == 0) 2764 && ((l & PCI_MAP_MEMORY_TYPE_MASK) 2765 != PCI_MAP_MEMORY_TYPE_32BIT)) { 2766 debug_asr_printf ( 2767 "asr_pci_map_mem failed: bad memory type=0x%x\n", 2768 (unsigned) l); 2769 return (0); 2770 }; 2771 2772 /* 2773 ** get the size. 2774 */ 2775 2776 psize = -(l & PCI_MAP_MEMORY_ADDRESS_MASK); 2777 if (psize > MAX_MAP) { 2778 psize = MAX_MAP; 2779 } 2780 2781 /* 2782 ** Truncate p to page boundary. 2783 ** (Or does pmap_mapdev the job?) 2784 */ 2785 2786 poffs = p - trunc_page (p); 2787 sc->ha_Fvirt = (U8 *)pmap_mapdev (p - poffs, psize + poffs); 2788 2789 if (sc->ha_Fvirt == (U8 *)NULL) { 2790 return (0); 2791 } 2792 2793 sc->ha_Fvirt = (U8 *)((u_long)sc->ha_Fvirt + poffs); 2794 } else { 2795 sc->ha_Fvirt = (U8 *)(sc->ha_Virt); 2796 } 2797 #endif 2798 return (1); 2799 } /* asr_pci_map_mem */ 2800 2801 /* 2802 * A simplified copy of the real pci_map_int with additional 2803 * registration requirements. 
2804 */ 2805 STATIC int 2806 asr_pci_map_int ( 2807 #if __FreeBSD_version >= 400000 2808 IN device_t tag, 2809 #else 2810 IN pcici_t tag, 2811 #endif 2812 IN Asr_softc_t * sc) 2813 { 2814 #if __FreeBSD_version >= 400000 2815 int rid = 0; 2816 2817 sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid, 2818 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); 2819 if (sc->ha_irq_res == (struct resource *)NULL) { 2820 return (0); 2821 } 2822 if (bus_setup_intr(tag, sc->ha_irq_res, INTR_TYPE_CAM, 2823 (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) { 2824 return (0); 2825 } 2826 sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char)); 2827 #else 2828 if (!pci_map_int(tag, (pci_inthand_t *)asr_intr, 2829 (void *)sc, &cam_imask)) { 2830 return (0); 2831 } 2832 sc->ha_irq = pci_conf_read(tag, PCIR_INTLINE); 2833 #endif 2834 return (1); 2835 } /* asr_pci_map_int */ 2836 2837 /* 2838 * Attach the devices, and virtual devices to the driver list. 2839 */ 2840 STATIC ATTACH_RET 2841 asr_attach (ATTACH_ARGS) 2842 { 2843 Asr_softc_t * sc; 2844 struct scsi_inquiry_data * iq; 2845 ATTACH_SET(); 2846 2847 if ((sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT)) == (Asr_softc_t *)NULL) { 2848 ATTACH_RETURN(ENOMEM); 2849 } 2850 if (Asr_softc == (Asr_softc_t *)NULL) { 2851 /* 2852 * Fixup the OS revision as saved in the dptsig for the 2853 * engine (dptioctl.h) to pick up. 
2854 */ 2855 bcopy (osrelease, &ASR_sig.dsDescription[16], 5); 2856 printf ("asr%d: major=%d\n", unit, asr_cdevsw.d_maj); 2857 } 2858 /* 2859 * Initialize the software structure 2860 */ 2861 bzero (sc, sizeof(*sc)); 2862 LIST_INIT(&(sc->ha_ccb)); 2863 # ifdef ASR_MEASURE_PERFORMANCE 2864 { 2865 u_int32_t i; 2866 2867 // initialize free list for timeQ 2868 sc->ha_timeQFreeHead = 0; 2869 sc->ha_timeQFreeTail = MAX_TIMEQ_SIZE - 1; 2870 for (i = 0; i < MAX_TIMEQ_SIZE; i++) { 2871 sc->ha_timeQFreeList[i] = i; 2872 } 2873 } 2874 # endif 2875 /* Link us into the HA list */ 2876 { 2877 Asr_softc_t **ha; 2878 2879 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next)); 2880 *(ha) = sc; 2881 } 2882 { 2883 PI2O_EXEC_STATUS_GET_REPLY status; 2884 int size; 2885 2886 /* 2887 * This is the real McCoy! 2888 */ 2889 if (!asr_pci_map_mem(tag, sc)) { 2890 printf ("asr%d: could not map memory\n", unit); 2891 ATTACH_RETURN(ENXIO); 2892 } 2893 /* Enable if not formerly enabled */ 2894 #if __FreeBSD_version >= 400000 2895 pci_write_config (tag, PCIR_COMMAND, 2896 pci_read_config (tag, PCIR_COMMAND, sizeof(char)) 2897 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char)); 2898 /* Knowledge is power, responsibility is direct */ 2899 { 2900 struct pci_devinfo { 2901 STAILQ_ENTRY(pci_devinfo) pci_links; 2902 struct resource_list resources; 2903 pcicfgregs cfg; 2904 } * dinfo = device_get_ivars(tag); 2905 sc->ha_pciBusNum = dinfo->cfg.bus; 2906 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3) 2907 | dinfo->cfg.func; 2908 } 2909 #else 2910 pci_conf_write (tag, PCIR_COMMAND, 2911 pci_conf_read (tag, PCIR_COMMAND) 2912 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); 2913 /* Knowledge is power, responsibility is direct */ 2914 switch (pci_mechanism) { 2915 2916 case 1: 2917 sc->ha_pciBusNum = tag.cfg1 >> 16; 2918 sc->ha_pciDeviceNum = tag.cfg1 >> 8; 2919 2920 case 2: 2921 sc->ha_pciBusNum = tag.cfg2.forward; 2922 sc->ha_pciDeviceNum = ((tag.cfg2.enable >> 1) & 7) 2923 | (tag.cfg2.port >> 5); 2924 } 2925 
#endif 2926 /* Check if the device is there? */ 2927 if ((ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt) == 0) 2928 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)malloc ( 2929 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK)) 2930 == (PI2O_EXEC_STATUS_GET_REPLY)NULL) 2931 || (ASR_getStatus(sc->ha_Virt, sc->ha_Fvirt, status) == NULL)) { 2932 printf ("asr%d: could not initialize hardware\n", unit); 2933 ATTACH_RETURN(ENODEV); /* Get next, maybe better luck */ 2934 } 2935 sc->ha_SystemTable.OrganizationID = status->OrganizationID; 2936 sc->ha_SystemTable.IOP_ID = status->IOP_ID; 2937 sc->ha_SystemTable.I2oVersion = status->I2oVersion; 2938 sc->ha_SystemTable.IopState = status->IopState; 2939 sc->ha_SystemTable.MessengerType = status->MessengerType; 2940 sc->ha_SystemTable.InboundMessageFrameSize 2941 = status->InboundMFrameSize; 2942 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow 2943 = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO)); 2944 2945 if (!asr_pci_map_int(tag, (void *)sc)) { 2946 printf ("asr%d: could not map interrupt\n", unit); 2947 ATTACH_RETURN(ENXIO); 2948 } 2949 2950 /* Adjust the maximim inbound count */ 2951 if (((sc->ha_QueueSize 2952 = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) 2953 > MAX_INBOUND) 2954 || (sc->ha_QueueSize == 0)) { 2955 sc->ha_QueueSize = MAX_INBOUND; 2956 } 2957 2958 /* Adjust the maximum outbound count */ 2959 if (((sc->ha_Msgs_Count 2960 = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) 2961 > MAX_OUTBOUND) 2962 || (sc->ha_Msgs_Count == 0)) { 2963 sc->ha_Msgs_Count = MAX_OUTBOUND; 2964 } 2965 if (sc->ha_Msgs_Count > sc->ha_QueueSize) { 2966 sc->ha_Msgs_Count = sc->ha_QueueSize; 2967 } 2968 2969 /* Adjust the maximum SG size to adapter */ 2970 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize( 2971 status) << 2)) > MAX_INBOUND_SIZE) { 2972 size = MAX_INBOUND_SIZE; 2973 } 2974 free (status, M_TEMP); 2975 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2976 + 
sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT); 2977 } 2978 2979 /* 2980 * Only do a bus/HBA reset on the first time through. On this 2981 * first time through, we do not send a flush to the devices. 2982 */ 2983 if (ASR_init(sc) == 0) { 2984 struct BufferInfo { 2985 I2O_PARAM_RESULTS_LIST_HEADER Header; 2986 I2O_PARAM_READ_OPERATION_RESULT Read; 2987 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info; 2988 }; 2989 defAlignLong (struct BufferInfo, Buffer); 2990 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info; 2991 # define FW_DEBUG_BLED_OFFSET 8 2992 2993 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR) 2994 ASR_getParams(sc, 0, 2995 I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO, 2996 Buffer, sizeof(struct BufferInfo))) 2997 != (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) { 2998 sc->ha_blinkLED = sc->ha_Fvirt 2999 + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info) 3000 + FW_DEBUG_BLED_OFFSET; 3001 } 3002 if (ASR_acquireLct(sc) == 0) { 3003 (void)ASR_acquireHrt(sc); 3004 } 3005 } else { 3006 printf ("asr%d: failed to initialize\n", unit); 3007 ATTACH_RETURN(ENXIO); 3008 } 3009 /* 3010 * Add in additional probe responses for more channels. We 3011 * are reusing the variable `target' for a channel loop counter. 3012 * Done here because of we need both the acquireLct and 3013 * acquireHrt data. 
3014 */ 3015 { PI2O_LCT_ENTRY Device; 3016 3017 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY) 3018 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); 3019 ++Device) { 3020 if (Device->le_type == I2O_UNKNOWN) { 3021 continue; 3022 } 3023 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) { 3024 if (Device->le_target > sc->ha_MaxId) { 3025 sc->ha_MaxId = Device->le_target; 3026 } 3027 if (Device->le_lun > sc->ha_MaxLun) { 3028 sc->ha_MaxLun = Device->le_lun; 3029 } 3030 } 3031 if (((Device->le_type & I2O_PORT) != 0) 3032 && (Device->le_bus <= MAX_CHANNEL)) { 3033 /* Do not increase MaxId for efficiency */ 3034 sc->ha_adapter_target[Device->le_bus] 3035 = Device->le_target; 3036 } 3037 } 3038 } 3039 3040 3041 /* 3042 * Print the HBA model number as inquired from the card. 3043 */ 3044 3045 printf ("asr%d:", unit); 3046 3047 if ((iq = (struct scsi_inquiry_data *)malloc ( 3048 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK)) 3049 != (struct scsi_inquiry_data *)NULL) { 3050 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message); 3051 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr; 3052 int posted = 0; 3053 3054 bzero (iq, sizeof(struct scsi_inquiry_data)); 3055 bzero (Message_Ptr 3056 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message), 3057 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 3058 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)); 3059 3060 I2O_MESSAGE_FRAME_setVersionOffset( 3061 (PI2O_MESSAGE_FRAME)Message_Ptr, 3062 I2O_VERSION_11 3063 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 3064 - sizeof(I2O_SG_ELEMENT)) 3065 / sizeof(U32)) << 4)); 3066 I2O_MESSAGE_FRAME_setMessageSize( 3067 (PI2O_MESSAGE_FRAME)Message_Ptr, 3068 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 3069 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) 3070 / sizeof(U32)); 3071 I2O_MESSAGE_FRAME_setInitiatorAddress ( 3072 (PI2O_MESSAGE_FRAME)Message_Ptr, 1); 3073 I2O_MESSAGE_FRAME_setFunction( 3074 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE); 3075 
I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode ( 3076 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 3077 I2O_SCSI_SCB_EXEC); 3078 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 3079 I2O_SCB_FLAG_ENABLE_DISCONNECT 3080 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 3081 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER); 3082 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1); 3083 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID( 3084 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 3085 DPT_ORGANIZATION_ID); 3086 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6); 3087 Message_Ptr->CDB[0] = INQUIRY; 3088 Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data); 3089 if (Message_Ptr->CDB[4] == 0) { 3090 Message_Ptr->CDB[4] = 255; 3091 } 3092 3093 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 3094 (I2O_SCB_FLAG_XFER_FROM_DEVICE 3095 | I2O_SCB_FLAG_ENABLE_DISCONNECT 3096 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 3097 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)); 3098 3099 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount( 3100 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, 3101 sizeof(struct scsi_inquiry_data)); 3102 SG(&(Message_Ptr->SGL), 0, 3103 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, 3104 iq, sizeof(struct scsi_inquiry_data)); 3105 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 3106 3107 if (iq->vendor[0] && (iq->vendor[0] != ' ')) { 3108 printf (" "); 3109 ASR_prstring (iq->vendor, 8); 3110 ++posted; 3111 } 3112 if (iq->product[0] && (iq->product[0] != ' ')) { 3113 printf (" "); 3114 ASR_prstring (iq->product, 16); 3115 ++posted; 3116 } 3117 if (iq->revision[0] && (iq->revision[0] != ' ')) { 3118 printf (" FW Rev. "); 3119 ASR_prstring (iq->revision, 4); 3120 ++posted; 3121 } 3122 free ((caddr_t)iq, M_TEMP); 3123 if (posted) { 3124 printf (","); 3125 } 3126 } 3127 printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1, 3128 (sc->ha_QueueSize > MAX_INBOUND) ? 
	  MAX_INBOUND : sc->ha_QueueSize);

	/*
	 * fill in the prototype cam_path.
	 * Register one SIM/bus/path per channel; allocation failures for a
	 * single bus are skipped (continue) rather than failing the attach.
	 */
	{
		int bus;
		union asr_ccb * ccb;

		if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
			printf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit);
			ATTACH_RETURN(ENOMEM);
		}
		for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
			struct cam_devq * devq;
			/* Clamp the per-bus queue depth to the inbound FIFO limit */
			int QueueSize = sc->ha_QueueSize;

			if (QueueSize > MAX_INBOUND) {
				QueueSize = MAX_INBOUND;
			}

			/*
			 * Create the device queue for our SIM(s).
			 */
			if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
				continue;
			}

			/*
			 * Construct our first channel SIM entry
			 */
			sc->ha_sim[bus] = cam_sim_alloc(
			  asr_action, asr_poll, "asr", sc,
			  unit, 1, QueueSize, devq);
			if (sc->ha_sim[bus] == NULL) {
				continue;
			}

			if (xpt_bus_register(sc->ha_sim[bus], bus)
			  != CAM_SUCCESS) {
				cam_sim_free(sc->ha_sim[bus],
				  /*free_devq*/TRUE);
				sc->ha_sim[bus] = NULL;
				continue;
			}

			if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
			  cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
			  CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
				xpt_bus_deregister(
				  cam_sim_path(sc->ha_sim[bus]));
				cam_sim_free(sc->ha_sim[bus],
				  /*free_devq*/TRUE);
				sc->ha_sim[bus] = NULL;
				continue;
			}
		}
		asr_free_ccb (ccb);
	}
	/*
	 * Generate the device node information
	 */
	(void)make_dev(&asr_cdevsw, unit, 0, 0, S_IRWXU, "rasr%d", unit);
	destroy_dev(makedev(asr_cdevsw.d_maj,unit+1));
	ATTACH_RETURN(0);
} /* asr_attach */

/*
 * CAM poll entry point: run the interrupt handler synchronously so
 * queued completions are reaped when real interrupts are unavailable
 * (e.g. during boot or dump).
 */
STATIC void
asr_poll(
	IN struct cam_sim *sim)
{
	asr_intr(cam_sim_softc(sim));
} /* asr_poll */

/*
 * CAM action entry point: dispatch on ccb_h.func_code. Every path must
 * complete the CCB via xpt_done() after setting ccb_h.status.
 */
STATIC void
asr_action(
	IN struct cam_sim * sim,
	IN union ccb * ccb)
{
	struct Asr_softc * sc;

	debug_asr_printf ("asr_action(%lx,%lx{%x})\n",
	  (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code);

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));

	/* Stash the softc in the CCB so the completion path can find it */
	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {

	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct Message {
			char M[MAX_INBOUND_SIZE];
		};
		defAlignLong(struct Message,Message);
		PI2O_MESSAGE_FRAME   Message_Ptr;

		/* Reject incoming commands while we are resetting the card */
		if (sc->ha_in_reset != HA_OPERATIONAL) {
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			if (sc->ha_in_reset >= HA_OFF_LINE) {
				/* HBA is now off-line */
				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
			} else {
				/* HBA currently resetting, try again later. */
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			}
			debug_asr_cmd_printf (" e\n");
			xpt_done(ccb);
			debug_asr_cmd_printf (" q\n");
			break;
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			printf(
			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ccb->csio.cdb_io.cdb_bytes[0],
			  cam_sim_bus(sim),
			  ccb->ccb_h.target_id,
			  ccb->ccb_h.target_lun);
		}
		debug_asr_cmd_printf ("(%d,%d,%d,%d)",
		  cam_sim_unit(sim),
		  cam_sim_bus(sim),
		  ccb->ccb_h.target_id,
		  ccb->ccb_h.target_lun);
		debug_asr_cmd_dump_ccb(ccb);

		/*
		 * Build the I2O message in the aligned on-stack buffer; a
		 * NULL return means no TID maps to the addressed device.
		 */
		if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb,
		  (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) {
			debug_asr_cmd2_printf ("TID=%x:\n",
			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
			debug_asr_cmd2_dump_message(Message_Ptr);
			debug_asr_cmd1_printf (" q");

			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
#ifdef ASR_MEASURE_PERFORMANCE
				++sc->ha_performance.command_too_busy;
#endif
				/* Inbound FIFO full: ask CAM to requeue */
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
				debug_asr_cmd_printf (" E\n");
				xpt_done(ccb);
			}
			debug_asr_cmd_printf (" Q\n");
			break;
		}
		/*
		 * We will get here if there is no valid TID for the device
		 * referenced in the scsi command packet.
		 */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
		debug_asr_cmd_printf (" B\n");
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		/*
		 * Reset HBA device ...
		 * NOTE(review): this resets the entire HBA rather than only
		 * the addressed device — confirm that is intended.
		 */
		asr_hbareset (sc);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

# if (defined(REPORT_LUNS))
	/*
	 * NOTE(review): REPORT_LUNS is a SCSI opcode, not an XPT function
	 * code; its presence in this switch looks suspect — verify.
	 */
	case REPORT_LUNS:
# endif
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* XXX Implement */
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts;
		u_int	target_mask;	/* NOTE(review): computed but unused */

		cts = &(ccb->cts);
		target_mask = 0x01 << ccb->ccb_h.target_id;
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			/* Report fixed wide/sync/tagged capabilities */
			cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
			cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			cts->sync_period = 6; /* 40MHz */
			cts->sync_offset = 15;

			cts->valid = CCB_TRANS_SYNC_RATE_VALID
				   | CCB_TRANS_SYNC_OFFSET_VALID
				   | CCB_TRANS_BUS_WIDTH_VALID
				   | CCB_TRANS_DISC_VALID
				   | CCB_TRANS_TQ_VALID;
			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		}
		xpt_done(ccb);
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;

		ccg = &(ccb->ccg);
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);

		/* Standard extended-translation geometry by volume size */
		if (size_mb > 4096) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else if (size_mb > 2048) {
			ccg->heads = 128;
			ccg->secs_per_track = 63;
		} else if (size_mb > 1024) {
			ccg->heads = 65;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_BUS:	/* Reset the specified SCSI bus */
		ASR_resetBus (sc, cam_sim_bus(sim));
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:	/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:	/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &(ccb->cpi);

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* Not necessary to reset bus, done by HDM initialization */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sc->ha_MaxId;
		cpi->max_lun = sc->ha_MaxLun;
		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
} /* asr_action */

#ifdef ASR_MEASURE_PERFORMANCE
#define WRITE_OP 1
#define READ_OP 2
#define min_submitR sc->ha_performance.read_by_size_min_time[index]
#define max_submitR sc->ha_performance.read_by_size_max_time[index]
#define min_submitW sc->ha_performance.write_by_size_min_time[index]
#define max_submitW sc->ha_performance.write_by_size_max_time[index]

/*
 * Accumulate per-transfer-size performance statistics for one completed
 * READ or WRITE: bump the count, add the elapsed time to the running
 * total and track min/max. submitted_time == 0xffffffff is the sentinel
 * for "no timing available" and only counts the command.
 */
STATIC INLINE void
asr_IObySize(
	IN Asr_softc_t * sc,
	IN u_int32_t submitted_time,
	IN int op,
	IN int index)
{
	struct timeval   submitted_timeval;

	submitted_timeval.tv_sec = 0;
	submitted_timeval.tv_usec = submitted_time;

	if ( op == READ_OP ) {
		++sc->ha_performance.read_by_size_count[index];

		if ( submitted_time != 0xffffffff ) {
			timevaladd(
			  &(sc->ha_performance.read_by_size_total_time[index]),
			  &submitted_timeval);
			if ( (min_submitR == 0)
			  || (submitted_time < min_submitR) ) {
				min_submitR = submitted_time;
			}

			if ( submitted_time > max_submitR ) {
				max_submitR = submitted_time;
			}
		}
	} else {
		++sc->ha_performance.write_by_size_count[index];
		if ( submitted_time != 0xffffffff ) {
			timevaladd(
			  &(sc->ha_performance.write_by_size_total_time[index]),
			  &submitted_timeval);
			if ( (submitted_time < min_submitW)
			  || (min_submitW == 0) ) {
				min_submitW = submitted_time;
			}

			if ( submitted_time > max_submitW ) {
				max_submitW = submitted_time;
			}
		}
	}
} /* asr_IObySize */
#endif

/*
 * Handle processing of current CCB as pointed to by the Status.
 * Drains the adapter's outbound (reply) FIFO, translating each I2O
 * reply frame into a CAM status on the originating CCB and completing
 * it via xpt_done() (or wakeup() for path-less internal commands).
 * Returns non-zero if at least one reply was processed.
 */
STATIC int
asr_intr (
	IN Asr_softc_t * sc)
{
	OUT int processed;

#ifdef ASR_MEASURE_PERFORMANCE
	struct timeval junk;

	microtime(&junk);
	sc->ha_performance.intr_started = junk;
#endif

	for (processed = 0;
	  sc->ha_Virt->Status & Mask_InterruptsDisabled;
	  processed = 1) {
		union asr_ccb			   * ccb;
		U32				     ReplyOffset;
		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME  Reply;

		/*
		 * NOTE(review): FromFIFO is deliberately read twice before
		 * concluding the queue is empty — presumably to cover a
		 * hardware latching race; confirm against the I2O spec.
		 */
		if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
		 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
			break;
		}
		/* Convert the bus (physical) reply offset to a kernel VA */
		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
		/*
		 * We do not need any (optional byteswapping) method access to
		 * the Initiator context field.
		 */
		ccb = (union asr_ccb *)(long)
		  I2O_MESSAGE_FRAME_getInitiatorContext64(
		    &(Reply->StdReplyFrame.StdMessageFrame));
		if (I2O_MESSAGE_FRAME_getMsgFlags(
		  &(Reply->StdReplyFrame.StdMessageFrame))
		  & I2O_MESSAGE_FLAGS_FAIL) {
			defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
			PI2O_UTIL_NOP_MESSAGE	     Message_Ptr;
			U32			     MessageOffset;

			MessageOffset = (u_long)
			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
			/*
			 * Get the Original Message Frame's address, and get
			 * its Transaction Context into our space. (Currently
			 * unused at original authorship, but better to be
			 * safe than sorry). Straight copy means that we
			 * need not concern ourselves with the (optional
			 * byteswapping) method access.
			 */
			Reply->StdReplyFrame.TransactionContext
			  = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
			    (sc->ha_Fvirt + MessageOffset))->TransactionContext;
			/*
			 * For 64 bit machines, we need to reconstruct the
			 * 64 bit context.
			 */
			ccb = (union asr_ccb *)(long)
			  I2O_MESSAGE_FRAME_getInitiatorContext64(
			    &(Reply->StdReplyFrame.StdMessageFrame));
			/*
			 * Unique error code for command failure.
			 */
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply->StdReplyFrame), (u_int16_t)-2);
			/*
			 * Modify the message frame to contain a NOP and
			 * re-issue it to the controller.
			 */
			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
			  Message, sizeof(I2O_UTIL_NOP_MESSAGE));
# if (I2O_UTIL_NOP != 0)
			I2O_MESSAGE_FRAME_setFunction (
			  &(Message_Ptr->StdMessageFrame),
			  I2O_UTIL_NOP);
# endif
			/*
			 * Copy the packet out to the Original Message
			 */
			bcopy ((caddr_t)Message_Ptr,
			  sc->ha_Fvirt + MessageOffset,
			  sizeof(I2O_UTIL_NOP_MESSAGE));
			/*
			 * Issue the NOP
			 */
			sc->ha_Virt->ToFIFO = MessageOffset;
		}

		/*
		 * Asynchronous command with no return requirements,
		 * and a generic handler for immunity against odd error
		 * returns from the adapter.
		 */
		if (ccb == (union asr_ccb *)NULL) {
			/*
			 * Return Reply so that it can be used for the
			 * next command
			 */
			sc->ha_Virt->FromFIFO = ReplyOffset;
			continue;
		}

		/* Welease Wadjah! (and stop timeouts) */
		ASR_ccbRemove (sc, ccb);

		/* Map the I2O detailed status to a CAM status code */
		switch (
		  I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
		    &(Reply->StdReplyFrame))) {

		case I2O_SCSI_DSC_SUCCESS:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP;
			break;

		case I2O_SCSI_DSC_CHECK_CONDITION:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID;
			break;

		case I2O_SCSI_DSC_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_BUS_BUSY:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_SCSI_BUSY;
			break;

		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_LUN_INVALID:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
			break;

		default:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		}
		/* Residual = requested length minus what the HBA transferred */
		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
			ccb->csio.resid -=
			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
			    Reply);
		}

#ifdef ASR_MEASURE_PERFORMANCE
		{
			struct timeval	endTime;
			u_int32_t	submitted_time;
			u_int32_t	size;
			int		op_type;
			int		startTimeIndex;

			--sc->ha_submitted_ccbs_count;
			startTimeIndex
			  = (int)Reply->StdReplyFrame.TransactionContext;
			if (-1 != startTimeIndex) {
				/* Compute the time spent in device/adapter */
				microtime(&endTime);
				submitted_time = asr_time_delta(sc->ha_timeQ[
				  startTimeIndex], endTime);
				/* put the startTimeIndex back on free list */
				ENQ_TIMEQ_FREE_LIST(startTimeIndex,
				  sc->ha_timeQFreeList,
				  sc->ha_timeQFreeHead,
				  sc->ha_timeQFreeTail);
			} else {
				submitted_time = 0xffffffff;
			}

#define maxctime sc->ha_performance.max_command_time[ccb->csio.cdb_io.cdb_bytes[0]]
#define minctime sc->ha_performance.min_command_time[ccb->csio.cdb_io.cdb_bytes[0]]
			if (submitted_time != 0xffffffff) {
				/* Track min/max per SCSI opcode and overall */
				if ( maxctime < submitted_time ) {
					maxctime = submitted_time;
				}
				if ( (minctime == 0)
				  || (minctime > submitted_time) ) {
					minctime = submitted_time;
				}

				if ( sc->ha_performance.max_submit_time
				  < submitted_time ) {
					sc->ha_performance.max_submit_time
					  = submitted_time;
				}
				if ( sc->ha_performance.min_submit_time == 0
				  || sc->ha_performance.min_submit_time
				    > submitted_time) {
					sc->ha_performance.min_submit_time
					  = submitted_time;
				}

				/* Classify the opcode as a read or a write */
				switch ( ccb->csio.cdb_io.cdb_bytes[0] ) {

				case 0xa8:	/* 12-byte READ */
					/* FALLTHRU */
				case 0x08:	/* 6-byte READ */
					/* FALLTHRU */
				case 0x28:	/* 10-byte READ */
					op_type = READ_OP;
					break;

				case 0x0a:	/* 6-byte WRITE */
					/* FALLTHRU */
				case 0xaa:	/* 12-byte WRITE */
					/* FALLTHRU */
				case 0x2a:	/* 10-byte WRITE */
					op_type = WRITE_OP;
					break;

				default:
					op_type = 0;
					break;
				}

				if ( op_type != 0 ) {
					struct scsi_rw_big * cmd;

					/*
					 * Extract the block count from the
					 * 10-byte CDB layout and convert to
					 * bytes (<< 9 assumes 512B blocks).
					 * NOTE(review): 6/12-byte CDBs have a
					 * different length field layout —
					 * confirm this is acceptable here.
					 */
					cmd = (struct scsi_rw_big *)
					  &(ccb->csio.cdb_io);

					size = (((u_int32_t) cmd->length2 << 8)
					  | ((u_int32_t) cmd->length1)) << 9;

					switch ( size ) {

					case 512:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_512);
						break;

					case 1024:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_1K);
						break;

					case 2048:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_2K);
						break;

					case 4096:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_4K);
						break;

					case 8192:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_8K);
						break;

					case 16384:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_16K);
						break;

					case 32768:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_32K);
						break;

					case 65536:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_64K);
						break;

					default:
						if ( size > (1 << 16) ) {
							asr_IObySize(sc,
							  submitted_time,
							  op_type,
							  SIZE_BIGGER);
						} else {
							asr_IObySize(sc,
							  submitted_time,
							  op_type,
							  SIZE_OTHER);
						}
						break;
					}
				}
			}
		}
#endif
		/* Sense data in reply packet */
		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);

			if (size) {
				/* Clamp to the smallest of all three limits */
				if (size > sizeof(ccb->csio.sense_data)) {
					size = sizeof(ccb->csio.sense_data);
				}
				if (size > I2O_SCSI_SENSE_DATA_SZ) {
					size = I2O_SCSI_SENSE_DATA_SZ;
				}
				if ((ccb->csio.sense_len)
				 && (size > ccb->csio.sense_len)) {
					size = ccb->csio.sense_len;
				}
				bcopy ((caddr_t)Reply->SenseData,
				  (caddr_t)&(ccb->csio.sense_data), size);
			}
		}

		/*
		 * Return Reply so that it can be used for the next command
		 * since we have no more need for it now
		 */
		sc->ha_Virt->FromFIFO = ReplyOffset;

		if (ccb->ccb_h.path) {
			xpt_done ((union ccb *)ccb);
		} else {
			/* Internal (path-less) command: waiter is sleeping */
			wakeup ((caddr_t)ccb);
		}
	}
#ifdef ASR_MEASURE_PERFORMANCE
	{
		u_int32_t result;

		microtime(&junk);
		result = asr_time_delta(sc->ha_performance.intr_started, junk);

		if (result != 0xffffffff) {
			if ( sc->ha_performance.max_intr_time < result ) {
				sc->ha_performance.max_intr_time = result;
			}

			if ( (sc->ha_performance.min_intr_time == 0)
			  || (sc->ha_performance.min_intr_time > result) ) {
				sc->ha_performance.min_intr_time = result;
			}
		}
	}
#endif
	return (processed);
} /* asr_intr */

#undef QueueSize /* Grrrr */
#undef SG_Size /* Grrrr */

/*
 * Meant to be included at the bottom of asr.c !!!
 */

/*
 * Included here as hard coded. Done because other necessary include
 * files utilize C++ comment structures which make them a nuisance to
 * included here just to pick up these three typedefs.
 */
typedef U32   DPT_TAG_T;
typedef U32   DPT_MSG_T;
typedef U32   DPT_RTN_T;

#undef SCSI_RESET /* Conflicts with "scsi/scsiconf.h" definition */
#include "osd_unix.h"

#define asr_unit(dev)	  minor(dev)

/*
 * Walk the softc list and return the adapter whose first SIM's unit
 * number matches the device minor, or NULL if none matches.
 */
STATIC INLINE Asr_softc_t *
ASR_get_sc (
	IN dev_t dev)
{
	int unit = asr_unit(dev);
	OUT Asr_softc_t * sc = Asr_softc;

	while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) {
		sc = sc->ha_next;
	}
	return (sc);
} /* ASR_get_sc */

/* Single global open-exclusion flag shared by all asr control nodes */
STATIC u_int8_t ASR_ctlr_held;
#if (!defined(UNREFERENCED_PARAMETER))
# define UNREFERENCED_PARAMETER(x) (void)(x)
#endif

/*
 * Character-device open: exclusive, superuser-only access to the
 * control node. Returns ENODEV for an unknown unit, EBUSY if already
 * held, or the suser_cred() error.
 */
STATIC int
asr_open(
	IN dev_t	 dev,
	int32_t		 flags,
	int32_t		 ifmt,
	IN d_thread_t	 *td)
{
	int		 s;
	OUT int		 error;
	UNREFERENCED_PARAMETER(flags);
	UNREFERENCED_PARAMETER(ifmt);

	if (ASR_get_sc (dev) == (Asr_softc_t *)NULL) {
		return (ENODEV);
	}
	KKASSERT(td->td_proc);
	s = splcam ();
	if (ASR_ctlr_held) {
		error = EBUSY;
	} else if ((error = suser_cred(td->td_proc->p_ucred,
	  0)) == 0) {
		++ASR_ctlr_held;
	}
	splx(s);
	return (error);
} /* asr_open */

/*
 * Character-device close: drop the global exclusion flag.
 * NOTE(review): releases the flag unconditionally, regardless of which
 * descriptor/process opened it — acceptable only because open is
 * exclusive; confirm.
 */
STATIC int
asr_close(
	dev_t		 dev,
	int		 flags,
	int		 ifmt,
	d_thread_t	 *td)
{
	UNREFERENCED_PARAMETER(dev);
	UNREFERENCED_PARAMETER(flags);
	UNREFERENCED_PARAMETER(ifmt);
	UNREFERENCED_PARAMETER(td);

	ASR_ctlr_held = 0;
	return (0);
} /* asr_close */


/*-------------------------------------------------------------------------*/
/*		      Function ASR_queue_i				   */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :				   */
/*	Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
/*	PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command	   */
/*	I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure	   */
/*									   */
/* This Function Will Take The User Request Packet And Convert It To An   */
/* I2O MSG And Send It Off To The Adapter.				   */
/*									   */
/* Return : 0 For OK, Error Code Otherwise				   */
/*-------------------------------------------------------------------------*/
STATIC INLINE int
ASR_queue_i(
	IN Asr_softc_t			 * sc,
	INOUT PI2O_MESSAGE_FRAME	   Packet)
{
	union asr_ccb			 * ccb;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
	PI2O_MESSAGE_FRAME		   Message_Ptr;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
	int				   MessageSizeInBytes;
	int				   ReplySizeInBytes;
	int				   error;
	int				   s;
	/* Scatter Gather buffer list */
	struct ioctlSgList_S {
		SLIST_ENTRY(ioctlSgList_S) link;
		caddr_t			   UserSpace;
		I2O_FLAGS_COUNT		   FlagsCount;
		char			   KernelSpace[sizeof(long)];
	}				 * elm;
	/* Generates a `first' entry */
	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;

	if (ASR_getBlinkLedCode(sc)) {
		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
		  ASR_getBlinkLedCode(sc));
		return (EIO);
	}
	/* Copy in the message into a local allocation */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
	  == (PI2O_MESSAGE_FRAME)NULL) {
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		free (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
		return (error);
	}
	/* Acquire information to determine type of packet */
	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
	/* The offset of the reply information within the user packet */
	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
	  + MessageSizeInBytes);

	/* Check if the message is a synchronous initialization command */
	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
	free (Message_Ptr, M_TEMP);
	switch (s) {

	case I2O_EXEC_IOP_RESET:
	{	U32 status;

		status = ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("resetIOP done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_STATUS_GET:
	{	I2O_EXEC_STATUS_GET_REPLY status;

		if (ASR_getStatus (sc->ha_Virt, sc->ha_Fvirt, &status)
		  == (PI2O_EXEC_STATUS_GET_REPLY)NULL) {
			debug_usr_cmd_printf ("getStatus failed\n");
			return (ENXIO);
		}
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("getStatus done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_OUTBOUND_INIT:
	{	U32 status;

		status = ASR_initOutBound(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("intOutBound done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}
	}

	/* Determine if the message size is valid */
	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
		debug_usr_cmd_printf ("Packet size %d incorrect\n",
		  MessageSizeInBytes);
		return (EINVAL);
	}

	/*
	 * NOTE(review): the packet is copied in a second time below using
	 * the size read from the first copy — a user racing between the
	 * two copyins can change the embedded size field (TOCTOU); the
	 * size bound above is what actually limits the copy. Verify.
	 */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (MessageSizeInBytes,
	  M_TEMP, M_WAITOK)) == (PI2O_MESSAGE_FRAME)NULL) {
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  MessageSizeInBytes);
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  MessageSizeInBytes)) != 0) {
		free (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
		  MessageSizeInBytes, error);
		return (error);
	}

	/* Check the size of the reply frame, and start constructing */

	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
	  == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
		free (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		free (Reply_Ptr, M_TEMP);
		free (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame, errno=%d\n",
		  error);
		return (error);
	}
	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
	free (Reply_Ptr, M_TEMP);
	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
		free (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame[%d], errno=%d\n",
		  ReplySizeInBytes, error);
		return (EINVAL);
	}

	/* Allocate the working reply frame, at least large enough for an
	 * I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME */
	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
	  ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
	    ? ReplySizeInBytes
	    : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
	  M_TEMP, M_WAITOK)) == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
		free (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  ReplySizeInBytes);
		return (ENOMEM);
	}
	(void)ASR_fillMessage ((char *)Reply_Ptr, ReplySizeInBytes);
	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
	  = Message_Ptr->InitiatorContext;
	Reply_Ptr->StdReplyFrame.TransactionContext
	  = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
	I2O_MESSAGE_FRAME_setMsgFlags(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
	  I2O_MESSAGE_FRAME_getMsgFlags(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
	      | I2O_MESSAGE_FLAGS_REPLY);

	/* Check if the message is a special case command */
	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
		  Message_Ptr) & 0xF0) >> 2)) {
			free (Message_Ptr, M_TEMP);
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply_Ptr->StdReplyFrame),
			  (ASR_setSysTab(sc) != CAM_REQ_CMP));
			I2O_MESSAGE_FRAME_setMessageSize(
			  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
			  sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
			  ReplySizeInBytes);
			free (Reply_Ptr, M_TEMP);
			return (error);
		}
	}

	/* Deal in the general case */
	/* First allocate and optionally copy in each scatter gather element */
	SLIST_INIT(&sgList);
	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
		PI2O_SGE_SIMPLE_ELEMENT sg;

		/*
		 * since this code is reused in several systems, code
		 * efficiency is greater by using a shift operation rather
		 * than a divide by sizeof(u_int32_t).
		 */
		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
		    >> 2));
		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
		  + MessageSizeInBytes)) {
			caddr_t v;
			int	len;

			/* Only simple-address SG elements are supported */
			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
				error = EINVAL;
				break;
			}
			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
			  sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
			  + ((I2O_MESSAGE_FRAME_getVersionOffset(
				Message_Ptr) & 0xF0) >> 2)),
			  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);

			/*
			 * Shadow each user buffer with an in-kernel copy;
			 * KernelSpace is a variable-length trailer so the
			 * allocation is header + len bytes.
			 */
			if ((elm = (struct ioctlSgList_S *)malloc (
			  sizeof(*elm) - sizeof(elm->KernelSpace) + len,
			  M_TEMP, M_WAITOK))
			  == (struct ioctlSgList_S *)NULL) {
				debug_usr_cmd_printf (
				  "Failed to allocate SG[%d]\n", len);
				error = ENOMEM;
				break;
			}
			SLIST_INSERT_HEAD(&sgList, elm, link);
			/* Keep original flags/count for copyout decisions */
			elm->FlagsCount = sg->FlagsCount;
			elm->UserSpace = (caddr_t)
			  (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
			v = elm->KernelSpace;
			/* Copy in outgoing data (DIR bit could be invalid) */
			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
			  != 0) {
				break;
			}
			/*
			 * If the buffer is not contiguous, lets
			 * break up the scatter/gather entries.
			 */
			while ((len > 0)
			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
				int next, base, span;

				span = 0;
				next = base = KVTOPHYS(v);
				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
				  base);

				/* How far can we go physically contiguously */
				while ((len > 0) && (base == next)) {
					int size;

					next = trunc_page(base) + PAGE_SIZE;
					size = next - base;
					if (size > len) {
						size = len;
					}
					span += size;
					v += size;
					len -= size;
					base = KVTOPHYS(v);
				}

				/* Construct the Flags */
				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
				  span);
				{
					int flags = I2O_FLAGS_COUNT_getFlags(
					  &(elm->FlagsCount));
					/* Any remaining length? */
					if (len > 0) {
					    flags &=
						~(I2O_SGL_FLAGS_END_OF_BUFFER
						 | I2O_SGL_FLAGS_LAST_ELEMENT);
					}
					I2O_FLAGS_COUNT_setFlags(
					  &(sg->FlagsCount), flags);
				}

				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
				  sg - (PI2O_SGE_SIMPLE_ELEMENT)
				    ((char *)Message_Ptr
				  + ((I2O_MESSAGE_FRAME_getVersionOffset(
					Message_Ptr) & 0xF0) >> 2)),
				  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
				  span);
				if (len <= 0) {
					break;
				}

				/*
				 * Incrementing requires resizing of the
				 * packet, and moving up the existing SG
				 * elements.
				 */
				++sg;
				MessageSizeInBytes += sizeof(*sg);
				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
				  I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
				  + (sizeof(*sg) / sizeof(U32)));
				{
					PI2O_MESSAGE_FRAME NewMessage_Ptr;

					/* Grow the frame by one SG element,
					 * duplicating the current element
					 * (sg-1 copy) as the insertion point */
					if ((NewMessage_Ptr
					  = (PI2O_MESSAGE_FRAME)
					    malloc (MessageSizeInBytes,
					      M_TEMP, M_WAITOK))
					  == (PI2O_MESSAGE_FRAME)NULL) {
						debug_usr_cmd_printf (
						  "Failed to acquire frame[%d] memory\n",
						  MessageSizeInBytes);
						error = ENOMEM;
						break;
					}
					span = ((caddr_t)sg)
					  - (caddr_t)Message_Ptr;
					bcopy ((caddr_t)Message_Ptr,
					  (caddr_t)NewMessage_Ptr, span);
					bcopy ((caddr_t)(sg-1),
					  ((caddr_t)NewMessage_Ptr) + span,
					  MessageSizeInBytes - span);
					free (Message_Ptr, M_TEMP);
					sg = (PI2O_SGE_SIMPLE_ELEMENT)
					  (((caddr_t)NewMessage_Ptr) + span);
					Message_Ptr = NewMessage_Ptr;
				}
			}
			if ((error)
			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
				break;
			}
			++sg;
		}
		if (error) {
			/* Unwind: release every shadow buffer and frames */
			while ((elm = SLIST_FIRST(&sgList))
			  != (struct ioctlSgList_S *)NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				free (elm, M_TEMP);
			}
			free (Reply_Ptr, M_TEMP);
			free (Message_Ptr, M_TEMP);
			return (error);
		}
	}

	debug_usr_cmd_printf ("Inbound: ");
	debug_usr_cmd_dump_message(Message_Ptr);

	/* Send the command */
	if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
		/* Free up in-kernel buffers */
		while ((elm = SLIST_FIRST(&sgList))
		  != (struct ioctlSgList_S *)NULL) {
			SLIST_REMOVE_HEAD(&sgList, link);
			free (elm, M_TEMP);
		}
		free (Reply_Ptr, M_TEMP);
		free (Message_Ptr, M_TEMP);
		return (ENOMEM);
	}

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(
	  (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);

	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	free (Message_Ptr, M_TEMP);

	/*
	 * Wait for the board to report a finished instruction.
	 */
	s = splcam();
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		if (ASR_getBlinkLedCode(sc)) {
			/* Reset Adapter */
			printf ("asr%d: Blink LED 0x%x resetting adapter\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ASR_getBlinkLedCode(sc));
			if (ASR_reset (sc) == ENXIO) {
				/* Command Cleanup */
				ASR_ccbRemove(sc, ccb);
			}
			splx(s);
			/* Free up in-kernel buffers */
			while ((elm = SLIST_FIRST(&sgList))
			  != (struct ioctlSgList_S *)NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				free (elm, M_TEMP);
			}
			free (Reply_Ptr, M_TEMP);
			asr_free_ccb(ccb);
			return (EIO);
		}
		/* Check every second for BlinkLed */
		tsleep((caddr_t)ccb, 0, "asr", hz);
	}
	splx(s);

	debug_usr_cmd_printf ("Outbound: ");
	debug_usr_cmd_dump_message(Reply_Ptr);

	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
	  &(Reply_Ptr->StdReplyFrame),
	  (ccb->ccb_h.status != CAM_REQ_CMP));

	/* Fill in the transfer count if the user reply has room for it */
	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
		  ccb->csio.dxfer_len - ccb->csio.resid);
	}
	/* Append autosense data if present and the reply has room */
	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
	  > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	  - I2O_SCSI_SENSE_DATA_SZ))) {
		int size = ReplySizeInBytes
		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
		  - I2O_SCSI_SENSE_DATA_SZ;

		if (size > sizeof(ccb->csio.sense_data)) {
			size = sizeof(ccb->csio.sense_data);
		}
		bcopy ((caddr_t)&(ccb->csio.sense_data), (caddr_t)Reply_Ptr->SenseData,
		  size);
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
		  Reply_Ptr, size);
	}

	/* Free up in-kernel buffers */
	while ((elm = SLIST_FIRST(&sgList)) != (struct ioctlSgList_S *)NULL) {
		/* Copy out as necessary */
		if ((error == 0)
		/* DIR bit considered `valid', error due to ignorance works */
		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
		  & I2O_SGL_FLAGS_DIR) == 0)) {
			error = copyout ((caddr_t)(elm->KernelSpace),
			  elm->UserSpace,
			  I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
		}
		SLIST_REMOVE_HEAD(&sgList, link);
		free (elm, M_TEMP);
	}
	if (error == 0) {
	/* Copy reply frame to user space */
		error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
		  ReplySizeInBytes);
	}
	free (Reply_Ptr, M_TEMP);
	asr_free_ccb(ccb);

	return (error);
} /* ASR_queue_i */

/*----------------------------------------------------------------------*/
/*			    Function asr_ioctl				 */
/*----------------------------------------------------------------------*/
/* The parameters passed to this function are :				 */
/*     dev  : Device number.						 */
/*     cmd  : Ioctl Command						 */
/*     data : User Argument Passed In.
*/ 4389 /* flag : Mode Parameter */ 4390 /* proc : Process Parameter */ 4391 /* */ 4392 /* This function is the user interface into this adapter driver */ 4393 /* */ 4394 /* Return : zero if OK, error code if not */ 4395 /*----------------------------------------------------------------------*/ 4396 4397 STATIC int 4398 asr_ioctl( 4399 IN dev_t dev, 4400 IN u_long cmd, 4401 INOUT caddr_t data, 4402 int flag, 4403 struct thread *td) 4404 { 4405 int i, j; 4406 OUT int error = 0; 4407 Asr_softc_t * sc = ASR_get_sc (dev); 4408 UNREFERENCED_PARAMETER(flag); 4409 UNREFERENCED_PARAMETER(td); 4410 4411 if (sc != (Asr_softc_t *)NULL) 4412 switch(cmd) { 4413 4414 case DPT_SIGNATURE: 4415 # if (dsDescription_size != 50) 4416 case DPT_SIGNATURE + ((50 - dsDescription_size) << 16): 4417 # endif 4418 if (cmd & 0xFFFF0000) { 4419 (void)bcopy ((caddr_t)(&ASR_sig), data, 4420 sizeof(dpt_sig_S)); 4421 return (0); 4422 } 4423 /* Traditional version of the ioctl interface */ 4424 case DPT_SIGNATURE & 0x0000FFFF: 4425 return (copyout ((caddr_t)(&ASR_sig), *((caddr_t *)data), 4426 sizeof(dpt_sig_S))); 4427 4428 /* Traditional version of the ioctl interface */ 4429 case DPT_CTRLINFO & 0x0000FFFF: 4430 case DPT_CTRLINFO: { 4431 struct { 4432 u_int16_t length; 4433 u_int16_t drvrHBAnum; 4434 u_int32_t baseAddr; 4435 u_int16_t blinkState; 4436 u_int8_t pciBusNum; 4437 u_int8_t pciDeviceNum; 4438 u_int16_t hbaFlags; 4439 u_int16_t Interrupt; 4440 u_int32_t reserved1; 4441 u_int32_t reserved2; 4442 u_int32_t reserved3; 4443 } CtlrInfo; 4444 4445 bzero (&CtlrInfo, sizeof(CtlrInfo)); 4446 CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t); 4447 CtlrInfo.drvrHBAnum = asr_unit(dev); 4448 CtlrInfo.baseAddr = (u_long)sc->ha_Base; 4449 i = ASR_getBlinkLedCode (sc); 4450 if (i == -1) { 4451 i = 0; 4452 } 4453 CtlrInfo.blinkState = i; 4454 CtlrInfo.pciBusNum = sc->ha_pciBusNum; 4455 CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum; 4456 #define FLG_OSD_PCI_VALID 0x0001 4457 #define FLG_OSD_DMA 0x0002 
4458 #define FLG_OSD_I2O 0x0004 4459 CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O; 4460 CtlrInfo.Interrupt = sc->ha_irq; 4461 if (cmd & 0xFFFF0000) { 4462 bcopy (&CtlrInfo, data, sizeof(CtlrInfo)); 4463 } else { 4464 error = copyout (&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo)); 4465 } 4466 } return (error); 4467 4468 /* Traditional version of the ioctl interface */ 4469 case DPT_SYSINFO & 0x0000FFFF: 4470 case DPT_SYSINFO: { 4471 sysInfo_S Info; 4472 char * cp; 4473 /* Kernel Specific ptok `hack' */ 4474 # define ptok(a) ((char *)(a) + KERNBASE) 4475 4476 bzero (&Info, sizeof(Info)); 4477 4478 /* Appears I am the only person in the Kernel doing this */ 4479 outb (0x70, 0x12); 4480 i = inb(0x71); 4481 j = i >> 4; 4482 if (i == 0x0f) { 4483 outb (0x70, 0x19); 4484 j = inb (0x71); 4485 } 4486 Info.drive0CMOS = j; 4487 4488 j = i & 0x0f; 4489 if (i == 0x0f) { 4490 outb (0x70, 0x1a); 4491 j = inb (0x71); 4492 } 4493 Info.drive1CMOS = j; 4494 4495 Info.numDrives = *((char *)ptok(0x475)); 4496 4497 Info.processorFamily = ASR_sig.dsProcessorFamily; 4498 switch (cpu) { 4499 case CPU_386SX: case CPU_386: 4500 Info.processorType = PROC_386; break; 4501 case CPU_486SX: case CPU_486: 4502 Info.processorType = PROC_486; break; 4503 case CPU_586: 4504 Info.processorType = PROC_PENTIUM; break; 4505 case CPU_686: 4506 Info.processorType = PROC_SEXIUM; break; 4507 } 4508 Info.osType = OS_BSDI_UNIX; 4509 Info.osMajorVersion = osrelease[0] - '0'; 4510 Info.osMinorVersion = osrelease[2] - '0'; 4511 /* Info.osRevision = 0; */ 4512 /* Info.osSubRevision = 0; */ 4513 Info.busType = SI_PCI_BUS; 4514 Info.flags = SI_CMOS_Valid | SI_NumDrivesValid 4515 | SI_OSversionValid | SI_BusTypeValid | SI_NO_SmartROM; 4516 4517 /* Go Out And Look For I2O SmartROM */ 4518 for(j = 0xC8000; j < 0xE0000; j += 2048) { 4519 int k; 4520 4521 cp = ptok(j); 4522 if (*((unsigned short *)cp) != 0xAA55) { 4523 continue; 4524 } 4525 j += (cp[2] * 512) - 2048; 4526 if ((*((u_long *)(cp + 6)) 
4527 != ('S' + (' ' * 256) + (' ' * 65536L))) 4528 || (*((u_long *)(cp + 10)) 4529 != ('I' + ('2' * 256) + ('0' * 65536L)))) { 4530 continue; 4531 } 4532 cp += 0x24; 4533 for (k = 0; k < 64; ++k) { 4534 if (*((unsigned short *)cp) 4535 == (' ' + ('v' * 256))) { 4536 break; 4537 } 4538 } 4539 if (k < 64) { 4540 Info.smartROMMajorVersion 4541 = *((unsigned char *)(cp += 4)) - '0'; 4542 Info.smartROMMinorVersion 4543 = *((unsigned char *)(cp += 2)); 4544 Info.smartROMRevision 4545 = *((unsigned char *)(++cp)); 4546 Info.flags |= SI_SmartROMverValid; 4547 Info.flags &= ~SI_NO_SmartROM; 4548 break; 4549 } 4550 } 4551 /* Get The Conventional Memory Size From CMOS */ 4552 outb (0x70, 0x16); 4553 j = inb (0x71); 4554 j <<= 8; 4555 outb (0x70, 0x15); 4556 j |= inb(0x71); 4557 Info.conventionalMemSize = j; 4558 4559 /* Get The Extended Memory Found At Power On From CMOS */ 4560 outb (0x70, 0x31); 4561 j = inb (0x71); 4562 j <<= 8; 4563 outb (0x70, 0x30); 4564 j |= inb(0x71); 4565 Info.extendedMemSize = j; 4566 Info.flags |= SI_MemorySizeValid; 4567 4568 # if (defined(THIS_IS_BROKEN)) 4569 /* If There Is 1 or 2 Drives Found, Set Up Drive Parameters */ 4570 if (Info.numDrives > 0) { 4571 /* 4572 * Get The Pointer From Int 41 For The First 4573 * Drive Parameters 4574 */ 4575 j = ((unsigned)(*((unsigned short *)ptok(0x104+2))) << 4) 4576 + (unsigned)(*((unsigned short *)ptok(0x104+0))); 4577 /* 4578 * It appears that SmartROM's Int41/Int46 pointers 4579 * use memory that gets stepped on by the kernel 4580 * loading. We no longer have access to this 4581 * geometry information but try anyways (!?) 
4582 */ 4583 Info.drives[0].cylinders = *((unsigned char *)ptok(j)); 4584 ++j; 4585 Info.drives[0].cylinders += ((int)*((unsigned char *) 4586 ptok(j))) << 8; 4587 ++j; 4588 Info.drives[0].heads = *((unsigned char *)ptok(j)); 4589 j += 12; 4590 Info.drives[0].sectors = *((unsigned char *)ptok(j)); 4591 Info.flags |= SI_DriveParamsValid; 4592 if ((Info.drives[0].cylinders == 0) 4593 || (Info.drives[0].heads == 0) 4594 || (Info.drives[0].sectors == 0)) { 4595 Info.flags &= ~SI_DriveParamsValid; 4596 } 4597 if (Info.numDrives > 1) { 4598 /* 4599 * Get The Pointer From Int 46 For The 4600 * Second Drive Parameters 4601 */ 4602 j = ((unsigned)(*((unsigned short *)ptok(0x118+2))) << 4) 4603 + (unsigned)(*((unsigned short *)ptok(0x118+0))); 4604 Info.drives[1].cylinders = *((unsigned char *) 4605 ptok(j)); 4606 ++j; 4607 Info.drives[1].cylinders += ((int) 4608 *((unsigned char *)ptok(j))) << 8; 4609 ++j; 4610 Info.drives[1].heads = *((unsigned char *) 4611 ptok(j)); 4612 j += 12; 4613 Info.drives[1].sectors = *((unsigned char *) 4614 ptok(j)); 4615 if ((Info.drives[1].cylinders == 0) 4616 || (Info.drives[1].heads == 0) 4617 || (Info.drives[1].sectors == 0)) { 4618 Info.flags &= ~SI_DriveParamsValid; 4619 } 4620 } 4621 } 4622 # endif 4623 /* Copy Out The Info Structure To The User */ 4624 if (cmd & 0xFFFF0000) { 4625 bcopy (&Info, data, sizeof(Info)); 4626 } else { 4627 error = copyout (&Info, *(caddr_t *)data, sizeof(Info)); 4628 } 4629 return (error); } 4630 4631 /* Get The BlinkLED State */ 4632 case DPT_BLINKLED: 4633 i = ASR_getBlinkLedCode (sc); 4634 if (i == -1) { 4635 i = 0; 4636 } 4637 if (cmd & 0xFFFF0000) { 4638 bcopy ((caddr_t)(&i), data, sizeof(i)); 4639 } else { 4640 error = copyout (&i, *(caddr_t *)data, sizeof(i)); 4641 } 4642 break; 4643 4644 /* Get performance metrics */ 4645 #ifdef ASR_MEASURE_PERFORMANCE 4646 case DPT_PERF_INFO: 4647 bcopy((caddr_t) &(sc->ha_performance), data, 4648 sizeof(sc->ha_performance)); 4649 return (0); 4650 #endif 4651 4652 /* 
Send an I2O command */ 4653 case I2OUSRCMD: 4654 return (ASR_queue_i (sc, *((PI2O_MESSAGE_FRAME *)data))); 4655 4656 /* Reset and re-initialize the adapter */ 4657 case I2ORESETCMD: 4658 return (ASR_reset (sc)); 4659 4660 /* Rescan the LCT table and resynchronize the information */ 4661 case I2ORESCANCMD: 4662 return (ASR_rescan (sc)); 4663 } 4664 return (EINVAL); 4665 } /* asr_ioctl */ 4666 4667 #ifdef ASR_MEASURE_PERFORMANCE 4668 /* 4669 * This function subtracts one timeval structure from another, 4670 * Returning the result in usec. 4671 * It assumes that less than 4 billion usecs passed form start to end. 4672 * If times are sensless, 0xffffffff is returned. 4673 */ 4674 4675 STATIC u_int32_t 4676 asr_time_delta( 4677 IN struct timeval start, 4678 IN struct timeval end) 4679 { 4680 OUT u_int32_t result; 4681 4682 if (start.tv_sec > end.tv_sec) { 4683 result = 0xffffffff; 4684 } 4685 else { 4686 if (start.tv_sec == end.tv_sec) { 4687 if (start.tv_usec > end.tv_usec) { 4688 result = 0xffffffff; 4689 } else { 4690 return (end.tv_usec - start.tv_usec); 4691 } 4692 } else { 4693 return (end.tv_sec - start.tv_sec) * 1000000 + 4694 end.tv_usec + (1000000 - start.tv_usec); 4695 } 4696 } 4697 return(result); 4698 } /* asr_time_delta */ 4699 #endif 4700