/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 *
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * pmcs: PMC-Sierra SAS/SATA HBA driver -- attach/detach and module
 * linkage section.
 */
#include <sys/scsi/adapters/pmcs/pmcs.h>

#define PMCS_DRIVER_VERSION "pmcs HBA device driver"

static char *pmcs_driver_rev = PMCS_DRIVER_VERSION;

/*
 * Non-DDI Compliant stuff
 */
extern char hw_serial[];

/*
 * Global driver data
 */
/* Soft-state anchors for HBA nodes and iport child nodes, respectively. */
void *pmcs_softc_state = NULL;
void *pmcs_iport_softstate = NULL;

/*
 * Tracing and Logging info
 *
 * One trace buffer is shared by every pmcs instance; it is allocated
 * lazily on first attach and freed in _fini().
 */
pmcs_tbuf_t *pmcs_tbuf = NULL;          /* base of the trace ring */
uint32_t pmcs_tbuf_num_elems = 0;       /* number of entries in the ring */
pmcs_tbuf_t *pmcs_tbuf_ptr;             /* next entry to fill */
uint32_t pmcs_tbuf_idx = 0;             /* index of next entry */
boolean_t pmcs_tbuf_wrap = B_FALSE;     /* B_TRUE once the ring has wrapped */
static kmutex_t pmcs_trace_lock;        /* protects all of the above */

/*
 * If pmcs_force_syslog value is non-zero, all messages put in the trace log
 * will also be sent to system log.
 */
int pmcs_force_syslog = 0;
int pmcs_console = 0;

/*
 * External References
 */
extern int ncpus_online;

/*
 * Local static data
 *
 * These are compile-time defaults; most can be overridden per-instance
 * through driver.conf properties read in pmcs_attach().
 */
static int fwlog_level = 3;             /* default firmware log level */
static int physpeed = PHY_LINK_ALL;     /* allowed PHY link speeds */
static int phymode = PHY_LM_AUTO;       /* PHY link mode */
static int block_mask = 0;              /* mask of PHY ids to block */
static int phymap_usec = 3 * MICROSEC;  /* phymap stabilization time */
static int iportmap_usec = 2 * MICROSEC; /* iportmap stabilization time */

#ifdef DEBUG
static int debug_mask = 1;
#else
static int debug_mask = 0;
#endif

#ifdef DISABLE_MSIX
static int disable_msix = 1;
#else
static int disable_msix = 0;
#endif

#ifdef DISABLE_MSI
static int disable_msi = 1;
#else
static int disable_msi = 0;
#endif

static uint16_t maxqdepth = 0xfffe;     /* default maximum queue depth */

/*
 * Local prototypes
 */
static int pmcs_attach(dev_info_t *, ddi_attach_cmd_t);
static int pmcs_detach(dev_info_t *, ddi_detach_cmd_t);
static int pmcs_unattach(pmcs_hw_t *);
static int pmcs_iport_unattach(pmcs_iport_t *);
static int pmcs_add_more_chunks(pmcs_hw_t *, unsigned long);
static void pmcs_watchdog(void *);
static int pmcs_setup_intr(pmcs_hw_t *);
static int pmcs_teardown_intr(pmcs_hw_t *);

static uint_t pmcs_nonio_ix(caddr_t, caddr_t);
static uint_t pmcs_general_ix(caddr_t, caddr_t);
static uint_t pmcs_event_ix(caddr_t, caddr_t);
static uint_t pmcs_iodone_ix(caddr_t, caddr_t);
static uint_t pmcs_fatal_ix(caddr_t, caddr_t);
static uint_t pmcs_all_intr(caddr_t, caddr_t);
static int pmcs_quiesce(dev_info_t *dip);
static boolean_t pmcs_fabricate_wwid(pmcs_hw_t *);

static void pmcs_create_phy_stats(pmcs_iport_t *);
int pmcs_update_phy_stats(kstat_t *, int);
static void pmcs_destroy_phy_stats(pmcs_iport_t *);

static void pmcs_fm_fini(pmcs_hw_t *pwp);
static void pmcs_fm_init(pmcs_hw_t *pwp);
static int pmcs_fm_error_cb(dev_info_t *dip,
    ddi_fm_error_t *err, const void *impl_data);

/*
 * Local configuration
data 126 */ 127 static struct dev_ops pmcs_ops = { 128 DEVO_REV, /* devo_rev, */ 129 0, /* refcnt */ 130 ddi_no_info, /* info */ 131 nulldev, /* identify */ 132 nulldev, /* probe */ 133 pmcs_attach, /* attach */ 134 pmcs_detach, /* detach */ 135 nodev, /* reset */ 136 NULL, /* driver operations */ 137 NULL, /* bus operations */ 138 ddi_power, /* power management */ 139 pmcs_quiesce /* quiesce */ 140 }; 141 142 static struct modldrv modldrv = { 143 &mod_driverops, 144 PMCS_DRIVER_VERSION, 145 &pmcs_ops, /* driver ops */ 146 }; 147 static struct modlinkage modlinkage = { 148 MODREV_1, &modldrv, NULL 149 }; 150 151 const ddi_dma_attr_t pmcs_dattr = { 152 DMA_ATTR_V0, /* dma_attr version */ 153 0x0000000000000000ull, /* dma_attr_addr_lo */ 154 0xFFFFFFFFFFFFFFFFull, /* dma_attr_addr_hi */ 155 0x00000000FFFFFFFFull, /* dma_attr_count_max */ 156 0x0000000000000001ull, /* dma_attr_align */ 157 0x00000078, /* dma_attr_burstsizes */ 158 0x00000001, /* dma_attr_minxfer */ 159 0x00000000FFFFFFFFull, /* dma_attr_maxxfer */ 160 0x00000000FFFFFFFFull, /* dma_attr_seg */ 161 1, /* dma_attr_sgllen */ 162 512, /* dma_attr_granular */ 163 0 /* dma_attr_flags */ 164 }; 165 166 static ddi_device_acc_attr_t rattr = { 167 DDI_DEVICE_ATTR_V0, 168 DDI_STRUCTURE_LE_ACC, 169 DDI_STRICTORDER_ACC, 170 DDI_DEFAULT_ACC 171 }; 172 173 174 /* 175 * Attach/Detach functions 176 */ 177 178 int 179 _init(void) 180 { 181 int ret; 182 183 ret = ddi_soft_state_init(&pmcs_softc_state, sizeof (pmcs_hw_t), 1); 184 if (ret != 0) { 185 cmn_err(CE_WARN, "?soft state init failed for pmcs"); 186 return (ret); 187 } 188 189 if ((ret = scsi_hba_init(&modlinkage)) != 0) { 190 cmn_err(CE_WARN, "?scsi_hba_init failed for pmcs"); 191 ddi_soft_state_fini(&pmcs_softc_state); 192 return (ret); 193 } 194 195 /* 196 * Allocate soft state for iports 197 */ 198 ret = ddi_soft_state_init(&pmcs_iport_softstate, 199 sizeof (pmcs_iport_t), 2); 200 if (ret != 0) { 201 cmn_err(CE_WARN, "?iport soft state init failed for pmcs"); 
202 ddi_soft_state_fini(&pmcs_softc_state); 203 return (ret); 204 } 205 206 ret = mod_install(&modlinkage); 207 if (ret != 0) { 208 cmn_err(CE_WARN, "?mod_install failed for pmcs (%d)", ret); 209 scsi_hba_fini(&modlinkage); 210 ddi_soft_state_fini(&pmcs_iport_softstate); 211 ddi_soft_state_fini(&pmcs_softc_state); 212 return (ret); 213 } 214 215 /* Initialize the global trace lock */ 216 mutex_init(&pmcs_trace_lock, NULL, MUTEX_DRIVER, NULL); 217 218 return (0); 219 } 220 221 int 222 _fini(void) 223 { 224 int ret; 225 if ((ret = mod_remove(&modlinkage)) != 0) { 226 return (ret); 227 } 228 scsi_hba_fini(&modlinkage); 229 230 /* Free pmcs log buffer and destroy the global lock */ 231 if (pmcs_tbuf) { 232 kmem_free(pmcs_tbuf, 233 pmcs_tbuf_num_elems * sizeof (pmcs_tbuf_t)); 234 pmcs_tbuf = NULL; 235 } 236 mutex_destroy(&pmcs_trace_lock); 237 238 ddi_soft_state_fini(&pmcs_iport_softstate); 239 ddi_soft_state_fini(&pmcs_softc_state); 240 return (0); 241 } 242 243 int 244 _info(struct modinfo *modinfop) 245 { 246 return (mod_info(&modlinkage, modinfop)); 247 } 248 249 static int 250 pmcs_iport_attach(dev_info_t *dip) 251 { 252 pmcs_iport_t *iport; 253 pmcs_hw_t *pwp; 254 scsi_hba_tran_t *tran; 255 void *ua_priv = NULL; 256 char *iport_ua; 257 char *init_port; 258 int hba_inst; 259 int inst; 260 261 hba_inst = ddi_get_instance(ddi_get_parent(dip)); 262 inst = ddi_get_instance(dip); 263 264 pwp = ddi_get_soft_state(pmcs_softc_state, hba_inst); 265 if (pwp == NULL) { 266 pmcs_prt(pwp, PMCS_PRT_DEBUG, 267 "%s: iport%d attach invoked with NULL parent (HBA) node)", 268 __func__, inst); 269 return (DDI_FAILURE); 270 } 271 272 if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD)) { 273 return (DDI_FAILURE); 274 } 275 276 if ((iport_ua = scsi_hba_iport_unit_address(dip)) == NULL) { 277 pmcs_prt(pwp, PMCS_PRT_DEBUG, 278 "%s: invoked with NULL unit address, inst (%d)", 279 __func__, inst); 280 return (DDI_FAILURE); 281 } 282 283 if 
(ddi_soft_state_zalloc(pmcs_iport_softstate, inst) != DDI_SUCCESS) { 284 pmcs_prt(pwp, PMCS_PRT_DEBUG, 285 "Failed to alloc soft state for iport %d", inst); 286 return (DDI_FAILURE); 287 } 288 289 iport = ddi_get_soft_state(pmcs_iport_softstate, inst); 290 if (iport == NULL) { 291 pmcs_prt(pwp, PMCS_PRT_DEBUG, 292 "cannot get iport soft state"); 293 goto iport_attach_fail1; 294 } 295 296 mutex_init(&iport->lock, NULL, MUTEX_DRIVER, 297 DDI_INTR_PRI(pwp->intr_pri)); 298 cv_init(&iport->refcnt_cv, NULL, CV_DEFAULT, NULL); 299 mutex_init(&iport->refcnt_lock, NULL, MUTEX_DRIVER, 300 DDI_INTR_PRI(pwp->intr_pri)); 301 302 /* Set some data on the iport handle */ 303 iport->dip = dip; 304 iport->pwp = pwp; 305 306 /* Dup the UA into the iport handle */ 307 iport->ua = strdup(iport_ua); 308 309 tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip); 310 tran->tran_hba_private = iport; 311 312 list_create(&iport->phys, sizeof (pmcs_phy_t), 313 offsetof(pmcs_phy_t, list_node)); 314 315 /* 316 * If our unit address is active in the phymap, configure our 317 * iport's phylist. 
318 */ 319 mutex_enter(&iport->lock); 320 ua_priv = sas_phymap_lookup_uapriv(pwp->hss_phymap, iport->ua); 321 if (ua_priv) { 322 /* Non-NULL private data indicates the unit address is active */ 323 iport->ua_state = UA_ACTIVE; 324 if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) { 325 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: failed to " 326 "configure phys on iport handle (0x%p), " 327 " unit address [%s]", __func__, 328 (void *)iport, iport_ua); 329 mutex_exit(&iport->lock); 330 goto iport_attach_fail2; 331 } 332 } else { 333 iport->ua_state = UA_INACTIVE; 334 } 335 mutex_exit(&iport->lock); 336 337 /* Allocate string-based soft state pool for targets */ 338 iport->tgt_sstate = NULL; 339 if (ddi_soft_state_bystr_init(&iport->tgt_sstate, 340 sizeof (pmcs_xscsi_t), PMCS_TGT_SSTATE_SZ) != 0) { 341 pmcs_prt(pwp, PMCS_PRT_DEBUG, 342 "cannot get iport tgt soft state"); 343 goto iport_attach_fail2; 344 } 345 346 /* Create this iport's target map */ 347 if (pmcs_iport_tgtmap_create(iport) == B_FALSE) { 348 pmcs_prt(pwp, PMCS_PRT_DEBUG, 349 "Failed to create tgtmap on iport %d", inst); 350 goto iport_attach_fail3; 351 } 352 353 /* Set up the 'initiator-port' DDI property on this iport */ 354 init_port = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP); 355 if (pwp->separate_ports) { 356 pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: separate ports not " 357 "supported", __func__); 358 } else { 359 /* Set initiator-port value to the HBA's base WWN */ 360 (void) scsi_wwn_to_wwnstr(pwp->sas_wwns[0], 1, 361 init_port); 362 } 363 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_STRING, 364 SCSI_ADDR_PROP_INITIATOR_PORT, init_port); 365 kmem_free(init_port, PMCS_MAX_UA_SIZE); 366 367 /* Set up a 'num-phys' DDI property for the iport node */ 368 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS, 369 &iport->nphy); 370 371 /* Create kstats for each of the phys in this port */ 372 pmcs_create_phy_stats(iport); 373 374 /* 375 * Insert this iport handle into our list and set 376 * 
iports_attached on the HBA node. 377 */ 378 rw_enter(&pwp->iports_lock, RW_WRITER); 379 ASSERT(!list_link_active(&iport->list_node)); 380 list_insert_tail(&pwp->iports, iport); 381 pwp->iports_attached = 1; 382 pwp->num_iports++; 383 rw_exit(&pwp->iports_lock); 384 385 pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, "iport%d attached", inst); 386 ddi_report_dev(dip); 387 return (DDI_SUCCESS); 388 389 /* teardown and fail */ 390 iport_attach_fail3: 391 ddi_soft_state_bystr_fini(&iport->tgt_sstate); 392 iport_attach_fail2: 393 list_destroy(&iport->phys); 394 strfree(iport->ua); 395 mutex_destroy(&iport->refcnt_lock); 396 cv_destroy(&iport->refcnt_cv); 397 mutex_destroy(&iport->lock); 398 iport_attach_fail1: 399 ddi_soft_state_free(pmcs_iport_softstate, inst); 400 return (DDI_FAILURE); 401 } 402 403 static int 404 pmcs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 405 { 406 scsi_hba_tran_t *tran; 407 char chiprev, *fwsupport, hw_rev[24], fw_rev[24]; 408 off_t set3size; 409 int inst, i; 410 int sm_hba = 1; 411 int protocol = 0; 412 int num_phys = 0; 413 pmcs_hw_t *pwp; 414 pmcs_phy_t *phyp; 415 uint32_t num_threads; 416 char buf[64]; 417 418 switch (cmd) { 419 case DDI_ATTACH: 420 break; 421 422 case DDI_PM_RESUME: 423 case DDI_RESUME: 424 tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip); 425 if (!tran) { 426 return (DDI_FAILURE); 427 } 428 /* No DDI_?_RESUME on iport nodes */ 429 if (scsi_hba_iport_unit_address(dip) != NULL) { 430 return (DDI_SUCCESS); 431 } 432 pwp = TRAN2PMC(tran); 433 if (pwp == NULL) { 434 return (DDI_FAILURE); 435 } 436 437 mutex_enter(&pwp->lock); 438 pwp->suspended = 0; 439 if (pwp->tq) { 440 ddi_taskq_resume(pwp->tq); 441 } 442 mutex_exit(&pwp->lock); 443 return (DDI_SUCCESS); 444 445 default: 446 return (DDI_FAILURE); 447 } 448 449 /* 450 * If this is an iport node, invoke iport attach. 
451 */ 452 if (scsi_hba_iport_unit_address(dip) != NULL) { 453 return (pmcs_iport_attach(dip)); 454 } 455 456 /* 457 * From here on is attach for the HBA node 458 */ 459 460 #ifdef DEBUG 461 /* 462 * Check to see if this unit is to be disabled. We can't disable 463 * on a per-iport node. It's either the entire HBA or nothing. 464 */ 465 (void) snprintf(buf, sizeof (buf), 466 "disable-instance-%d", ddi_get_instance(dip)); 467 if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, 468 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, buf, 0)) { 469 cmn_err(CE_NOTE, "pmcs%d: disabled by configuration", 470 ddi_get_instance(dip)); 471 return (DDI_FAILURE); 472 } 473 #endif 474 475 /* 476 * Allocate softstate 477 */ 478 inst = ddi_get_instance(dip); 479 if (ddi_soft_state_zalloc(pmcs_softc_state, inst) != DDI_SUCCESS) { 480 cmn_err(CE_WARN, "pmcs%d: Failed to alloc soft state", inst); 481 return (DDI_FAILURE); 482 } 483 484 pwp = ddi_get_soft_state(pmcs_softc_state, inst); 485 if (pwp == NULL) { 486 cmn_err(CE_WARN, "pmcs%d: cannot get soft state", inst); 487 ddi_soft_state_free(pmcs_softc_state, inst); 488 return (DDI_FAILURE); 489 } 490 pwp->dip = dip; 491 STAILQ_INIT(&pwp->dq); 492 STAILQ_INIT(&pwp->cq); 493 STAILQ_INIT(&pwp->wf); 494 STAILQ_INIT(&pwp->pf); 495 /* 496 * Create the list for iports 497 */ 498 list_create(&pwp->iports, sizeof (pmcs_iport_t), 499 offsetof(pmcs_iport_t, list_node)); 500 501 pwp->state = STATE_PROBING; 502 503 /* 504 * Get driver.conf properties 505 */ 506 pwp->debug_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 507 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-debug-mask", 508 debug_mask); 509 pwp->phyid_block_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 510 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phyid-block-mask", 511 block_mask); 512 pwp->physpeed = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 513 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-physpeed", physpeed); 514 pwp->phymode = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 515 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 
"pmcs-phymode", phymode); 516 pwp->fwlog = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 517 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fwlog", fwlog_level); 518 if (pwp->fwlog > PMCS_FWLOG_MAX) { 519 pwp->fwlog = PMCS_FWLOG_MAX; 520 } 521 522 mutex_enter(&pmcs_trace_lock); 523 if (pmcs_tbuf == NULL) { 524 /* Allocate trace buffer */ 525 pmcs_tbuf_num_elems = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 526 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-tbuf-num-elems", 527 PMCS_TBUF_NUM_ELEMS_DEF); 528 if ((pmcs_tbuf_num_elems == DDI_PROP_NOT_FOUND) || 529 (pmcs_tbuf_num_elems == 0)) { 530 pmcs_tbuf_num_elems = PMCS_TBUF_NUM_ELEMS_DEF; 531 } 532 533 pmcs_tbuf = kmem_zalloc(pmcs_tbuf_num_elems * 534 sizeof (pmcs_tbuf_t), KM_SLEEP); 535 pmcs_tbuf_ptr = pmcs_tbuf; 536 pmcs_tbuf_idx = 0; 537 } 538 mutex_exit(&pmcs_trace_lock); 539 540 disable_msix = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 541 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msix", 542 disable_msix); 543 disable_msi = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 544 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msi", 545 disable_msi); 546 maxqdepth = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 547 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-maxqdepth", maxqdepth); 548 pwp->fw_force_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 549 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fw-force-update", 0); 550 if (pwp->fw_force_update == 0) { 551 pwp->fw_disable_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 552 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 553 "pmcs-fw-disable-update", 0); 554 } 555 pwp->ioq_depth = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 556 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-num-io-qentries", 557 PMCS_NQENTRY); 558 559 /* 560 * Initialize FMA 561 */ 562 pwp->dev_acc_attr = pwp->reg_acc_attr = rattr; 563 pwp->iqp_dma_attr = pwp->oqp_dma_attr = 564 pwp->regdump_dma_attr = pwp->cip_dma_attr = 565 pwp->fwlog_dma_attr = pmcs_dattr; 566 pwp->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, pwp->dip, 567 DDI_PROP_NOTPROM | 
DDI_PROP_DONTPASS, "fm-capable", 568 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 569 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 570 pmcs_fm_init(pwp); 571 572 /* 573 * Map registers 574 */ 575 if (pci_config_setup(dip, &pwp->pci_acc_handle)) { 576 pmcs_prt(pwp, PMCS_PRT_WARN, "pci config setup failed"); 577 ddi_soft_state_free(pmcs_softc_state, inst); 578 return (DDI_FAILURE); 579 } 580 581 /* 582 * Get the size of register set 3. 583 */ 584 if (ddi_dev_regsize(dip, PMCS_REGSET_3, &set3size) != DDI_SUCCESS) { 585 pmcs_prt(pwp, PMCS_PRT_DEBUG, 586 "unable to get size of register set %d", PMCS_REGSET_3); 587 pci_config_teardown(&pwp->pci_acc_handle); 588 ddi_soft_state_free(pmcs_softc_state, inst); 589 return (DDI_FAILURE); 590 } 591 592 /* 593 * Map registers 594 */ 595 pwp->reg_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 596 597 if (ddi_regs_map_setup(dip, PMCS_REGSET_0, (caddr_t *)&pwp->msg_regs, 598 0, 0, &pwp->reg_acc_attr, &pwp->msg_acc_handle)) { 599 pmcs_prt(pwp, PMCS_PRT_DEBUG, 600 "failed to map Message Unit registers"); 601 pci_config_teardown(&pwp->pci_acc_handle); 602 ddi_soft_state_free(pmcs_softc_state, inst); 603 return (DDI_FAILURE); 604 } 605 606 if (ddi_regs_map_setup(dip, PMCS_REGSET_1, (caddr_t *)&pwp->top_regs, 607 0, 0, &pwp->reg_acc_attr, &pwp->top_acc_handle)) { 608 pmcs_prt(pwp, PMCS_PRT_DEBUG, "failed to map TOP registers"); 609 ddi_regs_map_free(&pwp->msg_acc_handle); 610 pci_config_teardown(&pwp->pci_acc_handle); 611 ddi_soft_state_free(pmcs_softc_state, inst); 612 return (DDI_FAILURE); 613 } 614 615 if (ddi_regs_map_setup(dip, PMCS_REGSET_2, (caddr_t *)&pwp->gsm_regs, 616 0, 0, &pwp->reg_acc_attr, &pwp->gsm_acc_handle)) { 617 pmcs_prt(pwp, PMCS_PRT_DEBUG, "failed to map GSM registers"); 618 ddi_regs_map_free(&pwp->top_acc_handle); 619 ddi_regs_map_free(&pwp->msg_acc_handle); 620 pci_config_teardown(&pwp->pci_acc_handle); 621 ddi_soft_state_free(pmcs_softc_state, inst); 622 return (DDI_FAILURE); 623 } 624 625 if 
(ddi_regs_map_setup(dip, PMCS_REGSET_3, (caddr_t *)&pwp->mpi_regs, 626 0, 0, &pwp->reg_acc_attr, &pwp->mpi_acc_handle)) { 627 pmcs_prt(pwp, PMCS_PRT_DEBUG, "failed to map MPI registers"); 628 ddi_regs_map_free(&pwp->top_acc_handle); 629 ddi_regs_map_free(&pwp->gsm_acc_handle); 630 ddi_regs_map_free(&pwp->msg_acc_handle); 631 pci_config_teardown(&pwp->pci_acc_handle); 632 ddi_soft_state_free(pmcs_softc_state, inst); 633 return (DDI_FAILURE); 634 } 635 pwp->mpibar = 636 (((5U << 2) + 0x10) << PMCS_MSGU_MPI_BAR_SHIFT) | set3size; 637 638 /* 639 * Make sure we can support this card. 640 */ 641 pwp->chiprev = pmcs_rd_topunit(pwp, PMCS_DEVICE_REVISION); 642 643 switch (pwp->chiprev) { 644 case PMCS_PM8001_REV_A: 645 case PMCS_PM8001_REV_B: 646 pmcs_prt(pwp, PMCS_PRT_ERR, 647 "Rev A/B Card no longer supported"); 648 goto failure; 649 case PMCS_PM8001_REV_C: 650 break; 651 default: 652 pmcs_prt(pwp, PMCS_PRT_ERR, 653 "Unknown chip revision (%d)", pwp->chiprev); 654 goto failure; 655 } 656 657 /* 658 * Allocate DMA addressable area for Inbound and Outbound Queue indices 659 * that the chip needs to access plus a space for scratch usage 660 */ 661 pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t); 662 if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pwp->cip_acchdls, 663 &pwp->cip_handles, ptob(1), (caddr_t *)&pwp->cip, 664 &pwp->ciaddr) == B_FALSE) { 665 pmcs_prt(pwp, PMCS_PRT_DEBUG, 666 "Failed to setup DMA for index/scratch"); 667 goto failure; 668 } 669 670 bzero(pwp->cip, ptob(1)); 671 pwp->scratch = &pwp->cip[PMCS_INDICES_SIZE]; 672 pwp->scratch_dma = pwp->ciaddr + PMCS_INDICES_SIZE; 673 674 /* 675 * Allocate DMA S/G list chunks 676 */ 677 (void) pmcs_add_more_chunks(pwp, ptob(1) * PMCS_MIN_CHUNK_PAGES); 678 679 /* 680 * Allocate a DMA addressable area for the firmware log (if needed) 681 */ 682 if (pwp->fwlog) { 683 /* 684 * Align to event log header and entry size 685 */ 686 pwp->fwlog_dma_attr.dma_attr_align = 32; 687 if (pmcs_dma_setup(pwp, &pwp->fwlog_dma_attr, 688 
&pwp->fwlog_acchdl, 689 &pwp->fwlog_hndl, PMCS_FWLOG_SIZE, 690 (caddr_t *)&pwp->fwlogp, 691 &pwp->fwaddr) == B_FALSE) { 692 pmcs_prt(pwp, PMCS_PRT_DEBUG, 693 "Failed to setup DMA for fwlog area"); 694 pwp->fwlog = 0; 695 } else { 696 bzero(pwp->fwlogp, PMCS_FWLOG_SIZE); 697 } 698 } 699 700 if (pwp->flash_chunk_addr == NULL) { 701 pwp->regdump_dma_attr.dma_attr_align = PMCS_FLASH_CHUNK_SIZE; 702 if (pmcs_dma_setup(pwp, &pwp->regdump_dma_attr, 703 &pwp->regdump_acchdl, 704 &pwp->regdump_hndl, PMCS_FLASH_CHUNK_SIZE, 705 (caddr_t *)&pwp->flash_chunkp, &pwp->flash_chunk_addr) == 706 B_FALSE) { 707 pmcs_prt(pwp, PMCS_PRT_DEBUG, 708 "Failed to setup DMA for register dump area"); 709 goto failure; 710 } 711 bzero(pwp->flash_chunkp, PMCS_FLASH_CHUNK_SIZE); 712 } 713 714 /* 715 * More bits of local initialization... 716 */ 717 pwp->tq = ddi_taskq_create(dip, "_tq", 4, TASKQ_DEFAULTPRI, 0); 718 if (pwp->tq == NULL) { 719 pmcs_prt(pwp, PMCS_PRT_DEBUG, "unable to create worker taskq"); 720 goto failure; 721 } 722 723 /* 724 * Cache of structures for dealing with I/O completion callbacks. 
725 */ 726 (void) snprintf(buf, sizeof (buf), "pmcs_iocomp_cb_cache%d", inst); 727 pwp->iocomp_cb_cache = kmem_cache_create(buf, 728 sizeof (pmcs_iocomp_cb_t), 16, NULL, NULL, NULL, NULL, NULL, 0); 729 730 /* 731 * Cache of PHY structures 732 */ 733 (void) snprintf(buf, sizeof (buf), "pmcs_phy_cache%d", inst); 734 pwp->phy_cache = kmem_cache_create(buf, sizeof (pmcs_phy_t), 8, 735 pmcs_phy_constructor, pmcs_phy_destructor, NULL, (void *)pwp, 736 NULL, 0); 737 738 /* 739 * Allocate space for the I/O completion threads 740 */ 741 num_threads = ncpus_online; 742 if (num_threads > PMCS_MAX_CQ_THREADS) { 743 num_threads = PMCS_MAX_CQ_THREADS; 744 } 745 746 pwp->cq_info.cq_thr_info = kmem_zalloc(sizeof (pmcs_cq_thr_info_t) * 747 num_threads, KM_SLEEP); 748 pwp->cq_info.cq_threads = num_threads; 749 pwp->cq_info.cq_next_disp_thr = 0; 750 pwp->cq_info.cq_stop = B_FALSE; 751 752 /* 753 * Set the quantum value in clock ticks for the I/O interrupt 754 * coalescing timer. 755 */ 756 pwp->io_intr_coal.quantum = drv_usectohz(PMCS_QUANTUM_TIME_USECS); 757 758 /* 759 * We have a delicate dance here. We need to set up 760 * interrupts so we know how to set up some OQC 761 * tables. However, while we're setting up table 762 * access, we may need to flash new firmware and 763 * reset the card, which will take some finessing. 764 */ 765 766 /* 767 * Set up interrupts here. 768 */ 769 switch (pmcs_setup_intr(pwp)) { 770 case 0: 771 break; 772 case EIO: 773 pwp->stuck = 1; 774 /* FALLTHROUGH */ 775 default: 776 goto failure; 777 } 778 779 /* 780 * Set these up now becuase they are used to initialize the OQC tables. 781 * 782 * If we have MSI or MSI-X interrupts set up and we have enough 783 * vectors for each OQ, the Outbound Queue vectors can all be the 784 * same as the appropriate interrupt routine will have been called 785 * and the doorbell register automatically cleared. 
786 * This keeps us from having to check the Outbound Doorbell register 787 * when the routines for these interrupts are called. 788 * 789 * If we have Legacy INT-X interrupts set up or we didn't have enough 790 * MSI/MSI-X vectors to uniquely identify each OQ, we point these 791 * vectors to the bits we would like to have set in the Outbound 792 * Doorbell register because pmcs_all_intr will read the doorbell 793 * register to find out why we have an interrupt and write the 794 * corresponding 'clear' bit for that interrupt. 795 */ 796 797 switch (pwp->intr_cnt) { 798 case 1: 799 /* 800 * Only one vector, so we must check all OQs for MSI. For 801 * INT-X, there's only one vector anyway, so we can just 802 * use the outbound queue bits to keep from having to 803 * check each queue for each interrupt. 804 */ 805 if (pwp->int_type == PMCS_INT_FIXED) { 806 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE; 807 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL; 808 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS; 809 } else { 810 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE; 811 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_IODONE; 812 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_IODONE; 813 } 814 break; 815 case 2: 816 /* With 2, we can at least isolate IODONE */ 817 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE; 818 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL; 819 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_GENERAL; 820 break; 821 case 4: 822 /* With 4 vectors, everybody gets one */ 823 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE; 824 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL; 825 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS; 826 break; 827 } 828 829 /* 830 * Do the first part of setup 831 */ 832 if (pmcs_setup(pwp)) { 833 goto failure; 834 } 835 pmcs_report_fwversion(pwp); 836 837 /* 838 * Now do some additonal allocations based upon information 839 * gathered during MPI setup. 
840 */ 841 pwp->root_phys = kmem_zalloc(pwp->nphy * sizeof (pmcs_phy_t), KM_SLEEP); 842 ASSERT(pwp->nphy < SAS2_PHYNUM_MAX); 843 phyp = pwp->root_phys; 844 for (i = 0; i < pwp->nphy; i++) { 845 if (i < pwp->nphy-1) { 846 phyp->sibling = (phyp + 1); 847 } 848 mutex_init(&phyp->phy_lock, NULL, MUTEX_DRIVER, 849 DDI_INTR_PRI(pwp->intr_pri)); 850 phyp->phynum = i & SAS2_PHYNUM_MASK; 851 pmcs_phy_name(pwp, phyp, phyp->path, sizeof (phyp->path)); 852 phyp->pwp = pwp; 853 phyp->device_id = PMCS_INVALID_DEVICE_ID; 854 phyp++; 855 } 856 857 pwp->work = kmem_zalloc(pwp->max_cmd * sizeof (pmcwork_t), KM_SLEEP); 858 for (i = 0; i < pwp->max_cmd - 1; i++) { 859 pmcwork_t *pwrk = &pwp->work[i]; 860 mutex_init(&pwrk->lock, NULL, MUTEX_DRIVER, 861 DDI_INTR_PRI(pwp->intr_pri)); 862 cv_init(&pwrk->sleep_cv, NULL, CV_DRIVER, NULL); 863 STAILQ_INSERT_TAIL(&pwp->wf, pwrk, next); 864 865 } 866 pwp->targets = (pmcs_xscsi_t **) 867 kmem_zalloc(pwp->max_dev * sizeof (pmcs_xscsi_t *), KM_SLEEP); 868 869 pwp->iqpt = (pmcs_iqp_trace_t *) 870 kmem_zalloc(sizeof (pmcs_iqp_trace_t), KM_SLEEP); 871 pwp->iqpt->head = kmem_zalloc(PMCS_IQP_TRACE_BUFFER_SIZE, KM_SLEEP); 872 pwp->iqpt->curpos = pwp->iqpt->head; 873 pwp->iqpt->size_left = PMCS_IQP_TRACE_BUFFER_SIZE; 874 875 /* 876 * Start MPI communication. 877 */ 878 if (pmcs_start_mpi(pwp)) { 879 if (pmcs_soft_reset(pwp, B_FALSE)) { 880 goto failure; 881 } 882 } 883 884 /* 885 * Do some initial acceptance tests. 886 * This tests interrupts and queues. 887 */ 888 if (pmcs_echo_test(pwp)) { 889 goto failure; 890 } 891 892 /* Read VPD - if it exists */ 893 if (pmcs_get_nvmd(pwp, PMCS_NVMD_VPD, PMCIN_NVMD_VPD, 0, NULL, 0)) { 894 pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Unable to read VPD: " 895 "attempting to fabricate", __func__); 896 /* 897 * When we release, this must goto failure and the call 898 * to pmcs_fabricate_wwid is removed. 
899 */ 900 /* goto failure; */ 901 if (!pmcs_fabricate_wwid(pwp)) { 902 goto failure; 903 } 904 } 905 906 /* 907 * We're now officially running 908 */ 909 pwp->state = STATE_RUNNING; 910 911 /* 912 * Check firmware versions and load new firmware 913 * if needed and reset. 914 */ 915 if (pmcs_firmware_update(pwp)) { 916 pmcs_prt(pwp, PMCS_PRT_WARN, "%s: Firmware update failed", 917 __func__); 918 goto failure; 919 } 920 921 /* 922 * Create completion threads. 923 */ 924 for (i = 0; i < pwp->cq_info.cq_threads; i++) { 925 pwp->cq_info.cq_thr_info[i].cq_pwp = pwp; 926 pwp->cq_info.cq_thr_info[i].cq_thread = 927 thread_create(NULL, 0, pmcs_scsa_cq_run, 928 &pwp->cq_info.cq_thr_info[i], 0, &p0, TS_RUN, minclsyspri); 929 } 930 931 /* 932 * Create one thread to deal with the updating of the interrupt 933 * coalescing timer. 934 */ 935 pwp->ict_thread = thread_create(NULL, 0, pmcs_check_intr_coal, 936 pwp, 0, &p0, TS_RUN, minclsyspri); 937 938 /* 939 * Kick off the watchdog 940 */ 941 pwp->wdhandle = timeout(pmcs_watchdog, pwp, 942 drv_usectohz(PMCS_WATCH_INTERVAL)); 943 /* 944 * Do the SCSI attachment code (before starting phys) 945 */ 946 if (pmcs_scsa_init(pwp, &pmcs_dattr)) { 947 goto failure; 948 } 949 pwp->hba_attached = 1; 950 951 /* 952 * Initialize the rwlock for the iport elements. 
953 */ 954 rw_init(&pwp->iports_lock, NULL, RW_DRIVER, NULL); 955 956 /* Check all acc & dma handles allocated in attach */ 957 if (pmcs_check_acc_dma_handle(pwp)) { 958 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST); 959 goto failure; 960 } 961 962 /* 963 * Create the phymap for this HBA instance 964 */ 965 if (sas_phymap_create(dip, phymap_usec, PHYMAP_MODE_SIMPLE, NULL, 966 pwp, pmcs_phymap_activate, pmcs_phymap_deactivate, 967 &pwp->hss_phymap) != DDI_SUCCESS) { 968 pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: pmcs%d phymap_create failed", 969 __func__, inst); 970 goto failure; 971 } 972 ASSERT(pwp->hss_phymap); 973 974 /* 975 * Create the iportmap for this HBA instance 976 */ 977 if (scsi_hba_iportmap_create(dip, iportmap_usec, pwp->nphy, 978 &pwp->hss_iportmap) != DDI_SUCCESS) { 979 pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: pmcs%d iportmap_create " 980 "failed", __func__, inst); 981 goto failure; 982 } 983 ASSERT(pwp->hss_iportmap); 984 985 /* 986 * Start the PHYs. 987 */ 988 if (pmcs_start_phys(pwp)) { 989 goto failure; 990 } 991 992 /* 993 * From this point on, we can't fail. 
994 */ 995 ddi_report_dev(dip); 996 997 /* SM-HBA */ 998 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SMHBA_SUPPORTED, 999 &sm_hba); 1000 1001 /* SM-HBA */ 1002 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_DRV_VERSION, 1003 pmcs_driver_rev); 1004 1005 /* SM-HBA */ 1006 chiprev = 'A' + pwp->chiprev; 1007 (void) snprintf(hw_rev, 2, "%s", &chiprev); 1008 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_HWARE_VERSION, 1009 hw_rev); 1010 1011 /* SM-HBA */ 1012 switch (PMCS_FW_TYPE(pwp)) { 1013 case PMCS_FW_TYPE_RELEASED: 1014 fwsupport = "Released"; 1015 break; 1016 case PMCS_FW_TYPE_DEVELOPMENT: 1017 fwsupport = "Development"; 1018 break; 1019 case PMCS_FW_TYPE_ALPHA: 1020 fwsupport = "Alpha"; 1021 break; 1022 case PMCS_FW_TYPE_BETA: 1023 fwsupport = "Beta"; 1024 break; 1025 default: 1026 fwsupport = "Special"; 1027 break; 1028 } 1029 (void) snprintf(fw_rev, sizeof (fw_rev), "%x.%x.%x %s", 1030 PMCS_FW_MAJOR(pwp), PMCS_FW_MINOR(pwp), PMCS_FW_MICRO(pwp), 1031 fwsupport); 1032 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_FWARE_VERSION, 1033 fw_rev); 1034 1035 /* SM-HBA */ 1036 num_phys = pwp->nphy; 1037 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_NUM_PHYS_HBA, 1038 &num_phys); 1039 1040 /* SM-HBA */ 1041 protocol = SAS_SSP_SUPPORT | SAS_SATA_SUPPORT | SAS_SMP_SUPPORT; 1042 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SUPPORTED_PROTOCOL, 1043 &protocol); 1044 1045 return (DDI_SUCCESS); 1046 1047 failure: 1048 if (pmcs_unattach(pwp)) { 1049 pwp->stuck = 1; 1050 } 1051 return (DDI_FAILURE); 1052 } 1053 1054 int 1055 pmcs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 1056 { 1057 int inst = ddi_get_instance(dip); 1058 pmcs_iport_t *iport = NULL; 1059 pmcs_hw_t *pwp = NULL; 1060 scsi_hba_tran_t *tran; 1061 1062 if (scsi_hba_iport_unit_address(dip) != NULL) { 1063 /* iport node */ 1064 iport = ddi_get_soft_state(pmcs_iport_softstate, inst); 1065 ASSERT(iport); 1066 if (iport == NULL) { 1067 return (DDI_FAILURE); 1068 } 1069 pwp = iport->pwp; 
1070 } else { 1071 /* hba node */ 1072 pwp = (pmcs_hw_t *)ddi_get_soft_state(pmcs_softc_state, inst); 1073 ASSERT(pwp); 1074 if (pwp == NULL) { 1075 return (DDI_FAILURE); 1076 } 1077 } 1078 1079 switch (cmd) { 1080 case DDI_DETACH: 1081 if (iport) { 1082 /* iport detach */ 1083 if (pmcs_iport_unattach(iport)) { 1084 return (DDI_FAILURE); 1085 } 1086 pmcs_prt(pwp, PMCS_PRT_DEBUG, "iport%d detached", inst); 1087 return (DDI_SUCCESS); 1088 } else { 1089 /* HBA detach */ 1090 if (pmcs_unattach(pwp)) { 1091 return (DDI_FAILURE); 1092 } 1093 return (DDI_SUCCESS); 1094 } 1095 1096 case DDI_SUSPEND: 1097 case DDI_PM_SUSPEND: 1098 /* No DDI_SUSPEND on iport nodes */ 1099 if (iport) { 1100 return (DDI_SUCCESS); 1101 } 1102 1103 if (pwp->stuck) { 1104 return (DDI_FAILURE); 1105 } 1106 tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip); 1107 if (!tran) { 1108 return (DDI_FAILURE); 1109 } 1110 1111 pwp = TRAN2PMC(tran); 1112 if (pwp == NULL) { 1113 return (DDI_FAILURE); 1114 } 1115 mutex_enter(&pwp->lock); 1116 if (pwp->tq) { 1117 ddi_taskq_suspend(pwp->tq); 1118 } 1119 pwp->suspended = 1; 1120 mutex_exit(&pwp->lock); 1121 pmcs_prt(pwp, PMCS_PRT_INFO, "PMC8X6G suspending"); 1122 return (DDI_SUCCESS); 1123 1124 default: 1125 return (DDI_FAILURE); 1126 } 1127 } 1128 1129 static int 1130 pmcs_iport_unattach(pmcs_iport_t *iport) 1131 { 1132 pmcs_hw_t *pwp = iport->pwp; 1133 1134 /* 1135 * First, check if there are still any configured targets on this 1136 * iport. If so, we fail detach. 1137 */ 1138 if (pmcs_iport_has_targets(pwp, iport)) { 1139 pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, "iport%d detach failure: " 1140 "iport has targets (luns)", ddi_get_instance(iport->dip)); 1141 return (DDI_FAILURE); 1142 } 1143 1144 /* 1145 * Remove this iport from our list if it is inactive in the phymap. 
1146 */ 1147 rw_enter(&pwp->iports_lock, RW_WRITER); 1148 mutex_enter(&iport->lock); 1149 1150 if (iport->ua_state == UA_ACTIVE) { 1151 mutex_exit(&iport->lock); 1152 rw_exit(&pwp->iports_lock); 1153 pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, "iport%d detach failure: " 1154 "iport unit address active in phymap", 1155 ddi_get_instance(iport->dip)); 1156 return (DDI_FAILURE); 1157 } 1158 1159 /* If it's our only iport, clear iports_attached */ 1160 ASSERT(pwp->num_iports >= 1); 1161 if (--pwp->num_iports == 0) { 1162 pwp->iports_attached = 0; 1163 } 1164 1165 ASSERT(list_link_active(&iport->list_node)); 1166 list_remove(&pwp->iports, iport); 1167 rw_exit(&pwp->iports_lock); 1168 1169 /* 1170 * We have removed the iport handle from the HBA's iports list, 1171 * there will be no new references to it. Two things must be 1172 * guarded against here. First, we could have PHY up events, 1173 * adding themselves to the iport->phys list and grabbing ref's 1174 * on our iport handle. Second, we could have existing references 1175 * to this iport handle from a point in time prior to the list 1176 * removal above. 1177 * 1178 * So first, destroy the phys list. Remove any phys that have snuck 1179 * in after the phymap deactivate, dropping the refcnt accordingly. 1180 * If these PHYs are still up if and when the phymap reactivates 1181 * (i.e. when this iport reattaches), we'll populate the list with 1182 * them and bump the refcnt back up. 1183 */ 1184 pmcs_remove_phy_from_iport(iport, NULL); 1185 ASSERT(list_is_empty(&iport->phys)); 1186 list_destroy(&iport->phys); 1187 mutex_exit(&iport->lock); 1188 1189 /* 1190 * Second, wait for any other references to this iport to be 1191 * dropped, then continue teardown. 
1192 */ 1193 mutex_enter(&iport->refcnt_lock); 1194 while (iport->refcnt != 0) { 1195 cv_wait(&iport->refcnt_cv, &iport->refcnt_lock); 1196 } 1197 mutex_exit(&iport->refcnt_lock); 1198 1199 /* Delete kstats */ 1200 pmcs_destroy_phy_stats(iport); 1201 1202 /* Destroy the iport target map */ 1203 if (pmcs_iport_tgtmap_destroy(iport) == B_FALSE) { 1204 return (DDI_FAILURE); 1205 } 1206 1207 /* Free the tgt soft state */ 1208 if (iport->tgt_sstate != NULL) { 1209 ddi_soft_state_bystr_fini(&iport->tgt_sstate); 1210 } 1211 1212 /* Free our unit address string */ 1213 strfree(iport->ua); 1214 1215 /* Finish teardown and free the softstate */ 1216 mutex_destroy(&iport->refcnt_lock); 1217 ASSERT(iport->refcnt == 0); 1218 cv_destroy(&iport->refcnt_cv); 1219 mutex_destroy(&iport->lock); 1220 ddi_soft_state_free(pmcs_iport_softstate, ddi_get_instance(iport->dip)); 1221 1222 return (DDI_SUCCESS); 1223 } 1224 1225 static int 1226 pmcs_unattach(pmcs_hw_t *pwp) 1227 { 1228 int i; 1229 enum pwpstate curstate; 1230 pmcs_cq_thr_info_t *cqti; 1231 1232 /* 1233 * Tear down the interrupt infrastructure. 1234 */ 1235 if (pmcs_teardown_intr(pwp)) { 1236 pwp->stuck = 1; 1237 } 1238 pwp->intr_cnt = 0; 1239 1240 /* 1241 * Grab a lock, if initted, to set state. 1242 */ 1243 if (pwp->locks_initted) { 1244 mutex_enter(&pwp->lock); 1245 if (pwp->state != STATE_DEAD) { 1246 pwp->state = STATE_UNPROBING; 1247 } 1248 curstate = pwp->state; 1249 mutex_exit(&pwp->lock); 1250 1251 /* 1252 * Stop the I/O completion threads. 
1253 */ 1254 mutex_enter(&pwp->cq_lock); 1255 pwp->cq_info.cq_stop = B_TRUE; 1256 for (i = 0; i < pwp->cq_info.cq_threads; i++) { 1257 if (pwp->cq_info.cq_thr_info[i].cq_thread) { 1258 cqti = &pwp->cq_info.cq_thr_info[i]; 1259 mutex_enter(&cqti->cq_thr_lock); 1260 cv_signal(&cqti->cq_cv); 1261 mutex_exit(&cqti->cq_thr_lock); 1262 mutex_exit(&pwp->cq_lock); 1263 thread_join(cqti->cq_thread->t_did); 1264 mutex_enter(&pwp->cq_lock); 1265 } 1266 } 1267 mutex_exit(&pwp->cq_lock); 1268 1269 /* 1270 * Stop the interrupt coalescing timer thread 1271 */ 1272 if (pwp->ict_thread) { 1273 mutex_enter(&pwp->ict_lock); 1274 pwp->io_intr_coal.stop_thread = B_TRUE; 1275 cv_signal(&pwp->ict_cv); 1276 mutex_exit(&pwp->ict_lock); 1277 thread_join(pwp->ict_thread->t_did); 1278 } 1279 } else { 1280 if (pwp->state != STATE_DEAD) { 1281 pwp->state = STATE_UNPROBING; 1282 } 1283 curstate = pwp->state; 1284 } 1285 1286 if (&pwp->iports != NULL) { 1287 /* Destroy the iports lock */ 1288 rw_destroy(&pwp->iports_lock); 1289 /* Destroy the iports list */ 1290 ASSERT(list_is_empty(&pwp->iports)); 1291 list_destroy(&pwp->iports); 1292 } 1293 1294 if (pwp->hss_iportmap != NULL) { 1295 /* Destroy the iportmap */ 1296 scsi_hba_iportmap_destroy(pwp->hss_iportmap); 1297 } 1298 1299 if (pwp->hss_phymap != NULL) { 1300 /* Destroy the phymap */ 1301 sas_phymap_destroy(pwp->hss_phymap); 1302 } 1303 1304 /* 1305 * Make sure that any pending watchdog won't 1306 * be called from this point on out. 1307 */ 1308 (void) untimeout(pwp->wdhandle); 1309 /* 1310 * After the above action, the watchdog 1311 * timer that starts up the worker task 1312 * may trigger but will exit immediately 1313 * on triggering. 1314 * 1315 * Now that this is done, we can destroy 1316 * the task queue, which will wait if we're 1317 * running something on it. 
1318 */ 1319 if (pwp->tq) { 1320 ddi_taskq_destroy(pwp->tq); 1321 pwp->tq = NULL; 1322 } 1323 1324 pmcs_fm_fini(pwp); 1325 1326 if (pwp->hba_attached) { 1327 (void) scsi_hba_detach(pwp->dip); 1328 pwp->hba_attached = 0; 1329 } 1330 1331 /* 1332 * If the chip hasn't been marked dead, shut it down now 1333 * to bring it back to a known state without attempting 1334 * a soft reset. 1335 */ 1336 if (curstate != STATE_DEAD && pwp->locks_initted) { 1337 /* 1338 * De-register all registered devices 1339 */ 1340 pmcs_deregister_devices(pwp, pwp->root_phys); 1341 1342 /* 1343 * Stop all the phys. 1344 */ 1345 pmcs_stop_phys(pwp); 1346 1347 /* 1348 * Shut Down Message Passing 1349 */ 1350 (void) pmcs_stop_mpi(pwp); 1351 1352 /* 1353 * Reset chip 1354 */ 1355 (void) pmcs_soft_reset(pwp, B_FALSE); 1356 } 1357 1358 /* 1359 * Turn off interrupts on the chip 1360 */ 1361 if (pwp->mpi_acc_handle) { 1362 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff); 1363 } 1364 1365 /* Destroy pwp's lock */ 1366 if (pwp->locks_initted) { 1367 mutex_destroy(&pwp->lock); 1368 mutex_destroy(&pwp->dma_lock); 1369 mutex_destroy(&pwp->axil_lock); 1370 mutex_destroy(&pwp->cq_lock); 1371 mutex_destroy(&pwp->config_lock); 1372 mutex_destroy(&pwp->ict_lock); 1373 mutex_destroy(&pwp->wfree_lock); 1374 mutex_destroy(&pwp->pfree_lock); 1375 mutex_destroy(&pwp->dead_phylist_lock); 1376 #ifdef DEBUG 1377 mutex_destroy(&pwp->dbglock); 1378 #endif 1379 cv_destroy(&pwp->ict_cv); 1380 cv_destroy(&pwp->drain_cv); 1381 pwp->locks_initted = 0; 1382 } 1383 1384 /* 1385 * Free DMA handles and associated consistent memory 1386 */ 1387 if (pwp->regdump_hndl) { 1388 if (ddi_dma_unbind_handle(pwp->regdump_hndl) != DDI_SUCCESS) { 1389 pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check failed " 1390 "at %s():%d", __func__, __LINE__); 1391 } 1392 ddi_dma_free_handle(&pwp->regdump_hndl); 1393 ddi_dma_mem_free(&pwp->regdump_acchdl); 1394 pwp->regdump_hndl = 0; 1395 } 1396 if (pwp->fwlog_hndl) { 1397 if 
(ddi_dma_unbind_handle(pwp->fwlog_hndl) != DDI_SUCCESS) { 1398 pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check failed " 1399 "at %s():%d", __func__, __LINE__); 1400 } 1401 ddi_dma_free_handle(&pwp->fwlog_hndl); 1402 ddi_dma_mem_free(&pwp->fwlog_acchdl); 1403 pwp->fwlog_hndl = 0; 1404 } 1405 if (pwp->cip_handles) { 1406 if (ddi_dma_unbind_handle(pwp->cip_handles) != DDI_SUCCESS) { 1407 pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check failed " 1408 "at %s():%d", __func__, __LINE__); 1409 } 1410 ddi_dma_free_handle(&pwp->cip_handles); 1411 ddi_dma_mem_free(&pwp->cip_acchdls); 1412 pwp->cip_handles = 0; 1413 } 1414 for (i = 0; i < PMCS_NOQ; i++) { 1415 if (pwp->oqp_handles[i]) { 1416 if (ddi_dma_unbind_handle(pwp->oqp_handles[i]) != 1417 DDI_SUCCESS) { 1418 pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check " 1419 "failed at %s():%d", __func__, __LINE__); 1420 } 1421 ddi_dma_free_handle(&pwp->oqp_handles[i]); 1422 ddi_dma_mem_free(&pwp->oqp_acchdls[i]); 1423 pwp->oqp_handles[i] = 0; 1424 } 1425 } 1426 for (i = 0; i < PMCS_NIQ; i++) { 1427 if (pwp->iqp_handles[i]) { 1428 if (ddi_dma_unbind_handle(pwp->iqp_handles[i]) != 1429 DDI_SUCCESS) { 1430 pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check " 1431 "failed at %s():%d", __func__, __LINE__); 1432 } 1433 ddi_dma_free_handle(&pwp->iqp_handles[i]); 1434 ddi_dma_mem_free(&pwp->iqp_acchdls[i]); 1435 pwp->iqp_handles[i] = 0; 1436 } 1437 } 1438 1439 pmcs_free_dma_chunklist(pwp); 1440 1441 /* 1442 * Unmap registers and destroy access handles 1443 */ 1444 if (pwp->mpi_acc_handle) { 1445 ddi_regs_map_free(&pwp->mpi_acc_handle); 1446 pwp->mpi_acc_handle = 0; 1447 } 1448 if (pwp->top_acc_handle) { 1449 ddi_regs_map_free(&pwp->top_acc_handle); 1450 pwp->top_acc_handle = 0; 1451 } 1452 if (pwp->gsm_acc_handle) { 1453 ddi_regs_map_free(&pwp->gsm_acc_handle); 1454 pwp->gsm_acc_handle = 0; 1455 } 1456 if (pwp->msg_acc_handle) { 1457 ddi_regs_map_free(&pwp->msg_acc_handle); 1458 pwp->msg_acc_handle = 0; 1459 } 1460 if (pwp->pci_acc_handle) { 1461 
pci_config_teardown(&pwp->pci_acc_handle); 1462 pwp->pci_acc_handle = 0; 1463 } 1464 1465 /* 1466 * Do memory allocation cleanup. 1467 */ 1468 while (pwp->dma_freelist) { 1469 pmcs_dmachunk_t *this = pwp->dma_freelist; 1470 pwp->dma_freelist = this->nxt; 1471 kmem_free(this, sizeof (pmcs_dmachunk_t)); 1472 } 1473 1474 /* 1475 * Free pools 1476 */ 1477 if (pwp->iocomp_cb_cache) { 1478 kmem_cache_destroy(pwp->iocomp_cb_cache); 1479 } 1480 1481 /* 1482 * Free all PHYs (at level > 0), then free the cache 1483 */ 1484 pmcs_free_all_phys(pwp, pwp->root_phys); 1485 if (pwp->phy_cache) { 1486 kmem_cache_destroy(pwp->phy_cache); 1487 } 1488 1489 /* 1490 * Free root PHYs 1491 */ 1492 if (pwp->root_phys) { 1493 pmcs_phy_t *phyp = pwp->root_phys; 1494 for (i = 0; i < pwp->nphy; i++) { 1495 mutex_destroy(&phyp->phy_lock); 1496 phyp = phyp->sibling; 1497 } 1498 kmem_free(pwp->root_phys, pwp->nphy * sizeof (pmcs_phy_t)); 1499 pwp->root_phys = NULL; 1500 pwp->nphy = 0; 1501 } 1502 1503 /* Free the targets list */ 1504 if (pwp->targets) { 1505 kmem_free(pwp->targets, 1506 sizeof (pmcs_xscsi_t *) * pwp->max_dev); 1507 } 1508 1509 /* 1510 * Free work structures 1511 */ 1512 1513 if (pwp->work && pwp->max_cmd) { 1514 for (i = 0; i < pwp->max_cmd - 1; i++) { 1515 pmcwork_t *pwrk = &pwp->work[i]; 1516 mutex_destroy(&pwrk->lock); 1517 cv_destroy(&pwrk->sleep_cv); 1518 } 1519 kmem_free(pwp->work, sizeof (pmcwork_t) * pwp->max_cmd); 1520 pwp->work = NULL; 1521 pwp->max_cmd = 0; 1522 } 1523 1524 /* 1525 * Do last property and SCSA cleanup 1526 */ 1527 if (pwp->tran) { 1528 scsi_hba_tran_free(pwp->tran); 1529 pwp->tran = NULL; 1530 } 1531 if (pwp->reset_notify_listf) { 1532 scsi_hba_reset_notify_tear_down(pwp->reset_notify_listf); 1533 pwp->reset_notify_listf = NULL; 1534 } 1535 ddi_prop_remove_all(pwp->dip); 1536 if (pwp->stuck) { 1537 return (-1); 1538 } 1539 1540 /* Free register dump area if allocated */ 1541 if (pwp->regdumpp) { 1542 kmem_free(pwp->regdumpp, PMCS_REG_DUMP_SIZE); 1543 
pwp->regdumpp = NULL; 1544 } 1545 if (pwp->iqpt && pwp->iqpt->head) { 1546 kmem_free(pwp->iqpt->head, PMCS_IQP_TRACE_BUFFER_SIZE); 1547 pwp->iqpt->head = pwp->iqpt->curpos = NULL; 1548 } 1549 if (pwp->iqpt) { 1550 kmem_free(pwp->iqpt, sizeof (pmcs_iqp_trace_t)); 1551 pwp->iqpt = NULL; 1552 } 1553 1554 ddi_soft_state_free(pmcs_softc_state, ddi_get_instance(pwp->dip)); 1555 return (0); 1556 } 1557 1558 /* 1559 * quiesce (9E) entry point 1560 * 1561 * This function is called when the system is single-threaded at high PIL 1562 * with preemption disabled. Therefore, the function must not block/wait/sleep. 1563 * 1564 * Returns DDI_SUCCESS or DDI_FAILURE. 1565 * 1566 */ 1567 static int 1568 pmcs_quiesce(dev_info_t *dip) 1569 { 1570 pmcs_hw_t *pwp; 1571 scsi_hba_tran_t *tran; 1572 1573 if ((tran = ddi_get_driver_private(dip)) == NULL) 1574 return (DDI_SUCCESS); 1575 1576 /* No quiesce necessary on a per-iport basis */ 1577 if (scsi_hba_iport_unit_address(dip) != NULL) { 1578 return (DDI_SUCCESS); 1579 } 1580 1581 if ((pwp = TRAN2PMC(tran)) == NULL) 1582 return (DDI_SUCCESS); 1583 1584 /* Stop MPI & Reset chip (no need to re-initialize) */ 1585 (void) pmcs_stop_mpi(pwp); 1586 (void) pmcs_soft_reset(pwp, B_TRUE); 1587 1588 return (DDI_SUCCESS); 1589 } 1590 1591 /* 1592 * Called with xp->statlock and PHY lock and scratch acquired. 1593 */ 1594 static int 1595 pmcs_add_sata_device(pmcs_hw_t *pwp, pmcs_xscsi_t *xp) 1596 { 1597 ata_identify_t *ati; 1598 int result, i; 1599 pmcs_phy_t *pptr; 1600 uint16_t *a; 1601 union { 1602 uint8_t nsa[8]; 1603 uint16_t nsb[4]; 1604 } u; 1605 1606 /* 1607 * Safe defaults - use only if this target is brand new (i.e. doesn't 1608 * already have these settings configured) 1609 */ 1610 if (xp->capacity == 0) { 1611 xp->capacity = (uint64_t)-1; 1612 xp->ca = 1; 1613 xp->qdepth = 1; 1614 xp->pio = 1; 1615 } 1616 1617 pptr = xp->phy; 1618 1619 /* 1620 * We only try and issue an IDENTIFY for first level 1621 * (direct attached) devices. 
We don't try and 1622 * set other quirks here (this will happen later, 1623 * if the device is fully configured) 1624 */ 1625 if (pptr->level) { 1626 return (0); 1627 } 1628 1629 mutex_exit(&xp->statlock); 1630 result = pmcs_sata_identify(pwp, pptr); 1631 mutex_enter(&xp->statlock); 1632 1633 if (result) { 1634 return (result); 1635 } 1636 ati = pwp->scratch; 1637 a = &ati->word108; 1638 for (i = 0; i < 4; i++) { 1639 u.nsb[i] = ddi_swap16(*a++); 1640 } 1641 1642 /* 1643 * Check the returned data for being a valid (NAA=5) WWN. 1644 * If so, use that and override the SAS address we were 1645 * given at Link Up time. 1646 */ 1647 if ((u.nsa[0] >> 4) == 5) { 1648 (void) memcpy(pptr->sas_address, u.nsa, 8); 1649 } 1650 pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: %s has SAS ADDRESS " SAS_ADDR_FMT, 1651 __func__, pptr->path, SAS_ADDR_PRT(pptr->sas_address)); 1652 return (0); 1653 } 1654 1655 /* 1656 * Called with PHY lock and target statlock held and scratch acquired 1657 */ 1658 static boolean_t 1659 pmcs_add_new_device(pmcs_hw_t *pwp, pmcs_xscsi_t *target) 1660 { 1661 ASSERT(target != NULL); 1662 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: target = 0x%p", 1663 __func__, (void *) target); 1664 1665 switch (target->phy->dtype) { 1666 case SATA: 1667 if (pmcs_add_sata_device(pwp, target) != 0) { 1668 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, 1669 "%s: add_sata_device failed for tgt 0x%p", 1670 __func__, (void *) target); 1671 return (B_FALSE); 1672 } 1673 break; 1674 case SAS: 1675 target->qdepth = maxqdepth; 1676 break; 1677 case EXPANDER: 1678 target->qdepth = 1; 1679 break; 1680 } 1681 1682 target->new = 0; 1683 target->assigned = 1; 1684 target->dev_state = PMCS_DEVICE_STATE_OPERATIONAL; 1685 target->dtype = target->phy->dtype; 1686 1687 /* 1688 * Set the PHY's config stop time to 0. This is one of the final 1689 * stops along the config path, so we're indicating that we 1690 * successfully configured the PHY. 
1691 */ 1692 target->phy->config_stop = 0; 1693 1694 return (B_TRUE); 1695 } 1696 1697 1698 static void 1699 pmcs_rem_old_devices(pmcs_hw_t *pwp) 1700 { 1701 pmcs_xscsi_t *xp; 1702 int i; 1703 1704 mutex_enter(&pwp->lock); 1705 for (i = 0; i < pwp->max_dev; i++) { 1706 xp = pwp->targets[i]; 1707 if (xp == NULL) { 1708 continue; 1709 } 1710 mutex_exit(&pwp->lock); 1711 1712 mutex_enter(&xp->statlock); 1713 if (xp->dying && (xp->dip != NULL)) { 1714 pmcs_clear_xp(pwp, xp); 1715 /* Target is now gone */ 1716 } 1717 mutex_exit(&xp->statlock); 1718 mutex_enter(&pwp->lock); 1719 } 1720 mutex_exit(&pwp->lock); 1721 } 1722 1723 1724 void 1725 pmcs_worker(void *arg) 1726 { 1727 pmcs_hw_t *pwp = arg; 1728 ulong_t work_flags; 1729 1730 DTRACE_PROBE2(pmcs__worker, ulong_t, pwp->work_flags, boolean_t, 1731 pwp->config_changed); 1732 1733 if (pwp->state != STATE_RUNNING) { 1734 return; 1735 } 1736 1737 work_flags = atomic_swap_ulong(&pwp->work_flags, 0); 1738 1739 if (work_flags & PMCS_WORK_FLAG_SAS_HW_ACK) { 1740 pmcs_ack_events(pwp); 1741 } 1742 1743 if (work_flags & PMCS_WORK_FLAG_SPINUP_RELEASE) { 1744 mutex_enter(&pwp->lock); 1745 pmcs_spinup_release(pwp, NULL); 1746 mutex_exit(&pwp->lock); 1747 } 1748 1749 if (work_flags & PMCS_WORK_FLAG_SSP_EVT_RECOVERY) { 1750 pmcs_ssp_event_recovery(pwp); 1751 } 1752 1753 if (work_flags & PMCS_WORK_FLAG_DS_ERR_RECOVERY) { 1754 pmcs_dev_state_recovery(pwp, NULL); 1755 } 1756 1757 if (work_flags & PMCS_WORK_FLAG_REM_DEVICES) { 1758 pmcs_rem_old_devices(pwp); 1759 } 1760 1761 if (work_flags & PMCS_WORK_FLAG_DISCOVER) { 1762 pmcs_discover(pwp); 1763 } 1764 1765 if (work_flags & PMCS_WORK_FLAG_ABORT_HANDLE) { 1766 if (pmcs_abort_handler(pwp)) { 1767 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE); 1768 } 1769 } 1770 1771 if (work_flags & PMCS_WORK_FLAG_SATA_RUN) { 1772 pmcs_sata_work(pwp); 1773 } 1774 1775 if (work_flags & PMCS_WORK_FLAG_RUN_QUEUES) { 1776 pmcs_scsa_wq_run(pwp); 1777 mutex_enter(&pwp->lock); 1778 PMCS_CQ_RUN(pwp); 1779 
mutex_exit(&pwp->lock); 1780 } 1781 1782 if (work_flags & PMCS_WORK_FLAG_ADD_DMA_CHUNKS) { 1783 if (pmcs_add_more_chunks(pwp, 1784 ptob(1) * PMCS_ADDTL_CHUNK_PAGES)) { 1785 SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS); 1786 } else { 1787 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES); 1788 } 1789 } 1790 } 1791 1792 static int 1793 pmcs_add_more_chunks(pmcs_hw_t *pwp, unsigned long nsize) 1794 { 1795 pmcs_dmachunk_t *dc; 1796 unsigned long dl; 1797 pmcs_chunk_t *pchunk = NULL; 1798 1799 pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t); 1800 1801 pchunk = kmem_zalloc(sizeof (pmcs_chunk_t), KM_SLEEP); 1802 if (pchunk == NULL) { 1803 pmcs_prt(pwp, PMCS_PRT_DEBUG, 1804 "Not enough memory for DMA chunks"); 1805 return (-1); 1806 } 1807 1808 if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pchunk->acc_handle, 1809 &pchunk->dma_handle, nsize, (caddr_t *)&pchunk->addrp, 1810 &pchunk->dma_addr) == B_FALSE) { 1811 pmcs_prt(pwp, PMCS_PRT_DEBUG, "Failed to setup DMA for chunks"); 1812 kmem_free(pchunk, sizeof (pmcs_chunk_t)); 1813 return (-1); 1814 } 1815 1816 if ((pmcs_check_acc_handle(pchunk->acc_handle) != DDI_SUCCESS) || 1817 (pmcs_check_dma_handle(pchunk->dma_handle) != DDI_SUCCESS)) { 1818 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED); 1819 return (-1); 1820 } 1821 1822 bzero(pchunk->addrp, nsize); 1823 dc = NULL; 1824 for (dl = 0; dl < (nsize / PMCS_SGL_CHUNKSZ); dl++) { 1825 pmcs_dmachunk_t *tmp; 1826 tmp = kmem_alloc(sizeof (pmcs_dmachunk_t), KM_SLEEP); 1827 tmp->nxt = dc; 1828 dc = tmp; 1829 } 1830 mutex_enter(&pwp->dma_lock); 1831 pmcs_idma_chunks(pwp, dc, pchunk, nsize); 1832 pwp->nchunks++; 1833 mutex_exit(&pwp->dma_lock); 1834 return (0); 1835 } 1836 1837 1838 static void 1839 pmcs_check_commands(pmcs_hw_t *pwp) 1840 { 1841 pmcs_cmd_t *sp; 1842 size_t amt; 1843 char path[32]; 1844 pmcwork_t *pwrk; 1845 pmcs_xscsi_t *target; 1846 pmcs_phy_t *phyp; 1847 1848 for (pwrk = pwp->work; pwrk < &pwp->work[pwp->max_cmd]; pwrk++) { 1849 mutex_enter(&pwrk->lock); 1850 
1851 /* 1852 * If the command isn't active, we can't be timing it still. 1853 * Active means the tag is not free and the state is "on chip". 1854 */ 1855 if (!PMCS_COMMAND_ACTIVE(pwrk)) { 1856 mutex_exit(&pwrk->lock); 1857 continue; 1858 } 1859 1860 /* 1861 * No timer active for this command. 1862 */ 1863 if (pwrk->timer == 0) { 1864 mutex_exit(&pwrk->lock); 1865 continue; 1866 } 1867 1868 /* 1869 * Knock off bits for the time interval. 1870 */ 1871 if (pwrk->timer >= US2WT(PMCS_WATCH_INTERVAL)) { 1872 pwrk->timer -= US2WT(PMCS_WATCH_INTERVAL); 1873 } else { 1874 pwrk->timer = 0; 1875 } 1876 if (pwrk->timer > 0) { 1877 mutex_exit(&pwrk->lock); 1878 continue; 1879 } 1880 1881 /* 1882 * The command has now officially timed out. 1883 * Get the path for it. If it doesn't have 1884 * a phy pointer any more, it's really dead 1885 * and can just be put back on the free list. 1886 * There should *not* be any commands associated 1887 * with it any more. 1888 */ 1889 if (pwrk->phy == NULL) { 1890 pmcs_prt(pwp, PMCS_PRT_DEBUG, 1891 "dead command with gone phy being recycled"); 1892 ASSERT(pwrk->xp == NULL); 1893 pmcs_pwork(pwp, pwrk); 1894 continue; 1895 } 1896 amt = sizeof (path); 1897 amt = min(sizeof (pwrk->phy->path), amt); 1898 (void) memcpy(path, pwrk->phy->path, amt); 1899 1900 /* 1901 * If this is a non-SCSA command, stop here. Eventually 1902 * we might do something with non-SCSA commands here- 1903 * but so far their timeout mechanisms are handled in 1904 * the WAIT_FOR macro. 1905 */ 1906 if (pwrk->xp == NULL) { 1907 pmcs_prt(pwp, PMCS_PRT_DEBUG, 1908 "%s: non-SCSA cmd tag 0x%x timed out", 1909 path, pwrk->htag); 1910 mutex_exit(&pwrk->lock); 1911 continue; 1912 } 1913 1914 sp = pwrk->arg; 1915 ASSERT(sp != NULL); 1916 1917 /* 1918 * Mark it as timed out. 
1919 */ 1920 CMD2PKT(sp)->pkt_reason = CMD_TIMEOUT; 1921 CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT; 1922 #ifdef DEBUG 1923 pmcs_prt(pwp, PMCS_PRT_DEBUG, 1924 "%s: SCSA cmd tag 0x%x timed out (state %x) onwire=%d", 1925 path, pwrk->htag, pwrk->state, pwrk->onwire); 1926 #else 1927 pmcs_prt(pwp, PMCS_PRT_DEBUG, 1928 "%s: SCSA cmd tag 0x%x timed out (state %x)", 1929 path, pwrk->htag, pwrk->state); 1930 #endif 1931 /* 1932 * Mark the work structure as timed out. 1933 */ 1934 pwrk->state = PMCS_WORK_STATE_TIMED_OUT; 1935 phyp = pwrk->phy; 1936 target = pwrk->xp; 1937 mutex_exit(&pwrk->lock); 1938 1939 pmcs_lock_phy(phyp); 1940 mutex_enter(&target->statlock); 1941 1942 /* 1943 * No point attempting recovery if the device is gone 1944 */ 1945 if (pwrk->xp->dev_gone) { 1946 mutex_exit(&target->statlock); 1947 pmcs_unlock_phy(phyp); 1948 pmcs_prt(pwp, PMCS_PRT_DEBUG, 1949 "%s: tgt(0x%p) is gone. Returning CMD_DEV_GONE " 1950 "for htag 0x%08x", __func__, 1951 (void *)pwrk->xp, pwrk->htag); 1952 mutex_enter(&pwrk->lock); 1953 if (!PMCS_COMMAND_DONE(pwrk)) { 1954 /* Complete this command here */ 1955 pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: " 1956 "Completing cmd (htag 0x%08x) " 1957 "anyway", __func__, pwrk->htag); 1958 pwrk->dead = 1; 1959 CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE; 1960 CMD2PKT(sp)->pkt_state = STATE_GOT_BUS; 1961 pmcs_complete_work_impl(pwp, pwrk, NULL, 0); 1962 } else { 1963 mutex_exit(&pwrk->lock); 1964 } 1965 continue; 1966 } 1967 1968 /* 1969 * See if we're already waiting for device state recovery 1970 */ 1971 if (target->recover_wait) { 1972 pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE, 1973 "%s: Target %p already in recovery", __func__, 1974 (void *)target); 1975 mutex_exit(&target->statlock); 1976 pmcs_unlock_phy(phyp); 1977 continue; 1978 } 1979 1980 pmcs_start_dev_state_recovery(target, phyp); 1981 mutex_exit(&target->statlock); 1982 pmcs_unlock_phy(phyp); 1983 } 1984 /* 1985 * Run any completions that may have been queued up. 
	 */
	PMCS_CQ_RUN(pwp);
}

/*
 * Periodic softclock callout.  Dispatches the worker taskq when any
 * work flags are pending, re-arms itself for PMCS_WATCH_INTERVAL, and
 * then scans for timed-out commands and reaps dead PHYs.  Stops
 * rescheduling once the HBA leaves STATE_RUNNING.
 */
static void
pmcs_watchdog(void *arg)
{
	pmcs_hw_t *pwp = arg;

	DTRACE_PROBE2(pmcs__watchdog, ulong_t, pwp->work_flags, boolean_t,
	    pwp->config_changed);

	mutex_enter(&pwp->lock);

	if (pwp->state != STATE_RUNNING) {
		/* Teardown in progress (or dead); do not re-arm */
		mutex_exit(&pwp->lock);
		return;
	}

	/* atomic_cas with (0, 0) is an atomic read of work_flags */
	if (atomic_cas_ulong(&pwp->work_flags, 0, 0) != 0) {
		if (ddi_taskq_dispatch(pwp->tq, pmcs_worker, pwp,
		    DDI_NOSLEEP) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG,
			    "Could not dispatch to worker thread");
		}
	}
	pwp->wdhandle = timeout(pmcs_watchdog, pwp,
	    drv_usectohz(PMCS_WATCH_INTERVAL));
	mutex_exit(&pwp->lock);
	/* Both of these take their own locks; called with pwp->lock dropped */
	pmcs_check_commands(pwp);
	pmcs_handle_dead_phys(pwp);
}

/*
 * Remove the first icnt interrupt handlers registered in ih_table.
 * Stops at the first failure.  Returns 0 on success, -1 on failure.
 */
static int
pmcs_remove_ihandlers(pmcs_hw_t *pwp, int icnt)
{
	int i, r, rslt = 0;
	for (i = 0; i < icnt; i++) {
		r = ddi_intr_remove_handler(pwp->ih_table[i]);
		if (r == DDI_SUCCESS) {
			continue;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "%s: unable to remove interrupt handler %d", __func__, i);
		rslt = -1;
		break;
	}
	return (rslt);
}

/*
 * Disable interrupts.  When the controller reports block-disable
 * capability, all intr_cnt vectors are disabled in one call (icnt is
 * not used on that path); otherwise the first icnt vectors are
 * disabled individually.  Returns 0 on success, -1 on failure.
 */
static int
pmcs_disable_intrs(pmcs_hw_t *pwp, int icnt)
{
	if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		int r = ddi_intr_block_disable(&pwp->ih_table[0],
		    pwp->intr_cnt);
		if (r != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG,
			    "unable to disable interrupt block");
			return (-1);
		}
	} else {
		int i;
		for (i = 0; i < icnt; i++) {
			if (ddi_intr_disable(pwp->ih_table[i]) == DDI_SUCCESS) {
				continue;
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG,
			    "unable to disable interrupt %d", i);
			return (-1);
		}
	}
	return (0);
}

/*
 * Free the first icnt allocated interrupt vectors and, if they all
 * free successfully, release the ih_table itself.  Returns 0 on
 * success, -1 on failure.
 */
static int
pmcs_free_intrs(pmcs_hw_t *pwp, int icnt)
{
	int i;
	for (i = 0; i < icnt; i++) {
		if (ddi_intr_free(pwp->ih_table[i]) == DDI_SUCCESS) {
continue; 2068 } 2069 pmcs_prt(pwp, PMCS_PRT_DEBUG, "unable to free interrupt %d", i); 2070 return (-1); 2071 } 2072 kmem_free(pwp->ih_table, pwp->ih_table_size); 2073 pwp->ih_table_size = 0; 2074 return (0); 2075 } 2076 2077 /* 2078 * Try to set up interrupts of type "type" with a minimum number of interrupts 2079 * of "min". 2080 */ 2081 static void 2082 pmcs_setup_intr_impl(pmcs_hw_t *pwp, int type, int min) 2083 { 2084 int rval, avail, count, actual, max; 2085 2086 rval = ddi_intr_get_nintrs(pwp->dip, type, &count); 2087 if ((rval != DDI_SUCCESS) || (count < min)) { 2088 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, 2089 "%s: get_nintrs failed; type: %d rc: %d count: %d min: %d", 2090 __func__, type, rval, count, min); 2091 return; 2092 } 2093 2094 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, 2095 "%s: nintrs = %d for type: %d", __func__, count, type); 2096 2097 rval = ddi_intr_get_navail(pwp->dip, type, &avail); 2098 if ((rval != DDI_SUCCESS) || (avail < min)) { 2099 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, 2100 "%s: get_navail failed; type: %d rc: %d avail: %d min: %d", 2101 __func__, type, rval, avail, min); 2102 return; 2103 } 2104 2105 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, 2106 "%s: navail = %d for type: %d", __func__, avail, type); 2107 2108 pwp->ih_table_size = avail * sizeof (ddi_intr_handle_t); 2109 pwp->ih_table = kmem_alloc(pwp->ih_table_size, KM_SLEEP); 2110 2111 switch (type) { 2112 case DDI_INTR_TYPE_MSIX: 2113 pwp->int_type = PMCS_INT_MSIX; 2114 max = PMCS_MAX_MSIX; 2115 break; 2116 case DDI_INTR_TYPE_MSI: 2117 pwp->int_type = PMCS_INT_MSI; 2118 max = PMCS_MAX_MSI; 2119 break; 2120 case DDI_INTR_TYPE_FIXED: 2121 default: 2122 pwp->int_type = PMCS_INT_FIXED; 2123 max = PMCS_MAX_FIXED; 2124 break; 2125 } 2126 2127 rval = ddi_intr_alloc(pwp->dip, pwp->ih_table, type, 0, max, &actual, 2128 DDI_INTR_ALLOC_NORMAL); 2129 if (rval != DDI_SUCCESS) { 2130 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, 2131 "%s: ddi_intr_alloc failed; type: %d rc: %d", 2132 __func__, type, rval); 2133 
kmem_free(pwp->ih_table, pwp->ih_table_size); 2134 pwp->ih_table = NULL; 2135 pwp->ih_table_size = 0; 2136 pwp->intr_cnt = 0; 2137 pwp->int_type = PMCS_INT_NONE; 2138 return; 2139 } 2140 2141 pwp->intr_cnt = actual; 2142 } 2143 2144 /* 2145 * Set up interrupts. 2146 * We return one of three values: 2147 * 2148 * 0 - success 2149 * EAGAIN - failure to set up interrupts 2150 * EIO - "" + we're now stuck partly enabled 2151 * 2152 * If EIO is returned, we can't unload the driver. 2153 */ 2154 static int 2155 pmcs_setup_intr(pmcs_hw_t *pwp) 2156 { 2157 int i, r, itypes, oqv_count; 2158 ddi_intr_handler_t **iv_table; 2159 size_t iv_table_size; 2160 uint_t pri; 2161 2162 if (ddi_intr_get_supported_types(pwp->dip, &itypes) != DDI_SUCCESS) { 2163 pmcs_prt(pwp, PMCS_PRT_DEBUG, "cannot get interrupt types"); 2164 return (EAGAIN); 2165 } 2166 2167 if (disable_msix) { 2168 itypes &= ~DDI_INTR_TYPE_MSIX; 2169 } 2170 if (disable_msi) { 2171 itypes &= ~DDI_INTR_TYPE_MSI; 2172 } 2173 2174 /* 2175 * We won't know what firmware we're running until we call pmcs_setup, 2176 * and we can't call pmcs_setup until we establish interrupts. 2177 */ 2178 2179 pwp->int_type = PMCS_INT_NONE; 2180 2181 /* 2182 * We want PMCS_MAX_MSIX vectors for MSI-X. Anything less would be 2183 * uncivilized. 
2184 */ 2185 if (itypes & DDI_INTR_TYPE_MSIX) { 2186 pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSIX, PMCS_MAX_MSIX); 2187 if (pwp->int_type == PMCS_INT_MSIX) { 2188 itypes = 0; 2189 } 2190 } 2191 2192 if (itypes & DDI_INTR_TYPE_MSI) { 2193 pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSI, 1); 2194 if (pwp->int_type == PMCS_INT_MSI) { 2195 itypes = 0; 2196 } 2197 } 2198 2199 if (itypes & DDI_INTR_TYPE_FIXED) { 2200 pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_FIXED, 1); 2201 if (pwp->int_type == PMCS_INT_FIXED) { 2202 itypes = 0; 2203 } 2204 } 2205 2206 if (pwp->intr_cnt == 0) { 2207 pmcs_prt(pwp, PMCS_PRT_ERR, "No interrupts available"); 2208 return (EAGAIN); 2209 } 2210 2211 iv_table_size = sizeof (ddi_intr_handler_t *) * pwp->intr_cnt; 2212 iv_table = kmem_alloc(iv_table_size, KM_SLEEP); 2213 2214 /* 2215 * Get iblock cookie and add handlers. 2216 */ 2217 switch (pwp->intr_cnt) { 2218 case 1: 2219 iv_table[0] = pmcs_all_intr; 2220 break; 2221 case 2: 2222 iv_table[0] = pmcs_iodone_ix; 2223 iv_table[1] = pmcs_nonio_ix; 2224 break; 2225 case 4: 2226 iv_table[PMCS_MSIX_GENERAL] = pmcs_general_ix; 2227 iv_table[PMCS_MSIX_IODONE] = pmcs_iodone_ix; 2228 iv_table[PMCS_MSIX_EVENTS] = pmcs_event_ix; 2229 iv_table[PMCS_MSIX_FATAL] = pmcs_fatal_ix; 2230 break; 2231 default: 2232 pmcs_prt(pwp, PMCS_PRT_DEBUG, 2233 "%s: intr_cnt = %d - unexpected", __func__, pwp->intr_cnt); 2234 kmem_free(iv_table, iv_table_size); 2235 return (EAGAIN); 2236 } 2237 2238 for (i = 0; i < pwp->intr_cnt; i++) { 2239 r = ddi_intr_add_handler(pwp->ih_table[i], iv_table[i], 2240 (caddr_t)pwp, NULL); 2241 if (r != DDI_SUCCESS) { 2242 kmem_free(iv_table, iv_table_size); 2243 if (pmcs_remove_ihandlers(pwp, i)) { 2244 return (EIO); 2245 } 2246 if (pmcs_free_intrs(pwp, i)) { 2247 return (EIO); 2248 } 2249 pwp->intr_cnt = 0; 2250 return (EAGAIN); 2251 } 2252 } 2253 2254 kmem_free(iv_table, iv_table_size); 2255 2256 if (ddi_intr_get_cap(pwp->ih_table[0], &pwp->intr_cap) != DDI_SUCCESS) { 2257 pmcs_prt(pwp, 
	    PMCS_PRT_DEBUG, "unable to get int capabilities");
		/*
		 * Back out the handlers and vectors set up so far;
		 * EAGAIN tells the caller to retry (e.g. with a
		 * different interrupt type).
		 */
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
		return (EAGAIN);
	}

	/*
	 * Enable the vectors: block-enable when the implementation
	 * supports it, otherwise enable one vector at a time.
	 */
	if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		r = ddi_intr_block_enable(&pwp->ih_table[0], pwp->intr_cnt);
		if (r != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, "intr blk enable failed");
			if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			pwp->intr_cnt = 0;
			return (EFAULT);
		}
	} else {
		for (i = 0; i < pwp->intr_cnt; i++) {
			r = ddi_intr_enable(pwp->ih_table[i]);
			if (r == DDI_SUCCESS) {
				continue;
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG,
			    "unable to enable interrupt %d", i);
			/* Only the first i vectors were enabled; disable those */
			if (pmcs_disable_intrs(pwp, i)) {
				return (EIO);
			}
			if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			pwp->intr_cnt = 0;
			return (EAGAIN);
		}
	}

	/*
	 * Set up locks.
	 */
	if (ddi_intr_get_pri(pwp->ih_table[0], &pri) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "unable to get interrupt priority");
		if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
		return (EAGAIN);
	}

	/*
	 * All driver mutexes are initialized at the interrupt priority so
	 * they may safely be acquired from interrupt context.
	 */
	pwp->locks_initted = 1;
	pwp->intr_pri = pri;
	mutex_init(&pwp->lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->dma_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->axil_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->cq_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->ict_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->config_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->wfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->pfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->dead_phylist_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pri));
#ifdef DEBUG
	mutex_init(&pwp->dbglock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
#endif
	cv_init(&pwp->ict_cv, NULL, CV_DRIVER, NULL);
	cv_init(&pwp->drain_cv, NULL, CV_DRIVER, NULL);
	for (i = 0; i < PMCS_NIQ; i++) {
		mutex_init(&pwp->iqp_lock[i], NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
	}
	for (i = 0; i < pwp->cq_info.cq_threads; i++) {
		mutex_init(&pwp->cq_info.cq_thr_info[i].cq_thr_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
		cv_init(&pwp->cq_info.cq_thr_info[i].cq_cv, NULL,
		    CV_DRIVER, NULL);
	}

	pmcs_prt(pwp, PMCS_PRT_INFO, "%d %s interrup%s configured",
	    pwp->intr_cnt, (pwp->int_type == PMCS_INT_MSIX)? "MSI-X" :
	    ((pwp->int_type == PMCS_INT_MSI)? "MSI" : "INT-X"),
	    pwp->intr_cnt == 1? "t" : "ts");


	/*
	 * Enable Interrupts
	 */
	if (pwp->intr_cnt > PMCS_NOQ) {
		oqv_count = pwp->intr_cnt;
	} else {
		oqv_count = PMCS_NOQ;
	}
	/*
	 * Build the doorbell mask: start all-ones (masked) and clear
	 * (enable) one bit per outbound queue vector in use.
	 */
	for (pri = 0xffffffff, i = 0; i < oqv_count; i++) {
		pri ^= (1 << i);
	}

	mutex_enter(&pwp->lock);
	pwp->intr_mask = pri;
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
	mutex_exit(&pwp->lock);

	return (0);
}

/*
 * Disable, remove and free all interrupts currently set up, if any.
 * Returns 0 on success, EIO if any DDI teardown step fails.
 */
static int
pmcs_teardown_intr(pmcs_hw_t *pwp)
{
	if (pwp->intr_cnt) {
		if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
	}
	return (0);
}

/*
 * Interrupt handler for the general (non-I/O) outbound queue vector.
 */
static uint_t
pmcs_general_ix(caddr_t arg1, caddr_t arg2)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
	_NOTE(ARGUNUSED(arg2));
	pmcs_general_intr(pwp);
	return (DDI_INTR_CLAIMED);
}

/*
 * Interrupt handler for the events outbound queue vector.
 */
static uint_t
pmcs_event_ix(caddr_t arg1, caddr_t arg2)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
	_NOTE(ARGUNUSED(arg2));
	pmcs_event_intr(pwp);
	return (DDI_INTR_CLAIMED);
}

/*
 * Interrupt handler for the I/O completion outbound queue vector.
 */
static uint_t
pmcs_iodone_ix(caddr_t arg1, caddr_t arg2)
{
	_NOTE(ARGUNUSED(arg2));
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);

	/*
	 * It's possible that if we just turned interrupt coalescing off
	 * (and thus, re-enabled auto clear for interrupts on the I/O outbound
	 * queue) that there was an interrupt already pending. We use
	 * io_intr_coal.int_cleared to ensure that we still drop in here and
	 * clear the appropriate interrupt bit one last time.
	 */
	mutex_enter(&pwp->ict_lock);
	if (pwp->io_intr_coal.timer_on ||
	    (pwp->io_intr_coal.int_cleared == B_FALSE)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_IODONE));
		pwp->io_intr_coal.int_cleared = B_TRUE;
	}
	mutex_exit(&pwp->ict_lock);

	pmcs_iodone_intr(pwp);

	return (DDI_INTR_CLAIMED);
}

/*
 * Interrupt handler for the fatal-interrupt vector.
 */
static uint_t
pmcs_fatal_ix(caddr_t arg1, caddr_t arg2)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
	_NOTE(ARGUNUSED(arg2));
	pmcs_fatal_handler(pwp);
	return (DDI_INTR_CLAIMED);
}

/*
 * Combined handler for non-I/O interrupts: fatal, general and events.
 */
static uint_t
pmcs_nonio_ix(caddr_t arg1, caddr_t arg2)
{
	_NOTE(ARGUNUSED(arg2));
	pmcs_hw_t *pwp = (void *)arg1;
	uint32_t obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB);

	/*
	 * Check for Fatal Interrupts
	 */
	if (obdb & (1 << PMCS_FATAL_INTERRUPT)) {
		pmcs_fatal_handler(pwp);
		return (DDI_INTR_CLAIMED);
	}

	if (obdb & (1 << PMCS_OQ_GENERAL)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_GENERAL));
		/* Service both the general and event queues on this vector */
		pmcs_general_intr(pwp);
		pmcs_event_intr(pwp);
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Single-vector (INT-X/MSI) interrupt handler: reads the outbound
 * doorbell register and dispatches every pending source in turn.
 */
static uint_t
pmcs_all_intr(caddr_t arg1, caddr_t arg2)
{
	_NOTE(ARGUNUSED(arg2));
	pmcs_hw_t *pwp = (void *) arg1;
	uint32_t obdb;
	int handled = 0;

	obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB);

	/*
	 * Check for Fatal Interrupts
	 */
	if (obdb & (1 << PMCS_FATAL_INTERRUPT)) {
		pmcs_fatal_handler(pwp);
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * Check for Outbound Queue service needed
	 */
	if (obdb & (1 << PMCS_OQ_IODONE)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_IODONE));
		obdb ^= (1 << PMCS_OQ_IODONE);
		handled++;
		pmcs_iodone_intr(pwp);
	}
	if (obdb & (1 << PMCS_OQ_GENERAL)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 <<
	    PMCS_OQ_GENERAL));
		obdb ^= (1 << PMCS_OQ_GENERAL);
		handled++;
		pmcs_general_intr(pwp);
	}
	if (obdb & (1 << PMCS_OQ_EVENTS)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_EVENTS));
		obdb ^= (1 << PMCS_OQ_EVENTS);
		handled++;
		pmcs_event_intr(pwp);
	}
	if (obdb) {
		/* Unexpected doorbell bits: log them and clear so we make progress */
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "interrupt bits not handled (0x%x)", obdb);
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, obdb);
		handled++;
	}
	if (pwp->int_type == PMCS_INT_MSI) {
		/*
		 * NOTE(review): for MSI the interrupt is always claimed
		 * even if nothing was serviced -- presumably because MSI
		 * vectors are not shared; confirm.
		 */
		handled++;
	}
	return (handled? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
}

/*
 * Handle a fatal firmware interrupt: mark the HBA dead, capture a
 * register dump, mask and clear all doorbells, and post an FMA ereport.
 * In DEBUG builds the system is panicked for post-mortem analysis.
 */
void
pmcs_fatal_handler(pmcs_hw_t *pwp)
{
	pmcs_prt(pwp, PMCS_PRT_ERR, "Fatal Interrupt caught");
	mutex_enter(&pwp->lock);
	pwp->state = STATE_DEAD;
	pmcs_register_dump_int(pwp);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
	mutex_exit(&pwp->lock);
	pmcs_fm_ereport(pwp, DDI_FM_DEVICE_NO_RESPONSE);
	ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);

#ifdef DEBUG
	cmn_err(CE_PANIC, "PMCS Fatal Firmware Error");
#endif
}

/*
 * Called with PHY lock and target statlock held and scratch acquired.
 */
boolean_t
pmcs_assign_device(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt)
{
	pmcs_phy_t *pptr = tgt->phy;

	switch (pptr->dtype) {
	case SAS:
	case EXPANDER:
		break;
	case SATA:
		/* NOTE(review): 'ca' flag set only for SATA -- semantics defined elsewhere */
		tgt->ca = 1;
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
		    "%s: Target %p has PHY %p with invalid dtype",
		    __func__, (void *)tgt, (void *)pptr);
		return (B_FALSE);
	}

	/* Reset target state for a fresh configuration attempt */
	tgt->new = 1;
	tgt->dev_gone = 0;
	tgt->dying = 0;
	tgt->recover_wait = 0;

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
	    "%s: config %s vtgt %u for " SAS_ADDR_FMT, __func__,
	    pptr->path, tgt->target_num, SAS_ADDR_PRT(pptr->sas_address));

	if (pmcs_add_new_device(pwp, tgt) != B_TRUE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
		    "%s: Failed for vtgt %u / WWN " SAS_ADDR_FMT, __func__,
		    tgt->target_num, SAS_ADDR_PRT(pptr->sas_address));
		/*
		 * NOTE(review): statlock is documented as held by the
		 * caller, yet it is destroyed here on failure -- confirm
		 * the caller does not touch it afterwards.
		 */
		mutex_destroy(&tgt->statlock);
		mutex_destroy(&tgt->wqlock);
		mutex_destroy(&tgt->aqlock);
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Called with softstate lock held
 */
void
pmcs_remove_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_xscsi_t *xp;
	unsigned int vtgt;

	ASSERT(mutex_owned(&pwp->lock));

	/* Find the target (if any) currently bound to this PHY */
	for (vtgt = 0; vtgt < pwp->max_dev; vtgt++) {
		xp = pwp->targets[vtgt];
		if (xp == NULL) {
			continue;
		}

		mutex_enter(&xp->statlock);
		if (xp->phy == pptr) {
			if (xp->new) {
				/* Never fully configured; just cancel */
				xp->new = 0;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
				    "cancel config of vtgt %u", vtgt);
			} else {
				/* Mark dying and defer removal to the worker */
				xp->assigned = 0;
				xp->dying = 1;
				SCHEDULE_WORK(pwp, PMCS_WORK_REM_DEVICES);
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
				    "Scheduling removal of tgt 0x%p vtgt %u",
				    (void *)xp, vtgt);
			}
			mutex_exit(&xp->statlock);
			break;
		}
		mutex_exit(&xp->statlock);
	}
}

/*
 * Trace/log implementation behind the pmcs_prt() macro: records the
 * message in the circular trace buffer and optionally mirrors it to
 * the system log depending on 'level' and pmcs_force_syslog.
 */
void
pmcs_prt_impl(pmcs_hw_t *pwp,
pmcs_prt_level_t level, const char *fmt, ...) 2630 { 2631 va_list ap; 2632 int written = 0; 2633 char *ptr; 2634 uint32_t elem_size = PMCS_TBUF_ELEM_SIZE - 1; 2635 boolean_t system_log; 2636 int system_log_level; 2637 2638 switch (level) { 2639 case PMCS_PRT_DEBUG_DEVEL: 2640 case PMCS_PRT_DEBUG_DEV_STATE: 2641 case PMCS_PRT_DEBUG_PHY_LOCKING: 2642 case PMCS_PRT_DEBUG_SCSI_STATUS: 2643 case PMCS_PRT_DEBUG_UNDERFLOW: 2644 case PMCS_PRT_DEBUG_CONFIG: 2645 case PMCS_PRT_DEBUG_IPORT: 2646 case PMCS_PRT_DEBUG_MAP: 2647 case PMCS_PRT_DEBUG3: 2648 case PMCS_PRT_DEBUG2: 2649 case PMCS_PRT_DEBUG1: 2650 case PMCS_PRT_DEBUG: 2651 system_log = B_FALSE; 2652 break; 2653 case PMCS_PRT_INFO: 2654 system_log = B_TRUE; 2655 system_log_level = CE_CONT; 2656 break; 2657 case PMCS_PRT_WARN: 2658 system_log = B_TRUE; 2659 system_log_level = CE_NOTE; 2660 break; 2661 case PMCS_PRT_ERR: 2662 system_log = B_TRUE; 2663 system_log_level = CE_WARN; 2664 break; 2665 default: 2666 return; 2667 } 2668 2669 mutex_enter(&pmcs_trace_lock); 2670 gethrestime(&pmcs_tbuf_ptr->timestamp); 2671 ptr = pmcs_tbuf_ptr->buf; 2672 written += snprintf(ptr, elem_size, "pmcs%d:%d: ", 2673 ddi_get_instance(pwp->dip), level); 2674 ptr += strlen(ptr); 2675 va_start(ap, fmt); 2676 written += vsnprintf(ptr, elem_size - written, fmt, ap); 2677 va_end(ap); 2678 if (written > elem_size - 1) { 2679 /* Indicate truncation */ 2680 pmcs_tbuf_ptr->buf[elem_size - 1] = '+'; 2681 } 2682 if (++pmcs_tbuf_idx == pmcs_tbuf_num_elems) { 2683 pmcs_tbuf_ptr = pmcs_tbuf; 2684 pmcs_tbuf_wrap = B_TRUE; 2685 pmcs_tbuf_idx = 0; 2686 } else { 2687 ++pmcs_tbuf_ptr; 2688 } 2689 mutex_exit(&pmcs_trace_lock); 2690 2691 /* 2692 * When pmcs_force_syslog in non-zero, everything goes also 2693 * to syslog, at CE_CONT level. 2694 */ 2695 if (pmcs_force_syslog) { 2696 system_log = B_TRUE; 2697 system_log_level = CE_CONT; 2698 } 2699 2700 /* 2701 * Anything that comes in with PMCS_PRT_INFO, WARN, or ERR also 2702 * goes to syslog. 
2703 */ 2704 if (system_log) { 2705 char local[196]; 2706 2707 switch (system_log_level) { 2708 case CE_CONT: 2709 (void) snprintf(local, sizeof (local), "%sINFO: ", 2710 pmcs_console ? "" : "?"); 2711 break; 2712 case CE_NOTE: 2713 case CE_WARN: 2714 local[0] = 0; 2715 break; 2716 default: 2717 return; 2718 } 2719 2720 ptr = local; 2721 ptr += strlen(local); 2722 (void) snprintf(ptr, (sizeof (local)) - 2723 ((size_t)ptr - (size_t)local), "pmcs%d: ", 2724 ddi_get_instance(pwp->dip)); 2725 ptr += strlen(ptr); 2726 va_start(ap, fmt); 2727 (void) vsnprintf(ptr, 2728 (sizeof (local)) - ((size_t)ptr - (size_t)local), fmt, ap); 2729 va_end(ap); 2730 if (level == CE_CONT) { 2731 (void) strlcat(local, "\n", sizeof (local)); 2732 } 2733 cmn_err(system_log_level, local); 2734 } 2735 2736 } 2737 2738 /* 2739 * pmcs_acquire_scratch 2740 * 2741 * If "wait" is true, the caller will wait until it can acquire the scratch. 2742 * This implies the caller needs to be in a context where spinning for an 2743 * indeterminate amount of time is acceptable. 2744 */ 2745 int 2746 pmcs_acquire_scratch(pmcs_hw_t *pwp, boolean_t wait) 2747 { 2748 int rval; 2749 2750 if (!wait) { 2751 return (atomic_swap_8(&pwp->scratch_locked, 1)); 2752 } 2753 2754 /* 2755 * Caller will wait for scratch. 
	 */
	while ((rval = atomic_swap_8(&pwp->scratch_locked, 1)) != 0) {
		drv_usecwait(100);
	}

	return (rval);
}

/*
 * Release the scratch area.  A plain byte store suffices; acquisition
 * is done via atomic_swap_8() in pmcs_acquire_scratch().
 */
void
pmcs_release_scratch(pmcs_hw_t *pwp)
{
	pwp->scratch_locked = 0;
}

/*
 * Create the per-PHY SAS kstats for every PHY on an iport.  Safe to
 * call repeatedly: PHYs that already have kstats are skipped.
 */
static void
pmcs_create_phy_stats(pmcs_iport_t *iport)
{
	sas_phy_stats_t *ps;
	pmcs_hw_t *pwp;
	pmcs_phy_t *phyp;
	int ndata;
	char ks_name[KSTAT_STRLEN];

	ASSERT(iport != NULL);
	pwp = iport->pwp;
	ASSERT(pwp != NULL);

	mutex_enter(&iport->lock);

	for (phyp = list_head(&iport->phys);
	    phyp != NULL;
	    phyp = list_next(&iport->phys, phyp)) {

		pmcs_lock_phy(phyp);

		if (phyp->phy_stats != NULL) {
			pmcs_unlock_phy(phyp);
			/* We've already created this kstat instance */
			continue;
		}

		ndata = (sizeof (sas_phy_stats_t)/sizeof (kstat_named_t));

		/* Name: <driver>.<hba wwn>.<iport instance>.<phy number> */
		(void) snprintf(ks_name, sizeof (ks_name),
		    "%s.%llx.%d.%d", ddi_driver_name(iport->dip),
		    (longlong_t)pwp->sas_wwns[0],
		    ddi_get_instance(iport->dip), phyp->phynum);

		phyp->phy_stats = kstat_create("pmcs",
		    ddi_get_instance(iport->dip), ks_name, KSTAT_SAS_PHY_CLASS,
		    KSTAT_TYPE_NAMED, ndata, 0);

		if (phyp->phy_stats == NULL) {
			pmcs_unlock_phy(phyp);
			pmcs_prt(pwp, PMCS_PRT_DEBUG,
			    "%s: Failed to create %s kstats", __func__,
			    ks_name);
			continue;
		}

		ps = (sas_phy_stats_t *)phyp->phy_stats->ks_data;

		kstat_named_init(&ps->seconds_since_last_reset,
		    "SecondsSinceLastReset", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->tx_frames,
		    "TxFrames", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->rx_frames,
		    "RxFrames", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->tx_words,
		    "TxWords", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->rx_words,
		    "RxWords", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->invalid_dword_count,
		    "InvalidDwordCount", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->running_disparity_error_count,
		    "RunningDisparityErrorCount", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->loss_of_dword_sync_count,
		    "LossofDwordSyncCount", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->phy_reset_problem_count,
		    "PhyResetProblemCount", KSTAT_DATA_ULONGLONG);

		phyp->phy_stats->ks_private = phyp;
		phyp->phy_stats->ks_update = pmcs_update_phy_stats;
		kstat_install(phyp->phy_stats);
		pmcs_unlock_phy(phyp);
	}

	mutex_exit(&iport->lock);
}

/*
 * kstat ks_update callback: refresh the per-PHY error counters from
 * the chip's diagnostic registers.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
pmcs_update_phy_stats(kstat_t *ks, int rw)
{
	int val, ret = DDI_FAILURE;
	pmcs_phy_t *pptr = (pmcs_phy_t *)ks->ks_private;
	pmcs_hw_t *pwp = pptr->pwp;
	sas_phy_stats_t *ps = ks->ks_data;

	_NOTE(ARGUNUSED(rw));
	ASSERT((pptr != NULL) && (pwp != NULL));

	/*
	 * We just want to lock against other invocations of kstat;
	 * we don't need to pmcs_lock_phy() for this.
	 */
	mutex_enter(&pptr->phy_lock);

	/* Get Stats from Chip */
	val = pmcs_get_diag_report(pwp, PMCS_INVALID_DWORD_CNT, pptr->phynum);
	if (val == DDI_FAILURE)
		goto fail;
	ps->invalid_dword_count.value.ull = (unsigned long long)val;

	val = pmcs_get_diag_report(pwp, PMCS_DISPARITY_ERR_CNT, pptr->phynum);
	if (val == DDI_FAILURE)
		goto fail;
	ps->running_disparity_error_count.value.ull = (unsigned long long)val;

	val = pmcs_get_diag_report(pwp, PMCS_LOST_DWORD_SYNC_CNT, pptr->phynum);
	if (val == DDI_FAILURE)
		goto fail;
	ps->loss_of_dword_sync_count.value.ull = (unsigned long long)val;

	val = pmcs_get_diag_report(pwp, PMCS_RESET_FAILED_CNT, pptr->phynum);
	if (val == DDI_FAILURE)
		goto fail;
	ps->phy_reset_problem_count.value.ull = (unsigned long long)val;

	ret = DDI_SUCCESS;
fail:
	mutex_exit(&pptr->phy_lock);
	return (ret);
}

static void
pmcs_destroy_phy_stats(pmcs_iport_t *iport)
{
	pmcs_phy_t *phyp;

	ASSERT(iport != NULL);
	mutex_enter(&iport->lock);
	phyp = iport->pptr;
	if (phyp == NULL) {
		mutex_exit(&iport->lock);
		return;
	}

	/*
	 * NOTE(review): only the iport's primary PHY kstat is deleted
	 * here, while pmcs_create_phy_stats() creates one kstat per PHY
	 * on the iport's phy list -- confirm the remaining kstats are
	 * torn down elsewhere, otherwise they leak.
	 */
	pmcs_lock_phy(phyp);
	if (phyp->phy_stats != NULL) {
		kstat_delete(phyp->phy_stats);
		phyp->phy_stats = NULL;
	}
	pmcs_unlock_phy(phyp);

	mutex_exit(&iport->lock);
}

/*ARGSUSED*/
static int
pmcs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

/*
 * Register with IO Fault Services according to pwp->fm_capabilities:
 * flag the access/DMA attributes for FMA, initialize ereport support,
 * and install the error callback when capable.
 */
static void
pmcs_fm_init(pmcs_hw_t *pwp)
{
	ddi_iblock_cookie_t fm_ibc;

	/* Only register with IO Fault Services if we have some capability */
	if (pwp->fm_capabilities) {
		/* Adjust access and dma attributes for FMA */
		pwp->reg_acc_attr.devacc_attr_access |= DDI_FLAGERR_ACC;
		pwp->dev_acc_attr.devacc_attr_access |= DDI_FLAGERR_ACC;
		pwp->iqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		pwp->oqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		pwp->cip_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		pwp->fwlog_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;

		/*
		 * Register capabilities with IO Fault Services.
		 */
		ddi_fm_init(pwp->dip, &pwp->fm_capabilities, &fm_ibc);

		/*
		 * Initialize pci ereport capabilities if ereport
		 * capable (should always be.)
		 */
		if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
			pci_ereport_setup(pwp->dip);
		}

		/*
		 * Register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
			ddi_fm_handler_register(pwp->dip,
			    pmcs_fm_error_cb, (void *) pwp);
		}
	}
}

/*
 * Undo everything pmcs_fm_init() did, in reverse order.
 */
static void
pmcs_fm_fini(pmcs_hw_t *pwp)
{
	/* Only unregister FMA capabilities if registered */
	if (pwp->fm_capabilities) {
		/*
		 * Un-register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
			ddi_fm_handler_unregister(pwp->dip);
		}

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
			pci_ereport_teardown(pwp->dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(pwp->dip);

		/* Adjust access and dma attributes for FMA */
		pwp->reg_acc_attr.devacc_attr_access &= ~DDI_FLAGERR_ACC;
		pwp->dev_acc_attr.devacc_attr_access &= ~DDI_FLAGERR_ACC;
		pwp->iqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		pwp->oqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		pwp->cip_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		pwp->fwlog_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
	}
}

/*
 * Fabricate a WWID for the HBA when none is available from hardware,
 * built from the host serial number (or, when that is zero, a sum over
 * a compile-time date/time string), the driver instance number, and a
 * fixed top nibble.  Each port gets a consecutive address.
 */
static boolean_t
pmcs_fabricate_wwid(pmcs_hw_t *pwp)
{
	char *cp, c;
	uint64_t adr;
	int i;

	cp = &c;
	(void) ddi_strtoul(hw_serial, &cp, 10, (unsigned long *)&adr);
	if (adr == 0) {
		static const char foo[] = __DATE__ __TIME__;
		/* Oh, dear, we're toast */
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "%s: No serial number available to fabricate WWN",
		    __func__);
		for (i = 0; foo[i]; i++) {
			adr += foo[i];
		}
	}
	adr <<= 8;
	adr |= ((uint64_t)ddi_get_instance(pwp->dip) << 52);
	adr |= (5ULL << 60);	/* top nibble = 5 (NAA IEEE Registered) */
	for (i = 0; i < PMCS_MAX_PORTS; i++) {
		pwp->sas_wwns[i] = adr + i;
	}

	return (B_TRUE);
}