1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 * 21 * 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * This file contains various support routines. 
28 */ 29 30 #include <sys/scsi/adapters/pmcs/pmcs.h> 31 32 /* 33 * Local static data 34 */ 35 static int tgtmap_usec = MICROSEC; 36 37 /* 38 * SAS Topology Configuration 39 */ 40 static void pmcs_new_tport(pmcs_hw_t *, pmcs_phy_t *); 41 static void pmcs_configure_expander(pmcs_hw_t *, pmcs_phy_t *, pmcs_iport_t *); 42 43 static void pmcs_check_expanders(pmcs_hw_t *, pmcs_phy_t *); 44 static void pmcs_check_expander(pmcs_hw_t *, pmcs_phy_t *); 45 static void pmcs_clear_expander(pmcs_hw_t *, pmcs_phy_t *, int); 46 47 static int pmcs_expander_get_nphy(pmcs_hw_t *, pmcs_phy_t *); 48 static int pmcs_expander_content_discover(pmcs_hw_t *, pmcs_phy_t *, 49 pmcs_phy_t *); 50 51 static int pmcs_smp_function_result(pmcs_hw_t *, smp_response_frame_t *); 52 static boolean_t pmcs_validate_devid(pmcs_phy_t *, pmcs_phy_t *, uint32_t); 53 static void pmcs_clear_phys(pmcs_hw_t *, pmcs_phy_t *); 54 static int pmcs_configure_new_devices(pmcs_hw_t *, pmcs_phy_t *); 55 static void pmcs_begin_observations(pmcs_hw_t *); 56 static boolean_t pmcs_report_observations(pmcs_hw_t *); 57 static boolean_t pmcs_report_iport_observations(pmcs_hw_t *, pmcs_iport_t *, 58 pmcs_phy_t *); 59 static pmcs_phy_t *pmcs_find_phy_needing_work(pmcs_hw_t *, pmcs_phy_t *); 60 static int pmcs_kill_devices(pmcs_hw_t *, pmcs_phy_t *); 61 static void pmcs_lock_phy_impl(pmcs_phy_t *, int); 62 static void pmcs_unlock_phy_impl(pmcs_phy_t *, int); 63 static pmcs_phy_t *pmcs_clone_phy(pmcs_phy_t *); 64 static boolean_t pmcs_configure_phy(pmcs_hw_t *, pmcs_phy_t *); 65 static void pmcs_reap_dead_phy(pmcs_phy_t *); 66 static pmcs_iport_t *pmcs_get_iport_by_ua(pmcs_hw_t *, char *); 67 static boolean_t pmcs_phy_target_match(pmcs_phy_t *); 68 static void pmcs_iport_active(pmcs_iport_t *); 69 static void pmcs_tgtmap_activate_cb(void *, char *, scsi_tgtmap_tgt_type_t, 70 void **); 71 static boolean_t pmcs_tgtmap_deactivate_cb(void *, char *, 72 scsi_tgtmap_tgt_type_t, void *, scsi_tgtmap_deact_rsn_t); 73 static void 
pmcs_add_dead_phys(pmcs_hw_t *, pmcs_phy_t *); 74 75 /* 76 * Often used strings 77 */ 78 const char pmcs_nowrk[] = "%s: unable to get work structure"; 79 const char pmcs_nomsg[] = "%s: unable to get Inbound Message entry"; 80 const char pmcs_timeo[] = "%s: command timed out"; 81 82 extern const ddi_dma_attr_t pmcs_dattr; 83 84 /* 85 * Some Initial setup steps. 86 */ 87 88 int 89 pmcs_setup(pmcs_hw_t *pwp) 90 { 91 uint32_t barval = pwp->mpibar; 92 uint32_t i, scratch, regbar, regoff, barbar, baroff; 93 uint32_t new_ioq_depth, ferr = 0; 94 95 /* 96 * Check current state. If we're not at READY state, 97 * we can't go further. 98 */ 99 scratch = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1); 100 if ((scratch & PMCS_MSGU_AAP_STATE_MASK) == PMCS_MSGU_AAP_STATE_ERROR) { 101 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 102 "%s: AAP Error State (0x%x)", 103 __func__, pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) & 104 PMCS_MSGU_AAP_ERROR_MASK); 105 pmcs_fm_ereport(pwp, DDI_FM_DEVICE_INVAL_STATE); 106 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST); 107 return (-1); 108 } 109 if ((scratch & PMCS_MSGU_AAP_STATE_MASK) != PMCS_MSGU_AAP_STATE_READY) { 110 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 111 "%s: AAP unit not ready (state 0x%x)", 112 __func__, scratch & PMCS_MSGU_AAP_STATE_MASK); 113 pmcs_fm_ereport(pwp, DDI_FM_DEVICE_INVAL_STATE); 114 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST); 115 return (-1); 116 } 117 118 /* 119 * Read the offset from the Message Unit scratchpad 0 register. 120 * This allows us to read the MPI Configuration table. 121 * 122 * Check its signature for validity. 
123 */ 124 baroff = barval; 125 barbar = barval >> PMCS_MSGU_MPI_BAR_SHIFT; 126 baroff &= PMCS_MSGU_MPI_OFFSET_MASK; 127 128 regoff = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH0); 129 regbar = regoff >> PMCS_MSGU_MPI_BAR_SHIFT; 130 regoff &= PMCS_MSGU_MPI_OFFSET_MASK; 131 132 if (regoff > baroff) { 133 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 134 "%s: bad MPI Table Length (register offset=0x%08x, " 135 "passed offset=0x%08x)", __func__, regoff, baroff); 136 return (-1); 137 } 138 if (regbar != barbar) { 139 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 140 "%s: bad MPI BAR (register BAROFF=0x%08x, " 141 "passed BAROFF=0x%08x)", __func__, regbar, barbar); 142 return (-1); 143 } 144 pwp->mpi_offset = regoff; 145 if (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_AS) != PMCS_SIGNATURE) { 146 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 147 "%s: Bad MPI Configuration Table Signature 0x%x", __func__, 148 pmcs_rd_mpi_tbl(pwp, PMCS_MPI_AS)); 149 return (-1); 150 } 151 152 if (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IR) != PMCS_MPI_REVISION1) { 153 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 154 "%s: Bad MPI Configuration Revision 0x%x", __func__, 155 pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IR)); 156 return (-1); 157 } 158 159 /* 160 * Generate offsets for the General System, Inbound Queue Configuration 161 * and Outbound Queue configuration tables. This way the macros to 162 * access those tables will work correctly. 
163 */ 164 pwp->mpi_gst_offset = 165 pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_GSTO); 166 pwp->mpi_iqc_offset = 167 pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IQCTO); 168 pwp->mpi_oqc_offset = 169 pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_OQCTO); 170 171 pwp->fw = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_FW); 172 173 pwp->max_cmd = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_MOIO); 174 pwp->max_dev = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO0) >> 16; 175 176 pwp->max_iq = PMCS_MNIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)); 177 pwp->max_oq = PMCS_MNOQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)); 178 pwp->nphy = PMCS_NPHY(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)); 179 if (pwp->max_iq <= PMCS_NIQ) { 180 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 181 "%s: not enough Inbound Queues supported " 182 "(need %d, max_oq=%d)", __func__, pwp->max_iq, PMCS_NIQ); 183 return (-1); 184 } 185 if (pwp->max_oq <= PMCS_NOQ) { 186 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 187 "%s: not enough Outbound Queues supported " 188 "(need %d, max_oq=%d)", __func__, pwp->max_oq, PMCS_NOQ); 189 return (-1); 190 } 191 if (pwp->nphy == 0) { 192 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 193 "%s: zero phys reported", __func__); 194 return (-1); 195 } 196 if (PMCS_HPIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1))) { 197 pwp->hipri_queue = (1 << PMCS_IQ_OTHER); 198 } 199 200 201 for (i = 0; i < pwp->nphy; i++) { 202 PMCS_MPI_EVQSET(pwp, PMCS_OQ_EVENTS, i); 203 PMCS_MPI_NCQSET(pwp, PMCS_OQ_EVENTS, i); 204 } 205 206 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_INFO2, 207 (PMCS_OQ_EVENTS << GENERAL_EVENT_OQ_SHIFT) | 208 (PMCS_OQ_EVENTS << DEVICE_HANDLE_REMOVED_SHIFT)); 209 210 /* 211 * Verify that ioq_depth is valid (> 0 and not so high that it 212 * would cause us to overrun the chip with commands). 213 */ 214 if (pwp->ioq_depth == 0) { 215 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 216 "%s: I/O queue depth set to 0. 
Setting to %d", 217 __func__, PMCS_NQENTRY); 218 pwp->ioq_depth = PMCS_NQENTRY; 219 } 220 221 if (pwp->ioq_depth < PMCS_MIN_NQENTRY) { 222 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 223 "%s: I/O queue depth set too low (%d). Setting to %d", 224 __func__, pwp->ioq_depth, PMCS_MIN_NQENTRY); 225 pwp->ioq_depth = PMCS_MIN_NQENTRY; 226 } 227 228 if (pwp->ioq_depth > (pwp->max_cmd / (PMCS_IO_IQ_MASK + 1))) { 229 new_ioq_depth = pwp->max_cmd / (PMCS_IO_IQ_MASK + 1); 230 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 231 "%s: I/O queue depth set too high (%d). Setting to %d", 232 __func__, pwp->ioq_depth, new_ioq_depth); 233 pwp->ioq_depth = new_ioq_depth; 234 } 235 236 /* 237 * Allocate consistent memory for OQs and IQs. 238 */ 239 pwp->iqp_dma_attr = pwp->oqp_dma_attr = pmcs_dattr; 240 pwp->iqp_dma_attr.dma_attr_align = 241 pwp->oqp_dma_attr.dma_attr_align = PMCS_QENTRY_SIZE; 242 243 /* 244 * The Rev C chip has the ability to do PIO to or from consistent 245 * memory anywhere in a 64 bit address space, but the firmware is 246 * not presently set up to do so. 
247 */ 248 pwp->iqp_dma_attr.dma_attr_addr_hi = 249 pwp->oqp_dma_attr.dma_attr_addr_hi = 0x000000FFFFFFFFFFull; 250 251 for (i = 0; i < PMCS_NIQ; i++) { 252 if (pmcs_dma_setup(pwp, &pwp->iqp_dma_attr, 253 &pwp->iqp_acchdls[i], 254 &pwp->iqp_handles[i], PMCS_QENTRY_SIZE * pwp->ioq_depth, 255 (caddr_t *)&pwp->iqp[i], &pwp->iqaddr[i]) == B_FALSE) { 256 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 257 "Failed to setup DMA for iqp[%d]", i); 258 return (-1); 259 } 260 bzero(pwp->iqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth); 261 } 262 263 for (i = 0; i < PMCS_NOQ; i++) { 264 if (pmcs_dma_setup(pwp, &pwp->oqp_dma_attr, 265 &pwp->oqp_acchdls[i], 266 &pwp->oqp_handles[i], PMCS_QENTRY_SIZE * pwp->ioq_depth, 267 (caddr_t *)&pwp->oqp[i], &pwp->oqaddr[i]) == B_FALSE) { 268 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 269 "Failed to setup DMA for oqp[%d]", i); 270 return (-1); 271 } 272 bzero(pwp->oqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth); 273 } 274 275 /* 276 * Install the IQ and OQ addresses (and null out the rest). 
277 */ 278 for (i = 0; i < pwp->max_iq; i++) { 279 pwp->iqpi_offset[i] = pmcs_rd_iqc_tbl(pwp, PMCS_IQPIOFFX(i)); 280 if (i < PMCS_NIQ) { 281 if (i != PMCS_IQ_OTHER) { 282 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 283 pwp->ioq_depth | (PMCS_QENTRY_SIZE << 16)); 284 } else { 285 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 286 (1 << 30) | pwp->ioq_depth | 287 (PMCS_QENTRY_SIZE << 16)); 288 } 289 pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 290 DWORD1(pwp->iqaddr[i])); 291 pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 292 DWORD0(pwp->iqaddr[i])); 293 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 294 DWORD1(pwp->ciaddr+IQ_OFFSET(i))); 295 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 296 DWORD0(pwp->ciaddr+IQ_OFFSET(i))); 297 } else { 298 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 0); 299 pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 0); 300 pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 0); 301 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 0); 302 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 0); 303 } 304 } 305 306 for (i = 0; i < pwp->max_oq; i++) { 307 pwp->oqci_offset[i] = pmcs_rd_oqc_tbl(pwp, PMCS_OQCIOFFX(i)); 308 if (i < PMCS_NOQ) { 309 pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), pwp->ioq_depth | 310 (PMCS_QENTRY_SIZE << 16) | OQIEX); 311 pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 312 DWORD1(pwp->oqaddr[i])); 313 pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 314 DWORD0(pwp->oqaddr[i])); 315 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 316 DWORD1(pwp->ciaddr+OQ_OFFSET(i))); 317 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 318 DWORD0(pwp->ciaddr+OQ_OFFSET(i))); 319 pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 320 pwp->oqvec[i] << 24); 321 pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0); 322 } else { 323 pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), 0); 324 pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 0); 325 pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 0); 326 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 0); 327 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 0); 328 pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 0); 329 pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0); 330 } 331 } 332 333 /* 334 * Set up logging, if defined. 
335 */ 336 if (pwp->fwlog) { 337 uint64_t logdma = pwp->fwaddr; 338 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBAH, DWORD1(logdma)); 339 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBAL, DWORD0(logdma)); 340 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBS, PMCS_FWLOG_SIZE >> 1); 341 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELSEV, pwp->fwlog); 342 logdma += (PMCS_FWLOG_SIZE >> 1); 343 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBAH, DWORD1(logdma)); 344 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBAL, DWORD0(logdma)); 345 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBS, PMCS_FWLOG_SIZE >> 1); 346 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELSEV, pwp->fwlog); 347 } 348 349 /* 350 * Interrupt vectors, outbound queues, and odb_auto_clear 351 * 352 * MSI/MSI-X: 353 * If we got 4 interrupt vectors, we'll assign one to each outbound 354 * queue as well as the fatal interrupt, and auto clear can be set 355 * for each. 356 * 357 * If we only got 2 vectors, one will be used for I/O completions 358 * and the other for the other two vectors. In this case, auto_ 359 * clear can only be set for I/Os, which is fine. The fatal 360 * interrupt will be mapped to the PMCS_FATAL_INTERRUPT bit, which 361 * is not an interrupt vector. 362 * 363 * MSI/MSI-X/INT-X: 364 * If we only got 1 interrupt vector, auto_clear must be set to 0, 365 * and again the fatal interrupt will be mapped to the 366 * PMCS_FATAL_INTERRUPT bit (again, not an interrupt vector). 
367 */ 368 369 switch (pwp->int_type) { 370 case PMCS_INT_MSIX: 371 case PMCS_INT_MSI: 372 switch (pwp->intr_cnt) { 373 case 1: 374 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE | 375 (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT)); 376 pwp->odb_auto_clear = 0; 377 break; 378 case 2: 379 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE | 380 (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT)); 381 pwp->odb_auto_clear = (1 << PMCS_FATAL_INTERRUPT) | 382 (1 << PMCS_MSIX_IODONE); 383 break; 384 case 4: 385 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE | 386 (PMCS_MSIX_FATAL << PMCS_FERIV_SHIFT)); 387 pwp->odb_auto_clear = (1 << PMCS_MSIX_FATAL) | 388 (1 << PMCS_MSIX_GENERAL) | (1 << PMCS_MSIX_IODONE) | 389 (1 << PMCS_MSIX_EVENTS); 390 break; 391 } 392 break; 393 394 case PMCS_INT_FIXED: 395 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, 396 PMCS_FERRIE | (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT)); 397 pwp->odb_auto_clear = 0; 398 break; 399 } 400 401 /* 402 * Enable Interrupt Reassertion 403 * Default Delay 1000us 404 */ 405 ferr = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_FERR); 406 if ((ferr & PMCS_MPI_IRAE) == 0) { 407 ferr &= ~(PMCS_MPI_IRAU | PMCS_MPI_IRAD_MASK); 408 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, ferr | PMCS_MPI_IRAE); 409 } 410 411 pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR, pwp->odb_auto_clear); 412 pwp->mpi_table_setup = 1; 413 return (0); 414 } 415 416 /* 417 * Start the Message Passing protocol with the PMC chip. 418 */ 419 int 420 pmcs_start_mpi(pmcs_hw_t *pwp) 421 { 422 int i; 423 424 pmcs_wr_msgunit(pwp, PMCS_MSGU_IBDB, PMCS_MSGU_IBDB_MPIINI); 425 for (i = 0; i < 1000; i++) { 426 if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & 427 PMCS_MSGU_IBDB_MPIINI) == 0) { 428 break; 429 } 430 drv_usecwait(1000); 431 } 432 if (pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & PMCS_MSGU_IBDB_MPIINI) { 433 return (-1); 434 } 435 drv_usecwait(500000); 436 437 /* 438 * Check to make sure we got to INIT state. 
439 */ 440 if (PMCS_MPI_S(pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE)) != 441 PMCS_MPI_STATE_INIT) { 442 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 443 "%s: MPI launch failed (GST 0x%x DBCLR 0x%x)", __func__, 444 pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE), 445 pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB_CLEAR)); 446 return (-1); 447 } 448 return (0); 449 } 450 451 /* 452 * Stop the Message Passing protocol with the PMC chip. 453 */ 454 int 455 pmcs_stop_mpi(pmcs_hw_t *pwp) 456 { 457 int i; 458 459 for (i = 0; i < pwp->max_iq; i++) { 460 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 0); 461 pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 0); 462 pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 0); 463 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 0); 464 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 0); 465 } 466 for (i = 0; i < pwp->max_oq; i++) { 467 pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), 0); 468 pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 0); 469 pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 0); 470 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 0); 471 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 0); 472 pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 0); 473 pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0); 474 } 475 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, 0); 476 pmcs_wr_msgunit(pwp, PMCS_MSGU_IBDB, PMCS_MSGU_IBDB_MPICTU); 477 for (i = 0; i < 2000; i++) { 478 if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & 479 PMCS_MSGU_IBDB_MPICTU) == 0) { 480 break; 481 } 482 drv_usecwait(1000); 483 } 484 if (pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & PMCS_MSGU_IBDB_MPICTU) { 485 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 486 "%s: MPI stop failed", __func__); 487 return (-1); 488 } 489 return (0); 490 } 491 492 /* 493 * Do a sequence of ECHO messages to test for MPI functionality, 494 * all inbound and outbound queue functionality and interrupts. 
495 */ 496 int 497 pmcs_echo_test(pmcs_hw_t *pwp) 498 { 499 echo_test_t fred; 500 struct pmcwork *pwrk; 501 uint32_t *msg, count; 502 int iqe = 0, iqo = 0, result, rval = 0; 503 int iterations; 504 hrtime_t echo_start, echo_end, echo_total; 505 506 ASSERT(pwp->max_cmd > 0); 507 508 /* 509 * We want iterations to be max_cmd * 3 to ensure that we run the 510 * echo test enough times to iterate through every inbound queue 511 * at least twice. 512 */ 513 iterations = pwp->max_cmd * 3; 514 515 echo_total = 0; 516 count = 0; 517 518 while (count < iterations) { 519 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL); 520 if (pwrk == NULL) { 521 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 522 pmcs_nowrk, __func__); 523 rval = -1; 524 break; 525 } 526 527 mutex_enter(&pwp->iqp_lock[iqe]); 528 msg = GET_IQ_ENTRY(pwp, iqe); 529 if (msg == NULL) { 530 mutex_exit(&pwp->iqp_lock[iqe]); 531 pmcs_pwork(pwp, pwrk); 532 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 533 pmcs_nomsg, __func__); 534 rval = -1; 535 break; 536 } 537 538 bzero(msg, PMCS_QENTRY_SIZE); 539 540 if (iqe == PMCS_IQ_OTHER) { 541 /* This is on the high priority queue */ 542 msg[0] = LE_32(PMCS_HIPRI(pwp, iqo, PMCIN_ECHO)); 543 } else { 544 msg[0] = LE_32(PMCS_IOMB_IN_SAS(iqo, PMCIN_ECHO)); 545 } 546 msg[1] = LE_32(pwrk->htag); 547 fred.signature = 0xdeadbeef; 548 fred.count = count; 549 fred.ptr = &count; 550 (void) memcpy(&msg[2], &fred, sizeof (fred)); 551 pwrk->state = PMCS_WORK_STATE_ONCHIP; 552 553 INC_IQ_ENTRY(pwp, iqe); 554 555 echo_start = gethrtime(); 556 DTRACE_PROBE2(pmcs__echo__test__wait__start, 557 hrtime_t, echo_start, uint32_t, pwrk->htag); 558 559 if (++iqe == PMCS_NIQ) { 560 iqe = 0; 561 } 562 if (++iqo == PMCS_NOQ) { 563 iqo = 0; 564 } 565 566 WAIT_FOR(pwrk, 250, result); 567 568 echo_end = gethrtime(); 569 DTRACE_PROBE2(pmcs__echo__test__wait__end, 570 hrtime_t, echo_end, int, result); 571 572 echo_total += (echo_end - echo_start); 573 574 pmcs_pwork(pwp, pwrk); 575 if (result) { 576 pmcs_prt(pwp, 
PMCS_PRT_DEBUG, NULL, NULL, 577 "%s: command timed out on echo test #%d", 578 __func__, count); 579 rval = -1; 580 break; 581 } 582 } 583 584 /* 585 * The intr_threshold is adjusted by PMCS_INTR_THRESHOLD in order to 586 * remove the overhead of things like the delay in getting signaled 587 * for completion. 588 */ 589 if (echo_total != 0) { 590 pwp->io_intr_coal.intr_latency = 591 (echo_total / iterations) / 2; 592 pwp->io_intr_coal.intr_threshold = 593 PMCS_INTR_THRESHOLD(PMCS_QUANTUM_TIME_USECS * 1000 / 594 pwp->io_intr_coal.intr_latency); 595 } 596 597 return (rval); 598 } 599 600 /* 601 * Start the (real) phys 602 */ 603 int 604 pmcs_start_phy(pmcs_hw_t *pwp, int phynum, int linkmode, int speed) 605 { 606 int result; 607 uint32_t *msg; 608 struct pmcwork *pwrk; 609 pmcs_phy_t *pptr; 610 sas_identify_af_t sap; 611 612 mutex_enter(&pwp->lock); 613 pptr = pwp->root_phys + phynum; 614 if (pptr == NULL) { 615 mutex_exit(&pwp->lock); 616 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 617 "%s: cannot find port %d", __func__, phynum); 618 return (0); 619 } 620 621 pmcs_lock_phy(pptr); 622 mutex_exit(&pwp->lock); 623 624 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr); 625 if (pwrk == NULL) { 626 pmcs_unlock_phy(pptr); 627 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__); 628 return (-1); 629 } 630 631 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 632 msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 633 634 if (msg == NULL) { 635 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 636 pmcs_unlock_phy(pptr); 637 pmcs_pwork(pwp, pwrk); 638 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__); 639 return (-1); 640 } 641 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_PHY_START)); 642 msg[1] = LE_32(pwrk->htag); 643 msg[2] = LE_32(linkmode | speed | phynum); 644 bzero(&sap, sizeof (sap)); 645 sap.device_type = SAS_IF_DTYPE_ENDPOINT; 646 sap.ssp_ini_port = 1; 647 648 if (pwp->separate_ports) { 649 pmcs_wwn2barray(pwp->sas_wwns[phynum], sap.sas_address); 650 } else { 651 
pmcs_wwn2barray(pwp->sas_wwns[0], sap.sas_address); 652 } 653 654 ASSERT(phynum < SAS2_PHYNUM_MAX); 655 sap.phy_identifier = phynum & SAS2_PHYNUM_MASK; 656 (void) memcpy(&msg[3], &sap, sizeof (sas_identify_af_t)); 657 pwrk->state = PMCS_WORK_STATE_ONCHIP; 658 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 659 660 pptr->state.prog_min_rate = (lowbit((ulong_t)speed) - 1); 661 pptr->state.prog_max_rate = (highbit((ulong_t)speed) - 1); 662 pptr->state.hw_min_rate = PMCS_HW_MIN_LINK_RATE; 663 pptr->state.hw_max_rate = PMCS_HW_MAX_LINK_RATE; 664 665 pmcs_unlock_phy(pptr); 666 WAIT_FOR(pwrk, 1000, result); 667 pmcs_pwork(pwp, pwrk); 668 669 if (result) { 670 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__); 671 } else { 672 mutex_enter(&pwp->lock); 673 pwp->phys_started |= (1 << phynum); 674 mutex_exit(&pwp->lock); 675 } 676 677 return (0); 678 } 679 680 int 681 pmcs_start_phys(pmcs_hw_t *pwp) 682 { 683 int i; 684 685 for (i = 0; i < pwp->nphy; i++) { 686 if ((pwp->phyid_block_mask & (1 << i)) == 0) { 687 if (pmcs_start_phy(pwp, i, 688 (pwp->phymode << PHY_MODE_SHIFT), 689 pwp->physpeed << PHY_LINK_SHIFT)) { 690 return (-1); 691 } 692 if (pmcs_clear_diag_counters(pwp, i)) { 693 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 694 "%s: failed to reset counters on PHY (%d)", 695 __func__, i); 696 } 697 } 698 } 699 return (0); 700 } 701 702 /* 703 * Called with PHY locked 704 */ 705 int 706 pmcs_reset_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint8_t type) 707 { 708 uint32_t *msg; 709 uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2]; 710 const char *mbar; 711 uint32_t amt; 712 uint32_t pdevid; 713 uint32_t stsoff; 714 uint32_t status; 715 int result, level, phynum; 716 struct pmcwork *pwrk; 717 uint32_t htag; 718 719 ASSERT(mutex_owned(&pptr->phy_lock)); 720 721 bzero(iomb, PMCS_QENTRY_SIZE); 722 phynum = pptr->phynum; 723 level = pptr->level; 724 if (level > 0) { 725 pdevid = pptr->parent->device_id; 726 } else if ((level == 0) && (pptr->dtype == EXPANDER)) { 727 pmcs_prt(pwp, 
PMCS_PRT_DEBUG, pptr, pptr->target, 728 "%s: Not resetting HBA PHY @ %s", __func__, pptr->path); 729 return (0); 730 } 731 732 if (!pptr->iport || !pptr->valid_device_id) { 733 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target, 734 "%s: Can't reach PHY %s", __func__, pptr->path); 735 return (0); 736 } 737 738 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr); 739 740 if (pwrk == NULL) { 741 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__); 742 return (ENOMEM); 743 } 744 745 pwrk->arg = iomb; 746 747 /* 748 * If level > 0, we need to issue an SMP_REQUEST with a PHY_CONTROL 749 * function to do either a link reset or hard reset. If level == 0, 750 * then we do a LOCAL_PHY_CONTROL IOMB to do link/hard reset to the 751 * root (local) PHY 752 */ 753 if (level) { 754 stsoff = 2; 755 iomb[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, 756 PMCIN_SMP_REQUEST)); 757 iomb[1] = LE_32(pwrk->htag); 758 iomb[2] = LE_32(pdevid); 759 iomb[3] = LE_32(40 << SMP_REQUEST_LENGTH_SHIFT); 760 /* 761 * Send SMP PHY CONTROL/HARD or LINK RESET 762 */ 763 iomb[4] = BE_32(0x40910000); 764 iomb[5] = 0; 765 766 if (type == PMCS_PHYOP_HARD_RESET) { 767 mbar = "SMP PHY CONTROL/HARD RESET"; 768 iomb[6] = BE_32((phynum << 24) | 769 (PMCS_PHYOP_HARD_RESET << 16)); 770 } else { 771 mbar = "SMP PHY CONTROL/LINK RESET"; 772 iomb[6] = BE_32((phynum << 24) | 773 (PMCS_PHYOP_LINK_RESET << 16)); 774 } 775 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 776 "%s: sending %s to %s for phy 0x%x", 777 __func__, mbar, pptr->parent->path, pptr->phynum); 778 amt = 7; 779 } else { 780 /* 781 * Unlike most other Outbound messages, status for 782 * a local phy operation is in DWORD 3. 
783 */ 784 stsoff = 3; 785 iomb[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, 786 PMCIN_LOCAL_PHY_CONTROL)); 787 iomb[1] = LE_32(pwrk->htag); 788 if (type == PMCS_PHYOP_LINK_RESET) { 789 mbar = "LOCAL PHY LINK RESET"; 790 iomb[2] = LE_32((PMCS_PHYOP_LINK_RESET << 8) | phynum); 791 } else { 792 mbar = "LOCAL PHY HARD RESET"; 793 iomb[2] = LE_32((PMCS_PHYOP_HARD_RESET << 8) | phynum); 794 } 795 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 796 "%s: sending %s to %s", __func__, mbar, pptr->path); 797 amt = 3; 798 } 799 800 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 801 msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 802 if (msg == NULL) { 803 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 804 pmcs_pwork(pwp, pwrk); 805 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__); 806 return (ENOMEM); 807 } 808 COPY_MESSAGE(msg, iomb, amt); 809 htag = pwrk->htag; 810 811 /* SMP serialization */ 812 pmcs_smp_acquire(pptr->iport); 813 814 pwrk->state = PMCS_WORK_STATE_ONCHIP; 815 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 816 817 pmcs_unlock_phy(pptr); 818 WAIT_FOR(pwrk, 1000, result); 819 pmcs_pwork(pwp, pwrk); 820 /* Release SMP lock before reacquiring PHY lock */ 821 pmcs_smp_release(pptr->iport); 822 pmcs_lock_phy(pptr); 823 824 if (result) { 825 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__); 826 827 if (pmcs_abort(pwp, pptr, htag, 0, 0)) { 828 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 829 "%s: Unable to issue SMP abort for htag 0x%08x", 830 __func__, htag); 831 } else { 832 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 833 "%s: Issuing SMP ABORT for htag 0x%08x", 834 __func__, htag); 835 } 836 return (EIO); 837 } 838 status = LE_32(iomb[stsoff]); 839 840 if (status != PMCOUT_STATUS_OK) { 841 char buf[32]; 842 const char *es = pmcs_status_str(status); 843 if (es == NULL) { 844 (void) snprintf(buf, sizeof (buf), "Status 0x%x", 845 status); 846 es = buf; 847 } 848 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 849 "%s: %s action returned %s for %s", __func__, mbar, es, 
850 pptr->path); 851 return (status); 852 } 853 854 return (0); 855 } 856 857 /* 858 * Stop the (real) phys. No PHY or softstate locks are required as this only 859 * happens during detach. 860 */ 861 void 862 pmcs_stop_phy(pmcs_hw_t *pwp, int phynum) 863 { 864 int result; 865 pmcs_phy_t *pptr; 866 uint32_t *msg; 867 struct pmcwork *pwrk; 868 869 pptr = pwp->root_phys + phynum; 870 if (pptr == NULL) { 871 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 872 "%s: unable to find port %d", __func__, phynum); 873 return; 874 } 875 876 if (pwp->phys_started & (1 << phynum)) { 877 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr); 878 879 if (pwrk == NULL) { 880 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, 881 pmcs_nowrk, __func__); 882 return; 883 } 884 885 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 886 msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 887 888 if (msg == NULL) { 889 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 890 pmcs_pwork(pwp, pwrk); 891 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, 892 pmcs_nomsg, __func__); 893 return; 894 } 895 896 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_PHY_STOP)); 897 msg[1] = LE_32(pwrk->htag); 898 msg[2] = LE_32(phynum); 899 pwrk->state = PMCS_WORK_STATE_ONCHIP; 900 /* 901 * Make this unconfigured now. 902 */ 903 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 904 WAIT_FOR(pwrk, 1000, result); 905 906 pmcs_pwork(pwp, pwrk); 907 if (result) { 908 pmcs_prt(pwp, PMCS_PRT_DEBUG, 909 pptr, NULL, pmcs_timeo, __func__); 910 } 911 912 pwp->phys_started &= ~(1 << phynum); 913 } 914 915 pptr->configured = 0; 916 } 917 918 /* 919 * No locks should be required as this is only called during detach 920 */ 921 void 922 pmcs_stop_phys(pmcs_hw_t *pwp) 923 { 924 int i; 925 for (i = 0; i < pwp->nphy; i++) { 926 if ((pwp->phyid_block_mask & (1 << i)) == 0) { 927 pmcs_stop_phy(pwp, i); 928 } 929 } 930 } 931 932 /* 933 * Run SAS_DIAG_EXECUTE with cmd and cmd_desc passed. 
934 * ERR_CNT_RESET: return status of cmd 935 * DIAG_REPORT_GET: return value of the counter 936 */ 937 int 938 pmcs_sas_diag_execute(pmcs_hw_t *pwp, uint32_t cmd, uint32_t cmd_desc, 939 uint8_t phynum) 940 { 941 uint32_t htag, *ptr, status, msg[PMCS_MSG_SIZE << 1]; 942 int result; 943 struct pmcwork *pwrk; 944 945 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL); 946 if (pwrk == NULL) { 947 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nowrk, __func__); 948 return (DDI_FAILURE); 949 } 950 pwrk->arg = msg; 951 htag = pwrk->htag; 952 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_SAS_DIAG_EXECUTE)); 953 msg[1] = LE_32(htag); 954 msg[2] = LE_32((cmd << PMCS_DIAG_CMD_SHIFT) | 955 (cmd_desc << PMCS_DIAG_CMD_DESC_SHIFT) | phynum); 956 957 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 958 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 959 if (ptr == NULL) { 960 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 961 pmcs_pwork(pwp, pwrk); 962 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nomsg, __func__); 963 return (DDI_FAILURE); 964 } 965 COPY_MESSAGE(ptr, msg, 3); 966 pwrk->state = PMCS_WORK_STATE_ONCHIP; 967 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 968 969 WAIT_FOR(pwrk, 1000, result); 970 971 pmcs_pwork(pwp, pwrk); 972 973 if (result) { 974 pmcs_timed_out(pwp, htag, __func__); 975 return (DDI_FAILURE); 976 } 977 978 status = LE_32(msg[3]); 979 980 /* Return for counter reset */ 981 if (cmd == PMCS_ERR_CNT_RESET) 982 return (status); 983 984 /* Return for counter value */ 985 if (status) { 986 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 987 "%s: failed, status (0x%x)", __func__, status); 988 return (DDI_FAILURE); 989 } 990 return (LE_32(msg[4])); 991 } 992 993 /* Get the current value of the counter for desc on phynum and return it. */ 994 int 995 pmcs_get_diag_report(pmcs_hw_t *pwp, uint32_t desc, uint8_t phynum) 996 { 997 return (pmcs_sas_diag_execute(pwp, PMCS_DIAG_REPORT_GET, desc, phynum)); 998 } 999 1000 /* Clear all of the counters for phynum. Returns the status of the command. 
*/ 1001 int 1002 pmcs_clear_diag_counters(pmcs_hw_t *pwp, uint8_t phynum) 1003 { 1004 uint32_t cmd = PMCS_ERR_CNT_RESET; 1005 uint32_t cmd_desc; 1006 1007 cmd_desc = PMCS_INVALID_DWORD_CNT; 1008 if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum)) 1009 return (DDI_FAILURE); 1010 1011 cmd_desc = PMCS_DISPARITY_ERR_CNT; 1012 if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum)) 1013 return (DDI_FAILURE); 1014 1015 cmd_desc = PMCS_LOST_DWORD_SYNC_CNT; 1016 if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum)) 1017 return (DDI_FAILURE); 1018 1019 cmd_desc = PMCS_RESET_FAILED_CNT; 1020 if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum)) 1021 return (DDI_FAILURE); 1022 1023 return (DDI_SUCCESS); 1024 } 1025 1026 /* 1027 * Get firmware timestamp 1028 */ 1029 int 1030 pmcs_get_time_stamp(pmcs_hw_t *pwp, uint64_t *ts) 1031 { 1032 uint32_t htag, *ptr, msg[PMCS_MSG_SIZE << 1]; 1033 int result; 1034 struct pmcwork *pwrk; 1035 1036 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL); 1037 if (pwrk == NULL) { 1038 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nowrk, __func__); 1039 return (-1); 1040 } 1041 pwrk->arg = msg; 1042 htag = pwrk->htag; 1043 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_GET_TIME_STAMP)); 1044 msg[1] = LE_32(pwrk->htag); 1045 1046 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 1047 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 1048 if (ptr == NULL) { 1049 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 1050 pmcs_pwork(pwp, pwrk); 1051 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nomsg, __func__); 1052 return (-1); 1053 } 1054 COPY_MESSAGE(ptr, msg, 2); 1055 pwrk->state = PMCS_WORK_STATE_ONCHIP; 1056 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 1057 1058 WAIT_FOR(pwrk, 1000, result); 1059 1060 pmcs_pwork(pwp, pwrk); 1061 1062 if (result) { 1063 pmcs_timed_out(pwp, htag, __func__); 1064 return (-1); 1065 } 1066 *ts = LE_32(msg[2]) | (((uint64_t)LE_32(msg[3])) << 32); 1067 return (0); 1068 } 1069 1070 /* 1071 * Dump all pertinent registers 1072 */ 1073 1074 void 1075 
pmcs_register_dump(pmcs_hw_t *pwp)
{
	int i;
	uint32_t val;

	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "pmcs%d: Register dump start",
	    ddi_get_instance(pwp->dip));
	/* Message unit doorbell and scratch registers */
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
	    "OBDB (intr): 0x%08x (mask): 0x%08x (clear): 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB),
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_MASK),
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH0: 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH0));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH1: 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH2: 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH3: 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH3));
	/* Inbound/outbound queue consumer and producer indices */
	for (i = 0; i < PMCS_NIQ; i++) {
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "IQ %d: CI %u PI %u",
		    i, pmcs_rd_iqci(pwp, i), pmcs_rd_iqpi(pwp, i));
	}
	for (i = 0; i < PMCS_NOQ; i++) {
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "OQ %d: CI %u PI %u",
		    i, pmcs_rd_oqci(pwp, i), pmcs_rd_oqpi(pwp, i));
	}
	/* General status table */
	val = pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE);
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
	    "GST TABLE BASE: 0x%08x (STATE=0x%x QF=%d GSTLEN=%d HMI_ERR=0x%x)",
	    val, PMCS_MPI_S(val), PMCS_QF(val), PMCS_GSTLEN(val) * 4,
	    PMCS_HMI_ERR(val));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IQFRZ0: 0x%08x",
	    pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ0));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IQFRZ1: 0x%08x",
	    pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ1));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE MSGU TICK: 0x%08x",
	    pmcs_rd_gst_tbl(pwp, PMCS_GST_MSGU_TICK));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IOP TICK: 0x%08x",
	    pmcs_rd_gst_tbl(pwp, PMCS_GST_IOP_TICK));
	/* Per-phy state: bit 0 of PHY_INFO = started, bit 1 = link up */
	for (i = 0; i < pwp->nphy; i++) {
		uint32_t rerrf, pinfo, started = 0, link = 0;
		pinfo = pmcs_rd_gst_tbl(pwp, PMCS_GST_PHY_INFO(i));
		if (pinfo & 1) {
			started = 1;
			link = pinfo & 2;
		}
		rerrf = pmcs_rd_gst_tbl(pwp, PMCS_GST_RERR_INFO(i));
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
		    "GST TABLE PHY%d STARTED=%d LINK=%d RERR=0x%08x",
		    i, started, link, rerrf);
	}
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "pmcs%d: Register dump end",
	    ddi_get_instance(pwp->dip));
}

/*
 * Handle SATA Abort and other error processing
 */
int
pmcs_abort_handler(pmcs_hw_t *pwp)
{
	pmcs_phy_t *pptr, *pnext, *pnext_uplevel[PMCS_MAX_XPND];
	pmcs_xscsi_t *tgt;
	int r, level = 0;

	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s", __func__);

	mutex_enter(&pwp->lock);
	pptr = pwp->root_phys;
	mutex_exit(&pwp->lock);

	/*
	 * Depth-first walk of the phy tree; pnext_uplevel stacks the
	 * sibling pointer of each expander level we descend into.
	 */
	while (pptr) {
		/*
		 * XXX: Need to make sure this doesn't happen
		 * XXX: when non-NCQ commands are running.
		 */
		pmcs_lock_phy(pptr);
		if (pptr->need_rl_ext) {
			ASSERT(pptr->dtype == SATA);
			if (pmcs_acquire_scratch(pwp, B_FALSE)) {
				goto next_phy;
			}
			r = pmcs_sata_abort_ncq(pwp, pptr);
			pmcs_release_scratch(pwp);
			if (r == ENOMEM) {
				goto next_phy;
			}
			if (r) {
				r = pmcs_reset_phy(pwp, pptr,
				    PMCS_PHYOP_LINK_RESET);
				if (r == ENOMEM) {
					goto next_phy;
				}
				/* what if other failures happened?
 */
				pptr->abort_pending = 1;
				pptr->abort_sent = 0;
			}
		}
		/* Nothing pending, or an abort already went out: move on. */
		if (pptr->abort_pending == 0 || pptr->abort_sent) {
			goto next_phy;
		}
		pptr->abort_pending = 0;
		/* On ENOMEM re-arm abort_pending so we retry later. */
		if (pmcs_abort(pwp, pptr, pptr->device_id, 1, 1) == ENOMEM) {
			pptr->abort_pending = 1;
			goto next_phy;
		}
		pptr->abort_sent = 1;

		/*
		 * If the iport is no longer active, flush the queues
		 */
		if ((pptr->iport == NULL) ||
		    (pptr->iport->ua_state != UA_ACTIVE)) {
			tgt = pptr->target;
			if (tgt) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt,
				    "%s: Clearing target 0x%p, inactive iport",
				    __func__, (void *) tgt);
				mutex_enter(&tgt->statlock);
				pmcs_clear_xp(pwp, tgt);
				mutex_exit(&tgt->statlock);
			}
		}

next_phy:
		/* Descend into children first, else advance along siblings. */
		if (pptr->children) {
			pnext = pptr->children;
			pnext_uplevel[level++] = pptr->sibling;
		} else {
			pnext = pptr->sibling;
			while ((pnext == NULL) && (level > 0)) {
				pnext = pnext_uplevel[--level];
			}
		}

		pmcs_unlock_phy(pptr);
		pptr = pnext;
	}

	return (0);
}

/*
 * Register a device (get a device handle for it).
 * Called with PHY lock held.
 */
int
pmcs_register_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	int result = 0;
	uint32_t *msg;
	uint32_t tmp, status;
	uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	if (msg == NULL ||
	    (pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr)) == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		result = ENOMEM;
		goto out;
	}

	pwrk->arg = iomb;
	pwrk->dtype = pptr->dtype;

	/* Build the REGISTER_DEVICE IOMB. */
	msg[1] = LE_32(pwrk->htag);
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_REGISTER_DEVICE));
	tmp = PMCS_DEVREG_TLR |
	    (pptr->link_rate << PMCS_DEVREG_LINK_RATE_SHIFT);
	/* Root phys also encode their phy number alongside the port id. */
	if (IS_ROOT_PHY(pptr)) {
		msg[2] = LE_32(pptr->portid |
		    (pptr->phynum << PMCS_PHYID_SHIFT));
	} else {
		msg[2] = LE_32(pptr->portid);
	}
	if (pptr->dtype == SATA) {
		if (IS_ROOT_PHY(pptr)) {
			tmp |= PMCS_DEVREG_TYPE_SATA_DIRECT;
		} else {
			tmp |= PMCS_DEVREG_TYPE_SATA;
		}
	} else {
		tmp |= PMCS_DEVREG_TYPE_SAS;
	}
	msg[3] = LE_32(tmp);
	msg[4] = LE_32(PMCS_DEVREG_IT_NEXUS_TIMEOUT);
	(void) memcpy(&msg[5], pptr->sas_address, 8);

	CLEAN_MESSAGE(msg, 7);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Drop the PHY lock while blocked so completion paths can run. */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 250, result);
	pmcs_lock_phy(pptr);
	pmcs_pwork(pwp, pwrk);

	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
		result = ETIMEDOUT;
		goto out;
	}
	status = LE_32(iomb[2]);
	tmp = LE_32(iomb[3]);	/* device id assigned by the firmware */
	switch (status) {
	case PMCS_DEVREG_OK:
	case PMCS_DEVREG_DEVICE_ALREADY_REGISTERED:
	case PMCS_DEVREG_PHY_ALREADY_REGISTERED:
		if (pmcs_validate_devid(pwp->root_phys, pptr, tmp) == B_FALSE) {
			result = EEXIST;
			goto out;
		} else if (status != PMCS_DEVREG_OK) {
			if (tmp == 0xffffffff) {	/* F/W bug */
				pmcs_prt(pwp, PMCS_PRT_INFO, pptr, NULL,
				    "%s: phy %s already has bogus devid 0x%x",
				    __func__, pptr->path, tmp);
				result = EIO;
				goto out;
			} else {
				pmcs_prt(pwp, PMCS_PRT_INFO, pptr, NULL,
				    "%s: phy %s already has a device id 0x%x",
				    __func__, pptr->path, tmp);
			}
		}
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: status 0x%x when trying to register device %s",
		    __func__, status, pptr->path);
		result = EIO;
		goto out;
	}
	pptr->device_id = tmp;
	pptr->valid_device_id = 1;
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "Phy %s/" SAS_ADDR_FMT
	    " registered with device_id 0x%x (portid %d)", pptr->path,
	    SAS_ADDR_PRT(pptr->sas_address), tmp, pptr->portid);
out:
	return (result);
}

/*
 * Deregister a device (remove a device handle).
 * Called with PHY locked.
 */
void
pmcs_deregister_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, status;
	uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];
	int result;

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		return;
	}

	pwrk->arg = iomb;
	pwrk->dtype = pptr->dtype;
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		return;
	}
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
	    PMCIN_DEREGISTER_DEVICE_HANDLE));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	COPY_MESSAGE(ptr, msg, 3);
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Drop the PHY lock across the wait, as in pmcs_register_device(). */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 250, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_lock_phy(pptr);

	if
(result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
		return;
	}
	status = LE_32(iomb[2]);
	if (status != PMCOUT_STATUS_OK) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: status 0x%x when trying to deregister device %s",
		    __func__, status, pptr->path);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: device %s deregistered", __func__, pptr->path);
		/* Invalidate this phy's handle state on success. */
		pptr->valid_device_id = 0;
		pptr->device_id = PMCS_INVALID_DEVICE_ID;
		pptr->configured = 0;
		pptr->deregister_wait = 0;
	}
}

/*
 * Deregister all registered devices.
 */
void
pmcs_deregister_devices(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
{
	/*
	 * Start at the maximum level and walk back to level 0. This only
	 * gets done during detach after all threads and timers have been
	 * destroyed, so there's no need to hold the softstate or PHY lock.
	 */
	while (phyp) {
		/* Recurse into children first (deepest level first). */
		if (phyp->children) {
			pmcs_deregister_devices(pwp, phyp->children);
		}
		if (phyp->valid_device_id) {
			pmcs_deregister_device(pwp, phyp);
		}
		phyp = phyp->sibling;
	}
}

/*
 * Perform a 'soft' reset on the PMC chip
 */
int
pmcs_soft_reset(pmcs_hw_t *pwp, boolean_t no_restart)
{
	uint32_t s2, sfrbits, gsm, rapchk, wapchk, wdpchk, spc, tsmode;
	pmcs_phy_t *pptr;
	char *msg = NULL;
	int i;

	/*
	 * Disable interrupts
	 */
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);

	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "%s", __func__);

	/* locks_initted may be 0 if we reset very early during attach */
	if (pwp->locks_initted) {
		mutex_enter(&pwp->lock);
	}
	pwp->blocked = 1;

	/*
	 * Clear our softstate copies of the MSGU and IOP heartbeats.
 */
	pwp->last_msgu_tick = pwp->last_iop_tick = 0;

	/*
	 * Step 1: wait for the chip to report soft-reset readiness,
	 * nudging it with the RB6 NMI signature if necessary.
	 */
	s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2);
	if ((s2 & PMCS_MSGU_HOST_SOFT_RESET_READY) == 0) {
		pmcs_wr_gsm_reg(pwp, RB6_ACCESS, RB6_NMI_SIGNATURE);
		pmcs_wr_gsm_reg(pwp, RB6_ACCESS, RB6_NMI_SIGNATURE);
		for (i = 0; i < 100; i++) {
			s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) &
			    PMCS_MSGU_HOST_SOFT_RESET_READY;
			if (s2) {
				break;
			}
			drv_usecwait(10000);
		}
		s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) &
		    PMCS_MSGU_HOST_SOFT_RESET_READY;
		if (s2 == 0) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: PMCS_MSGU_HOST_SOFT_RESET_READY never came "
			    "ready", __func__);
			pmcs_register_dump(pwp);
			if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
			    PMCS_MSGU_CPU_SOFT_RESET_READY) == 0 ||
			    (pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) &
			    PMCS_MSGU_CPU_SOFT_RESET_READY) == 0) {
				pwp->state = STATE_DEAD;
				pwp->blocked = 0;
				if (pwp->locks_initted) {
					mutex_exit(&pwp->lock);
				}
				return (-1);
			}
		}
	}

	/*
	 * Step 2: quiesce NMI and interrupt sources, clearing any
	 * latched event/error interrupt status.
	 */
	pmcs_wr_gsm_reg(pwp, NMI_EN_VPE0_IOP, 0);
	drv_usecwait(10);
	pmcs_wr_gsm_reg(pwp, NMI_EN_VPE0_AAP1, 0);
	drv_usecwait(10);
	pmcs_wr_topunit(pwp, PMCS_EVENT_INT_ENABLE, 0);
	drv_usecwait(10);
	pmcs_wr_topunit(pwp, PMCS_EVENT_INT_STAT,
	    pmcs_rd_topunit(pwp, PMCS_EVENT_INT_STAT));
	drv_usecwait(10);
	pmcs_wr_topunit(pwp, PMCS_ERROR_INT_ENABLE, 0);
	drv_usecwait(10);
	pmcs_wr_topunit(pwp, PMCS_ERROR_INT_STAT,
	    pmcs_rd_topunit(pwp, PMCS_ERROR_INT_STAT));
	drv_usecwait(10);

	/*
	 * Remember the inverted SFR progress bits; step 15 below waits
	 * for the firmware to toggle them to this value.
	 */
	sfrbits = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
	    PMCS_MSGU_AAP_SFR_PROGRESS;
	sfrbits ^= PMCS_MSGU_AAP_SFR_PROGRESS;
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "PMCS_MSGU_HOST_SCRATCH0 "
	    "%08x -> %08x", pmcs_rd_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH0),
	    HST_SFT_RESET_SIG);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH0, HST_SFT_RESET_SIG);

	/*
	 * Step 3
	 */
	gsm = pmcs_rd_gsm_reg(pwp, GSM_CFG_AND_RESET);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GSM %08x -> %08x", gsm,
	    gsm & ~PMCS_SOFT_RESET_BITS);
	pmcs_wr_gsm_reg(pwp, GSM_CFG_AND_RESET, gsm & ~PMCS_SOFT_RESET_BITS);

	/*
	 * Step 4: save the parity-check settings and disable them for
	 * the duration of the reset (restored in step 12).
	 */
	rapchk = pmcs_rd_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "READ_ADR_PARITY_CHK_EN "
	    "%08x -> %08x", rapchk, 0);
	pmcs_wr_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN, 0);
	wapchk = pmcs_rd_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_ADR_PARITY_CHK_EN "
	    "%08x -> %08x", wapchk, 0);
	pmcs_wr_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN, 0);
	wdpchk = pmcs_rd_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_DATA_PARITY_CHK_EN "
	    "%08x -> %08x", wdpchk, 0);
	pmcs_wr_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN, 0);

	/*
	 * Step 5
	 */
	drv_usecwait(100);

	/*
	 * Step 5.5 (Temporary workaround for 1.07.xx Beta)
	 */
	tsmode = pmcs_rd_gsm_reg(pwp, PMCS_GPIO_TRISTATE_MODE_ADDR);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GPIO TSMODE %08x -> %08x",
	    tsmode, tsmode & ~(PMCS_GPIO_TSMODE_BIT0|PMCS_GPIO_TSMODE_BIT1));
	pmcs_wr_gsm_reg(pwp, PMCS_GPIO_TRISTATE_MODE_ADDR,
	    tsmode & ~(PMCS_GPIO_TSMODE_BIT0|PMCS_GPIO_TSMODE_BIT1));
	drv_usecwait(10);

	/*
	 * Step 6
	 */
	spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x",
	    spc, spc & ~(PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));
	pmcs_wr_topunit(pwp, PMCS_SPC_RESET,
	    spc & ~(PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));
	drv_usecwait(10);

	/*
	 * Step 7
	 */
	spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
	pmcs_prt(pwp,
PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x", 1546 spc, spc & ~(BDMA_CORE_RSTB|OSSP_RSTB)); 1547 pmcs_wr_topunit(pwp, PMCS_SPC_RESET, spc & ~(BDMA_CORE_RSTB|OSSP_RSTB)); 1548 1549 /* 1550 * Step 8 1551 */ 1552 drv_usecwait(100); 1553 1554 /* 1555 * Step 9 1556 */ 1557 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET); 1558 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x", 1559 spc, spc | (BDMA_CORE_RSTB|OSSP_RSTB)); 1560 pmcs_wr_topunit(pwp, PMCS_SPC_RESET, spc | (BDMA_CORE_RSTB|OSSP_RSTB)); 1561 1562 /* 1563 * Step 10 1564 */ 1565 drv_usecwait(100); 1566 1567 /* 1568 * Step 11 1569 */ 1570 gsm = pmcs_rd_gsm_reg(pwp, GSM_CFG_AND_RESET); 1571 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GSM %08x -> %08x", gsm, 1572 gsm | PMCS_SOFT_RESET_BITS); 1573 pmcs_wr_gsm_reg(pwp, GSM_CFG_AND_RESET, gsm | PMCS_SOFT_RESET_BITS); 1574 drv_usecwait(10); 1575 1576 /* 1577 * Step 12 1578 */ 1579 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "READ_ADR_PARITY_CHK_EN " 1580 "%08x -> %08x", pmcs_rd_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN), 1581 rapchk); 1582 pmcs_wr_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN, rapchk); 1583 drv_usecwait(10); 1584 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_ADR_PARITY_CHK_EN " 1585 "%08x -> %08x", pmcs_rd_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN), 1586 wapchk); 1587 pmcs_wr_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN, wapchk); 1588 drv_usecwait(10); 1589 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_DATA_PARITY_CHK_EN " 1590 "%08x -> %08x", pmcs_rd_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN), 1591 wapchk); 1592 pmcs_wr_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN, wdpchk); 1593 drv_usecwait(10); 1594 1595 /* 1596 * Step 13 1597 */ 1598 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET); 1599 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x", 1600 spc, spc | (PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB)); 1601 pmcs_wr_topunit(pwp, PMCS_SPC_RESET, 1602 spc | (PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB)); 1603 1604 /* 1605 * Step 14 1606 */ 1607 drv_usecwait(100); 1608 1609 
	/*
	 * Step 15: wait up to 1 second for the SFR progress bits to
	 * toggle to the value computed before the reset was issued.
	 */
	for (spc = 0, i = 0; i < 1000; i++) {
		drv_usecwait(1000);
		spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1);
		if ((spc & PMCS_MSGU_AAP_SFR_PROGRESS) == sfrbits) {
			break;
		}
	}

	if ((spc & PMCS_MSGU_AAP_SFR_PROGRESS) != sfrbits) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "SFR didn't toggle (sfr 0x%x)", spc);
		pwp->state = STATE_DEAD;
		pwp->blocked = 0;
		if (pwp->locks_initted) {
			mutex_exit(&pwp->lock);
		}
		return (-1);
	}

	/*
	 * Step 16
	 */
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);

	/*
	 * Wait for up to 5 seconds for AAP state to come either ready or error.
	 */
	for (i = 0; i < 50; i++) {
		spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
		    PMCS_MSGU_AAP_STATE_MASK;
		if (spc == PMCS_MSGU_AAP_STATE_ERROR ||
		    spc == PMCS_MSGU_AAP_STATE_READY) {
			break;
		}
		drv_usecwait(100000);
	}
	spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1);
	if ((spc & PMCS_MSGU_AAP_STATE_MASK) != PMCS_MSGU_AAP_STATE_READY) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "soft reset failed (state 0x%x)", spc);
		pwp->state = STATE_DEAD;
		pwp->blocked = 0;
		if (pwp->locks_initted) {
			mutex_exit(&pwp->lock);
		}
		return (-1);
	}

	/* Clear the firmware log */
	if (pwp->fwlogp) {
		bzero(pwp->fwlogp, PMCS_FWLOG_SIZE);
	}

	/* Reset our queue indices and entries */
	bzero(pwp->shadow_iqpi, sizeof (pwp->shadow_iqpi));
	bzero(pwp->last_iqci, sizeof (pwp->last_iqci));
	bzero(pwp->last_htag, sizeof (pwp->last_htag));
	for (i = 0; i < PMCS_NIQ; i++) {
		if (pwp->iqp[i]) {
			bzero(pwp->iqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
			pmcs_wr_iqpi(pwp, i, 0);
			pmcs_wr_iqci(pwp, i, 0);
		}
	}
	for (i = 0; i < PMCS_NOQ; i++) {
		if (pwp->oqp[i]) {
			bzero(pwp->oqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
			pmcs_wr_oqpi(pwp, i, 0);
			pmcs_wr_oqci(pwp, i, 0);
		}

	}

	/* In early-attach or teardown states, stop after the bare reset. */
	if (pwp->state == STATE_DEAD || pwp->state == STATE_UNPROBING ||
	    pwp->state == STATE_PROBING || pwp->locks_initted == 0) {
		pwp->blocked = 0;
		if (pwp->locks_initted) {
			mutex_exit(&pwp->lock);
		}
		return (0);
	}

	/*
	 * Return at this point if we don't need to start up.
	 */
	if (no_restart) {
		return (0);
	}

	ASSERT(pwp->locks_initted != 0);

	/*
	 * Flush the target queues and clear each target's PHY
	 */
	if (pwp->targets) {
		for (i = 0; i < pwp->max_dev; i++) {
			pmcs_xscsi_t *xp = pwp->targets[i];

			if (xp == NULL) {
				continue;
			}

			mutex_enter(&xp->statlock);
			pmcs_flush_target_queues(pwp, xp, PMCS_TGT_ALL_QUEUES);
			xp->phy = NULL;
			mutex_exit(&xp->statlock);
		}
	}

	/*
	 * Zero out the ports list, free non root phys, clear root phys
	 */
	bzero(pwp->ports, sizeof (pwp->ports));
	pmcs_free_all_phys(pwp, pwp->root_phys);
	for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
		pmcs_lock_phy(pptr);
		pmcs_clear_phy(pwp, pptr);
		pptr->target = NULL;
		pmcs_unlock_phy(pptr);
	}

	/*
	 * Restore Interrupt Mask
	 */
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);

	pwp->mpi_table_setup = 0;
	mutex_exit(&pwp->lock);

	/*
	 * Set up MPI again.
 */
	if (pmcs_setup(pwp)) {
		msg = "unable to setup MPI tables again";
		goto fail_restart;
	}
	pmcs_report_fwversion(pwp);

	/*
	 * Restart MPI
	 */
	if (pmcs_start_mpi(pwp)) {
		msg = "unable to restart MPI again";
		goto fail_restart;
	}

	mutex_enter(&pwp->lock);
	SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
	mutex_exit(&pwp->lock);

	/*
	 * Run any completions
	 */
	PMCS_CQ_RUN(pwp);

	/*
	 * Delay
	 */
	drv_usecwait(1000000);
	return (0);

fail_restart:
	mutex_enter(&pwp->lock);
	pwp->state = STATE_DEAD;
	mutex_exit(&pwp->lock);
	pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
	    "%s: Failed: %s", __func__, msg);
	return (-1);
}


/*
 * Perform a 'hot' reset, which will soft reset the chip and
 * restore the state back to pre-reset context. Called with pwp
 * lock held.
 */
int
pmcs_hot_reset(pmcs_hw_t *pwp)
{
	pmcs_iport_t *iport;

	ASSERT(mutex_owned(&pwp->lock));
	pwp->state = STATE_IN_RESET;

	/*
	 * For any iports on this HBA, report empty target sets and
	 * then tear them down.
	 */
	rw_enter(&pwp->iports_lock, RW_READER);
	for (iport = list_head(&pwp->iports); iport != NULL;
	    iport = list_next(&pwp->iports, iport)) {
		mutex_enter(&iport->lock);
		(void) scsi_hba_tgtmap_set_begin(iport->iss_tgtmap);
		(void) scsi_hba_tgtmap_set_end(iport->iss_tgtmap, 0);
		pmcs_iport_teardown_phys(iport);
		mutex_exit(&iport->lock);
	}
	rw_exit(&pwp->iports_lock);

	/* Grab a register dump, in the event that reset fails */
	pmcs_register_dump_int(pwp);
	mutex_exit(&pwp->lock);

	/* Issue soft reset and clean up related softstate */
	if (pmcs_soft_reset(pwp, B_FALSE)) {
		/*
		 * Disable interrupts, in case we got far enough along to
		 * enable them, then fire off ereport and service impact.
 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: failed soft reset", __func__);
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
		pmcs_fm_ereport(pwp, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
		/* Re-acquire pwp->lock before returning, per our contract. */
		mutex_enter(&pwp->lock);
		pwp->state = STATE_DEAD;
		return (DDI_FAILURE);
	}

	mutex_enter(&pwp->lock);
	pwp->state = STATE_RUNNING;
	mutex_exit(&pwp->lock);

	/*
	 * Finally, restart the phys, which will bring the iports back
	 * up and eventually result in discovery running.
	 */
	if (pmcs_start_phys(pwp)) {
		/* We should be up and running now, so retry */
		if (pmcs_start_phys(pwp)) {
			/* Apparently unable to restart PHYs, fail */
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: failed to restart PHYs after soft reset",
			    __func__);
			mutex_enter(&pwp->lock);
			return (DDI_FAILURE);
		}
	}

	mutex_enter(&pwp->lock);
	return (DDI_SUCCESS);
}

/*
 * Reset a device or a logical unit.
 */
int
pmcs_reset_dev(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint64_t lun)
{
	int rval = 0;

	if (pptr == NULL) {
		return (ENXIO);
	}

	pmcs_lock_phy(pptr);
	if (pptr->dtype == SAS) {
		/*
		 * Some devices do not support SAS_I_T_NEXUS_RESET as
		 * it is not a mandatory (in SAM4) task management
		 * function, while LOGIC_UNIT_RESET is mandatory.
		 *
		 * The problem here is that we need to iterate over
		 * all known LUNs to emulate the semantics of
		 * "RESET_TARGET".
		 *
		 * XXX: FIX ME
		 */
		if (lun == (uint64_t)-1) {
			lun = 0;
		}
		rval = pmcs_ssp_tmf(pwp, pptr, SAS_LOGICAL_UNIT_RESET, 0, lun,
		    NULL);
	} else if (pptr->dtype == SATA) {
		/* SATA devices have no LUNs; only LUN 0 is meaningful. */
		if (lun != 0ull) {
			pmcs_unlock_phy(pptr);
			return (EINVAL);
		}
		rval = pmcs_reset_phy(pwp, pptr, PMCS_PHYOP_LINK_RESET);
	} else {
		pmcs_unlock_phy(pptr);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: cannot reset a SMP device yet (%s)",
		    __func__, pptr->path);
		return (EINVAL);
	}

	/*
	 * Now harvest any commands killed by this action
	 * by issuing an ABORT for all commands on this device.
	 *
	 * We do this even if the tmf or reset fails (in case there
	 * are any dead commands around to be harvested *anyway*).
	 * We don't have to wait for the abort to complete.
	 */
	if (pmcs_abort(pwp, pptr, 0, 1, 0)) {
		pptr->abort_pending = 1;
		SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
	}

	pmcs_unlock_phy(pptr);
	return (rval);
}

/*
 * Called with PHY locked.
 */
static int
pmcs_get_device_handle(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	if (pptr->valid_device_id == 0) {
		int result = pmcs_register_device(pwp, pptr);

		/*
		 * If we changed while registering, punt
		 */
		if (pptr->changed) {
			RESTART_DISCOVERY(pwp);
			return (-1);
		}

		/*
		 * If we had a failure to register, check against errors.
		 * An ENOMEM error means we just retry (temp resource shortage).
 */
		if (result == ENOMEM) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
			return (-1);
		}

		/*
		 * An ETIMEDOUT error means we retry (if our counter isn't
		 * exhausted)
		 */
		if (result == ETIMEDOUT) {
			if (ddi_get_lbolt() < pptr->config_stop) {
				PHY_CHANGED(pwp, pptr);
				RESTART_DISCOVERY(pwp);
			} else {
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: Retries exhausted for %s, killing",
				    __func__, pptr->path);
				pptr->config_stop = 0;
				pmcs_kill_changed(pwp, pptr, 0);
			}
			return (-1);
		}
		/*
		 * Other errors or no valid device id is fatal, but don't
		 * preclude a future action.
		 */
		if (result || pptr->valid_device_id == 0) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: %s could not be registered", __func__,
			    pptr->path);
			return (-1);
		}
	}
	return (0);
}

/*
 * Create the SCSAv3 target map for an iport.
 * Returns B_TRUE on success, B_FALSE otherwise.
 */
int
pmcs_iport_tgtmap_create(pmcs_iport_t *iport)
{
	ASSERT(iport);
	if (iport == NULL)
		return (B_FALSE);

	pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s", __func__);

	/* create target map */
	if (scsi_hba_tgtmap_create(iport->dip, SCSI_TM_FULLSET, tgtmap_usec,
	    (void *)iport, pmcs_tgtmap_activate_cb, pmcs_tgtmap_deactivate_cb,
	    &iport->iss_tgtmap) != DDI_SUCCESS) {
		pmcs_prt(iport->pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: failed to create tgtmap", __func__);
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * Destroy an iport's target map.
 * Returns B_TRUE on success, B_FALSE otherwise.
 */
int
pmcs_iport_tgtmap_destroy(pmcs_iport_t *iport)
{
	ASSERT(iport && iport->iss_tgtmap);
	if ((iport == NULL) || (iport->iss_tgtmap == NULL))
		return (B_FALSE);

	pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s", __func__);

	/* destroy target map */
	scsi_hba_tgtmap_destroy(iport->iss_tgtmap);
	return (B_TRUE);
}

/*
 * Remove all phys from an iport's phymap and empty its
phylist.
 * Called when a port has been reset by the host (see pmcs_intr.c)
 * or prior to issuing a soft reset if we detect a stall on the chip
 * (see pmcs_attach.c).
 */
void
pmcs_iport_teardown_phys(pmcs_iport_t *iport)
{
	pmcs_hw_t *pwp;
	sas_phymap_phys_t *phys;
	int phynum;

	ASSERT(iport);
	ASSERT(mutex_owned(&iport->lock));
	pwp = iport->pwp;
	ASSERT(pwp);

	/*
	 * Remove all phys from the iport handle's phy list, unset its
	 * primary phy and update its state.
	 */
	pmcs_remove_phy_from_iport(iport, NULL);
	iport->pptr = NULL;
	iport->ua_state = UA_PEND_DEACTIVATE;

	/* Remove all phys from the phymap */
	phys = sas_phymap_ua2phys(pwp->hss_phymap, iport->ua);
	if (phys) {
		while ((phynum = sas_phymap_phys_next(phys)) != -1) {
			(void) sas_phymap_phy_rem(pwp->hss_phymap, phynum);
		}
		sas_phymap_phys_free(phys);
	}
}

/*
 * Query the phymap and populate the iport handle passed in.
 * Called with iport lock held.
 */
int
pmcs_iport_configure_phys(pmcs_iport_t *iport)
{
	pmcs_hw_t *pwp;
	pmcs_phy_t *pptr;
	sas_phymap_phys_t *phys;
	int phynum;
	int inst;

	ASSERT(iport);
	ASSERT(mutex_owned(&iport->lock));
	pwp = iport->pwp;
	ASSERT(pwp);
	inst = ddi_get_instance(iport->dip);

	mutex_enter(&pwp->lock);
	ASSERT(pwp->root_phys != NULL);

	/*
	 * Query the phymap regarding the phys in this iport and populate
	 * the iport's phys list.
Hereafter this list is maintained via
	 * port up and down events in pmcs_intr.c
	 */
	ASSERT(list_is_empty(&iport->phys));
	phys = sas_phymap_ua2phys(pwp->hss_phymap, iport->ua);
	ASSERT(phys != NULL);
	while ((phynum = sas_phymap_phys_next(phys)) != -1) {
		/* Grab the phy pointer from root_phys */
		pptr = pwp->root_phys + phynum;
		ASSERT(pptr);
		pmcs_lock_phy(pptr);
		ASSERT(pptr->phynum == phynum);

		/*
		 * Set a back pointer in the phy to this iport.
		 */
		pptr->iport = iport;

		/*
		 * If this phy is the primary, set a pointer to it on our
		 * iport handle, and set our portid from it.
		 */
		if (!pptr->subsidiary) {
			iport->pptr = pptr;
			iport->portid = pptr->portid;
		}

		/*
		 * Finally, insert the phy into our list
		 */
		pmcs_unlock_phy(pptr);
		pmcs_add_phy_to_iport(iport, pptr);

		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: found "
		    "phy %d [0x%p] on iport%d, refcnt(%d)", __func__, phynum,
		    (void *)pptr, inst, iport->refcnt);
	}
	mutex_exit(&pwp->lock);
	sas_phymap_phys_free(phys);
	RESTART_DISCOVERY(pwp);
	return (DDI_SUCCESS);
}

/*
 * Return the iport that ua is associated with, or NULL. If an iport is
 * returned, it will be held and the caller must release the hold.
 */
static pmcs_iport_t *
pmcs_get_iport_by_ua(pmcs_hw_t *pwp, char *ua)
{
	pmcs_iport_t *iport = NULL;

	rw_enter(&pwp->iports_lock, RW_READER);
	for (iport = list_head(&pwp->iports);
	    iport != NULL;
	    iport = list_next(&pwp->iports, iport)) {
		mutex_enter(&iport->lock);
		if (strcmp(iport->ua, ua) == 0) {
			/* Found it; take a hold before returning it. */
			mutex_exit(&iport->lock);
			mutex_enter(&iport->refcnt_lock);
			iport->refcnt++;
			mutex_exit(&iport->refcnt_lock);
			break;
		}
		mutex_exit(&iport->lock);
	}
	rw_exit(&pwp->iports_lock);

	return (iport);
}

/*
 * Return the iport that pptr is associated with, or NULL.
 * If an iport is returned, there is a hold that the caller must release.
 */
pmcs_iport_t *
pmcs_get_iport_by_wwn(pmcs_hw_t *pwp, uint64_t wwn)
{
	pmcs_iport_t *iport = NULL;
	char *ua;

	/* Map the wwn to a unit address, then look the iport up by ua. */
	ua = sas_phymap_lookup_ua(pwp->hss_phymap, pwp->sas_wwns[0], wwn);
	if (ua) {
		iport = pmcs_get_iport_by_ua(pwp, ua);
		if (iport) {
			mutex_enter(&iport->lock);
			pmcs_iport_active(iport);
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: "
			    "found iport [0x%p] on ua (%s), refcnt (%d)",
			    __func__, (void *)iport, ua, iport->refcnt);
			mutex_exit(&iport->lock);
		}
	}

	return (iport);
}

/*
 * Promote the next phy on this port to primary, and return it.
 * Called when the primary PHY on a port is going down, but the port
 * remains up (see pmcs_intr.c).
 */
pmcs_phy_t *
pmcs_promote_next_phy(pmcs_phy_t *prev_primary)
{
	pmcs_hw_t *pwp;
	pmcs_iport_t *iport;
	pmcs_phy_t *pptr, *child;
	int portid;

	pmcs_lock_phy(prev_primary);
	portid = prev_primary->portid;
	iport = prev_primary->iport;
	pwp = prev_primary->pwp;

	/* Use the first available phy in this port */
	for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
		if ((pptr->portid == portid) && (pptr != prev_primary)) {
			mutex_enter(&pptr->phy_lock);
			break;
		}
	}

	/* No other phy on this port: nothing to promote. */
	if (pptr == NULL) {
		pmcs_unlock_phy(prev_primary);
		return (NULL);
	}

	if (iport) {
		mutex_enter(&iport->lock);
		iport->pptr = pptr;
		mutex_exit(&iport->lock);
	}

	/* Update the phy handle with the data from the previous primary */
	pptr->children = prev_primary->children;
	child = pptr->children;
	/* Re-parent all children to the new primary. */
	while (child) {
		child->parent = pptr;
		child = child->sibling;
	}
	pptr->ncphy = prev_primary->ncphy;
	pptr->width = prev_primary->width;
	pptr->dtype = prev_primary->dtype;
	pptr->pend_dtype = prev_primary->pend_dtype;
	pptr->tolerates_sas2 = prev_primary->tolerates_sas2;
	pptr->atdt = prev_primary->atdt;
	pptr->portid = prev_primary->portid;
	pptr->link_rate = prev_primary->link_rate;
	pptr->configured = prev_primary->configured;
	pptr->iport = prev_primary->iport;
	pptr->target = prev_primary->target;
	if (pptr->target) {
		pptr->target->phy = pptr;
	}

	/* Update the phy mask properties for the affected PHYs */
	/* Clear the current values... */
	pmcs_update_phy_pm_props(pptr, pptr->att_port_pm_tmp,
	    pptr->tgt_port_pm_tmp, B_FALSE);
	/* ...replace with the values from prev_primary...
*/ 2232 pmcs_update_phy_pm_props(pptr, prev_primary->att_port_pm_tmp, 2233 prev_primary->tgt_port_pm_tmp, B_TRUE); 2234 /* ...then clear prev_primary's PHY values from the new primary */ 2235 pmcs_update_phy_pm_props(pptr, prev_primary->att_port_pm, 2236 prev_primary->tgt_port_pm, B_FALSE); 2237 /* Clear the prev_primary's values */ 2238 pmcs_update_phy_pm_props(prev_primary, prev_primary->att_port_pm_tmp, 2239 prev_primary->tgt_port_pm_tmp, B_FALSE); 2240 2241 pptr->subsidiary = 0; 2242 2243 prev_primary->subsidiary = 1; 2244 prev_primary->children = NULL; 2245 prev_primary->target = NULL; 2246 pptr->device_id = prev_primary->device_id; 2247 pptr->valid_device_id = 1; 2248 pmcs_unlock_phy(prev_primary); 2249 2250 /* 2251 * We call pmcs_unlock_phy() on pptr because it now contains the 2252 * list of children. 2253 */ 2254 pmcs_unlock_phy(pptr); 2255 2256 return (pptr); 2257 } 2258 2259 void 2260 pmcs_rele_iport(pmcs_iport_t *iport) 2261 { 2262 /* 2263 * Release a refcnt on this iport. If this is the last reference, 2264 * signal the potential waiter in pmcs_iport_unattach(). 
2265 */ 2266 ASSERT(iport->refcnt > 0); 2267 mutex_enter(&iport->refcnt_lock); 2268 iport->refcnt--; 2269 mutex_exit(&iport->refcnt_lock); 2270 if (iport->refcnt == 0) { 2271 cv_signal(&iport->refcnt_cv); 2272 } 2273 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: iport " 2274 "[0x%p] refcnt (%d)", __func__, (void *)iport, iport->refcnt); 2275 } 2276 2277 void 2278 pmcs_phymap_activate(void *arg, char *ua, void **privp) 2279 { 2280 _NOTE(ARGUNUSED(privp)); 2281 pmcs_hw_t *pwp = arg; 2282 pmcs_iport_t *iport = NULL; 2283 2284 mutex_enter(&pwp->lock); 2285 if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD)) { 2286 mutex_exit(&pwp->lock); 2287 return; 2288 } 2289 pwp->phymap_active++; 2290 mutex_exit(&pwp->lock); 2291 2292 if (scsi_hba_iportmap_iport_add(pwp->hss_iportmap, ua, NULL) != 2293 DDI_SUCCESS) { 2294 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: failed to " 2295 "add iport handle on unit address [%s]", __func__, ua); 2296 } else { 2297 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: " 2298 "phymap_active count (%d), added iport handle on unit " 2299 "address [%s]", __func__, pwp->phymap_active, ua); 2300 } 2301 2302 /* Set the HBA softstate as our private data for this unit address */ 2303 *privp = (void *)pwp; 2304 2305 /* 2306 * We are waiting on attach for this iport node, unless it is still 2307 * attached. This can happen if a consumer has an outstanding open 2308 * on our iport node, but the port is down. If this is the case, we 2309 * need to configure our iport here for reuse. 
2310 */ 2311 iport = pmcs_get_iport_by_ua(pwp, ua); 2312 if (iport) { 2313 mutex_enter(&iport->lock); 2314 if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) { 2315 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: " 2316 "failed to configure phys on iport [0x%p] at " 2317 "unit address (%s)", __func__, (void *)iport, ua); 2318 } 2319 pmcs_iport_active(iport); 2320 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS, 2321 &iport->nphy); 2322 mutex_exit(&iport->lock); 2323 pmcs_rele_iport(iport); 2324 } 2325 2326 } 2327 2328 void 2329 pmcs_phymap_deactivate(void *arg, char *ua, void *privp) 2330 { 2331 _NOTE(ARGUNUSED(privp)); 2332 pmcs_hw_t *pwp = arg; 2333 pmcs_iport_t *iport; 2334 2335 mutex_enter(&pwp->lock); 2336 pwp->phymap_active--; 2337 mutex_exit(&pwp->lock); 2338 2339 if (scsi_hba_iportmap_iport_remove(pwp->hss_iportmap, ua) != 2340 DDI_SUCCESS) { 2341 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: failed to " 2342 "remove iport handle on unit address [%s]", __func__, ua); 2343 } else { 2344 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: " 2345 "phymap_active count (%d), removed iport handle on unit " 2346 "address [%s]", __func__, pwp->phymap_active, ua); 2347 } 2348 2349 iport = pmcs_get_iport_by_ua(pwp, ua); 2350 2351 if (iport == NULL) { 2352 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: failed " 2353 "lookup of iport handle on unit addr (%s)", __func__, ua); 2354 return; 2355 } 2356 2357 mutex_enter(&iport->lock); 2358 iport->ua_state = UA_INACTIVE; 2359 iport->portid = PMCS_IPORT_INVALID_PORT_ID; 2360 pmcs_remove_phy_from_iport(iport, NULL); 2361 mutex_exit(&iport->lock); 2362 pmcs_rele_iport(iport); 2363 } 2364 2365 /* 2366 * Top-level discovery function 2367 */ 2368 void 2369 pmcs_discover(pmcs_hw_t *pwp) 2370 { 2371 pmcs_phy_t *pptr; 2372 pmcs_phy_t *root_phy; 2373 int phymap_active; 2374 2375 DTRACE_PROBE2(pmcs__discover__entry, ulong_t, pwp->work_flags, 2376 boolean_t, pwp->config_changed); 2377 2378 
	mutex_enter(&pwp->lock);

	/* Discovery only runs while the softstate says we're operational */
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		return;
	}

	/* Ensure we have at least one phymap active */
	if (pwp->phymap_active == 0) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: phymap inactive, exiting", __func__);
		return;
	}

	phymap_active = pwp->phymap_active;
	mutex_exit(&pwp->lock);

	/*
	 * If no iports have attached, but we have PHYs that are up, we
	 * are waiting for iport attach to complete. Restart discovery.
	 */
	rw_enter(&pwp->iports_lock, RW_READER);
	if (!pwp->iports_attached) {
		rw_exit(&pwp->iports_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: no iports attached, retry discovery", __func__);
		SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
		return;
	}
	/*
	 * An iport count that differs from the active phymap count means
	 * the maps are still settling; come back later.
	 */
	if (pwp->num_iports != phymap_active) {
		rw_exit(&pwp->iports_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: phymaps or iport maps not stable; retry discovery",
		    __func__);
		SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
		return;
	}
	rw_exit(&pwp->iports_lock);

	/* Only one discovery pass may run at a time */
	mutex_enter(&pwp->config_lock);
	if (pwp->configuring) {
		mutex_exit(&pwp->config_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: configuration already in progress", __func__);
		return;
	}

	if (pmcs_acquire_scratch(pwp, B_FALSE)) {
		mutex_exit(&pwp->config_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: cannot allocate scratch", __func__);
		SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
		return;
	}

	pwp->configuring = 1;
	pwp->config_changed = B_FALSE;
	mutex_exit(&pwp->config_lock);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "Discovery begin");

	/*
	 * First, tell SCSA that we're beginning set operations.
	 */
	pmcs_begin_observations(pwp);

	/*
	 * The order of the following traversals is important.
	 *
	 * The first one checks for changed expanders.
	 *
	 * The second one aborts commands for dead devices and deregisters them.
	 *
	 * The third one clears the contents of dead expanders from the tree
	 *
	 * The fourth one clears now dead devices in expanders that remain.
	 */

	/*
	 * 1. Check expanders marked changed (but not dead) to see if they still
	 * have the same number of phys and the same SAS address. Mark them,
	 * their subsidiary phys (if wide) and their descendents dead if
	 * anything has changed. Check the devices they contain to see if
	 * *they* have changed. If they've changed from type NOTHING we leave
	 * them marked changed to be configured later (picking up a new SAS
	 * address and link rate if possible). Otherwise, any change in type,
	 * SAS address or removal of target role will cause us to mark them
	 * (and their descendents) as dead (and cause any pending commands
	 * and associated devices to be removed).
	 *
	 * NOTE: We don't want to bail on discovery if the config has
	 * changed until *after* we run pmcs_kill_devices.
	 */
	root_phy = pwp->root_phys;
	pmcs_check_expanders(pwp, root_phy);

	/*
	 * 2. Descend the tree looking for dead devices and kill them
	 * by aborting all active commands and then deregistering them.
	 */
	if (pmcs_kill_devices(pwp, root_phy)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: pmcs_kill_devices failed!", __func__);
	}

	/*
	 * 3. Check for dead expanders and remove their children from the tree.
	 * By the time we get here, the devices and commands for them have
	 * already been terminated and removed.
	 *
	 * We do this independent of the configuration count changing so we can
	 * free any dead device PHYs that were discovered while checking
	 * expanders. We ignore any subsidiary phys as pmcs_clear_expander
	 * will take care of those.
	 *
	 * NOTE: pmcs_clear_expander requires softstate lock
	 */
	mutex_enter(&pwp->lock);
	for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
		/*
		 * Call pmcs_clear_expander for every root PHY. It will
		 * recurse and determine which (if any) expanders actually
		 * need to be cleared.
		 */
		pmcs_lock_phy(pptr);
		pmcs_clear_expander(pwp, pptr, 0);
		pmcs_unlock_phy(pptr);
	}
	mutex_exit(&pwp->lock);

	/*
	 * 4. Check for dead devices and nullify them. By the time we get here,
	 * the devices and commands for them have already been terminated
	 * and removed. This is different from step 2 in that this just nulls
	 * phys that are part of expanders that are still here but used to
	 * be something but are no longer something (e.g., after a pulled
	 * disk drive). Note that dead expanders had their contained phys
	 * removed from the tree- here, the expanders themselves are
	 * nullified (unless they were removed by being contained in another
	 * expander phy).
	 */
	pmcs_clear_phys(pwp, root_phy);

	/*
	 * 5. Now check for and configure new devices.
	 */
	if (pmcs_configure_new_devices(pwp, root_phy)) {
		goto restart;
	}

out:
	DTRACE_PROBE2(pmcs__discover__exit, ulong_t, pwp->work_flags,
	    boolean_t, pwp->config_changed);
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "Discovery end");

	mutex_enter(&pwp->config_lock);

	if (pwp->config_changed == B_FALSE) {
		/*
		 * Observation is stable, report what we currently see to
		 * the tgtmaps for delta processing. Start by setting
		 * BEGIN on all tgtmaps.
		 */
		mutex_exit(&pwp->config_lock);
		if (pmcs_report_observations(pwp) == B_FALSE) {
			goto restart;
		}
		mutex_enter(&pwp->config_lock);
	} else {
		/*
		 * If config_changed is TRUE, we need to reschedule
		 * discovery now.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: Config has changed, will re-run discovery", __func__);
		SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
	}

	pmcs_release_scratch(pwp);
	/* NOTE(review): presumably 'blocked' was set during reset/quiesce;
	 * re-enable I/O unless we are still quiesced -- confirm in pmcs.h */
	if (!pwp->quiesced) {
		pwp->blocked = 0;
	}
	pwp->configuring = 0;
	mutex_exit(&pwp->config_lock);

#ifdef DEBUG
	/* Sanity check: nothing should need work unless a rerun is queued */
	pptr = pmcs_find_phy_needing_work(pwp, pwp->root_phys);
	if (pptr != NULL) {
		if (!WORK_IS_SCHEDULED(pwp, PMCS_WORK_DISCOVER)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "PHY %s dead=%d changed=%d configured=%d "
			    "but no work scheduled", pptr->path, pptr->dead,
			    pptr->changed, pptr->configured);
		}
		pmcs_unlock_phy(pptr);
	}
#endif

	return;

restart:
	/* Clean up and restart discovery */
	pmcs_release_scratch(pwp);
	mutex_enter(&pwp->config_lock);
	pwp->configuring = 0;
	RESTART_DISCOVERY_LOCKED(pwp);
	mutex_exit(&pwp->config_lock);
}

/*
 * Return any PHY that needs to have scheduled work done. The PHY is returned
 * locked.
2591 */ 2592 static pmcs_phy_t * 2593 pmcs_find_phy_needing_work(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 2594 { 2595 pmcs_phy_t *cphyp, *pnext; 2596 2597 while (pptr) { 2598 pmcs_lock_phy(pptr); 2599 2600 if (pptr->changed || (pptr->dead && pptr->valid_device_id)) { 2601 return (pptr); 2602 } 2603 2604 pnext = pptr->sibling; 2605 2606 if (pptr->children) { 2607 cphyp = pptr->children; 2608 pmcs_unlock_phy(pptr); 2609 cphyp = pmcs_find_phy_needing_work(pwp, cphyp); 2610 if (cphyp) { 2611 return (cphyp); 2612 } 2613 } else { 2614 pmcs_unlock_phy(pptr); 2615 } 2616 2617 pptr = pnext; 2618 } 2619 2620 return (NULL); 2621 } 2622 2623 /* 2624 * We may (or may not) report observations to SCSA. This is prefaced by 2625 * issuing a set_begin for each iport target map. 2626 */ 2627 static void 2628 pmcs_begin_observations(pmcs_hw_t *pwp) 2629 { 2630 pmcs_iport_t *iport; 2631 scsi_hba_tgtmap_t *tgtmap; 2632 2633 rw_enter(&pwp->iports_lock, RW_READER); 2634 for (iport = list_head(&pwp->iports); iport != NULL; 2635 iport = list_next(&pwp->iports, iport)) { 2636 /* 2637 * Unless we have at least one phy up, skip this iport. 2638 * Note we don't need to lock the iport for report_skip 2639 * since it is only used here. We are doing the skip so that 2640 * the phymap and iportmap stabilization times are honored - 2641 * giving us the ability to recover port operation within the 2642 * stabilization time without unconfiguring targets using the 2643 * port. 
2644 */ 2645 if (!sas_phymap_uahasphys(pwp->hss_phymap, iport->ua)) { 2646 iport->report_skip = 1; 2647 continue; /* skip set_begin */ 2648 } 2649 iport->report_skip = 0; 2650 2651 tgtmap = iport->iss_tgtmap; 2652 ASSERT(tgtmap); 2653 if (scsi_hba_tgtmap_set_begin(tgtmap) != DDI_SUCCESS) { 2654 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, 2655 "%s: cannot set_begin tgtmap ", __func__); 2656 rw_exit(&pwp->iports_lock); 2657 return; 2658 } 2659 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, 2660 "%s: set begin on tgtmap [0x%p]", __func__, (void *)tgtmap); 2661 } 2662 rw_exit(&pwp->iports_lock); 2663 } 2664 2665 /* 2666 * Report current observations to SCSA. 2667 */ 2668 static boolean_t 2669 pmcs_report_observations(pmcs_hw_t *pwp) 2670 { 2671 pmcs_iport_t *iport; 2672 scsi_hba_tgtmap_t *tgtmap; 2673 char *ap; 2674 pmcs_phy_t *pptr; 2675 uint64_t wwn; 2676 2677 /* 2678 * Observation is stable, report what we currently see to the tgtmaps 2679 * for delta processing. 2680 */ 2681 pptr = pwp->root_phys; 2682 2683 while (pptr) { 2684 pmcs_lock_phy(pptr); 2685 2686 /* 2687 * Skip PHYs that have nothing attached or are dead. 
2688 */ 2689 if ((pptr->dtype == NOTHING) || pptr->dead) { 2690 pmcs_unlock_phy(pptr); 2691 pptr = pptr->sibling; 2692 continue; 2693 } 2694 2695 if (pptr->changed) { 2696 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 2697 "%s: oops, PHY %s changed; restart discovery", 2698 __func__, pptr->path); 2699 pmcs_unlock_phy(pptr); 2700 return (B_FALSE); 2701 } 2702 2703 /* 2704 * Get the iport for this root PHY, then call the helper 2705 * to report observations for this iport's targets 2706 */ 2707 wwn = pmcs_barray2wwn(pptr->sas_address); 2708 pmcs_unlock_phy(pptr); 2709 iport = pmcs_get_iport_by_wwn(pwp, wwn); 2710 if (iport == NULL) { 2711 /* No iport for this tgt */ 2712 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2713 "%s: no iport for this target", __func__); 2714 pptr = pptr->sibling; 2715 continue; 2716 } 2717 2718 pmcs_lock_phy(pptr); 2719 if (!iport->report_skip) { 2720 if (pmcs_report_iport_observations( 2721 pwp, iport, pptr) == B_FALSE) { 2722 pmcs_rele_iport(iport); 2723 pmcs_unlock_phy(pptr); 2724 return (B_FALSE); 2725 } 2726 } 2727 pmcs_rele_iport(iport); 2728 pmcs_unlock_phy(pptr); 2729 pptr = pptr->sibling; 2730 } 2731 2732 /* 2733 * The observation is complete, end sets. Note we will skip any 2734 * iports that are active, but have no PHYs in them (i.e. awaiting 2735 * unconfigure). Set to restart discovery if we find this. 
2736 */ 2737 rw_enter(&pwp->iports_lock, RW_READER); 2738 for (iport = list_head(&pwp->iports); 2739 iport != NULL; 2740 iport = list_next(&pwp->iports, iport)) { 2741 2742 if (iport->report_skip) 2743 continue; /* skip set_end */ 2744 2745 tgtmap = iport->iss_tgtmap; 2746 ASSERT(tgtmap); 2747 if (scsi_hba_tgtmap_set_end(tgtmap, 0) != DDI_SUCCESS) { 2748 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, 2749 "%s: cannot set_end tgtmap ", __func__); 2750 rw_exit(&pwp->iports_lock); 2751 return (B_FALSE); 2752 } 2753 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, 2754 "%s: set end on tgtmap [0x%p]", __func__, (void *)tgtmap); 2755 } 2756 2757 /* 2758 * Now that discovery is complete, set up the necessary 2759 * DDI properties on each iport node. 2760 */ 2761 for (iport = list_head(&pwp->iports); iport != NULL; 2762 iport = list_next(&pwp->iports, iport)) { 2763 /* Set up the 'attached-port' property on the iport */ 2764 ap = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP); 2765 mutex_enter(&iport->lock); 2766 pptr = iport->pptr; 2767 mutex_exit(&iport->lock); 2768 if (pptr == NULL) { 2769 /* 2770 * This iport is down, but has not been 2771 * removed from our list (unconfigured). 2772 * Set our value to '0'. 
2773 */ 2774 (void) snprintf(ap, 1, "%s", "0"); 2775 } else { 2776 /* Otherwise, set it to remote phy's wwn */ 2777 pmcs_lock_phy(pptr); 2778 wwn = pmcs_barray2wwn(pptr->sas_address); 2779 (void) scsi_wwn_to_wwnstr(wwn, 1, ap); 2780 pmcs_unlock_phy(pptr); 2781 } 2782 if (ndi_prop_update_string(DDI_DEV_T_NONE, iport->dip, 2783 SCSI_ADDR_PROP_ATTACHED_PORT, ap) != DDI_SUCCESS) { 2784 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed " 2785 "to set prop ("SCSI_ADDR_PROP_ATTACHED_PORT")", 2786 __func__); 2787 } 2788 kmem_free(ap, PMCS_MAX_UA_SIZE); 2789 } 2790 rw_exit(&pwp->iports_lock); 2791 2792 return (B_TRUE); 2793 } 2794 2795 /* 2796 * Report observations into a particular iport's target map 2797 * 2798 * Called with phyp (and all descendents) locked 2799 */ 2800 static boolean_t 2801 pmcs_report_iport_observations(pmcs_hw_t *pwp, pmcs_iport_t *iport, 2802 pmcs_phy_t *phyp) 2803 { 2804 pmcs_phy_t *lphyp; 2805 scsi_hba_tgtmap_t *tgtmap; 2806 scsi_tgtmap_tgt_type_t tgt_type; 2807 char *ua; 2808 uint64_t wwn; 2809 2810 tgtmap = iport->iss_tgtmap; 2811 ASSERT(tgtmap); 2812 2813 lphyp = phyp; 2814 while (lphyp) { 2815 switch (lphyp->dtype) { 2816 default: /* Skip unknown PHYs. 
				 */
			/* for non-root phys, skip to sibling */
			goto next_phy;

		case SATA:
		case SAS:
			tgt_type = SCSI_TGT_SCSI_DEVICE;
			break;

		case EXPANDER:
			tgt_type = SCSI_TGT_SMP_DEVICE;
			break;
		}

		/* Only report PHYs that are alive and fully configured */
		if (lphyp->dead || !lphyp->configured) {
			goto next_phy;
		}

		/*
		 * Validate the PHY's SAS address: the high nibble of the
		 * first byte must be the IEEE Registered NAA format.
		 */
		if (((lphyp->sas_address[0] & 0xf0) >> 4) != NAA_IEEE_REG) {
			pmcs_prt(pwp, PMCS_PRT_ERR, lphyp, NULL,
			    "PHY 0x%p (%s) has invalid SAS address; "
			    "will not enumerate", (void *)lphyp, lphyp->path);
			goto next_phy;
		}

		wwn = pmcs_barray2wwn(lphyp->sas_address);
		ua = scsi_wwn_to_wwnstr(wwn, 1, NULL);

		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, lphyp, NULL,
		    "iport_observation: adding %s on tgtmap [0x%p] phy [0x%p]",
		    ua, (void *)tgtmap, (void*)lphyp);

		if (scsi_hba_tgtmap_set_add(tgtmap, tgt_type, ua, NULL) !=
		    DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
			    "%s: failed to add address %s", __func__, ua);
			scsi_free_wwnstr(ua);
			return (B_FALSE);
		}
		scsi_free_wwnstr(ua);

		/* Recurse into this PHY's children (expander contents) */
		if (lphyp->children) {
			if (pmcs_report_iport_observations(pwp, iport,
			    lphyp->children) == B_FALSE) {
				return (B_FALSE);
			}
		}

		/* for non-root phys, report siblings too */
next_phy:
		if (IS_ROOT_PHY(lphyp)) {
			lphyp = NULL;
		} else {
			lphyp = lphyp->sibling;
		}
	}

	return (B_TRUE);
}

/*
 * Check for and configure new devices.
 *
 * If the changed device is a SATA device, add a SATA device.
 *
 * If the changed device is a SAS device, add a SAS device.
 *
 * If the changed device is an EXPANDER device, do a REPORT
 * GENERAL SMP command to find out the number of contained phys.
 *
 * For each number of contained phys, allocate a phy, do a
 * DISCOVERY SMP command to find out what kind of device it
 * is and add it to the linked list of phys on the *next* level.
 *
 * NOTE: pptr passed in by the caller will be a root PHY
 *
 * Returns 0 on success, nonzero if discovery must be retried (e.g. an
 * iport was not yet configured).
 */
static int
pmcs_configure_new_devices(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	int rval = 0;
	pmcs_iport_t *iport;
	pmcs_phy_t *pnext, *orig_pptr = pptr, *root_phy, *pchild;
	uint64_t wwn;

	/*
	 * First, walk through each PHY at this level
	 */
	while (pptr) {
		pmcs_lock_phy(pptr);
		pnext = pptr->sibling;

		/*
		 * Set the new dtype if it has changed
		 */
		if ((pptr->pend_dtype != NEW) &&
		    (pptr->pend_dtype != pptr->dtype)) {
			pptr->dtype = pptr->pend_dtype;
		}

		/* Nothing to do unless this PHY changed and is still viable */
		if (pptr->changed == 0 || pptr->dead || pptr->configured) {
			goto next_phy;
		}

		/*
		 * Confirm that this target's iport is configured.
		 * Note the PHY is unlocked across the iport lookup and then
		 * re-locked; the lookup takes a hold released below.
		 */
		root_phy = pmcs_get_root_phy(pptr);
		wwn = pmcs_barray2wwn(root_phy->sas_address);
		pmcs_unlock_phy(pptr);
		iport = pmcs_get_iport_by_wwn(pwp, wwn);
		if (iport == NULL) {
			/* No iport for this tgt, restart */
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
			    "%s: iport not yet configured, "
			    "retry discovery", __func__);
			pnext = NULL;
			rval = -1;
			pmcs_lock_phy(pptr);
			goto next_phy;
		}

		pmcs_lock_phy(pptr);
		switch (pptr->dtype) {
		case NOTHING:
			pptr->changed = 0;
			break;
		case SATA:
		case SAS:
			pptr->iport = iport;
			pmcs_new_tport(pwp, pptr);
			break;
		case EXPANDER:
			pmcs_configure_expander(pwp, pptr, iport);
			break;
		}
		pmcs_rele_iport(iport);

		/* If the config changed under us, stop walking this level */
		mutex_enter(&pwp->config_lock);
		if (pwp->config_changed) {
			mutex_exit(&pwp->config_lock);
			pnext = NULL;
			goto next_phy;
		}
		mutex_exit(&pwp->config_lock);

next_phy:
		pmcs_unlock_phy(pptr);
		pptr = pnext;
	}

	if (rval != 0) {
		return (rval);
	}

	/*
	 * Now walk through each PHY again, recalling ourselves if they
	 * have children
	 */
	pptr = orig_pptr;
	while (pptr) {
		pmcs_lock_phy(pptr);
		pnext = pptr->sibling;
		pchild = pptr->children;
		pmcs_unlock_phy(pptr);

		if (pchild) {
			rval = pmcs_configure_new_devices(pwp, pchild);
			if (rval != 0) {
				break;
			}
		}

		pptr = pnext;
	}

	return (rval);
}

/*
 * Set all phys and descendent phys as changed if changed == B_TRUE, otherwise
 * mark them all as not changed.
 *
 * Called with parent PHY locked.
 */
void
pmcs_set_changed(pmcs_hw_t *pwp, pmcs_phy_t *parent, boolean_t changed,
    int level)
{
	pmcs_phy_t *pptr;

	if (level == 0) {
		/* Top level: touch only the passed PHY, not its siblings */
		if (changed) {
			PHY_CHANGED(pwp, parent);
		} else {
			parent->changed = 0;
		}
		if (parent->dtype == EXPANDER && parent->level) {
			parent->width = 1;
		}
		if (parent->children) {
			pmcs_set_changed(pwp, parent->children, changed,
			    level + 1);
		}
	} else {
		/* Below the top level, walk the entire sibling chain */
		pptr = parent;
		while (pptr) {
			if (changed) {
				PHY_CHANGED(pwp, pptr);
			} else {
				pptr->changed = 0;
			}
			if (pptr->dtype == EXPANDER && pptr->level) {
				pptr->width = 1;
			}
			if (pptr->children) {
				pmcs_set_changed(pwp, pptr->children, changed,
				    level + 1);
			}
			pptr = pptr->sibling;
		}
	}
}

/*
 * Take the passed phy, mark it and its descendants as dead.
 * Fire up reconfiguration to abort commands and bury it.
 *
 * Called with the parent PHY locked.
3047 */ 3048 void 3049 pmcs_kill_changed(pmcs_hw_t *pwp, pmcs_phy_t *parent, int level) 3050 { 3051 pmcs_phy_t *pptr = parent; 3052 3053 while (pptr) { 3054 pptr->link_rate = 0; 3055 pptr->abort_sent = 0; 3056 pptr->abort_pending = 1; 3057 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE); 3058 pptr->need_rl_ext = 0; 3059 3060 if (pptr->dead == 0) { 3061 PHY_CHANGED(pwp, pptr); 3062 RESTART_DISCOVERY(pwp); 3063 } 3064 3065 pptr->dead = 1; 3066 3067 if (pptr->children) { 3068 pmcs_kill_changed(pwp, pptr->children, level + 1); 3069 } 3070 3071 /* 3072 * Only kill siblings at level > 0 3073 */ 3074 if (level == 0) { 3075 return; 3076 } 3077 3078 pptr = pptr->sibling; 3079 } 3080 } 3081 3082 /* 3083 * Go through every PHY and clear any that are dead (unless they're expanders) 3084 */ 3085 static void 3086 pmcs_clear_phys(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 3087 { 3088 pmcs_phy_t *pnext, *phyp; 3089 3090 phyp = pptr; 3091 while (phyp) { 3092 if (IS_ROOT_PHY(phyp)) { 3093 pmcs_lock_phy(phyp); 3094 } 3095 3096 if ((phyp->dtype != EXPANDER) && phyp->dead) { 3097 pmcs_clear_phy(pwp, phyp); 3098 } 3099 3100 if (phyp->children) { 3101 pmcs_clear_phys(pwp, phyp->children); 3102 } 3103 3104 pnext = phyp->sibling; 3105 3106 if (IS_ROOT_PHY(phyp)) { 3107 pmcs_unlock_phy(phyp); 3108 } 3109 3110 phyp = pnext; 3111 } 3112 } 3113 3114 /* 3115 * Clear volatile parts of a phy. Called with PHY locked. 
 */
void
pmcs_clear_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: %s",
	    __func__, pptr->path);
	ASSERT(mutex_owned(&pptr->phy_lock));
	/* keep sibling */
	/* keep children */
	/* keep parent */
	pptr->device_id = PMCS_INVALID_DEVICE_ID;
	/* keep hw_event_ack */
	pptr->ncphy = 0;
	/* keep phynum */
	pptr->width = 0;
	/* Reset the device-state recovery bookkeeping */
	pptr->ds_recovery_retries = 0;
	pptr->ds_prev_good_recoveries = 0;
	pptr->last_good_recovery = 0;
	pptr->prev_recovery = 0;

	/* keep dtype */
	pptr->config_stop = 0;
	pptr->spinup_hold = 0;
	pptr->atdt = 0;
	/* keep portid */
	pptr->link_rate = 0;
	pptr->valid_device_id = 0;
	pptr->abort_sent = 0;
	pptr->abort_pending = 0;
	pptr->need_rl_ext = 0;
	pptr->subsidiary = 0;
	pptr->configured = 0;
	pptr->deregister_wait = 0;
	pptr->reenumerate = 0;
	/* Only mark dead if it's not a root PHY and its dtype isn't NOTHING */
	/* XXX: What about directly attached disks? */
	if (!IS_ROOT_PHY(pptr) && (pptr->dtype != NOTHING))
		pptr->dead = 1;
	pptr->changed = 0;
	/* keep SAS address */
	/* keep path */
	/* keep ref_count */
	/* Don't clear iport on root PHYs - they are handled in pmcs_intr.c */
	if (!IS_ROOT_PHY(pptr)) {
		/* Remember the old iport before detaching from it */
		pptr->last_iport = pptr->iport;
		pptr->iport = NULL;
	}
	/* keep target */
}

/*
 * Allocate softstate for this target if there isn't already one. If there
 * is, just redo our internal configuration. If it is actually "new", we'll
 * soon get a tran_tgt_init for it.
 *
 * Called with PHY locked.
 */
static void
pmcs_new_tport(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: phy 0x%p @ %s",
	    __func__, (void *)pptr, pptr->path);

	if (pmcs_configure_phy(pwp, pptr) == B_FALSE) {
		/*
		 * If the config failed, mark the PHY as changed.
		 */
		PHY_CHANGED(pwp, pptr);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: pmcs_configure_phy failed for phy 0x%p", __func__,
		    (void *)pptr);
		return;
	}

	/* Mark PHY as no longer changed */
	pptr->changed = 0;

	/*
	 * If the PHY has no target pointer:
	 *
	 * If it's a root PHY, see if another PHY in the iport holds the
	 * target pointer (primary PHY changed). If so, move it over.
	 *
	 * If it's not a root PHY, see if there's a PHY on the dead_phys
	 * list that matches.
	 */
	if (pptr->target == NULL) {
		if (IS_ROOT_PHY(pptr)) {
			pmcs_phy_t *rphy = pwp->root_phys;

			while (rphy) {
				if (rphy == pptr) {
					rphy = rphy->sibling;
					continue;
				}

				/*
				 * Move the target from a sibling root PHY on
				 * the same iport; statlock is taken while the
				 * sibling's phy_lock is held, then dropped
				 * via pptr->target after the handoff.
				 */
				mutex_enter(&rphy->phy_lock);
				if ((rphy->iport == pptr->iport) &&
				    (rphy->target != NULL)) {
					mutex_enter(&rphy->target->statlock);
					pptr->target = rphy->target;
					rphy->target = NULL;
					pptr->target->phy = pptr;
					/* The target is now on pptr */
					mutex_exit(&pptr->target->statlock);
					mutex_exit(&rphy->phy_lock);
					pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
					    pptr, pptr->target,
					    "%s: Moved target from %s to %s",
					    __func__, rphy->path, pptr->path);
					break;
				}
				mutex_exit(&rphy->phy_lock);

				rphy = rphy->sibling;
			}
		} else {
			/* Try to reclaim a matching PHY from the dead list */
			pmcs_reap_dead_phy(pptr);
		}
	}

	/*
	 * Only assign the device if there is a target for this PHY with a
	 * matching SAS address.  If an iport is disconnected from one piece
	 * of storage and connected to another within the iport stabilization
	 * time, we can get the PHY/target mismatch situation.
	 *
	 * Otherwise, it'll get done in tran_tgt_init.
	 */
	if (pptr->target) {
		mutex_enter(&pptr->target->statlock);
		if (pmcs_phy_target_match(pptr) == B_FALSE) {
			mutex_exit(&pptr->target->statlock);
			if (!IS_ROOT_PHY(pptr)) {
				pmcs_dec_phy_ref_count(pptr);
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: Not assigning existing tgt %p for PHY %p "
			    "(WWN mismatch)", __func__, (void *)pptr->target,
			    (void *)pptr);
			pptr->target = NULL;
			return;
		}

		if (!pmcs_assign_device(pwp, pptr->target)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
			    "%s: pmcs_assign_device failed for target 0x%p",
			    __func__, (void *)pptr->target);
		}
		mutex_exit(&pptr->target->statlock);
	}
}

/*
 * Called with PHY lock held.
 */
static boolean_t
pmcs_configure_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	char *dtype;

	ASSERT(mutex_owned(&pptr->phy_lock));

	/*
	 * Mark this device as no longer changed.
	 */
	pptr->changed = 0;

	/*
	 * If we don't have a device handle, get one.
	 */
	if (pmcs_get_device_handle(pwp, pptr)) {
		return (B_FALSE);
	}

	pptr->configured = 1;

	/* Pick a human-readable device type name for the log message */
	switch (pptr->dtype) {
	case SAS:
		dtype = "SAS";
		break;
	case SATA:
		dtype = "SATA";
		break;
	case EXPANDER:
		dtype = "SMP";
		break;
	default:
		dtype = "???";
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "config_dev: %s "
	    "dev %s " SAS_ADDR_FMT " dev id 0x%x lr 0x%x", dtype, pptr->path,
	    SAS_ADDR_PRT(pptr->sas_address), pptr->device_id, pptr->link_rate);

	return (B_TRUE);
}

/*
 * Called with PHY locked
 */
static void
pmcs_configure_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr, pmcs_iport_t *iport)
{
	pmcs_phy_t *ctmp, *clist = NULL, *cnext;
	int result, i, nphy = 0;
	boolean_t root_phy = B_FALSE;

	ASSERT(iport);

	/*
	 * Step 1- clear our "changed" bit. If we need to retry/restart due
	 * to resource shortages, we'll set it again. While we're doing
	 * configuration, other events may set it again as well. If the PHY
	 * is a root PHY and is currently marked as having changed, reset the
	 * config_stop timer as well.
	 */
	if (IS_ROOT_PHY(pptr) && pptr->changed) {
		pptr->config_stop = ddi_get_lbolt() +
		    drv_usectohz(PMCS_MAX_CONFIG_TIME);
	}
	pptr->changed = 0;

	/*
	 * Step 2- make sure we don't overflow
	 */
	if (pptr->level == PMCS_MAX_XPND-1) {
		pmcs_prt(pwp, PMCS_PRT_WARN, pptr, NULL,
		    "%s: SAS expansion tree too deep", __func__);
		return;
	}

	/*
	 * Step 3- Check if this expander is part of a wide phy that has
	 * already been configured.
	 *
	 * This is known by checking this level for another EXPANDER device
	 * with the same SAS address and isn't already marked as a subsidiary
	 * phy and a parent whose SAS address is the same as our SAS address
	 * (if there are parents).
	 */
	if (!IS_ROOT_PHY(pptr)) {
		/*
		 * No need to lock the parent here because we're in discovery
		 * and the only time a PHY's children pointer can change is
		 * in discovery; either in pmcs_clear_expander (which has
		 * already been called) or here, down below. Plus, trying to
		 * grab the parent's lock here can cause deadlock.
		 */
		ctmp = pptr->parent->children;
	} else {
		ctmp = pwp->root_phys;
		root_phy = B_TRUE;
	}

	while (ctmp) {
		/*
		 * If we've checked all PHYs up to pptr, we stop. Otherwise,
		 * we'll be checking for a primary PHY with a higher PHY
		 * number than pptr, which will never happen. The primary
		 * PHY on non-root expanders will ALWAYS be the lowest
		 * numbered PHY.
		 */
		if (ctmp == pptr) {
			break;
		}

		/*
		 * If pptr and ctmp are root PHYs, just grab the mutex on
		 * ctmp. No need to lock the entire tree. If they are not
		 * root PHYs, there is no need to lock since a non-root PHY's
		 * SAS address and other characteristics can only change in
		 * discovery anyway.
		 */
		if (root_phy) {
			mutex_enter(&ctmp->phy_lock);
		}

		if (ctmp->dtype == EXPANDER && ctmp->width &&
		    memcmp(ctmp->sas_address, pptr->sas_address, 8) == 0) {
			int widephy = 0;
			/*
			 * If these phys are not root PHYs, compare their SAS
			 * addresses too.
			 */
			if (!root_phy) {
				if (memcmp(ctmp->parent->sas_address,
				    pptr->parent->sas_address, 8) == 0) {
					widephy = 1;
				}
			} else {
				widephy = 1;
			}
			if (widephy) {
				/*
				 * pptr is a subsidiary of the wide port whose
				 * primary PHY is ctmp; bump the width and
				 * fold our port-pm info into the primary.
				 */
				ctmp->width++;
				pptr->subsidiary = 1;

				/*
				 * Update the primary PHY's attached-port-pm
				 * and target-port-pm information with the info
				 * from this subsidiary
				 */
				pmcs_update_phy_pm_props(ctmp,
				    pptr->att_port_pm_tmp,
				    pptr->tgt_port_pm_tmp, B_TRUE);

				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: PHY %s part of wide PHY %s "
				    "(now %d wide)", __func__, pptr->path,
				    ctmp->path, ctmp->width);
				if (root_phy) {
					mutex_exit(&ctmp->phy_lock);
				}
				return;
			}
		}

		cnext = ctmp->sibling;
		if (root_phy) {
			mutex_exit(&ctmp->phy_lock);
		}
		ctmp = cnext;
	}

	/*
	 * Step 4- If we don't have a device handle, get one. Since this
	 * is the primary PHY, make sure subsidiary is cleared.
	 */
	pptr->subsidiary = 0;
	pptr->iport = iport;
	if (pmcs_get_device_handle(pwp, pptr)) {
		goto out;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "Config expander %s "
	    SAS_ADDR_FMT " dev id 0x%x lr 0x%x", pptr->path,
	    SAS_ADDR_PRT(pptr->sas_address), pptr->device_id, pptr->link_rate);

	/*
	 * Step 5- figure out how many phys are in this expander.
	 */
	nphy = pmcs_expander_get_nphy(pwp, pptr);
	if (nphy <= 0) {
		/*
		 * nphy == 0 means a (possibly transient) resource problem;
		 * retry while the config_stop window is still open.
		 */
		if (nphy == 0 && ddi_get_lbolt() < pptr->config_stop) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Retries exhausted for %s, killing", __func__,
			    pptr->path);
			pptr->config_stop = 0;
			pmcs_kill_changed(pwp, pptr, 0);
		}
		goto out;
	}

	/*
	 * Step 6- Allocate a list of phys for this expander and figure out
	 * what each one is.  KM_SLEEP allocation cannot fail.
	 */
	for (i = 0; i < nphy; i++) {
		ctmp = kmem_cache_alloc(pwp->phy_cache, KM_SLEEP);
		bzero(ctmp, sizeof (pmcs_phy_t));
		ctmp->device_id = PMCS_INVALID_DEVICE_ID;
		ctmp->sibling = clist;
		ctmp->pend_dtype = NEW; /* Init pending dtype */
		ctmp->config_stop = ddi_get_lbolt() +
		    drv_usectohz(PMCS_MAX_CONFIG_TIME);
		clist = ctmp;
	}

	mutex_enter(&pwp->config_lock);
	if (pwp->config_changed) {
		RESTART_DISCOVERY_LOCKED(pwp);
		mutex_exit(&pwp->config_lock);
		/*
		 * Clean up the newly allocated PHYs and return.  These PHYs
		 * are not yet locked, so they are freed directly.
		 */
		while (clist) {
			ctmp = clist->sibling;
			kmem_cache_free(pwp->phy_cache, clist);
			clist = ctmp;
		}
		return;
	}
	mutex_exit(&pwp->config_lock);

	/*
	 * Step 7- Now fill in the rest of the static portions of the phy.
	 * Each new PHY is left locked here; they are either installed as
	 * children (and unlocked later by the normal tree walk) or unlocked
	 * and freed at the "out" label on error.
	 */
	for (i = 0, ctmp = clist; ctmp; ctmp = ctmp->sibling, i++) {
		ctmp->parent = pptr;
		ctmp->pwp = pwp;
		ctmp->level = pptr->level+1;
		ctmp->portid = pptr->portid;
		if (ctmp->tolerates_sas2) {
			ASSERT(i < SAS2_PHYNUM_MAX);
			ctmp->phynum = i & SAS2_PHYNUM_MASK;
		} else {
			ASSERT(i < SAS_PHYNUM_MAX);
			ctmp->phynum = i & SAS_PHYNUM_MASK;
		}
		pmcs_phy_name(pwp, ctmp, ctmp->path, sizeof (ctmp->path));
		pmcs_lock_phy(ctmp);
	}

	/*
	 * Step 8- Discover things about each phy in the expander.
	 */
	for (i = 0, ctmp = clist; ctmp; ctmp = ctmp->sibling, i++) {
		result = pmcs_expander_content_discover(pwp, pptr, ctmp);
		if (result <= 0) {
			if (ddi_get_lbolt() < pptr->config_stop) {
				PHY_CHANGED(pwp, pptr);
				RESTART_DISCOVERY(pwp);
			} else {
				pptr->config_stop = 0;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: Retries exhausted for %s, killing",
				    __func__, pptr->path);
				pmcs_kill_changed(pwp, pptr, 0);
			}
			goto out;
		}

		/* Set pend_dtype to dtype for 1st time initialization */
		ctmp->pend_dtype = ctmp->dtype;
	}

	/*
	 * Step 9: Install the new list on the next level. There should
	 * typically be no children pointer on this PHY. There is one known
	 * case where this can happen, though. If a root PHY goes down and
	 * comes back up before discovery can run, we will fail to remove the
	 * children from that PHY since it will no longer be marked dead.
	 * However, in this case, all children should also be marked dead. If
	 * we see that, take those children and put them on the dead_phys list.
	 */
	if (pptr->children != NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: Expander @ %s still has children: Clean up",
		    __func__, pptr->path);
		pmcs_add_dead_phys(pwp, pptr->children);
	}

	/*
	 * Set the new children pointer for this expander
	 */
	pptr->children = clist;
	clist = NULL;
	pptr->ncphy = nphy;
	pptr->configured = 1;

	/*
	 * We only set width if we're greater than level 0.
	 */
	if (pptr->level) {
		pptr->width = 1;
	}

	/*
	 * Now tell the rest of the world about us, as an SMP node.
	 */
	pptr->iport = iport;
	pmcs_new_tport(pwp, pptr);

out:
	/*
	 * Error path: unlock and free any allocated PHYs that were never
	 * installed as children (clist is NULL on success).
	 */
	while (clist) {
		ctmp = clist->sibling;
		pmcs_unlock_phy(clist);
		kmem_cache_free(pwp->phy_cache, clist);
		clist = ctmp;
	}
}

/*
 * 2. Check expanders marked changed (but not dead) to see if they still have
 * the same number of phys and the same SAS address. Mark them, their subsidiary
 * phys (if wide) and their descendents dead if anything has changed. Check
 * the devices they contain to see if *they* have changed. If they've changed
 * from type NOTHING we leave them marked changed to be configured later
 * (picking up a new SAS address and link rate if possible). Otherwise, any
 * change in type, SAS address or removal of target role will cause us to
 * mark them (and their descendents) as dead and cause any pending commands
 * and associated devices to be removed.
 *
 * Called with PHY (pptr) locked.
 */

static void
pmcs_check_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	int nphy, result;
	pmcs_phy_t *ctmp, *local, *local_list = NULL, *local_tail = NULL;
	boolean_t kill_changed, changed;

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
	    "%s: check %s", __func__, pptr->path);

	/*
	 * Step 1: Mark phy as not changed. We will mark it changed if we need
	 * to retry.
	 */
	pptr->changed = 0;

	/*
	 * Reset the config_stop time. Although we're not actually configuring
	 * anything here, we do want some indication of when to give up trying
	 * if we can't communicate with the expander.
	 */
	pptr->config_stop = ddi_get_lbolt() +
	    drv_usectohz(PMCS_MAX_CONFIG_TIME);

	/*
	 * Step 2: Figure out how many phys are in this expander. If
	 * pmcs_expander_get_nphy returns 0 we ran out of resources,
	 * so reschedule and try later. If it returns another error,
	 * just return.
	 */
	nphy = pmcs_expander_get_nphy(pwp, pptr);
	if (nphy <= 0) {
		if ((nphy == 0) && (ddi_get_lbolt() < pptr->config_stop)) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			pptr->config_stop = 0;
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Retries exhausted for %s, killing", __func__,
			    pptr->path);
			pmcs_kill_changed(pwp, pptr, 0);
		}
		return;
	}

	/*
	 * Step 3: If the number of phys don't agree, kill the old sub-tree.
	 */
	if (nphy != pptr->ncphy) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: number of contained phys for %s changed from %d to %d",
		    __func__, pptr->path, pptr->ncphy, nphy);
		/*
		 * Force a rescan of this expander after dead contents
		 * are cleared and removed.
		 */
		pmcs_kill_changed(pwp, pptr, 0);
		return;
	}

	/*
	 * Step 4: if we're at the bottom of the stack, we're done
	 * (we can't have any levels below us)
	 */
	if (pptr->level == PMCS_MAX_XPND-1) {
		return;
	}

	/*
	 * Step 5: Discover things about each phy in this expander. We do
	 * this by walking the current list of contained phys and doing a
	 * content discovery for it to a local phy.
	 *
	 * ASSERT catches a NULL children list on DEBUG kernels; the explicit
	 * check below keeps non-DEBUG kernels from dereferencing NULL.
	 */
	ctmp = pptr->children;
	ASSERT(ctmp);
	if (ctmp == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: No children attached to expander @ %s?", __func__,
		    pptr->path);
		return;
	}

	while (ctmp) {
		/*
		 * Allocate a local PHY to contain the proposed new contents
		 * and link it to the rest of the local PHYs so that they
		 * can all be freed later.
		 */
		local = pmcs_clone_phy(ctmp);

		if (local_list == NULL) {
			local_list = local;
			local_tail = local;
		} else {
			local_tail->sibling = local;
			local_tail = local;
		}

		/*
		 * Need to lock the local PHY since pmcs_expander_content_
		 * discovery may call pmcs_clear_phy on it, which expects
		 * the PHY to be locked.
		 */
		pmcs_lock_phy(local);
		result = pmcs_expander_content_discover(pwp, pptr, local);
		pmcs_unlock_phy(local);
		if (result <= 0) {
			if (ddi_get_lbolt() < pptr->config_stop) {
				PHY_CHANGED(pwp, pptr);
				RESTART_DISCOVERY(pwp);
			} else {
				pptr->config_stop = 0;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: Retries exhausted for %s, killing",
				    __func__, pptr->path);
				pmcs_kill_changed(pwp, pptr, 0);
			}

			/*
			 * Release all the local PHYs that we allocated.
			 */
			pmcs_free_phys(pwp, local_list);
			return;
		}

		ctmp = ctmp->sibling;
	}

	/*
	 * Step 6: Compare the local PHY's contents to our current PHY. If
	 * there are changes, take the appropriate action.
	 * This is done in two steps (step 5 above, and 6 here) so that if we
	 * have to bail during this process (e.g. pmcs_expander_content_discover
	 * fails), we haven't actually changed the state of any of the real
	 * PHYs. Next time we come through here, we'll be starting over from
	 * scratch. This keeps us from marking a changed PHY as no longer
	 * changed, but then having to bail only to come back next time and
	 * think that the PHY hadn't changed. If this were to happen, we
	 * would fail to properly configure the device behind this PHY.
	 */
	local = local_list;
	ctmp = pptr->children;

	while (ctmp) {
		changed = B_FALSE;
		kill_changed = B_FALSE;

		/*
		 * We set local to local_list prior to this loop so that we
		 * can simply walk the local_list while we walk this list. The
		 * two lists should be completely in sync.
		 *
		 * Clear the changed flag here.
		 */
		ctmp->changed = 0;

		if (ctmp->dtype != local->dtype) {
			if (ctmp->dtype != NOTHING) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
				    "%s: %s type changed from %s to %s "
				    "(killing)", __func__, ctmp->path,
				    PHY_TYPE(ctmp), PHY_TYPE(local));
				/*
				 * Force a rescan of this expander after dead
				 * contents are cleared and removed.
				 */
				changed = B_TRUE;
				kill_changed = B_TRUE;
			} else {
				changed = B_TRUE;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
				    "%s: %s type changed from NOTHING to %s",
				    __func__, ctmp->path, PHY_TYPE(local));
				/*
				 * Since this PHY was nothing and is now
				 * something, reset the config_stop timer.
				 */
				ctmp->config_stop = ddi_get_lbolt() +
				    drv_usectohz(PMCS_MAX_CONFIG_TIME);
			}

		} else if (ctmp->atdt != local->atdt) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL, "%s: "
			    "%s attached device type changed from %d to %d "
			    "(killing)", __func__, ctmp->path, ctmp->atdt,
			    local->atdt);
			/*
			 * Force a rescan of this expander after dead
			 * contents are cleared and removed.
			 */
			changed = B_TRUE;

			if (local->atdt == 0) {
				kill_changed = B_TRUE;
			}
		} else if (ctmp->link_rate != local->link_rate) {
			pmcs_prt(pwp, PMCS_PRT_INFO, ctmp, NULL, "%s: %s "
			    "changed speed from %s to %s", __func__, ctmp->path,
			    pmcs_get_rate(ctmp->link_rate),
			    pmcs_get_rate(local->link_rate));
			/* If the speed changed from invalid, force rescan */
			if (!PMCS_VALID_LINK_RATE(ctmp->link_rate)) {
				changed = B_TRUE;
				RESTART_DISCOVERY(pwp);
			} else {
				/* Just update to the new link rate */
				ctmp->link_rate = local->link_rate;
			}

			if (!PMCS_VALID_LINK_RATE(local->link_rate)) {
				kill_changed = B_TRUE;
			}
		} else if (memcmp(ctmp->sas_address, local->sas_address,
		    sizeof (ctmp->sas_address)) != 0) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
			    "%s: SAS Addr for %s changed from " SAS_ADDR_FMT
			    "to " SAS_ADDR_FMT " (kill old tree)", __func__,
			    ctmp->path, SAS_ADDR_PRT(ctmp->sas_address),
			    SAS_ADDR_PRT(local->sas_address));
			/*
			 * Force a rescan of this expander after dead
			 * contents are cleared and removed.
			 */
			changed = B_TRUE;
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
			    "%s: %s looks the same (type %s)",
			    __func__, ctmp->path, PHY_TYPE(ctmp));
			/*
			 * If EXPANDER, still mark it changed so we
			 * re-evaluate its contents. If it's not an expander,
			 * but it hasn't been configured, also mark it as
			 * changed so that it will undergo configuration.
			 */
			if (ctmp->dtype == EXPANDER) {
				changed = B_TRUE;
			} else if ((ctmp->dtype != NOTHING) &&
			    !ctmp->configured) {
				ctmp->changed = 1;
			} else {
				/* It simply hasn't changed */
				ctmp->changed = 0;
			}
		}

		/*
		 * If the PHY changed, call pmcs_kill_changed if indicated,
		 * update its contents to reflect its current state and mark it
		 * as changed.
		 */
		if (changed) {
			/*
			 * pmcs_kill_changed will mark the PHY as changed, so
			 * only do PHY_CHANGED if we did not do kill_changed.
			 */
			if (kill_changed) {
				pmcs_kill_changed(pwp, ctmp, 0);
			} else {
				/*
				 * If we're not killing the device, it's not
				 * dead. Mark the PHY as changed.
				 */
				PHY_CHANGED(pwp, ctmp);

				if (ctmp->dead) {
					pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
					    ctmp, NULL, "%s: Unmarking PHY %s "
					    "dead, restarting discovery",
					    __func__, ctmp->path);
					ctmp->dead = 0;
					RESTART_DISCOVERY(pwp);
				}
			}

			/*
			 * If the dtype of this PHY is now NOTHING, mark it as
			 * unconfigured. Set pend_dtype to what the new dtype
			 * is. It'll get updated at the end of the discovery
			 * process.
			 */
			if (local->dtype == NOTHING) {
				bzero(ctmp->sas_address,
				    sizeof (local->sas_address));
				ctmp->atdt = 0;
				ctmp->link_rate = 0;
				ctmp->pend_dtype = NOTHING;
				ctmp->configured = 0;
			} else {
				(void) memcpy(ctmp->sas_address,
				    local->sas_address,
				    sizeof (local->sas_address));
				ctmp->atdt = local->atdt;
				ctmp->link_rate = local->link_rate;
				ctmp->pend_dtype = local->dtype;
			}
		}

		local = local->sibling;
		ctmp = ctmp->sibling;
	}

	/*
	 * If we got to here, that means we were able to see all the PHYs
	 * and we can now update all of the real PHYs with the information
	 * we got on the local PHYs. Once that's done, free all the local
	 * PHYs.
	 */

	pmcs_free_phys(pwp, local_list);
}

/*
 * Top level routine to check expanders. We call pmcs_check_expander for
 * each expander. Since we're not doing any configuration right now, it
 * doesn't matter if this is breadth-first.
 */
static void
pmcs_check_expanders(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_phy_t *phyp, *pnext, *pchild;

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
	    "%s: %s", __func__, pptr->path);

	/*
	 * Check each expander at this level
	 */
	phyp = pptr;
	while (phyp) {
		pmcs_lock_phy(phyp);

		/*
		 * Only primary (non-subsidiary), configured, live expanders
		 * that have been marked changed need checking.
		 */
		if ((phyp->dtype == EXPANDER) && phyp->changed &&
		    !phyp->dead && !phyp->subsidiary &&
		    phyp->configured) {
			pmcs_check_expander(pwp, phyp);
		}

		pnext = phyp->sibling;
		pmcs_unlock_phy(phyp);
		phyp = pnext;
	}

	/*
	 * Now check the children.  Sibling/children pointers are sampled
	 * under the PHY lock, then the lock is dropped before recursing.
	 */
	phyp = pptr;
	while (phyp) {
		pmcs_lock_phy(phyp);
		pnext = phyp->sibling;
		pchild = phyp->children;
		pmcs_unlock_phy(phyp);

		if (pchild) {
			pmcs_check_expanders(pwp, pchild);
		}

		phyp = pnext;
	}
}

/*
 * pmcs_clear_expander
 *
 * Recursively clear an expander's subtree: mark children of a dead expander
 * dead, move them to the dead_phys list, and clear any subsidiary PHYs of
 * this expander's wide port at the same level.  "level" is the recursion
 * depth (0 for the initial call), which governs where additional locking
 * is required.
 *
 * Called with softstate and PHY locked
 */
static void
pmcs_clear_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr, int level)
{
	pmcs_phy_t *ctmp;

	ASSERT(mutex_owned(&pwp->lock));
	ASSERT(mutex_owned(&pptr->phy_lock));
	ASSERT(pptr->level < PMCS_MAX_XPND - 1);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
	    "%s: checking %s", __func__, pptr->path);

	ctmp = pptr->children;
	while (ctmp) {
		/*
		 * If the expander is dead, mark its children dead
		 */
		if (pptr->dead) {
			ctmp->dead = 1;
		}
		if (ctmp->dtype == EXPANDER) {
			pmcs_clear_expander(pwp, ctmp, level + 1);
		}
		ctmp = ctmp->sibling;
	}

	/*
	 * If this expander is not dead, we're done here.
	 */
	if (!pptr->dead) {
		return;
	}

	/*
	 * Now snip out the list of children below us and release them
	 */
	if (pptr->children) {
		pmcs_add_dead_phys(pwp, pptr->children);
	}

	pptr->children = NULL;

	/*
	 * Clear subsidiary phys as well. Getting the parent's PHY lock
	 * is only necessary if level == 0 since otherwise the parent is
	 * already locked.
	 */
	if (!IS_ROOT_PHY(pptr)) {
		if (level == 0) {
			mutex_enter(&pptr->parent->phy_lock);
		}
		ctmp = pptr->parent->children;
		if (level == 0) {
			mutex_exit(&pptr->parent->phy_lock);
		}
	} else {
		ctmp = pwp->root_phys;
	}

	while (ctmp) {
		if (ctmp == pptr) {
			ctmp = ctmp->sibling;
			continue;
		}
		/*
		 * We only need to lock subsidiary PHYs on the level 0
		 * expander. Any children of that expander, subsidiaries or
		 * not, will already be locked.
		 */
		if (level == 0) {
			pmcs_lock_phy(ctmp);
		}
		/*
		 * A subsidiary of this wide port is an EXPANDER PHY with
		 * subsidiary set and a matching SAS address; skip all others.
		 */
		if (ctmp->dtype != EXPANDER || ctmp->subsidiary == 0 ||
		    memcmp(ctmp->sas_address, pptr->sas_address,
		    sizeof (ctmp->sas_address)) != 0) {
			if (level == 0) {
				pmcs_unlock_phy(ctmp);
			}
			ctmp = ctmp->sibling;
			continue;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
		    "%s: subsidiary %s", __func__, ctmp->path);
		pmcs_clear_phy(pwp, ctmp);
		if (level == 0) {
			pmcs_unlock_phy(ctmp);
		}
		ctmp = ctmp->sibling;
	}

	pmcs_clear_phy(pwp, pptr);
}

/*
 * pmcs_expander_get_nphy: issue SMP REPORT GENERAL to an expander.
 *
 * Called with PHY locked and with scratch acquired. We return 0 if
 * we fail to allocate resources or notice that the configuration
 * count changed while we were running the command. We return
 * less than zero if we had an I/O error or received an unsupported
 * configuration. Otherwise we return the number of phys in the
 * expander.
 */
/* "Default first message": set m only if no earlier case already set it */
#define	DFM(m, y)	if (m == NULL) m = y
static int
pmcs_expander_get_nphy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	char buf[64];
	const uint_t rdoff = 0x100;	/* returned data offset */
	smp_response_frame_t *srf;
	smp_report_general_resp_t *srgr;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, htag, status, ival;
	int result = 0;

	/*
	 * First SMP request dword: REPORT GENERAL with a SAS2-style
	 * allocated-response-length field (the 0x1100 bits).  If the
	 * expander rejects that (srf_result == 3 below), we retry with
	 * the field cleared, i.e. a SAS1.1-style request.
	 */
	ival = 0x40001100;

again:
	if (!pptr->iport || !pptr->valid_device_id) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
		    "%s: Can't reach PHY %s", __func__, pptr->path);
		goto out;
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		goto out;
	}
	(void) memset(pwp->scratch, 0x77, PMCS_SCRATCH_SIZE);
	pwrk->arg = pwp->scratch;
	pwrk->dtype = pptr->dtype;
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, NULL,
		    "%s: GET_IQ_ENTRY failed", __func__);
		pmcs_pwork(pwp, pwrk);
		goto out;
	}

	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	msg[3] = LE_32((4 << SMP_REQUEST_LENGTH_SHIFT) | SMP_INDIRECT_RESPONSE);
	/*
	 * Send SMP REPORT GENERAL (of either SAS1.1 or SAS2 flavors).
	 */
	msg[4] = BE_32(ival);
	msg[5] = 0;
	msg[6] = 0;
	msg[7] = 0;
	msg[8] = 0;
	msg[9] = 0;
	msg[10] = 0;
	msg[11] = 0;
	/* Response is DMA'd indirectly into scratch at offset rdoff */
	msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff));
	msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff));
	msg[14] = LE_32(PMCS_SCRATCH_SIZE - rdoff);
	msg[15] = 0;

	COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);

	/* SMP serialization */
	pmcs_smp_acquire(pptr->iport);

	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	htag = pwrk->htag;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Drop PHY lock while waiting so completions aren't blocked */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 1000, result);
	/* Release SMP lock before reacquiring PHY lock */
	pmcs_smp_release(pptr->iport);
	pmcs_lock_phy(pptr);

	pmcs_pwork(pwp, pwrk);

	mutex_enter(&pwp->config_lock);
	if (pwp->config_changed) {
		RESTART_DISCOVERY_LOCKED(pwp);
		mutex_exit(&pwp->config_lock);
		result = 0;
		goto out;
	}
	mutex_exit(&pwp->config_lock);

	/* Non-zero result from WAIT_FOR means the command timed out */
	if (result) {
		pmcs_timed_out(pwp, htag, __func__);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: Issuing SMP ABORT for htag 0x%08x", __func__, htag);
		if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Unable to issue SMP ABORT for htag 0x%08x",
			    __func__, htag);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Issuing SMP ABORT for htag 0x%08x",
			    __func__, htag);
		}
		result = 0;
		goto out;
	}
	/* IOMB status is in the third dword of the outbound message copy */
	ptr = (void *)pwp->scratch;
	status = LE_32(ptr[2]);
	if (status == PMCOUT_STATUS_UNDERFLOW ||
	    status == PMCOUT_STATUS_OVERFLOW) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, pptr, NULL,
		    "%s: over/underflow", __func__);
		status = PMCOUT_STATUS_OK;
	}
	srf = (smp_response_frame_t *)&((uint32_t *)pwp->scratch)[rdoff >> 2];
	srgr = (smp_report_general_resp_t *)
	    &((uint32_t *)pwp->scratch)[(rdoff >> 2)+1];

	if (status != PMCOUT_STATUS_OK) {
		char *nag = NULL;
		(void) snprintf(buf, sizeof (buf),
		    "%s: SMP op failed (0x%x)", __func__, status);
		/*
		 * The DFM fall-through chain picks the message for the
		 * first matching status; all of these cases are logged
		 * but leave result == 0 so the caller may retry.
		 */
		switch (status) {
		case PMCOUT_STATUS_IO_PORT_IN_RESET:
			DFM(nag, "I/O Port In Reset");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_ERROR_HW_TIMEOUT:
			DFM(nag, "Hardware Timeout");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE:
			DFM(nag, "Internal SMP Resource Failure");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
			DFM(nag, "PHY Not Ready");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
			DFM(nag, "Connection Rate Not Supported");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
			DFM(nag, "Open Retry Timeout");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
			DFM(nag, "HW Resource Busy");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR:
			DFM(nag, "Response Connection Error");
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: expander %s SMP operation failed (%s)",
			    __func__, pptr->path, nag);
			break;

		/*
		 * For the IO_DS_NON_OPERATIONAL case, we need to kick off
		 * device state recovery and return 0 so that the caller
		 * doesn't assume this expander is dead for good.
		 */
		case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL: {
			pmcs_xscsi_t *xp = pptr->target;

			pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE, pptr, xp,
			    "%s: expander %s device state non-operational",
			    __func__, pptr->path);

			if (xp == NULL) {
				/*
				 * Kick off recovery right now.
				 */
				SCHEDULE_WORK(pwp, PMCS_WORK_DS_ERR_RECOVERY);
				(void) ddi_taskq_dispatch(pwp->tq, pmcs_worker,
				    pwp, DDI_NOSLEEP);
			} else {
				mutex_enter(&xp->statlock);
				pmcs_start_dev_state_recovery(xp, pptr);
				mutex_exit(&xp->statlock);
			}

			break;
		}

		default:
			pmcs_print_entry(pwp, PMCS_PRT_DEBUG, buf, ptr);
			result = -EIO;
			break;
		}
	} else if (srf->srf_frame_type != SMP_FRAME_TYPE_RESPONSE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: bad response frame type 0x%x",
		    __func__, srf->srf_frame_type);
		result = -EINVAL;
	} else if (srf->srf_function != SMP_FUNC_REPORT_GENERAL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: bad response function 0x%x",
		    __func__, srf->srf_function);
		result = -EINVAL;
	} else if (srf->srf_result != 0) {
		/*
		 * Check to see if we have a value of 3 for failure and
		 * whether we were using a SAS2.0 allocation length value
		 * and retry without it.
		 */
		if (srf->srf_result == 3 && (ival & 0xff00)) {
			ival &= ~0xff00;
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: err 0x%x with SAS2 request- retry with SAS1",
			    __func__, srf->srf_result);
			goto again;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: bad response 0x%x", __func__, srf->srf_result);
		result = -EINVAL;
	} else if (srgr->srgr_configuring) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: expander at phy %s is still configuring",
		    __func__, pptr->path);
		result = 0;
	} else {
		result = srgr->srgr_number_of_phys;
		/* A successful SAS2-flavored request means SAS2 is tolerated */
		if (ival & 0xff00) {
			pptr->tolerates_sas2 = 1;
		}
		/*
		 * Save off the REPORT_GENERAL response
		 */
		bcopy(srgr, &pptr->rg_resp, sizeof (smp_report_general_resp_t));
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s has %d phys and %s SAS2", pptr->path, result,
		    pptr->tolerates_sas2?
		    "tolerates" : "does not tolerate");
	}
out:
	return (result);
}

/*
 * Called with expander locked (and thus, pptr) as well as all PHYs up to
 * the root, and scratch acquired. Return 0 if we fail to allocate resources
 * or notice that the configuration changed while we were running the command.
 *
 * We return less than zero if we had an I/O error or received an
 * unsupported configuration.
 */
static int
pmcs_expander_content_discover(pmcs_hw_t *pwp, pmcs_phy_t *expander,
    pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	char buf[64];
	uint8_t sas_address[8];
	uint8_t att_sas_address[8];
	smp_response_frame_t *srf;
	smp_discover_resp_t *sdr;
	const uint_t rdoff = 0x100;	/* returned data offset */
	uint8_t *roff;
	uint32_t status, *ptr, msg[PMCS_MSG_SIZE], htag;
	int result = 0;
	uint8_t ini_support;
	uint8_t tgt_support;

	if (!expander->iport || !expander->valid_device_id) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, expander, expander->target,
		    "%s: Can't reach PHY %s", __func__, expander->path);
		goto out;
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, expander);
	if (pwrk == NULL) {
		goto out;
	}
	(void) memset(pwp->scratch, 0x77, PMCS_SCRATCH_SIZE);
	pwrk->arg = pwp->scratch;
	pwrk->dtype = expander->dtype;
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(expander->device_id);
	msg[3] = LE_32((12 << SMP_REQUEST_LENGTH_SHIFT) |
	    SMP_INDIRECT_RESPONSE);
	/*
	 * Send SMP DISCOVER (of either SAS1.1 or SAS2 flavors).
4339 */ 4340 if (expander->tolerates_sas2) { 4341 msg[4] = BE_32(0x40101B00); 4342 } else { 4343 msg[4] = BE_32(0x40100000); 4344 } 4345 msg[5] = 0; 4346 msg[6] = BE_32((pptr->phynum << 16)); 4347 msg[7] = 0; 4348 msg[8] = 0; 4349 msg[9] = 0; 4350 msg[10] = 0; 4351 msg[11] = 0; 4352 msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff)); 4353 msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff)); 4354 msg[14] = LE_32(PMCS_SCRATCH_SIZE - rdoff); 4355 msg[15] = 0; 4356 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 4357 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 4358 if (ptr == NULL) { 4359 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 4360 goto out; 4361 } 4362 4363 COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE); 4364 4365 /* SMP serialization */ 4366 pmcs_smp_acquire(expander->iport); 4367 4368 pwrk->state = PMCS_WORK_STATE_ONCHIP; 4369 htag = pwrk->htag; 4370 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 4371 4372 /* 4373 * Drop PHY lock while waiting so other completions aren't potentially 4374 * blocked. 4375 */ 4376 pmcs_unlock_phy(expander); 4377 WAIT_FOR(pwrk, 1000, result); 4378 /* Release SMP lock before reacquiring PHY lock */ 4379 pmcs_smp_release(expander->iport); 4380 pmcs_lock_phy(expander); 4381 4382 pmcs_pwork(pwp, pwrk); 4383 4384 mutex_enter(&pwp->config_lock); 4385 if (pwp->config_changed) { 4386 RESTART_DISCOVERY_LOCKED(pwp); 4387 mutex_exit(&pwp->config_lock); 4388 result = 0; 4389 goto out; 4390 } 4391 mutex_exit(&pwp->config_lock); 4392 4393 if (result) { 4394 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__); 4395 if (pmcs_abort(pwp, expander, htag, 0, 0)) { 4396 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4397 "%s: Unable to issue SMP ABORT for htag 0x%08x", 4398 __func__, htag); 4399 } else { 4400 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4401 "%s: Issuing SMP ABORT for htag 0x%08x", 4402 __func__, htag); 4403 } 4404 result = -ETIMEDOUT; 4405 goto out; 4406 } 4407 ptr = (void *)pwp->scratch; 4408 /* 4409 * Point roff to the DMA offset for returned data 4410 
*/ 4411 roff = pwp->scratch; 4412 roff += rdoff; 4413 srf = (smp_response_frame_t *)roff; 4414 sdr = (smp_discover_resp_t *)(roff+4); 4415 status = LE_32(ptr[2]); 4416 if (status == PMCOUT_STATUS_UNDERFLOW || 4417 status == PMCOUT_STATUS_OVERFLOW) { 4418 pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, pptr, NULL, 4419 "%s: over/underflow", __func__); 4420 status = PMCOUT_STATUS_OK; 4421 } 4422 if (status != PMCOUT_STATUS_OK) { 4423 char *nag = NULL; 4424 (void) snprintf(buf, sizeof (buf), 4425 "%s: SMP op failed (0x%x)", __func__, status); 4426 switch (status) { 4427 case PMCOUT_STATUS_ERROR_HW_TIMEOUT: 4428 DFM(nag, "Hardware Timeout"); 4429 /* FALLTHROUGH */ 4430 case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE: 4431 DFM(nag, "Internal SMP Resource Failure"); 4432 /* FALLTHROUGH */ 4433 case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY: 4434 DFM(nag, "PHY Not Ready"); 4435 /* FALLTHROUGH */ 4436 case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: 4437 DFM(nag, "Connection Rate Not Supported"); 4438 /* FALLTHROUGH */ 4439 case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT: 4440 DFM(nag, "Open Retry Timeout"); 4441 /* FALLTHROUGH */ 4442 case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: 4443 DFM(nag, "HW Resource Busy"); 4444 /* FALLTHROUGH */ 4445 case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR: 4446 DFM(nag, "Response Connection Error"); 4447 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 4448 "%s: expander %s SMP operation failed (%s)", 4449 __func__, pptr->path, nag); 4450 break; 4451 default: 4452 pmcs_print_entry(pwp, PMCS_PRT_DEBUG, buf, ptr); 4453 result = -EIO; 4454 break; 4455 } 4456 goto out; 4457 } else if (srf->srf_frame_type != SMP_FRAME_TYPE_RESPONSE) { 4458 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 4459 "%s: bad response frame type 0x%x", 4460 __func__, srf->srf_frame_type); 4461 result = -EINVAL; 4462 goto out; 4463 } else if (srf->srf_function != SMP_FUNC_DISCOVER) { 4464 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 4465 "%s: bad response function 0x%x", 4466 
__func__, srf->srf_function); 4467 result = -EINVAL; 4468 goto out; 4469 } else if (srf->srf_result != SMP_RES_FUNCTION_ACCEPTED) { 4470 result = pmcs_smp_function_result(pwp, srf); 4471 /* Need not fail if PHY is Vacant */ 4472 if (result != SMP_RES_PHY_VACANT) { 4473 result = -EINVAL; 4474 goto out; 4475 } 4476 } 4477 4478 /* 4479 * Save off the DISCOVER response 4480 */ 4481 bcopy(sdr, &pptr->disc_resp, sizeof (smp_discover_resp_t)); 4482 4483 ini_support = (sdr->sdr_attached_sata_host | 4484 (sdr->sdr_attached_smp_initiator << 1) | 4485 (sdr->sdr_attached_stp_initiator << 2) | 4486 (sdr->sdr_attached_ssp_initiator << 3)); 4487 4488 tgt_support = (sdr->sdr_attached_sata_device | 4489 (sdr->sdr_attached_smp_target << 1) | 4490 (sdr->sdr_attached_stp_target << 2) | 4491 (sdr->sdr_attached_ssp_target << 3)); 4492 4493 pmcs_wwn2barray(BE_64(sdr->sdr_sas_addr), sas_address); 4494 pmcs_wwn2barray(BE_64(sdr->sdr_attached_sas_addr), att_sas_address); 4495 4496 /* 4497 * Set the routing attribute regardless of the PHY type. 
4498 */ 4499 pptr->routing_attr = sdr->sdr_routing_attr; 4500 4501 switch (sdr->sdr_attached_device_type) { 4502 case SAS_IF_DTYPE_ENDPOINT: 4503 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4504 "exp_content: %s atdt=0x%x lr=%x is=%x ts=%x SAS=" 4505 SAS_ADDR_FMT " attSAS=" SAS_ADDR_FMT " atPHY=%x", 4506 pptr->path, 4507 sdr->sdr_attached_device_type, 4508 sdr->sdr_negotiated_logical_link_rate, 4509 ini_support, 4510 tgt_support, 4511 SAS_ADDR_PRT(sas_address), 4512 SAS_ADDR_PRT(att_sas_address), 4513 sdr->sdr_attached_phy_identifier); 4514 4515 if (sdr->sdr_attached_sata_device || 4516 sdr->sdr_attached_stp_target) { 4517 pptr->dtype = SATA; 4518 } else if (sdr->sdr_attached_ssp_target) { 4519 pptr->dtype = SAS; 4520 } else if (tgt_support || ini_support) { 4521 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4522 "%s: %s has tgt support=%x init support=(%x)", 4523 __func__, pptr->path, tgt_support, ini_support); 4524 } 4525 4526 switch (pptr->routing_attr) { 4527 case SMP_ROUTING_SUBTRACTIVE: 4528 case SMP_ROUTING_TABLE: 4529 case SMP_ROUTING_DIRECT: 4530 pptr->routing_method = SMP_ROUTING_DIRECT; 4531 break; 4532 default: 4533 pptr->routing_method = 0xff; /* Invalid method */ 4534 break; 4535 } 4536 pmcs_update_phy_pm_props(pptr, (1ULL << pptr->phynum), 4537 (1ULL << sdr->sdr_attached_phy_identifier), B_TRUE); 4538 break; 4539 case SAS_IF_DTYPE_EDGE: 4540 case SAS_IF_DTYPE_FANOUT: 4541 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4542 "exp_content: %s atdt=0x%x lr=%x is=%x ts=%x SAS=" 4543 SAS_ADDR_FMT " attSAS=" SAS_ADDR_FMT " atPHY=%x", 4544 pptr->path, 4545 sdr->sdr_attached_device_type, 4546 sdr->sdr_negotiated_logical_link_rate, 4547 ini_support, 4548 tgt_support, 4549 SAS_ADDR_PRT(sas_address), 4550 SAS_ADDR_PRT(att_sas_address), 4551 sdr->sdr_attached_phy_identifier); 4552 if (sdr->sdr_attached_smp_target) { 4553 /* 4554 * Avoid configuring phys that just point back 4555 * at a parent phy 4556 */ 4557 if (expander->parent && 4558 
memcmp(expander->parent->sas_address, 4559 att_sas_address, 4560 sizeof (expander->parent->sas_address)) == 0) { 4561 pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, NULL, 4562 "%s: skipping port back to parent " 4563 "expander (%s)", __func__, pptr->path); 4564 pptr->dtype = NOTHING; 4565 break; 4566 } 4567 pptr->dtype = EXPANDER; 4568 4569 } else if (tgt_support || ini_support) { 4570 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4571 "%s has tgt support=%x init support=(%x)", 4572 pptr->path, tgt_support, ini_support); 4573 pptr->dtype = EXPANDER; 4574 } 4575 if (pptr->routing_attr == SMP_ROUTING_DIRECT) { 4576 pptr->routing_method = 0xff; /* Invalid method */ 4577 } else { 4578 pptr->routing_method = pptr->routing_attr; 4579 } 4580 pmcs_update_phy_pm_props(pptr, (1ULL << pptr->phynum), 4581 (1ULL << sdr->sdr_attached_phy_identifier), B_TRUE); 4582 break; 4583 default: 4584 pptr->dtype = NOTHING; 4585 break; 4586 } 4587 if (pptr->dtype != NOTHING) { 4588 pmcs_phy_t *ctmp; 4589 4590 /* 4591 * If the attached device is a SATA device and the expander 4592 * is (possibly) a SAS2 compliant expander, check for whether 4593 * there is a NAA=5 WWN field starting at this offset and 4594 * use that for the SAS Address for this device. 4595 */ 4596 if (expander->tolerates_sas2 && pptr->dtype == SATA && 4597 (roff[SAS_ATTACHED_NAME_OFFSET] >> 8) == NAA_IEEE_REG) { 4598 (void) memcpy(pptr->sas_address, 4599 &roff[SAS_ATTACHED_NAME_OFFSET], 8); 4600 } else { 4601 (void) memcpy(pptr->sas_address, att_sas_address, 8); 4602 } 4603 pptr->atdt = (sdr->sdr_attached_device_type); 4604 /* 4605 * Now run up from the expander's parent up to the top to 4606 * make sure we only use the least common link_rate. 
4607 */ 4608 for (ctmp = expander->parent; ctmp; ctmp = ctmp->parent) { 4609 if (ctmp->link_rate < 4610 sdr->sdr_negotiated_logical_link_rate) { 4611 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4612 "%s: derating link rate from %x to %x due " 4613 "to %s being slower", pptr->path, 4614 sdr->sdr_negotiated_logical_link_rate, 4615 ctmp->link_rate, 4616 ctmp->path); 4617 sdr->sdr_negotiated_logical_link_rate = 4618 ctmp->link_rate; 4619 } 4620 } 4621 pptr->link_rate = sdr->sdr_negotiated_logical_link_rate; 4622 pptr->state.prog_min_rate = sdr->sdr_prog_min_phys_link_rate; 4623 pptr->state.hw_min_rate = sdr->sdr_hw_min_phys_link_rate; 4624 pptr->state.prog_max_rate = sdr->sdr_prog_max_phys_link_rate; 4625 pptr->state.hw_max_rate = sdr->sdr_hw_max_phys_link_rate; 4626 PHY_CHANGED(pwp, pptr); 4627 } else { 4628 pmcs_clear_phy(pwp, pptr); 4629 } 4630 result = 1; 4631 out: 4632 return (result); 4633 } 4634 4635 /* 4636 * Get a work structure and assign it a tag with type and serial number 4637 * If a structure is returned, it is returned locked. 4638 */ 4639 pmcwork_t * 4640 pmcs_gwork(pmcs_hw_t *pwp, uint32_t tag_type, pmcs_phy_t *phyp) 4641 { 4642 pmcwork_t *p; 4643 uint16_t snum; 4644 uint32_t off; 4645 4646 mutex_enter(&pwp->wfree_lock); 4647 p = STAILQ_FIRST(&pwp->wf); 4648 if (p == NULL) { 4649 /* 4650 * If we couldn't get a work structure, it's time to bite 4651 * the bullet, grab the pfree_lock and copy over all the 4652 * work structures from the pending free list to the actual 4653 * free list (assuming it's not also empty). 
4654 */ 4655 mutex_enter(&pwp->pfree_lock); 4656 if (STAILQ_FIRST(&pwp->pf) == NULL) { 4657 mutex_exit(&pwp->pfree_lock); 4658 mutex_exit(&pwp->wfree_lock); 4659 return (NULL); 4660 } 4661 pwp->wf.stqh_first = pwp->pf.stqh_first; 4662 pwp->wf.stqh_last = pwp->pf.stqh_last; 4663 STAILQ_INIT(&pwp->pf); 4664 mutex_exit(&pwp->pfree_lock); 4665 4666 p = STAILQ_FIRST(&pwp->wf); 4667 ASSERT(p != NULL); 4668 } 4669 STAILQ_REMOVE(&pwp->wf, p, pmcwork, next); 4670 snum = pwp->wserno++; 4671 mutex_exit(&pwp->wfree_lock); 4672 4673 off = p - pwp->work; 4674 4675 mutex_enter(&p->lock); 4676 ASSERT(p->state == PMCS_WORK_STATE_NIL); 4677 ASSERT(p->htag == PMCS_TAG_FREE); 4678 p->htag = (tag_type << PMCS_TAG_TYPE_SHIFT) & PMCS_TAG_TYPE_MASK; 4679 p->htag |= ((snum << PMCS_TAG_SERNO_SHIFT) & PMCS_TAG_SERNO_MASK); 4680 p->htag |= ((off << PMCS_TAG_INDEX_SHIFT) & PMCS_TAG_INDEX_MASK); 4681 p->start = gethrtime(); 4682 p->state = PMCS_WORK_STATE_READY; 4683 p->ssp_event = 0; 4684 p->dead = 0; 4685 4686 if (phyp) { 4687 p->phy = phyp; 4688 pmcs_inc_phy_ref_count(phyp); 4689 } 4690 4691 return (p); 4692 } 4693 4694 /* 4695 * Called with pwrk lock held. Returned with lock released. 
4696 */ 4697 void 4698 pmcs_pwork(pmcs_hw_t *pwp, pmcwork_t *p) 4699 { 4700 ASSERT(p != NULL); 4701 ASSERT(mutex_owned(&p->lock)); 4702 4703 p->last_ptr = p->ptr; 4704 p->last_arg = p->arg; 4705 p->last_phy = p->phy; 4706 p->last_xp = p->xp; 4707 p->last_htag = p->htag; 4708 p->last_state = p->state; 4709 p->finish = gethrtime(); 4710 4711 if (p->phy) { 4712 pmcs_dec_phy_ref_count(p->phy); 4713 } 4714 4715 p->state = PMCS_WORK_STATE_NIL; 4716 p->htag = PMCS_TAG_FREE; 4717 p->xp = NULL; 4718 p->ptr = NULL; 4719 p->arg = NULL; 4720 p->phy = NULL; 4721 p->abt_htag = 0; 4722 p->timer = 0; 4723 mutex_exit(&p->lock); 4724 4725 if (mutex_tryenter(&pwp->wfree_lock) == 0) { 4726 mutex_enter(&pwp->pfree_lock); 4727 STAILQ_INSERT_TAIL(&pwp->pf, p, next); 4728 mutex_exit(&pwp->pfree_lock); 4729 } else { 4730 STAILQ_INSERT_TAIL(&pwp->wf, p, next); 4731 mutex_exit(&pwp->wfree_lock); 4732 } 4733 } 4734 4735 /* 4736 * Find a work structure based upon a tag and make sure that the tag 4737 * serial number matches the work structure we've found. 4738 * If a structure is found, its lock is held upon return. 4739 */ 4740 pmcwork_t * 4741 pmcs_tag2wp(pmcs_hw_t *pwp, uint32_t htag) 4742 { 4743 pmcwork_t *p; 4744 uint32_t idx = PMCS_TAG_INDEX(htag); 4745 4746 p = &pwp->work[idx]; 4747 4748 mutex_enter(&p->lock); 4749 if (p->htag == htag) { 4750 return (p); 4751 } 4752 mutex_exit(&p->lock); 4753 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, 4754 "INDEX 0x%x HTAG 0x%x got p->htag 0x%x", idx, htag, p->htag); 4755 return (NULL); 4756 } 4757 4758 /* 4759 * Issue an abort for a command or for all commands. 4760 * 4761 * Since this can be called from interrupt context, 4762 * we don't wait for completion if wait is not set. 4763 * 4764 * Called with PHY lock held. 
4765 */ 4766 int 4767 pmcs_abort(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint32_t tag, int all_cmds, 4768 int wait) 4769 { 4770 pmcwork_t *pwrk; 4771 pmcs_xscsi_t *tgt; 4772 uint32_t msg[PMCS_MSG_SIZE], *ptr; 4773 int result, abt_type; 4774 uint32_t abt_htag, status; 4775 4776 if (pptr->abort_all_start) { 4777 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, "%s: ABORT_ALL for " 4778 "(%s) already in progress.", __func__, pptr->path); 4779 return (EBUSY); 4780 } 4781 4782 switch (pptr->dtype) { 4783 case SAS: 4784 abt_type = PMCIN_SSP_ABORT; 4785 break; 4786 case SATA: 4787 abt_type = PMCIN_SATA_ABORT; 4788 break; 4789 case EXPANDER: 4790 abt_type = PMCIN_SMP_ABORT; 4791 break; 4792 default: 4793 return (0); 4794 } 4795 4796 pwrk = pmcs_gwork(pwp, wait ? PMCS_TAG_TYPE_WAIT : PMCS_TAG_TYPE_NONE, 4797 pptr); 4798 4799 if (pwrk == NULL) { 4800 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__); 4801 return (ENOMEM); 4802 } 4803 4804 pwrk->dtype = pptr->dtype; 4805 if (wait) { 4806 pwrk->arg = msg; 4807 } 4808 if (pptr->valid_device_id == 0) { 4809 pmcs_pwork(pwp, pwrk); 4810 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 4811 "%s: Invalid DeviceID", __func__); 4812 return (ENODEV); 4813 } 4814 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, abt_type)); 4815 msg[1] = LE_32(pwrk->htag); 4816 msg[2] = LE_32(pptr->device_id); 4817 if (all_cmds) { 4818 msg[3] = 0; 4819 msg[4] = LE_32(1); 4820 pwrk->ptr = NULL; 4821 pptr->abort_all_start = gethrtime(); 4822 } else { 4823 msg[3] = LE_32(tag); 4824 msg[4] = 0; 4825 pwrk->abt_htag = tag; 4826 } 4827 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 4828 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 4829 if (ptr == NULL) { 4830 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 4831 pmcs_pwork(pwp, pwrk); 4832 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__); 4833 return (ENOMEM); 4834 } 4835 4836 COPY_MESSAGE(ptr, msg, 5); 4837 if (all_cmds) { 4838 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 4839 "%s: aborting all commands for %s device %s. 
(htag=0x%x)", 4840 __func__, pmcs_get_typename(pptr->dtype), pptr->path, 4841 msg[1]); 4842 } else { 4843 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 4844 "%s: aborting tag 0x%x for %s device %s. (htag=0x%x)", 4845 __func__, tag, pmcs_get_typename(pptr->dtype), pptr->path, 4846 msg[1]); 4847 } 4848 pwrk->state = PMCS_WORK_STATE_ONCHIP; 4849 4850 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 4851 if (!wait) { 4852 mutex_exit(&pwrk->lock); 4853 return (0); 4854 } 4855 4856 abt_htag = pwrk->htag; 4857 pmcs_unlock_phy(pwrk->phy); 4858 WAIT_FOR(pwrk, 1000, result); 4859 pmcs_lock_phy(pwrk->phy); 4860 4861 tgt = pwrk->xp; 4862 pmcs_pwork(pwp, pwrk); 4863 4864 if (tgt != NULL) { 4865 mutex_enter(&tgt->aqlock); 4866 if (!STAILQ_EMPTY(&tgt->aq)) { 4867 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, 4868 "%s: Abort complete (result=0x%x), but " 4869 "aq not empty (tgt 0x%p), waiting", 4870 __func__, result, (void *)tgt); 4871 cv_wait(&tgt->abort_cv, &tgt->aqlock); 4872 } 4873 mutex_exit(&tgt->aqlock); 4874 } 4875 4876 if (all_cmds) { 4877 pptr->abort_all_start = 0; 4878 cv_signal(&pptr->abort_all_cv); 4879 } 4880 4881 if (result) { 4882 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, 4883 "%s: Abort (htag 0x%08x) request timed out", 4884 __func__, abt_htag); 4885 if (tgt != NULL) { 4886 mutex_enter(&tgt->statlock); 4887 if ((tgt->dev_state != PMCS_DEVICE_STATE_IN_RECOVERY) && 4888 (tgt->dev_state != 4889 PMCS_DEVICE_STATE_NON_OPERATIONAL)) { 4890 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, 4891 "%s: Trying DS error recovery for tgt 0x%p", 4892 __func__, (void *)tgt); 4893 (void) pmcs_send_err_recovery_cmd(pwp, 4894 PMCS_DEVICE_STATE_IN_RECOVERY, pptr, tgt); 4895 } 4896 mutex_exit(&tgt->statlock); 4897 } 4898 return (ETIMEDOUT); 4899 } 4900 4901 status = LE_32(msg[2]); 4902 if (status != PMCOUT_STATUS_OK) { 4903 /* 4904 * The only non-success status are IO_NOT_VALID & 4905 * IO_ABORT_IN_PROGRESS. 
4906 * In case of IO_ABORT_IN_PROGRESS, the other ABORT cmd's 4907 * status is of concern and this duplicate cmd status can 4908 * be ignored. 4909 * If IO_NOT_VALID, that's not an error per-se. 4910 * For abort of single I/O complete the command anyway. 4911 * If, however, we were aborting all, that is a problem 4912 * as IO_NOT_VALID really means that the IO or device is 4913 * not there. So, discovery process will take of the cleanup. 4914 */ 4915 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, 4916 "%s: abort result 0x%x", __func__, LE_32(msg[2])); 4917 if (all_cmds) { 4918 PHY_CHANGED(pwp, pptr); 4919 RESTART_DISCOVERY(pwp); 4920 } else { 4921 return (EINVAL); 4922 } 4923 4924 return (0); 4925 } 4926 4927 if (tgt != NULL) { 4928 mutex_enter(&tgt->statlock); 4929 if (tgt->dev_state == PMCS_DEVICE_STATE_IN_RECOVERY) { 4930 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, 4931 "%s: Restoring OPERATIONAL dev_state for tgt 0x%p", 4932 __func__, (void *)tgt); 4933 (void) pmcs_send_err_recovery_cmd(pwp, 4934 PMCS_DEVICE_STATE_OPERATIONAL, pptr, tgt); 4935 } 4936 mutex_exit(&tgt->statlock); 4937 } 4938 4939 return (0); 4940 } 4941 4942 /* 4943 * Issue a task management function to an SSP device. 4944 * 4945 * Called with PHY lock held. 4946 * statlock CANNOT be held upon entry. 
4947 */ 4948 int 4949 pmcs_ssp_tmf(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint8_t tmf, uint32_t tag, 4950 uint64_t lun, uint32_t *response) 4951 { 4952 int result, ds; 4953 uint8_t local[PMCS_QENTRY_SIZE << 1], *xd; 4954 sas_ssp_rsp_iu_t *rptr = (void *)local; 4955 static const uint8_t ssp_rsp_evec[] = { 4956 0x58, 0x61, 0x56, 0x72, 0x00 4957 }; 4958 uint32_t msg[PMCS_MSG_SIZE], *ptr, status; 4959 struct pmcwork *pwrk; 4960 pmcs_xscsi_t *xp; 4961 4962 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr); 4963 if (pwrk == NULL) { 4964 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__); 4965 return (ENOMEM); 4966 } 4967 /* 4968 * NB: We use the PMCS_OQ_GENERAL outbound queue 4969 * NB: so as to not get entangled in normal I/O 4970 * NB: processing. 4971 */ 4972 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, 4973 PMCIN_SSP_INI_TM_START)); 4974 msg[1] = LE_32(pwrk->htag); 4975 msg[2] = LE_32(pptr->device_id); 4976 if (tmf == SAS_ABORT_TASK || tmf == SAS_QUERY_TASK) { 4977 msg[3] = LE_32(tag); 4978 } else { 4979 msg[3] = 0; 4980 } 4981 msg[4] = LE_32(tmf); 4982 msg[5] = BE_32((uint32_t)lun); 4983 msg[6] = BE_32((uint32_t)(lun >> 32)); 4984 msg[7] = LE_32(PMCIN_MESSAGE_REPORT); 4985 4986 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 4987 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 4988 if (ptr == NULL) { 4989 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 4990 pmcs_pwork(pwp, pwrk); 4991 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__); 4992 return (ENOMEM); 4993 } 4994 COPY_MESSAGE(ptr, msg, 7); 4995 pwrk->arg = msg; 4996 pwrk->dtype = pptr->dtype; 4997 xp = pptr->target; 4998 pwrk->xp = xp; 4999 5000 if (xp != NULL) { 5001 mutex_enter(&xp->statlock); 5002 if (xp->dev_state == PMCS_DEVICE_STATE_NON_OPERATIONAL) { 5003 mutex_exit(&xp->statlock); 5004 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5005 pmcs_pwork(pwp, pwrk); 5006 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: Not " 5007 "sending '%s' because DS is '%s'", __func__, 5008 pmcs_tmf2str(tmf), pmcs_status_str 
5009 (PMCOUT_STATUS_IO_DS_NON_OPERATIONAL)); 5010 return (EIO); 5011 } 5012 mutex_exit(&xp->statlock); 5013 } 5014 5015 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5016 "%s: sending '%s' to %s (lun %llu) tag 0x%x", __func__, 5017 pmcs_tmf2str(tmf), pptr->path, (unsigned long long) lun, tag); 5018 pwrk->state = PMCS_WORK_STATE_ONCHIP; 5019 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 5020 5021 pmcs_unlock_phy(pptr); 5022 /* 5023 * This is a command sent to the target device, so it can take 5024 * significant amount of time to complete when path & device is busy. 5025 * Set a timeout to 20 seconds 5026 */ 5027 WAIT_FOR(pwrk, 20000, result); 5028 pmcs_lock_phy(pptr); 5029 pmcs_pwork(pwp, pwrk); 5030 5031 if (result) { 5032 if (xp == NULL) { 5033 return (ETIMEDOUT); 5034 } 5035 5036 mutex_enter(&xp->statlock); 5037 pmcs_start_dev_state_recovery(xp, pptr); 5038 mutex_exit(&xp->statlock); 5039 return (ETIMEDOUT); 5040 } 5041 5042 status = LE_32(msg[2]); 5043 if (status != PMCOUT_STATUS_OK) { 5044 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5045 "%s: status %s for TMF %s action to %s, lun %llu", 5046 __func__, pmcs_status_str(status), pmcs_tmf2str(tmf), 5047 pptr->path, (unsigned long long) lun); 5048 if ((status == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) || 5049 (status == PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK) || 5050 (status == PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS)) { 5051 ds = PMCS_DEVICE_STATE_NON_OPERATIONAL; 5052 } else if (status == PMCOUT_STATUS_IO_DS_IN_RECOVERY) { 5053 /* 5054 * If the status is IN_RECOVERY, it's an indication 5055 * that it's now time for us to request to have the 5056 * device state set to OPERATIONAL since we're the ones 5057 * that requested recovery to begin with. 
5058 */ 5059 ds = PMCS_DEVICE_STATE_OPERATIONAL; 5060 } else { 5061 ds = PMCS_DEVICE_STATE_IN_RECOVERY; 5062 } 5063 if (xp != NULL) { 5064 mutex_enter(&xp->statlock); 5065 if (xp->dev_state != ds) { 5066 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5067 "%s: Sending err recovery cmd" 5068 " for tgt 0x%p (status = %s)", 5069 __func__, (void *)xp, 5070 pmcs_status_str(status)); 5071 (void) pmcs_send_err_recovery_cmd(pwp, ds, 5072 pptr, xp); 5073 } 5074 mutex_exit(&xp->statlock); 5075 } 5076 return (EIO); 5077 } else { 5078 ds = PMCS_DEVICE_STATE_OPERATIONAL; 5079 if (xp != NULL) { 5080 mutex_enter(&xp->statlock); 5081 if (xp->dev_state != ds) { 5082 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5083 "%s: Sending err recovery cmd" 5084 " for tgt 0x%p (status = %s)", 5085 __func__, (void *)xp, 5086 pmcs_status_str(status)); 5087 (void) pmcs_send_err_recovery_cmd(pwp, ds, 5088 pptr, xp); 5089 } 5090 mutex_exit(&xp->statlock); 5091 } 5092 } 5093 if (LE_32(msg[3]) == 0) { 5094 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5095 "TMF completed with no response"); 5096 return (EIO); 5097 } 5098 pmcs_endian_transform(pwp, local, &msg[5], ssp_rsp_evec); 5099 xd = (uint8_t *)(&msg[5]); 5100 xd += SAS_RSP_HDR_SIZE; 5101 if (rptr->datapres != SAS_RSP_DATAPRES_RESPONSE_DATA) { 5102 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5103 "%s: TMF response not RESPONSE DATA (0x%x)", 5104 __func__, rptr->datapres); 5105 return (EIO); 5106 } 5107 if (rptr->response_data_length != 4) { 5108 pmcs_print_entry(pwp, PMCS_PRT_DEBUG, 5109 "Bad SAS RESPONSE DATA LENGTH", msg); 5110 return (EIO); 5111 } 5112 (void) memcpy(&status, xd, sizeof (uint32_t)); 5113 status = BE_32(status); 5114 if (response != NULL) 5115 *response = status; 5116 /* 5117 * The status is actually in the low-order byte. The upper three 5118 * bytes contain additional information for the TMFs that support them. 5119 * However, at this time we do not issue any of those. 
In the other 5120 * cases, the upper three bytes are supposed to be 0, but it appears 5121 * they aren't always. Just mask them off. 5122 */ 5123 switch (status & 0xff) { 5124 case SAS_RSP_TMF_COMPLETE: 5125 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5126 "%s: TMF complete", __func__); 5127 result = 0; 5128 break; 5129 case SAS_RSP_TMF_SUCCEEDED: 5130 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5131 "%s: TMF succeeded", __func__); 5132 result = 0; 5133 break; 5134 case SAS_RSP_INVALID_FRAME: 5135 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5136 "%s: TMF returned INVALID FRAME", __func__); 5137 result = EIO; 5138 break; 5139 case SAS_RSP_TMF_NOT_SUPPORTED: 5140 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5141 "%s: TMF returned TMF NOT SUPPORTED", __func__); 5142 result = EIO; 5143 break; 5144 case SAS_RSP_TMF_FAILED: 5145 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5146 "%s: TMF returned TMF FAILED", __func__); 5147 result = EIO; 5148 break; 5149 case SAS_RSP_TMF_INCORRECT_LUN: 5150 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5151 "%s: TMF returned INCORRECT LUN", __func__); 5152 result = EIO; 5153 break; 5154 case SAS_RSP_OVERLAPPED_OIPTTA: 5155 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5156 "%s: TMF returned OVERLAPPED INITIATOR PORT TRANSFER TAG " 5157 "ATTEMPTED", __func__); 5158 result = EIO; 5159 break; 5160 default: 5161 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5162 "%s: TMF returned unknown code 0x%x", __func__, status); 5163 result = EIO; 5164 break; 5165 } 5166 return (result); 5167 } 5168 5169 /* 5170 * Called with PHY lock held and scratch acquired 5171 */ 5172 int 5173 pmcs_sata_abort_ncq(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 5174 { 5175 const char *utag_fail_fmt = "%s: untagged NCQ command failure"; 5176 const char *tag_fail_fmt = "%s: NCQ command failure (tag 0x%x)"; 5177 uint32_t msg[PMCS_QENTRY_SIZE], *ptr, result, status; 5178 uint8_t *fp = pwp->scratch, ds; 5179 fis_t fis; 5180 pmcwork_t *pwrk; 5181 pmcs_xscsi_t *tgt; 5182 5183 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr); 
5184 if (pwrk == NULL) { 5185 return (ENOMEM); 5186 } 5187 msg[0] = LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, 5188 PMCIN_SATA_HOST_IO_START)); 5189 msg[1] = LE_32(pwrk->htag); 5190 msg[2] = LE_32(pptr->device_id); 5191 msg[3] = LE_32(512); 5192 msg[4] = LE_32(SATA_PROTOCOL_PIO | PMCIN_DATADIR_2_INI); 5193 msg[5] = LE_32((READ_LOG_EXT << 16) | (C_BIT << 8) | FIS_REG_H2DEV); 5194 msg[6] = LE_32(0x10); 5195 msg[8] = LE_32(1); 5196 msg[9] = 0; 5197 msg[10] = 0; 5198 msg[11] = 0; 5199 msg[12] = LE_32(DWORD0(pwp->scratch_dma)); 5200 msg[13] = LE_32(DWORD1(pwp->scratch_dma)); 5201 msg[14] = LE_32(512); 5202 msg[15] = 0; 5203 5204 pwrk->arg = msg; 5205 pwrk->dtype = pptr->dtype; 5206 5207 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5208 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 5209 if (ptr == NULL) { 5210 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5211 pmcs_pwork(pwp, pwrk); 5212 return (ENOMEM); 5213 } 5214 COPY_MESSAGE(ptr, msg, PMCS_QENTRY_SIZE); 5215 pwrk->state = PMCS_WORK_STATE_ONCHIP; 5216 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 5217 5218 pmcs_unlock_phy(pptr); 5219 WAIT_FOR(pwrk, 250, result); 5220 pmcs_lock_phy(pptr); 5221 pmcs_pwork(pwp, pwrk); 5222 5223 tgt = pptr->target; 5224 if (result) { 5225 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, pmcs_timeo, __func__); 5226 return (EIO); 5227 } 5228 status = LE_32(msg[2]); 5229 if (status != PMCOUT_STATUS_OK || LE_32(msg[3])) { 5230 if (tgt == NULL) { 5231 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, 5232 "%s: cannot find target for phy 0x%p for " 5233 "dev state recovery", __func__, (void *)pptr); 5234 return (EIO); 5235 } 5236 5237 mutex_enter(&tgt->statlock); 5238 5239 pmcs_print_entry(pwp, PMCS_PRT_DEBUG, "READ LOG EXT", msg); 5240 if ((status == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) || 5241 (status == PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK) || 5242 (status == PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS)) { 5243 ds = PMCS_DEVICE_STATE_NON_OPERATIONAL; 5244 } else { 5245 ds = PMCS_DEVICE_STATE_IN_RECOVERY; 5246 } 5247 if (tgt->dev_state 
!= ds) { 5248 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, "%s: Trying " 5249 "SATA DS Recovery for tgt(0x%p) for status(%s)", 5250 __func__, (void *)tgt, pmcs_status_str(status)); 5251 (void) pmcs_send_err_recovery_cmd(pwp, ds, pptr, tgt); 5252 } 5253 5254 mutex_exit(&tgt->statlock); 5255 return (EIO); 5256 } 5257 fis[0] = (fp[4] << 24) | (fp[3] << 16) | (fp[2] << 8) | FIS_REG_D2H; 5258 fis[1] = (fp[8] << 24) | (fp[7] << 16) | (fp[6] << 8) | fp[5]; 5259 fis[2] = (fp[12] << 24) | (fp[11] << 16) | (fp[10] << 8) | fp[9]; 5260 fis[3] = (fp[16] << 24) | (fp[15] << 16) | (fp[14] << 8) | fp[13]; 5261 fis[4] = 0; 5262 if (fp[0] & 0x80) { 5263 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, 5264 utag_fail_fmt, __func__); 5265 } else { 5266 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, 5267 tag_fail_fmt, __func__, fp[0] & 0x1f); 5268 } 5269 pmcs_fis_dump(pwp, fis); 5270 pptr->need_rl_ext = 0; 5271 return (0); 5272 } 5273 5274 /* 5275 * Transform a structure from CPU to Device endian format, or 5276 * vice versa, based upon a transformation vector. 5277 * 5278 * A transformation vector is an array of bytes, each byte 5279 * of which is defined thusly: 5280 * 5281 * bit 7: from CPU to desired endian, otherwise from desired endian 5282 * to CPU format 5283 * bit 6: Big Endian, else Little Endian 5284 * bits 5-4: 5285 * 00 Undefined 5286 * 01 One Byte quantities 5287 * 02 Two Byte quantities 5288 * 03 Four Byte quantities 5289 * 5290 * bits 3-0: 5291 * 00 Undefined 5292 * Number of quantities to transform 5293 * 5294 * The vector is terminated by a 0 value. 
5295 */ 5296 5297 void 5298 pmcs_endian_transform(pmcs_hw_t *pwp, void *orig_out, void *orig_in, 5299 const uint8_t *xfvec) 5300 { 5301 uint8_t c, *out = orig_out, *in = orig_in; 5302 5303 if (xfvec == NULL) { 5304 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 5305 "%s: null xfvec", __func__); 5306 return; 5307 } 5308 if (out == NULL) { 5309 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 5310 "%s: null out", __func__); 5311 return; 5312 } 5313 if (in == NULL) { 5314 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 5315 "%s: null in", __func__); 5316 return; 5317 } 5318 while ((c = *xfvec++) != 0) { 5319 int nbyt = (c & 0xf); 5320 int size = (c >> 4) & 0x3; 5321 int bige = (c >> 4) & 0x4; 5322 5323 switch (size) { 5324 case 1: 5325 { 5326 while (nbyt-- > 0) { 5327 *out++ = *in++; 5328 } 5329 break; 5330 } 5331 case 2: 5332 { 5333 uint16_t tmp; 5334 while (nbyt-- > 0) { 5335 (void) memcpy(&tmp, in, sizeof (uint16_t)); 5336 if (bige) { 5337 tmp = BE_16(tmp); 5338 } else { 5339 tmp = LE_16(tmp); 5340 } 5341 (void) memcpy(out, &tmp, sizeof (uint16_t)); 5342 out += sizeof (uint16_t); 5343 in += sizeof (uint16_t); 5344 } 5345 break; 5346 } 5347 case 3: 5348 { 5349 uint32_t tmp; 5350 while (nbyt-- > 0) { 5351 (void) memcpy(&tmp, in, sizeof (uint32_t)); 5352 if (bige) { 5353 tmp = BE_32(tmp); 5354 } else { 5355 tmp = LE_32(tmp); 5356 } 5357 (void) memcpy(out, &tmp, sizeof (uint32_t)); 5358 out += sizeof (uint32_t); 5359 in += sizeof (uint32_t); 5360 } 5361 break; 5362 } 5363 default: 5364 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 5365 "%s: bad size", __func__); 5366 return; 5367 } 5368 } 5369 } 5370 5371 const char * 5372 pmcs_get_rate(unsigned int linkrt) 5373 { 5374 const char *rate; 5375 switch (linkrt) { 5376 case SAS_LINK_RATE_1_5GBIT: 5377 rate = "1.5"; 5378 break; 5379 case SAS_LINK_RATE_3GBIT: 5380 rate = "3.0"; 5381 break; 5382 case SAS_LINK_RATE_6GBIT: 5383 rate = "6.0"; 5384 break; 5385 default: 5386 rate = "???"; 5387 break; 5388 } 5389 return (rate); 5390 } 5391 5392 const 
char * 5393 pmcs_get_typename(pmcs_dtype_t type) 5394 { 5395 switch (type) { 5396 case NOTHING: 5397 return ("NIL"); 5398 case SATA: 5399 return ("SATA"); 5400 case SAS: 5401 return ("SSP"); 5402 case EXPANDER: 5403 return ("EXPANDER"); 5404 } 5405 return ("????"); 5406 } 5407 5408 const char * 5409 pmcs_tmf2str(int tmf) 5410 { 5411 switch (tmf) { 5412 case SAS_ABORT_TASK: 5413 return ("Abort Task"); 5414 case SAS_ABORT_TASK_SET: 5415 return ("Abort Task Set"); 5416 case SAS_CLEAR_TASK_SET: 5417 return ("Clear Task Set"); 5418 case SAS_LOGICAL_UNIT_RESET: 5419 return ("Logical Unit Reset"); 5420 case SAS_I_T_NEXUS_RESET: 5421 return ("I_T Nexus Reset"); 5422 case SAS_CLEAR_ACA: 5423 return ("Clear ACA"); 5424 case SAS_QUERY_TASK: 5425 return ("Query Task"); 5426 case SAS_QUERY_TASK_SET: 5427 return ("Query Task Set"); 5428 case SAS_QUERY_UNIT_ATTENTION: 5429 return ("Query Unit Attention"); 5430 default: 5431 return ("Unknown"); 5432 } 5433 } 5434 5435 const char * 5436 pmcs_status_str(uint32_t status) 5437 { 5438 switch (status) { 5439 case PMCOUT_STATUS_OK: 5440 return ("OK"); 5441 case PMCOUT_STATUS_ABORTED: 5442 return ("ABORTED"); 5443 case PMCOUT_STATUS_OVERFLOW: 5444 return ("OVERFLOW"); 5445 case PMCOUT_STATUS_UNDERFLOW: 5446 return ("UNDERFLOW"); 5447 case PMCOUT_STATUS_FAILED: 5448 return ("FAILED"); 5449 case PMCOUT_STATUS_ABORT_RESET: 5450 return ("ABORT_RESET"); 5451 case PMCOUT_STATUS_IO_NOT_VALID: 5452 return ("IO_NOT_VALID"); 5453 case PMCOUT_STATUS_NO_DEVICE: 5454 return ("NO_DEVICE"); 5455 case PMCOUT_STATUS_ILLEGAL_PARAMETER: 5456 return ("ILLEGAL_PARAMETER"); 5457 case PMCOUT_STATUS_LINK_FAILURE: 5458 return ("LINK_FAILURE"); 5459 case PMCOUT_STATUS_PROG_ERROR: 5460 return ("PROG_ERROR"); 5461 case PMCOUT_STATUS_EDC_IN_ERROR: 5462 return ("EDC_IN_ERROR"); 5463 case PMCOUT_STATUS_EDC_OUT_ERROR: 5464 return ("EDC_OUT_ERROR"); 5465 case PMCOUT_STATUS_ERROR_HW_TIMEOUT: 5466 return ("ERROR_HW_TIMEOUT"); 5467 case PMCOUT_STATUS_XFER_ERR_BREAK: 5468 
return ("XFER_ERR_BREAK"); 5469 case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY: 5470 return ("XFER_ERR_PHY_NOT_READY"); 5471 case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED: 5472 return ("OPEN_CNX_PROTOCOL_NOT_SUPPORTED"); 5473 case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION: 5474 return ("OPEN_CNX_ERROR_ZONE_VIOLATION"); 5475 case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK: 5476 return ("OPEN_CNX_ERROR_BREAK"); 5477 case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS: 5478 return ("OPEN_CNX_ERROR_IT_NEXUS_LOSS"); 5479 case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION: 5480 return ("OPENCNX_ERROR_BAD_DESTINATION"); 5481 case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: 5482 return ("OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED"); 5483 case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: 5484 return ("OPEN_CNX_ERROR_STP_RESOURCES_BUSY"); 5485 case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION: 5486 return ("OPEN_CNX_ERROR_WRONG_DESTINATION"); 5487 case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR: 5488 return ("OPEN_CNX_ERROR_UNKNOWN_ERROR"); 5489 case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED: 5490 return ("IO_XFER_ERROR_NAK_RECEIVED"); 5491 case PMCOUT_STATUS_XFER_ERROR_ACK_NAK_TIMEOUT: 5492 return ("XFER_ERROR_ACK_NAK_TIMEOUT"); 5493 case PMCOUT_STATUS_XFER_ERROR_PEER_ABORTED: 5494 return ("XFER_ERROR_PEER_ABORTED"); 5495 case PMCOUT_STATUS_XFER_ERROR_RX_FRAME: 5496 return ("XFER_ERROR_RX_FRAME"); 5497 case PMCOUT_STATUS_IO_XFER_ERROR_DMA: 5498 return ("IO_XFER_ERROR_DMA"); 5499 case PMCOUT_STATUS_XFER_ERROR_CREDIT_TIMEOUT: 5500 return ("XFER_ERROR_CREDIT_TIMEOUT"); 5501 case PMCOUT_STATUS_XFER_ERROR_SATA_LINK_TIMEOUT: 5502 return ("XFER_ERROR_SATA_LINK_TIMEOUT"); 5503 case PMCOUT_STATUS_XFER_ERROR_SATA: 5504 return ("XFER_ERROR_SATA"); 5505 case PMCOUT_STATUS_XFER_ERROR_REJECTED_NCQ_MODE: 5506 return ("XFER_ERROR_REJECTED_NCQ_MODE"); 5507 case PMCOUT_STATUS_XFER_ERROR_ABORTED_DUE_TO_SRST: 5508 return ("XFER_ERROR_ABORTED_DUE_TO_SRST"); 5509 case 
PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE: 5510 return ("XFER_ERROR_ABORTED_NCQ_MODE"); 5511 case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT: 5512 return ("IO_XFER_OPEN_RETRY_TIMEOUT"); 5513 case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR: 5514 return ("SMP_RESP_CONNECTION_ERROR"); 5515 case PMCOUT_STATUS_XFER_ERROR_UNEXPECTED_PHASE: 5516 return ("XFER_ERROR_UNEXPECTED_PHASE"); 5517 case PMCOUT_STATUS_XFER_ERROR_RDY_OVERRUN: 5518 return ("XFER_ERROR_RDY_OVERRUN"); 5519 case PMCOUT_STATUS_XFER_ERROR_RDY_NOT_EXPECTED: 5520 return ("XFER_ERROR_RDY_NOT_EXPECTED"); 5521 case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT: 5522 return ("XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT"); 5523 case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK: 5524 return ("XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK"); 5525 case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK: 5526 return ("XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK"); 5527 case PMCOUT_STATUS_XFER_ERROR_OFFSET_MISMATCH: 5528 return ("XFER_ERROR_OFFSET_MISMATCH"); 5529 case PMCOUT_STATUS_XFER_ERROR_ZERO_DATA_LEN: 5530 return ("XFER_ERROR_ZERO_DATA_LEN"); 5531 case PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED: 5532 return ("XFER_CMD_FRAME_ISSUED"); 5533 case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE: 5534 return ("ERROR_INTERNAL_SMP_RESOURCE"); 5535 case PMCOUT_STATUS_IO_PORT_IN_RESET: 5536 return ("IO_PORT_IN_RESET"); 5537 case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL: 5538 return ("DEVICE STATE NON-OPERATIONAL"); 5539 case PMCOUT_STATUS_IO_DS_IN_RECOVERY: 5540 return ("DEVICE STATE IN RECOVERY"); 5541 case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: 5542 return ("OPEN CNX ERR HW RESOURCE BUSY"); 5543 default: 5544 return (NULL); 5545 } 5546 } 5547 5548 uint64_t 5549 pmcs_barray2wwn(uint8_t ba[8]) 5550 { 5551 uint64_t result = 0; 5552 int i; 5553 5554 for (i = 0; i < 8; i++) { 5555 result <<= 8; 5556 result |= ba[i]; 5557 } 5558 return (result); 5559 } 5560 5561 void 5562 pmcs_wwn2barray(uint64_t wwn, uint8_t ba[8]) 
5563 { 5564 int i; 5565 for (i = 0; i < 8; i++) { 5566 ba[7 - i] = wwn & 0xff; 5567 wwn >>= 8; 5568 } 5569 } 5570 5571 void 5572 pmcs_report_fwversion(pmcs_hw_t *pwp) 5573 { 5574 const char *fwsupport; 5575 switch (PMCS_FW_TYPE(pwp)) { 5576 case PMCS_FW_TYPE_RELEASED: 5577 fwsupport = "Released"; 5578 break; 5579 case PMCS_FW_TYPE_DEVELOPMENT: 5580 fwsupport = "Development"; 5581 break; 5582 case PMCS_FW_TYPE_ALPHA: 5583 fwsupport = "Alpha"; 5584 break; 5585 case PMCS_FW_TYPE_BETA: 5586 fwsupport = "Beta"; 5587 break; 5588 default: 5589 fwsupport = "Special"; 5590 break; 5591 } 5592 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, 5593 "Chip Revision: %c; F/W Revision %x.%x.%x %s", 'A' + pwp->chiprev, 5594 PMCS_FW_MAJOR(pwp), PMCS_FW_MINOR(pwp), PMCS_FW_MICRO(pwp), 5595 fwsupport); 5596 } 5597 5598 void 5599 pmcs_phy_name(pmcs_hw_t *pwp, pmcs_phy_t *pptr, char *obuf, size_t olen) 5600 { 5601 if (pptr->parent) { 5602 pmcs_phy_name(pwp, pptr->parent, obuf, olen); 5603 (void) snprintf(obuf, olen, "%s.%02x", obuf, pptr->phynum); 5604 } else { 5605 (void) snprintf(obuf, olen, "pp%02x", pptr->phynum); 5606 } 5607 } 5608 5609 /* 5610 * Implementation for pmcs_find_phy_by_devid. 5611 * If the PHY is found, it is returned locked. 
5612 */ 5613 static pmcs_phy_t * 5614 pmcs_find_phy_by_devid_impl(pmcs_phy_t *phyp, uint32_t device_id) 5615 { 5616 pmcs_phy_t *match, *cphyp, *nphyp; 5617 5618 ASSERT(!mutex_owned(&phyp->phy_lock)); 5619 5620 while (phyp) { 5621 pmcs_lock_phy(phyp); 5622 5623 if ((phyp->valid_device_id) && (phyp->device_id == device_id)) { 5624 return (phyp); 5625 } 5626 if (phyp->children) { 5627 cphyp = phyp->children; 5628 pmcs_unlock_phy(phyp); 5629 match = pmcs_find_phy_by_devid_impl(cphyp, device_id); 5630 if (match) { 5631 ASSERT(mutex_owned(&match->phy_lock)); 5632 return (match); 5633 } 5634 pmcs_lock_phy(phyp); 5635 } 5636 5637 if (IS_ROOT_PHY(phyp)) { 5638 pmcs_unlock_phy(phyp); 5639 phyp = NULL; 5640 } else { 5641 nphyp = phyp->sibling; 5642 pmcs_unlock_phy(phyp); 5643 phyp = nphyp; 5644 } 5645 } 5646 5647 return (NULL); 5648 } 5649 5650 /* 5651 * If the PHY is found, it is returned locked 5652 */ 5653 pmcs_phy_t * 5654 pmcs_find_phy_by_devid(pmcs_hw_t *pwp, uint32_t device_id) 5655 { 5656 pmcs_phy_t *phyp, *match = NULL; 5657 5658 phyp = pwp->root_phys; 5659 5660 while (phyp) { 5661 match = pmcs_find_phy_by_devid_impl(phyp, device_id); 5662 if (match) { 5663 ASSERT(mutex_owned(&match->phy_lock)); 5664 return (match); 5665 } 5666 phyp = phyp->sibling; 5667 } 5668 5669 return (NULL); 5670 } 5671 5672 /* 5673 * This function is called as a sanity check to ensure that a newly registered 5674 * PHY doesn't have a device_id that exists with another registered PHY. 5675 */ 5676 static boolean_t 5677 pmcs_validate_devid(pmcs_phy_t *parent, pmcs_phy_t *phyp, uint32_t device_id) 5678 { 5679 pmcs_phy_t *pptr, *pchild; 5680 boolean_t rval; 5681 5682 pptr = parent; 5683 5684 while (pptr) { 5685 if (pptr->valid_device_id && (pptr != phyp) && 5686 (pptr->device_id == device_id)) { 5687 /* 5688 * This can still be OK if both of these PHYs actually 5689 * represent the same device (e.g. expander). It could 5690 * be a case of a new "primary" PHY. 
If the SAS address 5691 * is the same and they have the same parent, we'll 5692 * accept this if the PHY to be registered is the 5693 * primary. 5694 */ 5695 if ((phyp->parent == pptr->parent) && 5696 (memcmp(phyp->sas_address, 5697 pptr->sas_address, 8) == 0) && (phyp->width > 1)) { 5698 /* 5699 * Move children over to the new primary and 5700 * update both PHYs 5701 */ 5702 pmcs_lock_phy(pptr); 5703 phyp->children = pptr->children; 5704 pchild = phyp->children; 5705 while (pchild) { 5706 pchild->parent = phyp; 5707 pchild = pchild->sibling; 5708 } 5709 phyp->subsidiary = 0; 5710 phyp->ncphy = pptr->ncphy; 5711 /* 5712 * device_id, valid_device_id, and configured 5713 * will be set by the caller 5714 */ 5715 pptr->children = NULL; 5716 pptr->subsidiary = 1; 5717 pptr->ncphy = 0; 5718 pmcs_unlock_phy(pptr); 5719 pmcs_prt(pptr->pwp, PMCS_PRT_DEBUG, pptr, NULL, 5720 "%s: Moving device_id %d from PHY %s to %s", 5721 __func__, device_id, pptr->path, 5722 phyp->path); 5723 return (B_TRUE); 5724 } 5725 pmcs_prt(pptr->pwp, PMCS_PRT_DEBUG, pptr, NULL, 5726 "%s: phy %s already exists as %s with " 5727 "device id 0x%x", __func__, phyp->path, 5728 pptr->path, device_id); 5729 return (B_FALSE); 5730 } 5731 5732 if (pptr->children) { 5733 rval = pmcs_validate_devid(pptr->children, phyp, 5734 device_id); 5735 if (rval == B_FALSE) { 5736 return (rval); 5737 } 5738 } 5739 5740 pptr = pptr->sibling; 5741 } 5742 5743 /* This PHY and device_id are valid */ 5744 return (B_TRUE); 5745 } 5746 5747 /* 5748 * If the PHY is found, it is returned locked 5749 */ 5750 static pmcs_phy_t * 5751 pmcs_find_phy_by_wwn_impl(pmcs_phy_t *phyp, uint8_t *wwn) 5752 { 5753 pmcs_phy_t *matched_phy, *cphyp, *nphyp; 5754 5755 ASSERT(!mutex_owned(&phyp->phy_lock)); 5756 5757 while (phyp) { 5758 pmcs_lock_phy(phyp); 5759 5760 if (phyp->valid_device_id) { 5761 if (memcmp(phyp->sas_address, wwn, 8) == 0) { 5762 return (phyp); 5763 } 5764 } 5765 5766 if (phyp->children) { 5767 cphyp = phyp->children; 5768 
pmcs_unlock_phy(phyp); 5769 matched_phy = pmcs_find_phy_by_wwn_impl(cphyp, wwn); 5770 if (matched_phy) { 5771 ASSERT(mutex_owned(&matched_phy->phy_lock)); 5772 return (matched_phy); 5773 } 5774 pmcs_lock_phy(phyp); 5775 } 5776 5777 /* 5778 * Only iterate through non-root PHYs 5779 */ 5780 if (IS_ROOT_PHY(phyp)) { 5781 pmcs_unlock_phy(phyp); 5782 phyp = NULL; 5783 } else { 5784 nphyp = phyp->sibling; 5785 pmcs_unlock_phy(phyp); 5786 phyp = nphyp; 5787 } 5788 } 5789 5790 return (NULL); 5791 } 5792 5793 pmcs_phy_t * 5794 pmcs_find_phy_by_wwn(pmcs_hw_t *pwp, uint64_t wwn) 5795 { 5796 uint8_t ebstr[8]; 5797 pmcs_phy_t *pptr, *matched_phy; 5798 5799 pmcs_wwn2barray(wwn, ebstr); 5800 5801 pptr = pwp->root_phys; 5802 while (pptr) { 5803 matched_phy = pmcs_find_phy_by_wwn_impl(pptr, ebstr); 5804 if (matched_phy) { 5805 ASSERT(mutex_owned(&matched_phy->phy_lock)); 5806 return (matched_phy); 5807 } 5808 5809 pptr = pptr->sibling; 5810 } 5811 5812 return (NULL); 5813 } 5814 5815 5816 /* 5817 * pmcs_find_phy_by_sas_address 5818 * 5819 * Find a PHY that both matches "sas_addr" and is on "iport". 5820 * If a matching PHY is found, it is returned locked. 5821 */ 5822 pmcs_phy_t * 5823 pmcs_find_phy_by_sas_address(pmcs_hw_t *pwp, pmcs_iport_t *iport, 5824 pmcs_phy_t *root, char *sas_addr) 5825 { 5826 int ua_form = 1; 5827 uint64_t wwn; 5828 char addr[PMCS_MAX_UA_SIZE]; 5829 pmcs_phy_t *pptr, *pnext, *pchild; 5830 5831 if (root == NULL) { 5832 pptr = pwp->root_phys; 5833 } else { 5834 pptr = root; 5835 } 5836 5837 while (pptr) { 5838 pmcs_lock_phy(pptr); 5839 /* 5840 * If the PHY is dead or does not have a valid device ID, 5841 * skip it. 
5842 */ 5843 if ((pptr->dead) || (!pptr->valid_device_id)) { 5844 goto next_phy; 5845 } 5846 5847 if (pptr->iport != iport) { 5848 goto next_phy; 5849 } 5850 5851 wwn = pmcs_barray2wwn(pptr->sas_address); 5852 (void *) scsi_wwn_to_wwnstr(wwn, ua_form, addr); 5853 if (strncmp(addr, sas_addr, strlen(addr)) == 0) { 5854 return (pptr); 5855 } 5856 5857 if (pptr->children) { 5858 pchild = pptr->children; 5859 pmcs_unlock_phy(pptr); 5860 pnext = pmcs_find_phy_by_sas_address(pwp, iport, pchild, 5861 sas_addr); 5862 if (pnext) { 5863 return (pnext); 5864 } 5865 pmcs_lock_phy(pptr); 5866 } 5867 5868 next_phy: 5869 pnext = pptr->sibling; 5870 pmcs_unlock_phy(pptr); 5871 pptr = pnext; 5872 } 5873 5874 return (NULL); 5875 } 5876 5877 void 5878 pmcs_fis_dump(pmcs_hw_t *pwp, fis_t fis) 5879 { 5880 switch (fis[0] & 0xff) { 5881 case FIS_REG_H2DEV: 5882 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, 5883 "FIS REGISTER HOST TO DEVICE: " 5884 "OP=0x%02x Feature=0x%04x Count=0x%04x Device=0x%02x " 5885 "LBA=%llu", BYTE2(fis[0]), BYTE3(fis[2]) << 8 | 5886 BYTE3(fis[0]), WORD0(fis[3]), BYTE3(fis[1]), 5887 (unsigned long long) 5888 (((uint64_t)fis[2] & 0x00ffffff) << 24 | 5889 ((uint64_t)fis[1] & 0x00ffffff))); 5890 break; 5891 case FIS_REG_D2H: 5892 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, 5893 "FIS REGISTER DEVICE TO HOST: Status=0x%02x " 5894 "Error=0x%02x Dev=0x%02x Count=0x%04x LBA=%llu", 5895 BYTE2(fis[0]), BYTE3(fis[0]), BYTE3(fis[1]), WORD0(fis[3]), 5896 (unsigned long long)(((uint64_t)fis[2] & 0x00ffffff) << 24 | 5897 ((uint64_t)fis[1] & 0x00ffffff))); 5898 break; 5899 default: 5900 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, 5901 "FIS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 5902 fis[0], fis[1], fis[2], fis[3], fis[4]); 5903 break; 5904 } 5905 } 5906 5907 void 5908 pmcs_print_entry(pmcs_hw_t *pwp, int level, char *msg, void *arg) 5909 { 5910 uint32_t *mb = arg; 5911 size_t i; 5912 5913 pmcs_prt(pwp, level, NULL, NULL, msg); 5914 for (i = 0; i < (PMCS_QENTRY_SIZE / sizeof (uint32_t)); i += 
4) { 5915 pmcs_prt(pwp, level, NULL, NULL, 5916 "Offset %2lu: 0x%08x 0x%08x 0x%08x 0x%08x", 5917 i * sizeof (uint32_t), LE_32(mb[i]), 5918 LE_32(mb[i+1]), LE_32(mb[i+2]), LE_32(mb[i+3])); 5919 } 5920 } 5921 5922 /* 5923 * If phyp == NULL we're being called from the worker thread, in which 5924 * case we need to check all the PHYs. In this case, the softstate lock 5925 * will be held. 5926 * If phyp is non-NULL, just issue the spinup release for the specified PHY 5927 * (which will already be locked). 5928 */ 5929 void 5930 pmcs_spinup_release(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 5931 { 5932 uint32_t *msg; 5933 struct pmcwork *pwrk; 5934 pmcs_phy_t *tphyp; 5935 5936 if (phyp != NULL) { 5937 ASSERT(mutex_owned(&phyp->phy_lock)); 5938 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, NULL, 5939 "%s: Issuing spinup release only for PHY %s", __func__, 5940 phyp->path); 5941 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5942 msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 5943 if (msg == NULL || (pwrk = 5944 pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) { 5945 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5946 SCHEDULE_WORK(pwp, PMCS_WORK_SPINUP_RELEASE); 5947 return; 5948 } 5949 5950 phyp->spinup_hold = 0; 5951 bzero(msg, PMCS_QENTRY_SIZE); 5952 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, 5953 PMCIN_LOCAL_PHY_CONTROL)); 5954 msg[1] = LE_32(pwrk->htag); 5955 msg[2] = LE_32((0x10 << 8) | phyp->phynum); 5956 5957 pwrk->dtype = phyp->dtype; 5958 pwrk->state = PMCS_WORK_STATE_ONCHIP; 5959 mutex_exit(&pwrk->lock); 5960 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 5961 return; 5962 } 5963 5964 ASSERT(mutex_owned(&pwp->lock)); 5965 5966 tphyp = pwp->root_phys; 5967 while (tphyp) { 5968 pmcs_lock_phy(tphyp); 5969 if (tphyp->spinup_hold == 0) { 5970 pmcs_unlock_phy(tphyp); 5971 tphyp = tphyp->sibling; 5972 continue; 5973 } 5974 5975 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, NULL, 5976 "%s: Issuing spinup release for PHY %s", __func__, 5977 phyp->path); 5978 5979 
mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5980 msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 5981 if (msg == NULL || (pwrk = 5982 pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) { 5983 pmcs_unlock_phy(tphyp); 5984 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5985 SCHEDULE_WORK(pwp, PMCS_WORK_SPINUP_RELEASE); 5986 break; 5987 } 5988 5989 tphyp->spinup_hold = 0; 5990 bzero(msg, PMCS_QENTRY_SIZE); 5991 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, 5992 PMCIN_LOCAL_PHY_CONTROL)); 5993 msg[1] = LE_32(pwrk->htag); 5994 msg[2] = LE_32((0x10 << 8) | tphyp->phynum); 5995 5996 pwrk->dtype = phyp->dtype; 5997 pwrk->state = PMCS_WORK_STATE_ONCHIP; 5998 mutex_exit(&pwrk->lock); 5999 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 6000 pmcs_unlock_phy(tphyp); 6001 6002 tphyp = tphyp->sibling; 6003 } 6004 } 6005 6006 /* 6007 * Abort commands on dead PHYs and deregister them as well as removing 6008 * the associated targets. 6009 */ 6010 static int 6011 pmcs_kill_devices(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 6012 { 6013 pmcs_phy_t *pnext, *pchild; 6014 boolean_t remove_device; 6015 int rval = 0; 6016 6017 while (phyp) { 6018 pmcs_lock_phy(phyp); 6019 pchild = phyp->children; 6020 pnext = phyp->sibling; 6021 pmcs_unlock_phy(phyp); 6022 6023 if (pchild) { 6024 rval = pmcs_kill_devices(pwp, pchild); 6025 if (rval) { 6026 return (rval); 6027 } 6028 } 6029 6030 /* 6031 * pmcs_remove_device requires the softstate lock. 
6032 */ 6033 mutex_enter(&pwp->lock); 6034 pmcs_lock_phy(phyp); 6035 if (phyp->dead && phyp->valid_device_id) { 6036 remove_device = B_TRUE; 6037 } else { 6038 remove_device = B_FALSE; 6039 } 6040 6041 if (remove_device) { 6042 pmcs_remove_device(pwp, phyp); 6043 mutex_exit(&pwp->lock); 6044 6045 rval = pmcs_kill_device(pwp, phyp); 6046 6047 if (rval) { 6048 pmcs_unlock_phy(phyp); 6049 return (rval); 6050 } 6051 } else { 6052 mutex_exit(&pwp->lock); 6053 } 6054 6055 pmcs_unlock_phy(phyp); 6056 phyp = pnext; 6057 } 6058 6059 return (rval); 6060 } 6061 6062 /* 6063 * Called with PHY locked 6064 */ 6065 int 6066 pmcs_kill_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 6067 { 6068 int r, result; 6069 uint32_t msg[PMCS_MSG_SIZE], *ptr, status; 6070 struct pmcwork *pwrk; 6071 6072 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, "kill %s device @ %s", 6073 pmcs_get_typename(pptr->dtype), pptr->path); 6074 6075 /* 6076 * There may be an outstanding ABORT_ALL running, which we wouldn't 6077 * know just by checking abort_pending. We can, however, check 6078 * abort_all_start. If it's non-zero, there is one, and we'll just 6079 * sit here and wait for it to complete. If we don't, we'll remove 6080 * the device while there are still commands pending. 
6081 */ 6082 if (pptr->abort_all_start) { 6083 while (pptr->abort_all_start) { 6084 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 6085 "%s: Waiting for outstanding ABORT_ALL on PHY 0x%p", 6086 __func__, (void *)pptr); 6087 cv_wait(&pptr->abort_all_cv, &pptr->phy_lock); 6088 } 6089 } else if (pptr->abort_pending) { 6090 r = pmcs_abort(pwp, pptr, pptr->device_id, 1, 1); 6091 6092 if (r) { 6093 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 6094 "%s: ABORT_ALL returned non-zero status (%d) for " 6095 "PHY 0x%p", __func__, r, (void *)pptr); 6096 return (r); 6097 } 6098 pptr->abort_pending = 0; 6099 } 6100 6101 if (pptr->valid_device_id == 0) { 6102 return (0); 6103 } 6104 6105 if ((pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr)) == NULL) { 6106 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__); 6107 return (ENOMEM); 6108 } 6109 pwrk->arg = msg; 6110 pwrk->dtype = pptr->dtype; 6111 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, 6112 PMCIN_DEREGISTER_DEVICE_HANDLE)); 6113 msg[1] = LE_32(pwrk->htag); 6114 msg[2] = LE_32(pptr->device_id); 6115 6116 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 6117 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 6118 if (ptr == NULL) { 6119 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 6120 mutex_exit(&pwrk->lock); 6121 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__); 6122 return (ENOMEM); 6123 } 6124 6125 COPY_MESSAGE(ptr, msg, 3); 6126 pwrk->state = PMCS_WORK_STATE_ONCHIP; 6127 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 6128 6129 pmcs_unlock_phy(pptr); 6130 WAIT_FOR(pwrk, 250, result); 6131 pmcs_lock_phy(pptr); 6132 pmcs_pwork(pwp, pwrk); 6133 6134 if (result) { 6135 return (ETIMEDOUT); 6136 } 6137 status = LE_32(msg[2]); 6138 if (status != PMCOUT_STATUS_OK) { 6139 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 6140 "%s: status 0x%x when trying to deregister device %s", 6141 __func__, status, pptr->path); 6142 } 6143 6144 pptr->device_id = PMCS_INVALID_DEVICE_ID; 6145 PHY_CHANGED(pwp, pptr); 6146 RESTART_DISCOVERY(pwp); 6147 
pptr->valid_device_id = 0; 6148 return (0); 6149 } 6150 6151 /* 6152 * Acknowledge the SAS h/w events that need acknowledgement. 6153 * This is only needed for first level PHYs. 6154 */ 6155 void 6156 pmcs_ack_events(pmcs_hw_t *pwp) 6157 { 6158 uint32_t msg[PMCS_MSG_SIZE], *ptr; 6159 struct pmcwork *pwrk; 6160 pmcs_phy_t *pptr; 6161 6162 for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) { 6163 pmcs_lock_phy(pptr); 6164 if (pptr->hw_event_ack == 0) { 6165 pmcs_unlock_phy(pptr); 6166 continue; 6167 } 6168 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 6169 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 6170 6171 if ((ptr == NULL) || (pwrk = 6172 pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) { 6173 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 6174 pmcs_unlock_phy(pptr); 6175 SCHEDULE_WORK(pwp, PMCS_WORK_SAS_HW_ACK); 6176 break; 6177 } 6178 6179 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, 6180 PMCIN_SAW_HW_EVENT_ACK)); 6181 msg[1] = LE_32(pwrk->htag); 6182 msg[2] = LE_32(pptr->hw_event_ack); 6183 6184 mutex_exit(&pwrk->lock); 6185 pwrk->dtype = pptr->dtype; 6186 pptr->hw_event_ack = 0; 6187 COPY_MESSAGE(ptr, msg, 3); 6188 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 6189 pmcs_unlock_phy(pptr); 6190 } 6191 } 6192 6193 /* 6194 * Load DMA 6195 */ 6196 int 6197 pmcs_dma_load(pmcs_hw_t *pwp, pmcs_cmd_t *sp, uint32_t *msg) 6198 { 6199 ddi_dma_cookie_t *sg; 6200 pmcs_dmachunk_t *tc; 6201 pmcs_dmasgl_t *sgl, *prior; 6202 int seg, tsc; 6203 uint64_t sgl_addr; 6204 6205 /* 6206 * If we have no data segments, we're done. 6207 */ 6208 if (CMD2PKT(sp)->pkt_numcookies == 0) { 6209 return (0); 6210 } 6211 6212 /* 6213 * Get the S/G list pointer. 6214 */ 6215 sg = CMD2PKT(sp)->pkt_cookies; 6216 6217 /* 6218 * If we only have one dma segment, we can directly address that 6219 * data within the Inbound message itself. 
6220 */ 6221 if (CMD2PKT(sp)->pkt_numcookies == 1) { 6222 msg[12] = LE_32(DWORD0(sg->dmac_laddress)); 6223 msg[13] = LE_32(DWORD1(sg->dmac_laddress)); 6224 msg[14] = LE_32(sg->dmac_size); 6225 msg[15] = 0; 6226 return (0); 6227 } 6228 6229 /* 6230 * Otherwise, we'll need one or more external S/G list chunks. 6231 * Get the first one and its dma address into the Inbound message. 6232 */ 6233 mutex_enter(&pwp->dma_lock); 6234 tc = pwp->dma_freelist; 6235 if (tc == NULL) { 6236 SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS); 6237 mutex_exit(&pwp->dma_lock); 6238 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, 6239 "%s: out of SG lists", __func__); 6240 return (-1); 6241 } 6242 pwp->dma_freelist = tc->nxt; 6243 mutex_exit(&pwp->dma_lock); 6244 6245 tc->nxt = NULL; 6246 sp->cmd_clist = tc; 6247 sgl = tc->chunks; 6248 (void) memset(tc->chunks, 0, PMCS_SGL_CHUNKSZ); 6249 sgl_addr = tc->addr; 6250 msg[12] = LE_32(DWORD0(sgl_addr)); 6251 msg[13] = LE_32(DWORD1(sgl_addr)); 6252 msg[14] = 0; 6253 msg[15] = LE_32(PMCS_DMASGL_EXTENSION); 6254 6255 prior = sgl; 6256 tsc = 0; 6257 6258 for (seg = 0; seg < CMD2PKT(sp)->pkt_numcookies; seg++) { 6259 /* 6260 * If the current segment count for this chunk is one less than 6261 * the number s/g lists per chunk and we have more than one seg 6262 * to go, we need another chunk. Get it, and make sure that the 6263 * tail end of the the previous chunk points the new chunk 6264 * (if remembering an offset can be called 'pointing to'). 6265 * 6266 * Note that we can store the offset into our command area that 6267 * represents the new chunk in the length field of the part 6268 * that points the PMC chip at the next chunk- the PMC chip 6269 * ignores this field when the EXTENSION bit is set. 6270 * 6271 * This is required for dma unloads later. 
6272 */ 6273 if (tsc == (PMCS_SGL_NCHUNKS - 1) && 6274 seg < (CMD2PKT(sp)->pkt_numcookies - 1)) { 6275 mutex_enter(&pwp->dma_lock); 6276 tc = pwp->dma_freelist; 6277 if (tc == NULL) { 6278 SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS); 6279 mutex_exit(&pwp->dma_lock); 6280 pmcs_dma_unload(pwp, sp); 6281 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, 6282 "%s: out of SG lists", __func__); 6283 return (-1); 6284 } 6285 pwp->dma_freelist = tc->nxt; 6286 tc->nxt = sp->cmd_clist; 6287 mutex_exit(&pwp->dma_lock); 6288 6289 sp->cmd_clist = tc; 6290 (void) memset(tc->chunks, 0, PMCS_SGL_CHUNKSZ); 6291 sgl = tc->chunks; 6292 sgl_addr = tc->addr; 6293 prior[PMCS_SGL_NCHUNKS-1].sglal = 6294 LE_32(DWORD0(sgl_addr)); 6295 prior[PMCS_SGL_NCHUNKS-1].sglah = 6296 LE_32(DWORD1(sgl_addr)); 6297 prior[PMCS_SGL_NCHUNKS-1].sglen = 0; 6298 prior[PMCS_SGL_NCHUNKS-1].flags = 6299 LE_32(PMCS_DMASGL_EXTENSION); 6300 prior = sgl; 6301 tsc = 0; 6302 } 6303 sgl[tsc].sglal = LE_32(DWORD0(sg->dmac_laddress)); 6304 sgl[tsc].sglah = LE_32(DWORD1(sg->dmac_laddress)); 6305 sgl[tsc].sglen = LE_32(sg->dmac_size); 6306 sgl[tsc++].flags = 0; 6307 sg++; 6308 } 6309 return (0); 6310 } 6311 6312 /* 6313 * Unload DMA 6314 */ 6315 void 6316 pmcs_dma_unload(pmcs_hw_t *pwp, pmcs_cmd_t *sp) 6317 { 6318 pmcs_dmachunk_t *cp; 6319 6320 mutex_enter(&pwp->dma_lock); 6321 while ((cp = sp->cmd_clist) != NULL) { 6322 sp->cmd_clist = cp->nxt; 6323 cp->nxt = pwp->dma_freelist; 6324 pwp->dma_freelist = cp; 6325 } 6326 mutex_exit(&pwp->dma_lock); 6327 } 6328 6329 /* 6330 * Take a chunk of consistent memory that has just been allocated and inserted 6331 * into the cip indices and prepare it for DMA chunk usage and add it to the 6332 * freelist. 
6333 * 6334 * Called with dma_lock locked (except during attach when it's unnecessary) 6335 */ 6336 void 6337 pmcs_idma_chunks(pmcs_hw_t *pwp, pmcs_dmachunk_t *dcp, 6338 pmcs_chunk_t *pchunk, unsigned long lim) 6339 { 6340 unsigned long off, n; 6341 pmcs_dmachunk_t *np = dcp; 6342 pmcs_chunk_t *tmp_chunk; 6343 6344 if (pwp->dma_chunklist == NULL) { 6345 pwp->dma_chunklist = pchunk; 6346 } else { 6347 tmp_chunk = pwp->dma_chunklist; 6348 while (tmp_chunk->next) { 6349 tmp_chunk = tmp_chunk->next; 6350 } 6351 tmp_chunk->next = pchunk; 6352 } 6353 6354 /* 6355 * Install offsets into chunk lists. 6356 */ 6357 for (n = 0, off = 0; off < lim; off += PMCS_SGL_CHUNKSZ, n++) { 6358 np->chunks = (void *)&pchunk->addrp[off]; 6359 np->addr = pchunk->dma_addr + off; 6360 np->acc_handle = pchunk->acc_handle; 6361 np->dma_handle = pchunk->dma_handle; 6362 if ((off + PMCS_SGL_CHUNKSZ) < lim) { 6363 np = np->nxt; 6364 } 6365 } 6366 np->nxt = pwp->dma_freelist; 6367 pwp->dma_freelist = dcp; 6368 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, 6369 "added %lu DMA chunks ", n); 6370 } 6371 6372 /* 6373 * Change the value of the interrupt coalescing timer. This is done currently 6374 * only for I/O completions. If we're using the "auto clear" feature, it can 6375 * be turned back on when interrupt coalescing is turned off and must be 6376 * turned off when the coalescing timer is on. 6377 * NOTE: PMCS_MSIX_GENERAL and PMCS_OQ_IODONE are the same value. As long 6378 * as that's true, we don't need to distinguish between them. 6379 */ 6380 6381 void 6382 pmcs_set_intr_coal_timer(pmcs_hw_t *pwp, pmcs_coal_timer_adj_t adj) 6383 { 6384 if (adj == DECREASE_TIMER) { 6385 /* If the timer is already off, nothing to do. 
*/ 6386 if (pwp->io_intr_coal.timer_on == B_FALSE) { 6387 return; 6388 } 6389 6390 pwp->io_intr_coal.intr_coal_timer -= PMCS_COAL_TIMER_GRAN; 6391 6392 if (pwp->io_intr_coal.intr_coal_timer == 0) { 6393 /* Disable the timer */ 6394 pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_CONTROL, 0); 6395 6396 if (pwp->odb_auto_clear & (1 << PMCS_MSIX_IODONE)) { 6397 pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR, 6398 pwp->odb_auto_clear); 6399 } 6400 6401 pwp->io_intr_coal.timer_on = B_FALSE; 6402 pwp->io_intr_coal.max_io_completions = B_FALSE; 6403 pwp->io_intr_coal.num_intrs = 0; 6404 pwp->io_intr_coal.int_cleared = B_FALSE; 6405 pwp->io_intr_coal.num_io_completions = 0; 6406 6407 DTRACE_PROBE1(pmcs__intr__coalesce__timer__off, 6408 pmcs_io_intr_coal_t *, &pwp->io_intr_coal); 6409 } else { 6410 pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_TIMER, 6411 pwp->io_intr_coal.intr_coal_timer); 6412 } 6413 } else { 6414 /* 6415 * If the timer isn't on yet, do the setup for it now. 6416 */ 6417 if (pwp->io_intr_coal.timer_on == B_FALSE) { 6418 /* If auto clear is being used, turn it off. 
*/ 6419 if (pwp->odb_auto_clear & (1 << PMCS_MSIX_IODONE)) { 6420 pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR, 6421 (pwp->odb_auto_clear & 6422 ~(1 << PMCS_MSIX_IODONE))); 6423 } 6424 6425 pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_CONTROL, 6426 (1 << PMCS_MSIX_IODONE)); 6427 pwp->io_intr_coal.timer_on = B_TRUE; 6428 pwp->io_intr_coal.intr_coal_timer = 6429 PMCS_COAL_TIMER_GRAN; 6430 6431 DTRACE_PROBE1(pmcs__intr__coalesce__timer__on, 6432 pmcs_io_intr_coal_t *, &pwp->io_intr_coal); 6433 } else { 6434 pwp->io_intr_coal.intr_coal_timer += 6435 PMCS_COAL_TIMER_GRAN; 6436 } 6437 6438 if (pwp->io_intr_coal.intr_coal_timer > PMCS_MAX_COAL_TIMER) { 6439 pwp->io_intr_coal.intr_coal_timer = PMCS_MAX_COAL_TIMER; 6440 } 6441 6442 pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_TIMER, 6443 pwp->io_intr_coal.intr_coal_timer); 6444 } 6445 6446 /* 6447 * Adjust the interrupt threshold based on the current timer value 6448 */ 6449 pwp->io_intr_coal.intr_threshold = 6450 PMCS_INTR_THRESHOLD(PMCS_QUANTUM_TIME_USECS * 1000 / 6451 (pwp->io_intr_coal.intr_latency + 6452 (pwp->io_intr_coal.intr_coal_timer * 1000))); 6453 } 6454 6455 /* 6456 * Register Access functions 6457 */ 6458 uint32_t 6459 pmcs_rd_iqci(pmcs_hw_t *pwp, uint32_t qnum) 6460 { 6461 uint32_t iqci; 6462 6463 if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORKERNEL) != 6464 DDI_SUCCESS) { 6465 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 6466 "%s: ddi_dma_sync failed?", __func__); 6467 } 6468 6469 iqci = LE_32( 6470 ((uint32_t *)((void *)pwp->cip))[IQ_OFFSET(qnum) >> 2]); 6471 6472 return (iqci); 6473 } 6474 6475 uint32_t 6476 pmcs_rd_oqpi(pmcs_hw_t *pwp, uint32_t qnum) 6477 { 6478 uint32_t oqpi; 6479 6480 if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORKERNEL) != 6481 DDI_SUCCESS) { 6482 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 6483 "%s: ddi_dma_sync failed?", __func__); 6484 } 6485 6486 oqpi = LE_32( 6487 ((uint32_t *)((void *)pwp->cip))[OQ_OFFSET(qnum) >> 2]); 6488 6489 return (oqpi); 6490 } 6491 6492 uint32_t 6493 
pmcs_rd_gsm_reg(pmcs_hw_t *pwp, uint32_t off)
{
        uint32_t rv, newaxil, oldaxil;

        /*
         * GSM space is reached through an AXI translation window: the upper
         * bits of 'off' select the window (programmed into PMCS_AXI_TRANS)
         * and the low bits select the register within it.  axil_lock
         * serializes use of the shared window.
         */
        newaxil = off & ~GSM_BASE_MASK;
        off &= GSM_BASE_MASK;
        mutex_enter(&pwp->axil_lock);
        oldaxil = ddi_get32(pwp->top_acc_handle,
            &pwp->top_regs[PMCS_AXI_TRANS >> 2]);
        ddi_put32(pwp->top_acc_handle,
            &pwp->top_regs[PMCS_AXI_TRANS >> 2], newaxil);
        drv_usecwait(10);
        /* Read back to verify the window actually moved */
        if (ddi_get32(pwp->top_acc_handle,
            &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != newaxil) {
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                    "AXIL register update failed");
        }
        rv = ddi_get32(pwp->gsm_acc_handle, &pwp->gsm_regs[off >> 2]);
        /* Restore the previous window before dropping the lock */
        ddi_put32(pwp->top_acc_handle,
            &pwp->top_regs[PMCS_AXI_TRANS >> 2], oldaxil);
        drv_usecwait(10);
        if (ddi_get32(pwp->top_acc_handle,
            &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != oldaxil) {
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                    "AXIL register restore failed");
        }
        mutex_exit(&pwp->axil_lock);
        return (rv);
}

/*
 * Write a GSM register.  Same AXI translation window protocol as
 * pmcs_rd_gsm_reg(): point the window at the target region, verify,
 * perform the access, then restore the previous window setting.
 */
void
pmcs_wr_gsm_reg(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
        uint32_t newaxil, oldaxil;

        newaxil = off & ~GSM_BASE_MASK;
        off &= GSM_BASE_MASK;
        mutex_enter(&pwp->axil_lock);
        oldaxil = ddi_get32(pwp->top_acc_handle,
            &pwp->top_regs[PMCS_AXI_TRANS >> 2]);
        ddi_put32(pwp->top_acc_handle,
            &pwp->top_regs[PMCS_AXI_TRANS >> 2], newaxil);
        drv_usecwait(10);
        if (ddi_get32(pwp->top_acc_handle,
            &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != newaxil) {
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                    "AXIL register update failed");
        }
        ddi_put32(pwp->gsm_acc_handle, &pwp->gsm_regs[off >> 2], val);
        ddi_put32(pwp->top_acc_handle,
            &pwp->top_regs[PMCS_AXI_TRANS >> 2], oldaxil);
        drv_usecwait(10);
        if (ddi_get32(pwp->top_acc_handle,
            &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != oldaxil) {
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                    "AXIL register restore failed");
        }
        mutex_exit(&pwp->axil_lock);
}

/*
 * Read a top-unit register.  A few registers in this range actually live
 * in GSM space and are redirected through pmcs_rd_gsm_reg().
 */
uint32_t
pmcs_rd_topunit(pmcs_hw_t *pwp, uint32_t off)
{
        switch (off) {
        case PMCS_SPC_RESET:
        case PMCS_SPC_BOOT_STRAP:
        case PMCS_SPC_DEVICE_ID:
        case PMCS_DEVICE_REVISION:
                off = pmcs_rd_gsm_reg(pwp, off);
                break;
        default:
                off = ddi_get32(pwp->top_acc_handle,
                    &pwp->top_regs[off >> 2]);
                break;
        }
        return (off);
}

/*
 * Write a top-unit register, redirecting the GSM-resident ones.
 */
void
pmcs_wr_topunit(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
        switch (off) {
        case PMCS_SPC_RESET:
        case PMCS_DEVICE_REVISION:
                pmcs_wr_gsm_reg(pwp, off, val);
                break;
        default:
                ddi_put32(pwp->top_acc_handle, &pwp->top_regs[off >> 2], val);
                break;
        }
}

/* Read a message unit register */
uint32_t
pmcs_rd_msgunit(pmcs_hw_t *pwp, uint32_t off)
{
        return (ddi_get32(pwp->msg_acc_handle, &pwp->msg_regs[off >> 2]));
}

/* Read from the main MPI configuration table (at mpi_offset) */
uint32_t
pmcs_rd_mpi_tbl(pmcs_hw_t *pwp, uint32_t off)
{
        return (ddi_get32(pwp->mpi_acc_handle,
            &pwp->mpi_regs[(pwp->mpi_offset + off) >> 2]));
}

/* Read from the general status table (at mpi_gst_offset) */
uint32_t
pmcs_rd_gst_tbl(pmcs_hw_t *pwp, uint32_t off)
{
        return (ddi_get32(pwp->mpi_acc_handle,
            &pwp->mpi_regs[(pwp->mpi_gst_offset + off) >> 2]));
}

/* Read from the inbound queue configuration table */
uint32_t
pmcs_rd_iqc_tbl(pmcs_hw_t *pwp, uint32_t off)
{
        return (ddi_get32(pwp->mpi_acc_handle,
            &pwp->mpi_regs[(pwp->mpi_iqc_offset + off) >> 2]));
}

/* Read from the outbound queue configuration table */
uint32_t
pmcs_rd_oqc_tbl(pmcs_hw_t *pwp, uint32_t off)
{
        return (ddi_get32(pwp->mpi_acc_handle,
            &pwp->mpi_regs[(pwp->mpi_oqc_offset + off) >> 2]));
}

/* Read the IQPI register for queue 'qnum' via the per-queue offset table */
uint32_t
pmcs_rd_iqpi(pmcs_hw_t *pwp, uint32_t qnum)
{
        return (ddi_get32(pwp->mpi_acc_handle,
            &pwp->mpi_regs[pwp->iqpi_offset[qnum] >> 2]));
}

/* Read the OQCI register for queue 'qnum' via the per-queue offset table */
uint32_t
pmcs_rd_oqci(pmcs_hw_t *pwp, uint32_t qnum)
{
        return (ddi_get32(pwp->mpi_acc_handle,
            &pwp->mpi_regs[pwp->oqci_offset[qnum] >> 2]));
}

/* Write a message unit register */
void
pmcs_wr_msgunit(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
        ddi_put32(pwp->msg_acc_handle, &pwp->msg_regs[off >> 2], val);
}

/* Write to the main MPI configuration table */
void
pmcs_wr_mpi_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
        ddi_put32(pwp->mpi_acc_handle,
            &pwp->mpi_regs[(pwp->mpi_offset + off) >> 2], (val));
}

/* Write to the general status table */
void
pmcs_wr_gst_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
        ddi_put32(pwp->mpi_acc_handle,
            &pwp->mpi_regs[(pwp->mpi_gst_offset + off) >> 2], val);
}

/* Write to the inbound queue configuration table */
void
pmcs_wr_iqc_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
        ddi_put32(pwp->mpi_acc_handle,
            &pwp->mpi_regs[(pwp->mpi_iqc_offset + off) >> 2], val);
}

/* Write to the outbound queue configuration table */
void
pmcs_wr_oqc_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
        ddi_put32(pwp->mpi_acc_handle,
            &pwp->mpi_regs[(pwp->mpi_oqc_offset + off) >> 2], val);
}

/*
 * Update the host-memory (cip) copy of IQCI for queue 'qnum' and flush
 * it to the device (DDI_DMA_SYNC_FORDEV).
 */
void
pmcs_wr_iqci(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
{
        ((uint32_t *)((void *)pwp->cip))[IQ_OFFSET(qnum) >> 2] = val;
        if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORDEV) !=
            DDI_SUCCESS) {
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                    "%s: ddi_dma_sync failed?", __func__);
        }
}

/* Write the IQPI register for queue 'qnum' */
void
pmcs_wr_iqpi(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
{
        ddi_put32(pwp->mpi_acc_handle,
            &pwp->mpi_regs[pwp->iqpi_offset[qnum] >> 2], val);
}

/* Write the OQCI register for queue 'qnum' */
void
pmcs_wr_oqci(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
{
        ddi_put32(pwp->mpi_acc_handle,
            &pwp->mpi_regs[pwp->oqci_offset[qnum] >> 2], val);
}

/*
 * Update the host-memory (cip) copy of OQPI for queue 'qnum' and flush
 * it to the device (DDI_DMA_SYNC_FORDEV).
 */
void
pmcs_wr_oqpi(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
{
        ((uint32_t *)((void *)pwp->cip))[OQ_OFFSET(qnum) >> 2] = val;
        if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORDEV) !=
            DDI_SUCCESS) {
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                    "%s: ddi_dma_sync failed?", __func__);
        }
}
/*
 * Check the status value of an outbound IOMB and report anything bad
 */
void
pmcs_check_iomb_status(pmcs_hw_t *pwp, uint32_t *iomb)
{
        uint16_t opcode;
        int offset;        /* word index of the status field for this opcode */

        if (iomb == NULL) {
                return;
        }

        /* The opcode is in the low 12 bits of the first (little-endian) word */
        opcode = LE_32(iomb[0]) & 0xfff;

        switch (opcode) {
                /*
                 * The following have no status field, so ignore them
                 */
        case PMCOUT_ECHO:
        case PMCOUT_SAS_HW_EVENT:
        case PMCOUT_GET_DEVICE_HANDLE:
        case PMCOUT_SATA_EVENT:
        case PMCOUT_SSP_EVENT:
        case PMCOUT_DEVICE_HANDLE_ARRIVED:
        case PMCOUT_SMP_REQUEST_RECEIVED:
        case PMCOUT_GPIO:
        case PMCOUT_GPIO_EVENT:
        case PMCOUT_GET_TIME_STAMP:
        case PMCOUT_SKIP_ENTRIES:
        case PMCOUT_GET_NVMD_DATA:      /* Actually lower 16 bits of word 3 */
        case PMCOUT_SET_NVMD_DATA:      /* but ignore - we don't use these */
        case PMCOUT_DEVICE_HANDLE_REMOVED:
        case PMCOUT_SSP_REQUEST_RECEIVED:
                return;

        case PMCOUT_GENERAL_EVENT:
                offset = 1;
                break;

        case PMCOUT_SSP_COMPLETION:
        case PMCOUT_SMP_COMPLETION:
        case PMCOUT_DEVICE_REGISTRATION:
        case PMCOUT_DEREGISTER_DEVICE_HANDLE:
        case PMCOUT_SATA_COMPLETION:
        case PMCOUT_DEVICE_INFO:
        case PMCOUT_FW_FLASH_UPDATE:
        case PMCOUT_SSP_ABORT:
        case PMCOUT_SATA_ABORT:
        case PMCOUT_SAS_DIAG_MODE_START_END:
        case PMCOUT_SAS_HW_EVENT_ACK_ACK:
        case PMCOUT_SMP_ABORT:
        case PMCOUT_SET_DEVICE_STATE:
        case PMCOUT_GET_DEVICE_STATE:
        case PMCOUT_SET_DEVICE_INFO:
                offset = 2;
                break;

        case PMCOUT_LOCAL_PHY_CONTROL:
        case PMCOUT_SAS_DIAG_EXECUTE:
        case PMCOUT_PORT_CONTROL:
                offset = 3;
                break;

        case PMCOUT_GET_INFO:
        case PMCOUT_GET_VPD:
        case PMCOUT_SAS_ASSISTED_DISCOVERY_EVENT:
        case PMCOUT_SATA_ASSISTED_DISCOVERY_EVENT:
        case PMCOUT_SET_VPD:
        case PMCOUT_TWI:
                pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
                    "Got response for deprecated opcode", iomb);
                return;

        default:
                pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
                    "Got response for unknown opcode", iomb);
                return;
        }

        if (LE_32(iomb[offset]) != PMCOUT_STATUS_OK) {
                pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
                    "bad status on TAG_TYPE_NONE command", iomb);
        }
}

/*
 * Mark a target as gone and flush all of its queues.
 * Called with statlock held.
 */
void
pmcs_clear_xp(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
{
        _NOTE(ARGUNUSED(pwp));

        ASSERT(mutex_owned(&xp->statlock));

        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, "%s: Device 0x%p is gone.",
            __func__, (void *)xp);

        /*
         * Clear the dip now.  This keeps pmcs_remove_device from attempting
         * to call us on the same device while we're still flushing queues.
         * The only side effect is we can no longer update SM-HBA properties,
         * but this device is going away anyway, so no matter.
         */
        xp->dip = NULL;
        xp->smpd = NULL;
        xp->special_running = 0;
        xp->recovering = 0;
        xp->recover_wait = 0;
        xp->draining = 0;
        xp->new = 0;
        xp->assigned = 0;
        xp->dev_state = 0;
        xp->tagmap = 0;
        xp->dev_gone = 1;
        xp->event_recovery = 0;
        xp->dtype = NOTHING;
        xp->wq_recovery_tail = NULL;
        /* Don't clear xp->phy */
        /* Don't clear xp->actv_cnt */
        /* Don't clear xp->actv_pkts */

        /*
         * Flush all target queues
         */
        pmcs_flush_target_queues(pwp, xp, PMCS_TGT_ALL_QUEUES);
}

/*
 * Log a human-readable message for an SMP DISCOVER response function
 * result and return the raw result code unchanged.
 */
static int
pmcs_smp_function_result(pmcs_hw_t *pwp, smp_response_frame_t *srf)
{
        int result = srf->srf_result;

        switch (result) {
        case SMP_RES_UNKNOWN_FUNCTION:
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                    "%s: SMP DISCOVER Response "
                    "Function Result: Unknown SMP Function(0x%x)",
                    __func__, result);
                break;
        case SMP_RES_FUNCTION_FAILED:
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                    "%s: SMP DISCOVER Response "
                    "Function Result: SMP Function Failed(0x%x)",
                    __func__, result);
                break;
        case SMP_RES_INVALID_REQUEST_FRAME_LENGTH:
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                    "%s: SMP DISCOVER Response "
                    "Function Result: Invalid Request Frame Length(0x%x)",
                    __func__, result);
                break;
        case SMP_RES_INCOMPLETE_DESCRIPTOR_LIST:
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                    "%s: SMP DISCOVER Response "
                    "Function Result: Incomplete Descriptor List(0x%x)",
                    __func__, result);
                break;
        case SMP_RES_PHY_DOES_NOT_EXIST:
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                    "%s: SMP DISCOVER Response "
                    "Function Result: PHY does not exist(0x%x)",
                    __func__, result);
                break;
        case SMP_RES_PHY_VACANT:
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                    "%s: SMP DISCOVER Response "
                    "Function Result: PHY Vacant(0x%x)",
                    __func__, result);
                break;
        default:
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                    "%s: SMP DISCOVER Response "
                    "Function Result: (0x%x)",
                    __func__, result);
                break;
        }

        return (result);
}

/*
 * Do all the repetitive stuff necessary to setup for DMA
 *
 * pwp: Used for dip
 * dma_attr: ddi_dma_attr_t to use for the mapping
 * acch: ddi_acc_handle_t to use for the mapping
 * dmah: ddi_dma_handle_t to use
 * length: Amount of memory for mapping
 * kvap: Pointer filled in with kernel virtual address on successful return
 * dma_addr: Pointer filled in with DMA address on successful return
 *
 * Returns B_TRUE on success; on failure, both *acch and *dmah are NULL
 * and any partially-acquired resources have been released.
 */
boolean_t
pmcs_dma_setup(pmcs_hw_t *pwp, ddi_dma_attr_t *dma_attr, ddi_acc_handle_t *acch,
    ddi_dma_handle_t *dmah, size_t length, caddr_t *kvap, uint64_t *dma_addr)
{
        dev_info_t *dip = pwp->dip;
        ddi_dma_cookie_t cookie;
        size_t real_length;
        uint_t ddma_flag = DDI_DMA_CONSISTENT;
        uint_t ddabh_flag = DDI_DMA_CONSISTENT | DDI_DMA_RDWR;
        uint_t cookie_cnt;
        ddi_device_acc_attr_t mattr = {
                DDI_DEVICE_ATTR_V0,
                DDI_NEVERSWAP_ACC,
                DDI_STRICTORDER_ACC,
                DDI_DEFAULT_ACC
        };

        *acch = NULL;
        *dmah = NULL;

        if (ddi_dma_alloc_handle(dip, dma_attr, DDI_DMA_SLEEP, NULL, dmah) !=
            DDI_SUCCESS) {
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                    "Failed to allocate DMA handle");
                return (B_FALSE);
        }

        if (ddi_dma_mem_alloc(*dmah, length, &mattr, ddma_flag, DDI_DMA_SLEEP,
            NULL, kvap, &real_length, acch) != DDI_SUCCESS) {
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                    "Failed to allocate DMA mem");
                ddi_dma_free_handle(dmah);
                *dmah = NULL;
                return (B_FALSE);
        }

        /*
         * NOTE(review): the error paths below free the DMA handle before
         * freeing the memory; ddi_dma_mem_free(9F) takes only the access
         * handle, so this appears harmless — confirm against DDI docs.
         */
        if (ddi_dma_addr_bind_handle(*dmah, NULL, *kvap, real_length,
            ddabh_flag, DDI_DMA_SLEEP, NULL, &cookie, &cookie_cnt)
            != DDI_DMA_MAPPED) {
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Failed to bind DMA");
                ddi_dma_free_handle(dmah);
                ddi_dma_mem_free(acch);
                *dmah = NULL;
                *acch = NULL;
                return (B_FALSE);
        }

        /* Callers require a single physically-contiguous cookie */
        if (cookie_cnt != 1) {
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Multiple cookies");
                if (ddi_dma_unbind_handle(*dmah) != DDI_SUCCESS) {
                        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Condition "
                            "failed at %s():%d", __func__, __LINE__);
                }
                ddi_dma_free_handle(dmah);
                ddi_dma_mem_free(acch);
                *dmah = NULL;
                *acch = NULL;
                return (B_FALSE);
        }

        *dma_addr = cookie.dmac_laddress;

        return (B_TRUE);
}

/*
 * Flush requested queues for a particular target.  Called with statlock held
 */
void
pmcs_flush_target_queues(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt, uint8_t queues)
{
        pmcs_cmd_t *sp, *sp_next;
        pmcwork_t *pwrk;

        ASSERT(pwp != NULL);
        ASSERT(tgt != NULL);

        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, tgt,
            "%s: Flushing queues (%d) for target 0x%p", __func__,
            queues, (void *)tgt);

        /*
         * Commands on the wait queue (or the special queue below) don't have
         * work structures associated with them.  Each command is failed with
         * CMD_DEV_GONE and moved to the completion queue; wqlock is dropped
         * around the cq_lock acquisition to preserve lock ordering.
         */
        if (queues & PMCS_TGT_WAIT_QUEUE) {
                mutex_enter(&tgt->wqlock);
                while ((sp = STAILQ_FIRST(&tgt->wq)) != NULL) {
                        STAILQ_REMOVE(&tgt->wq, sp, pmcs_cmd, cmd_next);
                        pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, tgt,
                            "%s: Removing cmd 0x%p from wq for target 0x%p",
                            __func__, (void *)sp, (void *)tgt);
                        CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
                        CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
                        mutex_exit(&tgt->wqlock);
                        pmcs_dma_unload(pwp, sp);
                        mutex_enter(&pwp->cq_lock);
                        STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
                        mutex_exit(&pwp->cq_lock);
                        mutex_enter(&tgt->wqlock);
                }
                mutex_exit(&tgt->wqlock);
        }

        /*
         * Commands on the active queue will have work structures associated
         * with them.
         */
        if (queues & PMCS_TGT_ACTIVE_QUEUE) {
                mutex_enter(&tgt->aqlock);
                sp = STAILQ_FIRST(&tgt->aq);
                while (sp) {
                        sp_next = STAILQ_NEXT(sp, cmd_next);
                        pwrk = pmcs_tag2wp(pwp, sp->cmd_tag);

                        /*
                         * If we don't find a work structure, it's because
                         * the command is already complete.  If so, move on
                         * to the next one.
                         */
                        if (pwrk == NULL) {
                                pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
                                    "%s: Not removing cmd 0x%p (htag 0x%x) "
                                    "from aq", __func__, (void *)sp,
                                    sp->cmd_tag);
                                sp = sp_next;
                                continue;
                        }

                        STAILQ_REMOVE(&tgt->aq, sp, pmcs_cmd, cmd_next);
                        pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
                            "%s: Removing cmd 0x%p (htag 0x%x) from aq for "
                            "target 0x%p", __func__, (void *)sp, sp->cmd_tag,
                            (void *)tgt);
                        /*
                         * aqlock and statlock are dropped here because
                         * completion of the work structure may need to
                         * acquire other locks; both are retaken below.
                         */
                        mutex_exit(&tgt->aqlock);
                        mutex_exit(&tgt->statlock);
                        /*
                         * Mark the work structure as dead and complete it
                         */
                        pwrk->dead = 1;
                        CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
                        CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
                        pmcs_complete_work_impl(pwp, pwrk, NULL, 0);
                        pmcs_dma_unload(pwp, sp);
                        mutex_enter(&pwp->cq_lock);
                        STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
                        mutex_exit(&pwp->cq_lock);
                        mutex_enter(&tgt->aqlock);
                        mutex_enter(&tgt->statlock);
                        sp = sp_next;
                }
                mutex_exit(&tgt->aqlock);
        }

        if (queues & PMCS_TGT_SPECIAL_QUEUE) {
                while ((sp = STAILQ_FIRST(&tgt->sq)) != NULL) {
                        STAILQ_REMOVE(&tgt->sq, sp, pmcs_cmd, cmd_next);
                        pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
                            "%s: Removing cmd 0x%p from sq for target 0x%p",
                            __func__, (void *)sp, (void *)tgt);
                        CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
                        CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
                        pmcs_dma_unload(pwp, sp);
                        mutex_enter(&pwp->cq_lock);
                        STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
                        mutex_exit(&pwp->cq_lock);
                }
        }
}

/*
 * Complete a work structure according to its tag type: invoke the
 * callback, wake a waiter (copying 'amt' bytes of the IOMB into the
 * waiter's buffer first), or simply release the work structure.
 */
void
pmcs_complete_work_impl(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *iomb,
    size_t amt)
{
        switch (PMCS_TAG_TYPE(pwrk->htag)) {
        case PMCS_TAG_TYPE_CBACK:
        {
                pmcs_cb_t callback = (pmcs_cb_t)pwrk->ptr;
                (*callback)(pwp, pwrk, iomb);
                break;
        }
        case PMCS_TAG_TYPE_WAIT:
                if (pwrk->arg && iomb && amt) {
                        (void) memcpy(pwrk->arg, iomb, amt);
                }
                cv_signal(&pwrk->sleep_cv);
                mutex_exit(&pwrk->lock);
                break;
        case PMCS_TAG_TYPE_NONE:
#ifdef DEBUG
                pmcs_check_iomb_status(pwp, iomb);
#endif
                pmcs_pwork(pwp, pwrk);
                break;
        default:
                /*
                 * We will leak a structure here if we don't know
                 * what happened
                 */
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                    "%s: Unknown PMCS_TAG_TYPE (%x)",
                    __func__, PMCS_TAG_TYPE(pwrk->htag));
                break;
        }
}

/*
 * Determine if iport still has targets.  During detach(9E), if SCSA is
 * successful in its guarantee of tran_tgt_free(9E) before detach(9E),
 * this should always return B_FALSE.
 */
boolean_t
pmcs_iport_has_targets(pmcs_hw_t *pwp, pmcs_iport_t *iport)
{
        pmcs_xscsi_t *xp;
        int i;

        mutex_enter(&pwp->lock);

        if (!pwp->targets || !pwp->max_dev) {
                mutex_exit(&pwp->lock);
                return (B_FALSE);
        }

        /* Scan the whole target array for one attached to this iport */
        for (i = 0; i < pwp->max_dev; i++) {
                xp = pwp->targets[i];
                if ((xp == NULL) || (xp->phy == NULL) ||
                    (xp->phy->iport != iport)) {
                        continue;
                }

                mutex_exit(&pwp->lock);
                return (B_TRUE);
        }

        mutex_exit(&pwp->lock);
        return (B_FALSE);
}

/*
 * Tear down a target's synchronization objects and soft state and release
 * the hold on its iport.  Called with softstate lock held.
 */
void
pmcs_destroy_target(pmcs_xscsi_t *target)
{
        pmcs_hw_t *pwp = target->pwp;
        pmcs_iport_t *iport;

        ASSERT(pwp);
        ASSERT(mutex_owned(&pwp->lock));

        if (!target->ua) {
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, target,
                    "%s: target %p iport address is null",
                    __func__, (void *)target);
        }

        iport = pmcs_get_iport_by_ua(pwp, target->ua);
        if (iport == NULL) {
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, target,
                    "%s: no iport associated with tgt(0x%p)",
                    __func__, (void *)target);
                return;
        }

        pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, target,
            "%s: free target %p",
            __func__, (void *)target);
        if (target->ua) {
                strfree(target->ua);
        }

        mutex_destroy(&target->wqlock);
        mutex_destroy(&target->aqlock);
        mutex_destroy(&target->statlock);
        cv_destroy(&target->reset_cv);
        cv_destroy(&target->abort_cv);
        ddi_soft_state_bystr_fini(&target->lun_sstate);
        ddi_soft_state_bystr_free(iport->tgt_sstate, target->unit_address);
        /* Drop the reference taken by pmcs_get_iport_by_ua() above */
        pmcs_rele_iport(iport);
}

/*
 * pmcs_lock_phy_impl
 *
 * This function is what does the actual work for pmcs_lock_phy.  It will
 * lock all PHYs from phyp down in a top-down fashion.
 *
 * Locking notes:
 * 1. level starts from 0 for the PHY ("parent") that's passed in.  It is
 * not a reflection of the actual level of the PHY in the SAS topology.
 * 2. If parent is an expander, then parent is locked along with all its
 * descendents.
 * 3. Expander subsidiary PHYs at level 0 are not locked.  It is the
 * responsibility of the caller to individually lock expander subsidiary PHYs
 * at level 0 if necessary.
 * 4. Siblings at level 0 are not traversed due to the possibility that we're
 * locking a PHY on the dead list.  The siblings could be pointing to invalid
 * PHYs.  We don't lock siblings at level 0 anyway.
 */
static void
pmcs_lock_phy_impl(pmcs_phy_t *phyp, int level)
{
        pmcs_phy_t *tphyp;

        ASSERT((phyp->dtype == SAS) || (phyp->dtype == SATA) ||
            (phyp->dtype == EXPANDER) || (phyp->dtype == NOTHING));

        /*
         * Start walking the PHYs.
         */
        tphyp = phyp;
        while (tphyp) {
                /*
                 * If we're at the top level, only lock ourselves.  For anything
                 * at level > 0, traverse children while locking everything.
                 */
                if ((level > 0) || (tphyp == phyp)) {
                        pmcs_prt(tphyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, tphyp,
                            NULL, "%s: PHY 0x%p parent 0x%p path %s lvl %d",
                            __func__, (void *)tphyp, (void *)tphyp->parent,
                            tphyp->path, level);
                        mutex_enter(&tphyp->phy_lock);

                        /* Recurse into children before moving to siblings */
                        if (tphyp->children) {
                                pmcs_lock_phy_impl(tphyp->children, level + 1);
                        }
                }

                /* See locking note 4 above: no sibling traversal at level 0 */
                if (level == 0) {
                        return;
                }

                tphyp = tphyp->sibling;
        }
}

/*
 * pmcs_lock_phy
 *
 * This function is responsible for locking a PHY and all its descendents
 */
void
pmcs_lock_phy(pmcs_phy_t *phyp)
{
#ifdef DEBUG
        char *callername = NULL;
        ulong_t off;

        ASSERT(phyp != NULL);

        /* In DEBUG kernels, log who is taking the lock */
        callername = modgetsymname((uintptr_t)caller(), &off);

        if (callername == NULL) {
                pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
                    "%s: PHY 0x%p path %s caller: unknown", __func__,
                    (void *)phyp, phyp->path);
        } else {
                pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
                    "%s: PHY 0x%p path %s caller: %s+%lx", __func__,
                    (void *)phyp, phyp->path, callername, off);
        }
#else
        pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
            "%s: PHY 0x%p path %s", __func__, (void *)phyp, phyp->path);
#endif
        pmcs_lock_phy_impl(phyp, 0);
}

/*
 * pmcs_unlock_phy_impl
 *
 * Unlock all PHYs from phyp down in a bottom-up fashion.
 */
static void
pmcs_unlock_phy_impl(pmcs_phy_t *phyp, int level)
{
        pmcs_phy_t *phy_next;

        ASSERT((phyp->dtype == SAS) || (phyp->dtype == SATA) ||
            (phyp->dtype == EXPANDER) || (phyp->dtype == NOTHING));

        /*
         * Recurse down to the bottom PHYs
         */
        if (level == 0) {
                if (phyp->children) {
                        pmcs_unlock_phy_impl(phyp->children, level + 1);
                }
        } else {
                phy_next = phyp;
                while (phy_next) {
                        if (phy_next->children) {
                                pmcs_unlock_phy_impl(phy_next->children,
                                    level + 1);
                        }
                        phy_next = phy_next->sibling;
                }
        }

        /*
         * Iterate through PHYs unlocking all at level > 0 as well the top PHY
         */
        phy_next = phyp;
        while (phy_next) {
                if ((level > 0) || (phy_next == phyp)) {
                        pmcs_prt(phy_next->pwp, PMCS_PRT_DEBUG_PHY_LOCKING,
                            phy_next, NULL,
                            "%s: PHY 0x%p parent 0x%p path %s lvl %d",
                            __func__, (void *)phy_next,
                            (void *)phy_next->parent, phy_next->path, level);
                        mutex_exit(&phy_next->phy_lock);
                }

                /* Mirror of pmcs_lock_phy_impl: no sibling walk at level 0 */
                if (level == 0) {
                        return;
                }

                phy_next = phy_next->sibling;
        }
}

/*
 * pmcs_unlock_phy
 *
 * Unlock a PHY and all its descendents
 */
void
pmcs_unlock_phy(pmcs_phy_t *phyp)
{
#ifdef DEBUG
        char *callername = NULL;
        ulong_t off;

        ASSERT(phyp != NULL);

        /* In DEBUG kernels, log who is dropping the lock */
        callername = modgetsymname((uintptr_t)caller(), &off);

        if (callername == NULL) {
                pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
                    "%s: PHY 0x%p path %s caller: unknown", __func__,
                    (void *)phyp, phyp->path);
        } else {
                pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
                    "%s: PHY 0x%p path %s caller: %s+%lx", __func__,
                    (void *)phyp, phyp->path, callername, off);
        }
#else
        pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
            "%s: PHY 0x%p path %s", __func__, (void *)phyp, phyp->path);
#endif
        pmcs_unlock_phy_impl(phyp, 0);
}

/*
 * pmcs_get_root_phy
 *
 * For a given phy pointer return its root phy.
 * This function must only be called during discovery in order to ensure that
 * the chain of PHYs from phyp up to the root PHY doesn't change.
 */
pmcs_phy_t *
pmcs_get_root_phy(pmcs_phy_t *phyp)
{
        ASSERT(phyp);

        while (phyp) {
                if (IS_ROOT_PHY(phyp)) {
                        break;
                }
                phyp = phyp->parent;
        }

        return (phyp);
}

/*
 * pmcs_free_dma_chunklist
 *
 * Free DMA S/G chunk list
 */
void
pmcs_free_dma_chunklist(pmcs_hw_t *pwp)
{
        pmcs_chunk_t *pchunk;

        while (pwp->dma_chunklist) {
                pchunk = pwp->dma_chunklist;
                pwp->dma_chunklist = pwp->dma_chunklist->next;
                /* Unbind and release DDI resources before freeing the chunk */
                if (pchunk->dma_handle) {
                        if (ddi_dma_unbind_handle(pchunk->dma_handle) !=
                            DDI_SUCCESS) {
                                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                                    "Condition failed at %s():%d",
                                    __func__, __LINE__);
                        }
                        ddi_dma_free_handle(&pchunk->dma_handle);
                        ddi_dma_mem_free(&pchunk->acc_handle);
                }
                kmem_free(pchunk, sizeof (pmcs_chunk_t));
        }
}

/*
 * kmem cache constructor for PHY structures: initialize the lock and CV.
 */
/*ARGSUSED2*/
int
pmcs_phy_constructor(void *buf, void *arg, int kmflags)
{
        pmcs_hw_t *pwp = (pmcs_hw_t *)arg;
        pmcs_phy_t *phyp = (pmcs_phy_t *)buf;

        mutex_init(&phyp->phy_lock, NULL, MUTEX_DRIVER,
            DDI_INTR_PRI(pwp->intr_pri));
        cv_init(&phyp->abort_all_cv, NULL, CV_DRIVER, NULL);
        return (0);
}

/*
 * kmem cache destructor for PHY structures.
 */
/*ARGSUSED1*/
void
pmcs_phy_destructor(void *buf, void *arg)
{
        pmcs_phy_t *phyp = (pmcs_phy_t *)buf;

        cv_destroy(&phyp->abort_all_cv);
        mutex_destroy(&phyp->phy_lock);
}

/*
 * Free all PHYs from the kmem_cache starting at phyp as well as everything
 * on the dead_phys list.
 *
 * NOTE: This function does not free root PHYs as they are not allocated
 * from the kmem_cache.
 *
 * No PHY locks are acquired as this should only be called during DDI_DETACH
 * or soft reset (while pmcs interrupts are disabled).
 */
void
pmcs_free_all_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
{
        pmcs_phy_t *tphyp, *nphyp;

        if (phyp == NULL) {
                return;
        }

        /* Depth-first: free each subtree before moving to the next sibling */
        tphyp = phyp;
        while (tphyp) {
                nphyp = tphyp->sibling;

                if (tphyp->children) {
                        pmcs_free_all_phys(pwp, tphyp->children);
                        tphyp->children = NULL;
                }
                if (!IS_ROOT_PHY(tphyp)) {
                        kmem_cache_free(pwp->phy_cache, tphyp);
                }

                tphyp = nphyp;
        }

        /* Now drain the dead PHY list as well */
        tphyp = pwp->dead_phys;
        while (tphyp) {
                nphyp = tphyp->sibling;
                kmem_cache_free(pwp->phy_cache, tphyp);
                tphyp = nphyp;
        }
        pwp->dead_phys = NULL;
}

/*
 * Free a list of PHYs linked together by the sibling pointer back to the
 * kmem cache from whence they came.  This function does not recurse, so the
 * caller must ensure there are no children.
 */
void
pmcs_free_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
{
        pmcs_phy_t *next_phy;

        while (phyp) {
                next_phy = phyp->sibling;
                ASSERT(!mutex_owned(&phyp->phy_lock));
                kmem_cache_free(pwp->phy_cache, phyp);
                phyp = next_phy;
        }
}

/*
 * Make a copy of an existing PHY structure.  This is used primarily in
 * discovery to compare the contents of an existing PHY with what gets
 * reported back by an expander.
 *
 * This function must not be called from any context where sleeping is
 * not possible.
 *
 * The new PHY is returned unlocked.
 */
static pmcs_phy_t *
pmcs_clone_phy(pmcs_phy_t *orig_phy)
{
        pmcs_phy_t *local;

        local = kmem_cache_alloc(orig_phy->pwp->phy_cache, KM_SLEEP);

        /*
         * Go ahead and just copy everything...
         */
        *local = *orig_phy;

        /*
         * But the following must be set appropriately for this copy
         */
        local->sibling = NULL;
        local->children = NULL;
        mutex_init(&local->phy_lock, NULL, MUTEX_DRIVER,
            DDI_INTR_PRI(orig_phy->pwp->intr_pri));

        return (local);
}

/*
 * Fetch the FMA error status of a register access handle.
 * Returns DDI_FAILURE for a NULL handle.
 */
int
pmcs_check_acc_handle(ddi_acc_handle_t handle)
{
        ddi_fm_error_t de;

        if (handle == NULL) {
                return (DDI_FAILURE);
        }
        ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
        return (de.fme_status);
}

/*
 * Fetch the FMA error status of a DMA handle.
 * Returns DDI_FAILURE for a NULL handle.
 */
int
pmcs_check_dma_handle(ddi_dma_handle_t handle)
{
        ddi_fm_error_t de;

        if (handle == NULL) {
                return (DDI_FAILURE);
        }
        ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
        return (de.fme_status);
}


/*
 * Post an FMA device ereport of class DDI_FM_DEVICE.<detail> if the
 * instance is ereport-capable.
 */
void
pmcs_fm_ereport(pmcs_hw_t *pwp, char *detail)
{
        uint64_t ena;
        char buf[FM_MAX_CLASS];

        (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
        ena = fm_ena_generate(0, FM_ENA_FMT1);
        if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities)) {
                ddi_fm_ereport_post(pwp->dip, buf, ena, DDI_NOSLEEP,
                    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
        }
}

/*
 * Check every access and DMA handle the driver allocated in attach.
 * Returns 0 if all handles are fault-free, 1 otherwise.
 */
int
pmcs_check_acc_dma_handle(pmcs_hw_t *pwp)
{
        pmcs_chunk_t *pchunk;
        int i;

        /* check all acc & dma handles allocated in attach */
        if ((pmcs_check_acc_handle(pwp->pci_acc_handle) != DDI_SUCCESS) ||
            (pmcs_check_acc_handle(pwp->msg_acc_handle) != DDI_SUCCESS) ||
            (pmcs_check_acc_handle(pwp->top_acc_handle) != DDI_SUCCESS) ||
            (pmcs_check_acc_handle(pwp->mpi_acc_handle) != DDI_SUCCESS) ||
            (pmcs_check_acc_handle(pwp->gsm_acc_handle) != DDI_SUCCESS)) {
                goto check_failed;
        }

        for (i = 0; i < PMCS_NIQ; i++) {
                if ((pmcs_check_dma_handle(
                    pwp->iqp_handles[i]) != DDI_SUCCESS) ||
                    (pmcs_check_acc_handle(
                    pwp->iqp_acchdls[i]) != DDI_SUCCESS)) {
                        goto check_failed;
                }
        }

        for (i = 0; i < PMCS_NOQ; i++) {
                if ((pmcs_check_dma_handle(
                    pwp->oqp_handles[i]) != DDI_SUCCESS) ||
                    (pmcs_check_acc_handle(
                    pwp->oqp_acchdls[i]) != DDI_SUCCESS)) {
                        goto check_failed;
                }
        }

        if ((pmcs_check_dma_handle(pwp->cip_handles) != DDI_SUCCESS) ||
            (pmcs_check_acc_handle(pwp->cip_acchdls) != DDI_SUCCESS)) {
                goto check_failed;
        }

        /* fwlog and regdump resources are optional; check only if present */
        if (pwp->fwlog &&
            ((pmcs_check_dma_handle(pwp->fwlog_hndl) != DDI_SUCCESS) ||
            (pmcs_check_acc_handle(pwp->fwlog_acchdl) != DDI_SUCCESS))) {
                goto check_failed;
        }

        if (pwp->regdump_hndl && pwp->regdump_acchdl &&
            ((pmcs_check_dma_handle(pwp->regdump_hndl) != DDI_SUCCESS) ||
            (pmcs_check_acc_handle(pwp->regdump_acchdl)
            != DDI_SUCCESS))) {
                goto check_failed;
        }


        pchunk = pwp->dma_chunklist;
        while (pchunk) {
                if ((pmcs_check_acc_handle(pchunk->acc_handle)
                    != DDI_SUCCESS) ||
                    (pmcs_check_dma_handle(pchunk->dma_handle)
                    != DDI_SUCCESS)) {
                        goto check_failed;
                }
                pchunk = pchunk->next;
        }

        return (0);

check_failed:

        return (1);
}

/*
 * pmcs_handle_dead_phys
 *
 * If the PHY has no outstanding work associated with it, remove it from
 * the dead PHY list and free it.
 *
 * If pwp->ds_err_recovering or pwp->configuring is set, don't run.
 * This keeps routines that need to submit work to the chip from having to
 * hold PHY locks to ensure that PHYs don't disappear while they do their work.
 */
void
pmcs_handle_dead_phys(pmcs_hw_t *pwp)
{
        pmcs_phy_t *phyp, *nphyp, *pphyp;

        mutex_enter(&pwp->lock);
        mutex_enter(&pwp->config_lock);

        /* Bitwise OR is sufficient here: both operands are flags */
        if (pwp->configuring | pwp->ds_err_recovering) {
                mutex_exit(&pwp->config_lock);
                mutex_exit(&pwp->lock);
                return;
        }

        /*
         * Check every PHY in the dead PHY list
         */
        mutex_enter(&pwp->dead_phylist_lock);
        phyp = pwp->dead_phys;
        pphyp = NULL;   /* Set previous PHY to NULL */

        while (phyp != NULL) {
                pmcs_lock_phy(phyp);
                ASSERT(phyp->dead);

                nphyp = phyp->dead_next;

                /*
                 * Check for outstanding work
                 */
                if (phyp->ref_count > 0) {
                        pmcs_unlock_phy(phyp);
                        pphyp = phyp;   /* This PHY becomes "previous" */
                } else if (phyp->target) {
                        pmcs_unlock_phy(phyp);
                        pmcs_prt(pwp, PMCS_PRT_DEBUG1, phyp, phyp->target,
                            "%s: Not freeing PHY 0x%p: target 0x%p is not free",
                            __func__, (void *)phyp, (void *)phyp->target);
                        pphyp = phyp;
                } else {
                        /*
                         * No outstanding work or target references.  Remove it
                         * from the list and free it
                         */
                        pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
                            "%s: Freeing inactive dead PHY 0x%p @ %s "
                            "target = 0x%p", __func__, (void *)phyp,
                            phyp->path, (void *)phyp->target);
                        /*
                         * If pphyp is NULL, then phyp was the head of the list,
                         * so just reset the head to nphyp.  Otherwise, the
                         * previous PHY will now point to nphyp (the next PHY)
                         */
                        if (pphyp == NULL) {
                                pwp->dead_phys = nphyp;
                        } else {
                                pphyp->dead_next = nphyp;
                        }
                        /*
                         * If the target still points to this PHY, remove
                         * that linkage now.  (Defensive: target was NULL
                         * when tested above on this path.)
                         */
                        if (phyp->target) {
                                mutex_enter(&phyp->target->statlock);
                                if (phyp->target->phy == phyp) {
                                        phyp->target->phy = NULL;
                                }
                                mutex_exit(&phyp->target->statlock);
                        }
                        pmcs_unlock_phy(phyp);
                        kmem_cache_free(pwp->phy_cache, phyp);
                }

                phyp = nphyp;
        }

        mutex_exit(&pwp->dead_phylist_lock);
        mutex_exit(&pwp->config_lock);
        mutex_exit(&pwp->lock);
}

/* Take a reference on a PHY (outstanding-work counter) */
void
pmcs_inc_phy_ref_count(pmcs_phy_t *phyp)
{
        atomic_inc_32(&phyp->ref_count);
}

/* Release a reference on a PHY */
void
pmcs_dec_phy_ref_count(pmcs_phy_t *phyp)
{
        ASSERT(phyp->ref_count != 0);
        atomic_dec_32(&phyp->ref_count);
}

/*
 * pmcs_reap_dead_phy
 *
 * This function is called from pmcs_new_tport when we have a PHY
 * without a target pointer.  It's possible in that case that this PHY
 * may have a "brother" on the dead_phys list.  That is, it may be the same as
 * this one but with a different root PHY number (e.g. pp05 vs. pp04).  If
 * that's the case, update the dead PHY and this new PHY.  If that's not the
 * case, we should get a tran_tgt_init on this after it's reported to SCSA.
 *
 * Called with PHY locked.
 */
static void
pmcs_reap_dead_phy(pmcs_phy_t *phyp)
{
        pmcs_hw_t *pwp = phyp->pwp;
        pmcs_phy_t *ctmp;
        pmcs_iport_t *iport_cmp;

        ASSERT(mutex_owned(&phyp->phy_lock));

        /*
         * Check the dead PHYs list
         */
        mutex_enter(&pwp->dead_phylist_lock);
        ctmp = pwp->dead_phys;
        while (ctmp) {
                /*
                 * If the iport is NULL, compare against last_iport.
                 */
                if (ctmp->iport) {
                        iport_cmp = ctmp->iport;
                } else {
                        iport_cmp = ctmp->last_iport;
                }

                /* Must be the same iport and the same 8-byte SAS address */
                if ((iport_cmp != phyp->iport) ||
                    (memcmp((void *)&ctmp->sas_address[0],
                    (void *)&phyp->sas_address[0], 8))) {
                        ctmp = ctmp->dead_next;
                        continue;
                }

                /*
                 * Same SAS address on same iport.  Now check to see if
                 * the PHY path is the same with the possible exception
                 * of the root PHY number.
                 * The "5" is the string length of "pp00."
                 */
                if ((strnlen(phyp->path, 5) >= 5) &&
                    (strnlen(ctmp->path, 5) >= 5)) {
                        if (memcmp((void *)&phyp->path[5],
                            (void *)&ctmp->path[5],
                            strnlen(phyp->path, 32) - 5) == 0) {
                                break;
                        }
                }

                ctmp = ctmp->dead_next;
        }
        mutex_exit(&pwp->dead_phylist_lock);

        /*
         * Found a match.  Remove the target linkage and drop the
         * ref count on the old PHY.  Then, increment the ref count
         * on the new PHY to compensate.
         */
        if (ctmp) {
                pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
                    "%s: Found match in dead PHY list (0x%p) for new PHY %s",
                    __func__, (void *)ctmp, phyp->path);
                /*
                 * If there is a pointer to the target in the dead PHY, move
                 * all reference counts to the new PHY.
                 */
                if (ctmp->target) {
                        mutex_enter(&ctmp->target->statlock);
                        phyp->target = ctmp->target;

                        while (ctmp->ref_count != 0) {
                                pmcs_inc_phy_ref_count(phyp);
                                pmcs_dec_phy_ref_count(ctmp);
                        }
                        /*
                         * Update the target's linkage as well
                         */
                        phyp->target->phy = phyp;
                        phyp->target->dtype = phyp->dtype;
                        ctmp->target = NULL;
                        mutex_exit(&phyp->target->statlock);
                }
        }
}

/*
 * Add a PHY to an iport's PHY list, update the SM-HBA num-phys property,
 * and take a reference on the iport.  Called with iport lock held.
 */
void
pmcs_add_phy_to_iport(pmcs_iport_t *iport, pmcs_phy_t *phyp)
{
        ASSERT(mutex_owned(&iport->lock));
        ASSERT(phyp);
        ASSERT(!list_link_active(&phyp->list_node));
        iport->nphy++;
        list_insert_tail(&iport->phys, phyp);
        pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
            &iport->nphy);
        mutex_enter(&iport->refcnt_lock);
        iport->refcnt++;
        mutex_exit(&iport->refcnt_lock);
}

/*
 * Remove a PHY (or, if phyp is NULL, all PHYs) from an iport's PHY list,
 * releasing the corresponding iport reference(s).
 * Called with the iport lock held.
 */
void
pmcs_remove_phy_from_iport(pmcs_iport_t *iport, pmcs_phy_t *phyp)
{
        pmcs_phy_t *pptr, *next_pptr;

        ASSERT(mutex_owned(&iport->lock));

        /*
         * If phyp is NULL, remove all PHYs from the iport
         */
        if (phyp == NULL) {
                for (pptr = list_head(&iport->phys); pptr != NULL;
                    pptr = next_pptr) {
                        next_pptr = list_next(&iport->phys, pptr);
                        mutex_enter(&pptr->phy_lock);
                        pptr->iport = NULL;
                        pmcs_update_phy_pm_props(pptr, pptr->att_port_pm_tmp,
                            pptr->tgt_port_pm_tmp, B_FALSE);
                        mutex_exit(&pptr->phy_lock);
                        pmcs_rele_iport(iport);
                        list_remove(&iport->phys, pptr);
                        pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32,
                            PMCS_NUM_PHYS, &iport->nphy);
                }
                iport->nphy = 0;
                return;
        }

        ASSERT(phyp);
        ASSERT(iport->nphy > 0);
        ASSERT(list_link_active(&phyp->list_node));
        iport->nphy--;
        list_remove(&iport->phys, phyp);
pmcs_update_phy_pm_props(phyp, phyp->att_port_pm_tmp, 7878 phyp->tgt_port_pm_tmp, B_FALSE); 7879 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS, 7880 &iport->nphy); 7881 pmcs_rele_iport(iport); 7882 } 7883 7884 /* 7885 * This function checks to see if the target pointed to by phyp is still 7886 * correct. This is done by comparing the target's unit address with the 7887 * SAS address in phyp. 7888 * 7889 * Called with PHY locked and target statlock held 7890 */ 7891 static boolean_t 7892 pmcs_phy_target_match(pmcs_phy_t *phyp) 7893 { 7894 uint64_t wwn; 7895 char unit_address[PMCS_MAX_UA_SIZE]; 7896 boolean_t rval = B_FALSE; 7897 7898 ASSERT(phyp); 7899 ASSERT(phyp->target); 7900 ASSERT(mutex_owned(&phyp->phy_lock)); 7901 ASSERT(mutex_owned(&phyp->target->statlock)); 7902 7903 wwn = pmcs_barray2wwn(phyp->sas_address); 7904 (void) scsi_wwn_to_wwnstr(wwn, 1, unit_address); 7905 7906 if (memcmp((void *)unit_address, (void *)phyp->target->unit_address, 7907 strnlen(phyp->target->unit_address, PMCS_MAX_UA_SIZE)) == 0) { 7908 rval = B_TRUE; 7909 } 7910 7911 return (rval); 7912 } 7913 /* 7914 * Commands used to serialize SMP requests. 7915 * 7916 * The SPC only allows 2 SMP commands per SMP target: 1 cmd pending and 1 cmd 7917 * queued for the same SMP target. If a third SMP cmd is sent to the SPC for an 7918 * SMP target that already has a SMP cmd pending and one queued, then the 7919 * SPC responds with the ERROR_INTERNAL_SMP_RESOURCE response. 7920 * 7921 * Additionally, the SPC has an 8 entry deep cmd queue and the number of SMP 7922 * cmds that can be queued is controlled by the PORT_CONTROL IOMB. The 7923 * SPC default is 1 SMP command/port (iport). These 2 queued SMP cmds would 7924 * have to be for different SMP targets. 
The INTERNAL_SMP_RESOURCE error will 7925 * also be returned if a 2nd SMP cmd is sent to the controller when there is 7926 * already 1 SMP cmd queued for that port or if a 3rd SMP cmd is sent to the 7927 * queue if there are already 2 queued SMP cmds. 7928 */ 7929 void 7930 pmcs_smp_acquire(pmcs_iport_t *iport) 7931 { 7932 if (iport == NULL) { 7933 return; 7934 } 7935 7936 mutex_enter(&iport->smp_lock); 7937 while (iport->smp_active) { 7938 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL, 7939 "%s: SMP is active on thread 0x%p, waiting", __func__, 7940 (void *)iport->smp_active_thread); 7941 cv_wait(&iport->smp_cv, &iport->smp_lock); 7942 } 7943 iport->smp_active = B_TRUE; 7944 iport->smp_active_thread = curthread; 7945 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG3, NULL, NULL, 7946 "%s: SMP acquired by thread 0x%p", __func__, 7947 (void *)iport->smp_active_thread); 7948 mutex_exit(&iport->smp_lock); 7949 } 7950 7951 void 7952 pmcs_smp_release(pmcs_iport_t *iport) 7953 { 7954 if (iport == NULL) { 7955 return; 7956 } 7957 7958 mutex_enter(&iport->smp_lock); 7959 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG3, NULL, NULL, 7960 "%s: SMP released by thread 0x%p", __func__, (void *)curthread); 7961 iport->smp_active = B_FALSE; 7962 iport->smp_active_thread = NULL; 7963 cv_signal(&iport->smp_cv); 7964 mutex_exit(&iport->smp_lock); 7965 } 7966 7967 /* 7968 * Update a PHY's attached-port-pm and target-port-pm properties 7969 * 7970 * phyp: PHY whose properties are to be updated 7971 * 7972 * att_bv: Bit value of the attached-port-pm property to be updated in the 7973 * 64-bit holding area for the PHY. 7974 * 7975 * tgt_bv: Bit value of the target-port-pm property to update in the 64-bit 7976 * holding area for the PHY. 7977 * 7978 * prop_add_val: If TRUE, we're adding bits into the property value. 7979 * Otherwise, we're taking them out. Either way, the properties for this 7980 * PHY will be updated. 
7981 */ 7982 void 7983 pmcs_update_phy_pm_props(pmcs_phy_t *phyp, uint64_t att_bv, uint64_t tgt_bv, 7984 boolean_t prop_add_val) 7985 { 7986 if (prop_add_val) { 7987 /* 7988 * If the values are currently 0, then we're setting the 7989 * phymask for just this PHY as well. 7990 */ 7991 if (phyp->att_port_pm_tmp == 0) { 7992 phyp->att_port_pm = att_bv; 7993 phyp->tgt_port_pm = tgt_bv; 7994 } 7995 phyp->att_port_pm_tmp |= att_bv; 7996 phyp->tgt_port_pm_tmp |= tgt_bv; 7997 (void) snprintf(phyp->att_port_pm_str, PMCS_PM_MAX_NAMELEN, 7998 "%"PRIx64, phyp->att_port_pm_tmp); 7999 (void) snprintf(phyp->tgt_port_pm_str, PMCS_PM_MAX_NAMELEN, 8000 "%"PRIx64, phyp->tgt_port_pm_tmp); 8001 } else { 8002 phyp->att_port_pm_tmp &= ~att_bv; 8003 phyp->tgt_port_pm_tmp &= ~tgt_bv; 8004 if (phyp->att_port_pm_tmp) { 8005 (void) snprintf(phyp->att_port_pm_str, 8006 PMCS_PM_MAX_NAMELEN, "%"PRIx64, 8007 phyp->att_port_pm_tmp); 8008 } else { 8009 phyp->att_port_pm_str[0] = '\0'; 8010 phyp->att_port_pm = 0; 8011 } 8012 if (phyp->tgt_port_pm_tmp) { 8013 (void) snprintf(phyp->tgt_port_pm_str, 8014 PMCS_PM_MAX_NAMELEN, "%"PRIx64, 8015 phyp->tgt_port_pm_tmp); 8016 } else { 8017 phyp->tgt_port_pm_str[0] = '\0'; 8018 phyp->tgt_port_pm = 0; 8019 } 8020 } 8021 8022 if (phyp->target == NULL) { 8023 return; 8024 } 8025 8026 mutex_enter(&phyp->target->statlock); 8027 if (!list_is_empty(&phyp->target->lun_list)) { 8028 pmcs_lun_t *lunp; 8029 8030 lunp = list_head(&phyp->target->lun_list); 8031 while (lunp) { 8032 (void) scsi_device_prop_update_string(lunp->sd, 8033 SCSI_DEVICE_PROP_PATH, 8034 SCSI_ADDR_PROP_ATTACHED_PORT_PM, 8035 phyp->att_port_pm_str); 8036 (void) scsi_device_prop_update_string(lunp->sd, 8037 SCSI_DEVICE_PROP_PATH, 8038 SCSI_ADDR_PROP_TARGET_PORT_PM, 8039 phyp->tgt_port_pm_str); 8040 lunp = list_next(&phyp->target->lun_list, lunp); 8041 } 8042 } else if (phyp->target->smpd) { 8043 (void) smp_device_prop_update_string(phyp->target->smpd, 8044 SCSI_ADDR_PROP_ATTACHED_PORT_PM, 8045 
phyp->att_port_pm_str); 8046 (void) smp_device_prop_update_string(phyp->target->smpd, 8047 SCSI_ADDR_PROP_TARGET_PORT_PM, 8048 phyp->tgt_port_pm_str); 8049 } 8050 mutex_exit(&phyp->target->statlock); 8051 } 8052 8053 /* ARGSUSED */ 8054 void 8055 pmcs_deregister_device_work(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 8056 { 8057 pmcs_phy_t *pptr; 8058 8059 for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) { 8060 pmcs_lock_phy(pptr); 8061 if (pptr->deregister_wait) { 8062 pmcs_deregister_device(pwp, pptr); 8063 } 8064 pmcs_unlock_phy(pptr); 8065 } 8066 } 8067 8068 /* 8069 * pmcs_iport_active 8070 * 8071 * Mark this iport as active. Called with the iport lock held. 8072 */ 8073 static void 8074 pmcs_iport_active(pmcs_iport_t *iport) 8075 { 8076 ASSERT(mutex_owned(&iport->lock)); 8077 8078 iport->ua_state = UA_ACTIVE; 8079 iport->smp_active = B_FALSE; 8080 iport->smp_active_thread = NULL; 8081 } 8082 8083 /* ARGSUSED */ 8084 static void 8085 pmcs_tgtmap_activate_cb(void *tgtmap_priv, char *tgt_addr, 8086 scsi_tgtmap_tgt_type_t tgt_type, void **tgt_privp) 8087 { 8088 pmcs_iport_t *iport = (pmcs_iport_t *)tgtmap_priv; 8089 pmcs_hw_t *pwp = iport->pwp; 8090 pmcs_xscsi_t *target; 8091 8092 /* 8093 * Look up the target. If there is one, and it doesn't have a PHY 8094 * pointer, re-establish that linkage here. 8095 */ 8096 mutex_enter(&pwp->lock); 8097 target = pmcs_get_target(iport, tgt_addr, B_FALSE); 8098 mutex_exit(&pwp->lock); 8099 8100 /* 8101 * If we got a target, it will now have a PHY pointer and the PHY 8102 * will point to the target. The PHY will be locked, so we'll need 8103 * to unlock it. 8104 */ 8105 if (target) { 8106 pmcs_unlock_phy(target->phy); 8107 } 8108 8109 /* 8110 * Update config_restart_time so we don't try to restart discovery 8111 * while enumeration is still in progress. 
8112 */ 8113 mutex_enter(&pwp->config_lock); 8114 pwp->config_restart_time = ddi_get_lbolt() + 8115 drv_usectohz(PMCS_REDISCOVERY_DELAY); 8116 mutex_exit(&pwp->config_lock); 8117 } 8118 8119 /* ARGSUSED */ 8120 static boolean_t 8121 pmcs_tgtmap_deactivate_cb(void *tgtmap_priv, char *tgt_addr, 8122 scsi_tgtmap_tgt_type_t tgt_type, void *tgt_priv, 8123 scsi_tgtmap_deact_rsn_t tgt_deact_rsn) 8124 { 8125 pmcs_iport_t *iport = (pmcs_iport_t *)tgtmap_priv; 8126 pmcs_phy_t *phyp; 8127 boolean_t rediscover = B_FALSE; 8128 8129 ASSERT(iport); 8130 8131 phyp = pmcs_find_phy_by_sas_address(iport->pwp, iport, NULL, tgt_addr); 8132 if (phyp == NULL) { 8133 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL, 8134 "%s: Couldn't find PHY at %s", __func__, tgt_addr); 8135 return (rediscover); 8136 } 8137 /* phyp is locked */ 8138 8139 if (!phyp->reenumerate && phyp->configured) { 8140 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp, phyp->target, 8141 "%s: PHY @ %s is configured... re-enumerate", __func__, 8142 tgt_addr); 8143 phyp->reenumerate = 1; 8144 } 8145 8146 /* 8147 * Check to see if reenumerate is set, and if so, if we've reached our 8148 * maximum number of retries. 
8149 */ 8150 if (phyp->reenumerate) { 8151 if (phyp->enum_attempts == PMCS_MAX_REENUMERATE) { 8152 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp, 8153 phyp->target, 8154 "%s: No more enumeration attempts for %s", __func__, 8155 tgt_addr); 8156 } else { 8157 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp, 8158 phyp->target, "%s: Re-attempt enumeration for %s", 8159 __func__, tgt_addr); 8160 ++phyp->enum_attempts; 8161 rediscover = B_TRUE; 8162 } 8163 8164 phyp->reenumerate = 0; 8165 } 8166 8167 pmcs_unlock_phy(phyp); 8168 8169 mutex_enter(&iport->pwp->config_lock); 8170 iport->pwp->config_restart_time = ddi_get_lbolt() + 8171 drv_usectohz(PMCS_REDISCOVERY_DELAY); 8172 if (rediscover) { 8173 iport->pwp->config_restart = B_TRUE; 8174 } else if (iport->pwp->config_restart == B_TRUE) { 8175 /* 8176 * If we aren't asking for rediscovery because of this PHY, 8177 * check to see if we're already asking for it on behalf of 8178 * some other PHY. If so, we'll want to return TRUE, so reset 8179 * "rediscover" here. 8180 */ 8181 rediscover = B_TRUE; 8182 } 8183 8184 mutex_exit(&iport->pwp->config_lock); 8185 8186 return (rediscover); 8187 } 8188 8189 void 8190 pmcs_status_disposition(pmcs_phy_t *phyp, uint32_t status) 8191 { 8192 ASSERT(phyp); 8193 ASSERT(!mutex_owned(&phyp->phy_lock)); 8194 8195 if (phyp == NULL) { 8196 return; 8197 } 8198 8199 pmcs_lock_phy(phyp); 8200 8201 /* 8202 * XXX: Do we need to call this function from an SSP_EVENT? 
8203 */ 8204 8205 switch (status) { 8206 case PMCOUT_STATUS_NO_DEVICE: 8207 case PMCOUT_STATUS_ERROR_HW_TIMEOUT: 8208 case PMCOUT_STATUS_XFER_ERR_BREAK: 8209 case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY: 8210 case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED: 8211 case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION: 8212 case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK: 8213 case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION: 8214 case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: 8215 case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: 8216 case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION: 8217 case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR: 8218 case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED: 8219 case PMCOUT_STATUS_XFER_ERROR_RX_FRAME: 8220 case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT: 8221 case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE: 8222 case PMCOUT_STATUS_IO_PORT_IN_RESET: 8223 case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL: 8224 case PMCOUT_STATUS_IO_DS_IN_RECOVERY: 8225 case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: 8226 pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG, phyp, phyp->target, 8227 "%s: status = 0x%x for " SAS_ADDR_FMT ", reenumerate", 8228 __func__, status, SAS_ADDR_PRT(phyp->sas_address)); 8229 phyp->reenumerate = 1; 8230 break; 8231 8232 default: 8233 pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG, phyp, phyp->target, 8234 "%s: status = 0x%x for " SAS_ADDR_FMT ", no reenumeration", 8235 __func__, status, SAS_ADDR_PRT(phyp->sas_address)); 8236 break; 8237 } 8238 8239 pmcs_unlock_phy(phyp); 8240 } 8241 8242 /* 8243 * Add the list of PHYs pointed to by phyp to the dead_phys_list 8244 * 8245 * Called with all PHYs in the list locked 8246 */ 8247 static void 8248 pmcs_add_dead_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 8249 { 8250 mutex_enter(&pwp->dead_phylist_lock); 8251 while (phyp) { 8252 pmcs_phy_t *nxt = phyp->sibling; 8253 ASSERT(phyp->dead); 8254 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, NULL, 8255 "%s: dead PHY 0x%p (%s) (ref_count %d)", __func__, 8256 
(void *)phyp, phyp->path, phyp->ref_count); 8257 /* 8258 * Put this PHY on the dead PHY list for the watchdog to 8259 * clean up after any outstanding work has completed. 8260 */ 8261 phyp->dead_next = pwp->dead_phys; 8262 pwp->dead_phys = phyp; 8263 pmcs_unlock_phy(phyp); 8264 phyp = nxt; 8265 } 8266 mutex_exit(&pwp->dead_phylist_lock); 8267 } 8268