/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at
 * http://www.opensource.org/licenses/cddl1.txt.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004-2012 Emulex. All rights reserved.
 * Use is subject to license terms.
 * Copyright 2020 RackTop Systems, Inc.
 */

#include <emlxs.h>

/* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
EMLXS_MSG_DEF(EMLXS_SLI3_C);

/*
 * Forward declarations for the SLI2/SLI3-specific routines implemented
 * in this file.  The emlxs_sli3_* entry points below are exported to the
 * core driver through the emlxs_sli3_api table defined later in this file.
 */
static void		emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp,
				IOCBQ *iocbq);
static void		emlxs_sli3_handle_link_event(emlxs_hba_t *hba);
static void		emlxs_sli3_handle_ring_event(emlxs_hba_t *hba,
				int32_t ring_no, uint32_t ha_copy);
#ifdef SFCT_SUPPORT
static uint32_t		emlxs_fct_bde_setup(emlxs_port_t *port,
				emlxs_buf_t *sbp);
#endif /* SFCT_SUPPORT */

static uint32_t		emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);

static uint32_t		emlxs_disable_traffic_cop = 1;

static int		emlxs_sli3_map_hdw(emlxs_hba_t *hba);

static void		emlxs_sli3_unmap_hdw(emlxs_hba_t *hba);

static int32_t		emlxs_sli3_online(emlxs_hba_t *hba);

static void		emlxs_sli3_offline(emlxs_hba_t *hba,
				uint32_t reset_requested);

static uint32_t		emlxs_sli3_hba_reset(emlxs_hba_t *hba,
				uint32_t restart, uint32_t skip_post,
				uint32_t quiesce);

static void		emlxs_sli3_hba_kill(emlxs_hba_t *hba);
static void		emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba);
static uint32_t		emlxs_sli3_hba_init(emlxs_hba_t *hba);

static uint32_t		emlxs_sli2_bde_setup(emlxs_port_t *port,
				emlxs_buf_t *sbp);
static uint32_t		emlxs_sli3_bde_setup(emlxs_port_t *port,
				emlxs_buf_t *sbp);
static uint32_t		emlxs_sli2_fct_bde_setup(emlxs_port_t *port,
				emlxs_buf_t *sbp);
static uint32_t		emlxs_sli3_fct_bde_setup(emlxs_port_t *port,
				emlxs_buf_t *sbp);


static void		emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba,
				CHANNEL *rp, IOCBQ *iocb_cmd);


static uint32_t		emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba,
				MAILBOXQ *mbq, int32_t flg,
				uint32_t tmo);


#ifdef SFCT_SUPPORT
static uint32_t		emlxs_sli3_prep_fct_iocb(emlxs_port_t *port,
				emlxs_buf_t *cmd_sbp, int channel);

#endif /* SFCT_SUPPORT */

static uint32_t		emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port,
				emlxs_buf_t *sbp, int ring);

static uint32_t		emlxs_sli3_prep_ip_iocb(emlxs_port_t *port,
				emlxs_buf_t *sbp);

static uint32_t		emlxs_sli3_prep_els_iocb(emlxs_port_t *port,
				emlxs_buf_t *sbp);


static uint32_t		emlxs_sli3_prep_ct_iocb(emlxs_port_t *port,
				emlxs_buf_t *sbp);


static void		emlxs_sli3_poll_intr(emlxs_hba_t *hba);

static int32_t		emlxs_sli3_intx_intr(char *arg);
#ifdef MSI_SUPPORT
static uint32_t		emlxs_sli3_msi_intr(char *arg1, char *arg2);
#endif /* MSI_SUPPORT */

static void		emlxs_sli3_enable_intr(emlxs_hba_t *hba);

static void		emlxs_sli3_disable_intr(emlxs_hba_t *hba,
				uint32_t att);


static void		emlxs_handle_ff_error(emlxs_hba_t *hba);

static uint32_t		emlxs_handle_mb_event(emlxs_hba_t *hba);

static void		emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba);

static uint32_t		emlxs_mb_config_port(emlxs_hba_t *hba,
				MAILBOXQ *mbq, uint32_t sli_mode,
				uint32_t hbainit);
static void		emlxs_enable_latt(emlxs_hba_t *hba);

static uint32_t		emlxs_check_attention(emlxs_hba_t *hba);

static uint32_t		emlxs_get_attention(emlxs_hba_t *hba,
				int32_t msgid);
static void		emlxs_proc_attention(emlxs_hba_t *hba,
				uint32_t ha_copy);
/* static int		emlxs_handle_rcv_seq(emlxs_hba_t *hba, */
/*			CHANNEL *cp, IOCBQ *iocbq); */
/* static void		emlxs_update_HBQ_index(emlxs_hba_t *hba, */
/*			uint32_t hbq_id); */
/* static void		emlxs_hbq_free_all(emlxs_hba_t *hba, */
/*			uint32_t hbq_id); */
static uint32_t		emlxs_hbq_setup(emlxs_hba_t *hba,
				uint32_t hbq_id);
static void		emlxs_sli3_timer(emlxs_hba_t *hba);

static void		emlxs_sli3_poll_erratt(emlxs_hba_t *hba);

static uint32_t		emlxs_sli3_reg_did(emlxs_port_t *port,
				uint32_t did, SERV_PARM *param,
				emlxs_buf_t *sbp, fc_unsol_buf_t *ubp,
				IOCBQ *iocbq);

static uint32_t		emlxs_sli3_unreg_node(emlxs_port_t *port,
				NODELIST *node, emlxs_buf_t *sbp,
				fc_unsol_buf_t *ubp, IOCBQ *iocbq);


/*
 * Define SLI3 API functions.
 *
 * This table is the SLI3 implementation of the driver's SLI abstraction
 * layer (emlxs_sli_api_t).  NOTE(review): the entry order must match the
 * member order of the emlxs_sli_api_t structure declared in the driver
 * headers - do not reorder entries here.  When SFCT_SUPPORT is not
 * compiled in, the FCT prep slot is filled with NULL.
 */
emlxs_sli_api_t emlxs_sli3_api = {
	emlxs_sli3_map_hdw,
	emlxs_sli3_unmap_hdw,
	emlxs_sli3_online,
	emlxs_sli3_offline,
	emlxs_sli3_hba_reset,
	emlxs_sli3_hba_kill,
	emlxs_sli3_issue_iocb_cmd,
	emlxs_sli3_issue_mbox_cmd,
#ifdef SFCT_SUPPORT
	emlxs_sli3_prep_fct_iocb,
#else
	NULL,
#endif /* SFCT_SUPPORT */
	emlxs_sli3_prep_fcp_iocb,
	emlxs_sli3_prep_ip_iocb,
	emlxs_sli3_prep_els_iocb,
	emlxs_sli3_prep_ct_iocb,
	emlxs_sli3_poll_intr,
	emlxs_sli3_intx_intr,
	emlxs_sli3_msi_intr,
	emlxs_sli3_disable_intr,
	emlxs_sli3_timer,
	emlxs_sli3_poll_erratt,
	emlxs_sli3_reg_did,
	emlxs_sli3_unreg_node
};


/*
 * emlxs_sli3_online()
 *
 * This routine will start initialization of the SLI2/3 HBA.
187 */ 188 static int32_t 189 emlxs_sli3_online(emlxs_hba_t *hba) 190 { 191 emlxs_port_t *port = &PPORT; 192 emlxs_config_t *cfg; 193 emlxs_vpd_t *vpd; 194 MAILBOX *mb = NULL; 195 MAILBOXQ *mbq = NULL; 196 RING *rp; 197 CHANNEL *cp; 198 MATCHMAP *mp = NULL; 199 MATCHMAP *mp1 = NULL; 200 uint8_t *inptr; 201 uint8_t *outptr; 202 uint32_t status; 203 uint16_t i; 204 uint32_t j; 205 uint32_t read_rev_reset; 206 uint32_t key = 0; 207 uint32_t fw_check; 208 uint32_t kern_update = 0; 209 uint32_t rval = 0; 210 uint32_t offset; 211 uint8_t vpd_data[DMP_VPD_SIZE]; 212 uint32_t MaxRbusSize; 213 uint32_t MaxIbusSize; 214 uint32_t sli_mode; 215 uint32_t sli_mode_mask; 216 217 cfg = &CFG; 218 vpd = &VPD; 219 MaxRbusSize = 0; 220 MaxIbusSize = 0; 221 read_rev_reset = 0; 222 hba->chan_count = MAX_RINGS; 223 224 if (hba->bus_type == SBUS_FC) { 225 (void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba)); 226 } 227 228 /* Set the fw_check flag */ 229 fw_check = cfg[CFG_FW_CHECK].current; 230 231 if ((fw_check & 0x04) || 232 (hba->fw_flag & FW_UPDATE_KERNEL)) { 233 kern_update = 1; 234 } 235 236 hba->mbox_queue_flag = 0; 237 hba->sli.sli3.hc_copy = 0; 238 hba->fc_edtov = FF_DEF_EDTOV; 239 hba->fc_ratov = FF_DEF_RATOV; 240 hba->fc_altov = FF_DEF_ALTOV; 241 hba->fc_arbtov = FF_DEF_ARBTOV; 242 243 /* 244 * Get a buffer which will be used repeatedly for mailbox commands 245 */ 246 mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP); 247 248 mb = (MAILBOX *)mbq; 249 250 /* Initialize sli mode based on configuration parameter */ 251 switch (cfg[CFG_SLI_MODE].current) { 252 case 2: /* SLI2 mode */ 253 sli_mode = EMLXS_HBA_SLI2_MODE; 254 sli_mode_mask = EMLXS_SLI2_MASK; 255 break; 256 257 case 3: /* SLI3 mode */ 258 sli_mode = EMLXS_HBA_SLI3_MODE; 259 sli_mode_mask = EMLXS_SLI3_MASK; 260 break; 261 262 case 0: /* Best available */ 263 case 1: /* Best available */ 264 default: 265 if (hba->model_info.sli_mask & EMLXS_SLI3_MASK) { 266 sli_mode = EMLXS_HBA_SLI3_MODE; 267 sli_mode_mask = 
EMLXS_SLI3_MASK; 268 } else if (hba->model_info.sli_mask & EMLXS_SLI2_MASK) { 269 sli_mode = EMLXS_HBA_SLI2_MODE; 270 sli_mode_mask = EMLXS_SLI2_MASK; 271 } else { 272 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 273 "No SLI mode available."); 274 rval = EIO; 275 goto failed; 276 } 277 break; 278 } 279 /* SBUS adapters only available in SLI2 */ 280 if (hba->bus_type == SBUS_FC) { 281 sli_mode = EMLXS_HBA_SLI2_MODE; 282 sli_mode_mask = EMLXS_SLI2_MASK; 283 } 284 285 reset: 286 /* Reset & Initialize the adapter */ 287 if (emlxs_sli3_hba_init(hba)) { 288 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 289 "Unable to init hba."); 290 291 rval = EIO; 292 goto failed; 293 } 294 295 #ifdef FMA_SUPPORT 296 /* Access handle validation */ 297 if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle) 298 != DDI_FM_OK) || 299 (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle) 300 != DDI_FM_OK) || 301 (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle) 302 != DDI_FM_OK)) { 303 EMLXS_MSGF(EMLXS_CONTEXT, 304 &emlxs_invalid_access_handle_msg, NULL); 305 306 rval = EIO; 307 goto failed; 308 } 309 #endif /* FMA_SUPPORT */ 310 311 /* Check for PEGASUS (This is a special case) */ 312 /* We need to check for dual channel adapter */ 313 if (hba->model_info.vendor_id == PCI_VENDOR_ID_EMULEX && 314 hba->model_info.device_id == PCI_DEVICE_ID_PEGASUS) { 315 /* Try to determine if this is a DC adapter */ 316 if (emlxs_get_max_sram(hba, &MaxRbusSize, &MaxIbusSize) == 0) { 317 if (MaxRbusSize == REDUCED_SRAM_CFG) { 318 /* LP9802DC */ 319 for (i = 1; i < emlxs_pci_model_count; i++) { 320 if (emlxs_pci_model[i].id == LP9802DC) { 321 bcopy(&emlxs_pci_model[i], 322 &hba->model_info, 323 sizeof (emlxs_model_t)); 324 break; 325 } 326 } 327 } else if (hba->model_info.id != LP9802) { 328 /* LP9802 */ 329 for (i = 1; i < emlxs_pci_model_count; i++) { 330 if (emlxs_pci_model[i].id == LP9802) { 331 bcopy(&emlxs_pci_model[i], 332 &hba->model_info, 333 sizeof (emlxs_model_t)); 
334 break; 335 } 336 } 337 } 338 } 339 } 340 341 /* 342 * Setup and issue mailbox READ REV command 343 */ 344 vpd->opFwRev = 0; 345 vpd->postKernRev = 0; 346 vpd->sli1FwRev = 0; 347 vpd->sli2FwRev = 0; 348 vpd->sli3FwRev = 0; 349 vpd->sli4FwRev = 0; 350 351 vpd->postKernName[0] = 0; 352 vpd->opFwName[0] = 0; 353 vpd->sli1FwName[0] = 0; 354 vpd->sli2FwName[0] = 0; 355 vpd->sli3FwName[0] = 0; 356 vpd->sli4FwName[0] = 0; 357 358 vpd->opFwLabel[0] = 0; 359 vpd->sli1FwLabel[0] = 0; 360 vpd->sli2FwLabel[0] = 0; 361 vpd->sli3FwLabel[0] = 0; 362 vpd->sli4FwLabel[0] = 0; 363 364 /* Sanity check */ 365 if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) { 366 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 367 "Adapter / SLI mode mismatch mask:x%x", 368 hba->model_info.sli_mask); 369 370 rval = EIO; 371 goto failed; 372 } 373 374 EMLXS_STATE_CHANGE(hba, FC_INIT_REV); 375 emlxs_mb_read_rev(hba, mbq, 0); 376 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) { 377 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 378 "Unable to read rev. Mailbox cmd=%x status=%x", 379 mb->mbxCommand, mb->mbxStatus); 380 381 rval = EIO; 382 goto failed; 383 } 384 385 if (mb->un.varRdRev.rr == 0) { 386 /* Old firmware */ 387 if (read_rev_reset == 0) { 388 read_rev_reset = 1; 389 390 goto reset; 391 } else { 392 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 393 "Outdated firmware detected."); 394 } 395 396 vpd->rBit = 0; 397 } else { 398 if (mb->un.varRdRev.un.b.ProgType != FUNC_FIRMWARE) { 399 if (read_rev_reset == 0) { 400 read_rev_reset = 1; 401 402 goto reset; 403 } else { 404 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 405 "Non-operational firmware detected. 
" 406 "type=%x", 407 mb->un.varRdRev.un.b.ProgType); 408 } 409 } 410 411 vpd->rBit = 1; 412 vpd->sli1FwRev = mb->un.varRdRev.sliFwRev1; 413 bcopy((char *)mb->un.varRdRev.sliFwName1, vpd->sli1FwLabel, 414 16); 415 vpd->sli2FwRev = mb->un.varRdRev.sliFwRev2; 416 bcopy((char *)mb->un.varRdRev.sliFwName2, vpd->sli2FwLabel, 417 16); 418 419 /* 420 * Lets try to read the SLI3 version 421 * Setup and issue mailbox READ REV(v3) command 422 */ 423 EMLXS_STATE_CHANGE(hba, FC_INIT_REV); 424 425 /* Reuse mbq from previous mbox */ 426 bzero(mbq, sizeof (MAILBOXQ)); 427 428 emlxs_mb_read_rev(hba, mbq, 1); 429 430 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != 431 MBX_SUCCESS) { 432 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 433 "Unable to read rev (v3). Mailbox cmd=%x status=%x", 434 mb->mbxCommand, mb->mbxStatus); 435 436 rval = EIO; 437 goto failed; 438 } 439 440 if (mb->un.varRdRev.rf3) { 441 /* 442 * vpd->sli2FwRev = mb->un.varRdRev.sliFwRev1; 443 * Not needed 444 */ 445 vpd->sli3FwRev = mb->un.varRdRev.sliFwRev2; 446 bcopy((char *)mb->un.varRdRev.sliFwName2, 447 vpd->sli3FwLabel, 16); 448 } 449 } 450 451 if ((sli_mode == EMLXS_HBA_SLI3_MODE) && (vpd->sli3FwRev == 0)) { 452 if (vpd->sli2FwRev) { 453 sli_mode = EMLXS_HBA_SLI2_MODE; 454 sli_mode_mask = EMLXS_SLI2_MASK; 455 } else { 456 sli_mode = 0; 457 sli_mode_mask = 0; 458 } 459 } 460 461 else if ((sli_mode == EMLXS_HBA_SLI2_MODE) && (vpd->sli2FwRev == 0)) { 462 if (vpd->sli3FwRev) { 463 sli_mode = EMLXS_HBA_SLI3_MODE; 464 sli_mode_mask = EMLXS_SLI3_MASK; 465 } else { 466 sli_mode = 0; 467 sli_mode_mask = 0; 468 } 469 } 470 471 if (!(hba->model_info.sli_mask & sli_mode_mask)) { 472 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 473 "Firmware not available. 
sli-mode=%d", 474 cfg[CFG_SLI_MODE].current); 475 476 rval = EIO; 477 goto failed; 478 } 479 480 /* Save information as VPD data */ 481 vpd->postKernRev = mb->un.varRdRev.postKernRev; 482 vpd->opFwRev = mb->un.varRdRev.opFwRev; 483 bcopy((char *)mb->un.varRdRev.opFwName, vpd->opFwLabel, 16); 484 vpd->biuRev = mb->un.varRdRev.biuRev; 485 vpd->smRev = mb->un.varRdRev.smRev; 486 vpd->smFwRev = mb->un.varRdRev.un.smFwRev; 487 vpd->endecRev = mb->un.varRdRev.endecRev; 488 vpd->fcphHigh = mb->un.varRdRev.fcphHigh; 489 vpd->fcphLow = mb->un.varRdRev.fcphLow; 490 vpd->feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 491 vpd->feaLevelLow = mb->un.varRdRev.feaLevelLow; 492 493 /* Decode FW names */ 494 emlxs_decode_version(vpd->postKernRev, vpd->postKernName, 495 sizeof (vpd->postKernName)); 496 emlxs_decode_version(vpd->opFwRev, vpd->opFwName, 497 sizeof (vpd->opFwName)); 498 emlxs_decode_version(vpd->sli1FwRev, vpd->sli1FwName, 499 sizeof (vpd->sli1FwName)); 500 emlxs_decode_version(vpd->sli2FwRev, vpd->sli2FwName, 501 sizeof (vpd->sli2FwName)); 502 emlxs_decode_version(vpd->sli3FwRev, vpd->sli3FwName, 503 sizeof (vpd->sli3FwName)); 504 emlxs_decode_version(vpd->sli4FwRev, vpd->sli4FwName, 505 sizeof (vpd->sli4FwName)); 506 507 /* Decode FW labels */ 508 emlxs_decode_label(vpd->opFwLabel, vpd->opFwLabel, 1, 509 sizeof (vpd->opFwLabel)); 510 emlxs_decode_label(vpd->sli1FwLabel, vpd->sli1FwLabel, 1, 511 sizeof (vpd->sli1FwLabel)); 512 emlxs_decode_label(vpd->sli2FwLabel, vpd->sli2FwLabel, 1, 513 sizeof (vpd->sli2FwLabel)); 514 emlxs_decode_label(vpd->sli3FwLabel, vpd->sli3FwLabel, 1, 515 sizeof (vpd->sli3FwLabel)); 516 emlxs_decode_label(vpd->sli4FwLabel, vpd->sli4FwLabel, 1, 517 sizeof (vpd->sli4FwLabel)); 518 519 /* Reuse mbq from previous mbox */ 520 bzero(mbq, sizeof (MAILBOXQ)); 521 522 key = emlxs_get_key(hba, mbq); 523 524 /* Get adapter VPD information */ 525 offset = 0; 526 bzero(vpd_data, sizeof (vpd_data)); 527 vpd->port_index = (uint32_t)-1; 528 529 while (offset < 
DMP_VPD_SIZE) { 530 /* Reuse mbq from previous mbox */ 531 bzero(mbq, sizeof (MAILBOXQ)); 532 533 emlxs_mb_dump_vpd(hba, mbq, offset); 534 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != 535 MBX_SUCCESS) { 536 /* 537 * Let it go through even if failed. 538 * Not all adapter's have VPD info and thus will 539 * fail here. This is not a problem 540 */ 541 542 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 543 "No VPD found. offset=%x status=%x", offset, 544 mb->mbxStatus); 545 break; 546 } else { 547 if (mb->un.varDmp.ra == 1) { 548 uint32_t *lp1, *lp2; 549 uint32_t bsize; 550 uint32_t wsize; 551 552 /* 553 * mb->un.varDmp.word_cnt is actually byte 554 * count for the dump reply 555 */ 556 bsize = mb->un.varDmp.word_cnt; 557 558 /* Stop if no data was received */ 559 if (bsize == 0) { 560 break; 561 } 562 563 /* Check limit on byte size */ 564 bsize = (bsize > 565 (sizeof (vpd_data) - offset)) ? 566 (sizeof (vpd_data) - offset) : bsize; 567 568 /* 569 * Convert size from bytes to words with 570 * minimum of 1 word 571 */ 572 wsize = (bsize > 4) ? (bsize >> 2) : 1; 573 574 /* 575 * Transfer data into vpd_data buffer one 576 * word at a time 577 */ 578 lp1 = (uint32_t *)&mb->un.varDmp.resp_offset; 579 lp2 = (uint32_t *)&vpd_data[offset]; 580 581 for (i = 0; i < wsize; i++) { 582 status = *lp1++; 583 *lp2++ = BE_SWAP32(status); 584 } 585 586 /* Increment total byte count saved */ 587 offset += (wsize << 2); 588 589 /* 590 * Stop if less than a full transfer was 591 * received 592 */ 593 if (wsize < DMP_VPD_DUMP_WCOUNT) { 594 break; 595 } 596 597 } else { 598 EMLXS_MSGF(EMLXS_CONTEXT, 599 &emlxs_init_debug_msg, 600 "No VPD acknowledgment. offset=%x", 601 offset); 602 break; 603 } 604 } 605 606 } 607 608 if (vpd_data[0]) { 609 (void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data, offset); 610 611 /* 612 * If there is a VPD part number, and it does not 613 * match the current default HBA model info, 614 * replace the default data with an entry that 615 * does match. 
616 * 617 * After emlxs_parse_vpd model holds the VPD value 618 * for V2 and part_num hold the value for PN. These 619 * 2 values are NOT necessarily the same. 620 */ 621 622 rval = 0; 623 if ((vpd->model[0] != 0) && 624 (strcmp(&vpd->model[0], hba->model_info.model) != 0)) { 625 626 /* First scan for a V2 match */ 627 628 for (i = 1; i < emlxs_pci_model_count; i++) { 629 if (strcmp(&vpd->model[0], 630 emlxs_pci_model[i].model) == 0) { 631 bcopy(&emlxs_pci_model[i], 632 &hba->model_info, 633 sizeof (emlxs_model_t)); 634 rval = 1; 635 break; 636 } 637 } 638 } 639 640 if (!rval && (vpd->part_num[0] != 0) && 641 (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) { 642 643 /* Next scan for a PN match */ 644 645 for (i = 1; i < emlxs_pci_model_count; i++) { 646 if (strcmp(&vpd->part_num[0], 647 emlxs_pci_model[i].model) == 0) { 648 bcopy(&emlxs_pci_model[i], 649 &hba->model_info, 650 sizeof (emlxs_model_t)); 651 break; 652 } 653 } 654 } 655 656 /* 657 * Now lets update hba->model_info with the real 658 * VPD data, if any. 659 */ 660 661 /* 662 * Replace the default model description with vpd data 663 */ 664 if (vpd->model_desc[0] != 0) { 665 (void) strncpy(hba->model_info.model_desc, 666 vpd->model_desc, 667 (sizeof (hba->model_info.model_desc)-1)); 668 } 669 670 /* Replace the default model with vpd data */ 671 if (vpd->model[0] != 0) { 672 (void) strncpy(hba->model_info.model, vpd->model, 673 (sizeof (hba->model_info.model)-1)); 674 } 675 676 /* Replace the default program types with vpd data */ 677 if (vpd->prog_types[0] != 0) { 678 emlxs_parse_prog_types(hba, vpd->prog_types); 679 } 680 } 681 682 /* 683 * Since the adapter model may have changed with the vpd data 684 * lets double check if adapter is not supported 685 */ 686 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) { 687 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 688 "Unsupported adapter found. 
" 689 "Id:%d Vendor id:0x%x Device id:0x%x SSDID:0x%x " 690 "Model:%s", hba->model_info.id, hba->model_info.vendor_id, 691 hba->model_info.device_id, hba->model_info.ssdid, 692 hba->model_info.model); 693 694 rval = EIO; 695 goto failed; 696 } 697 698 /* Read the adapter's wakeup parms */ 699 (void) emlxs_read_wakeup_parms(hba, &hba->wakeup_parms, 1); 700 emlxs_decode_version(hba->wakeup_parms.u0.boot_bios_wd[0], 701 vpd->boot_version, sizeof (vpd->boot_version)); 702 703 /* Get fcode version property */ 704 emlxs_get_fcode_version(hba); 705 706 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 707 "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev, 708 vpd->opFwRev, vpd->sli1FwRev); 709 710 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 711 "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev, 712 vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh); 713 714 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 715 "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version); 716 717 /* 718 * If firmware checking is enabled and the adapter model indicates 719 * a firmware image, then perform firmware version check 720 */ 721 hba->fw_flag = 0; 722 hba->fw_timer = 0; 723 724 if (((fw_check & 0x1) && 725 (hba->model_info.flags & EMLXS_ORACLE_BRANDED) && 726 hba->model_info.fwid) || ((fw_check & 0x2) && 727 hba->model_info.fwid)) { 728 emlxs_firmware_t *fw; 729 730 /* Find firmware image indicated by adapter model */ 731 fw = NULL; 732 for (i = 0; i < emlxs_fw_count; i++) { 733 if (emlxs_fw_table[i].id == hba->model_info.fwid) { 734 fw = &emlxs_fw_table[i]; 735 break; 736 } 737 } 738 739 /* 740 * If the image was found, then verify current firmware 741 * versions of adapter 742 */ 743 if (fw) { 744 if (!kern_update && 745 ((fw->kern && (vpd->postKernRev != fw->kern)) || 746 (fw->stub && (vpd->opFwRev != fw->stub)))) { 747 748 hba->fw_flag |= FW_UPDATE_NEEDED; 749 750 } else if ((fw->kern && (vpd->postKernRev != 751 fw->kern)) || 752 (fw->stub && 
(vpd->opFwRev != fw->stub)) || 753 (fw->sli1 && (vpd->sli1FwRev != fw->sli1)) || 754 (fw->sli2 && (vpd->sli2FwRev != fw->sli2)) || 755 (fw->sli3 && (vpd->sli3FwRev != fw->sli3)) || 756 (fw->sli4 && (vpd->sli4FwRev != fw->sli4))) { 757 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 758 "Firmware update needed. " 759 "Updating. id=%d fw=%d", 760 hba->model_info.id, hba->model_info.fwid); 761 762 #ifdef MODFW_SUPPORT 763 /* 764 * Load the firmware image now 765 * If MODFW_SUPPORT is not defined, the 766 * firmware image will already be defined 767 * in the emlxs_fw_table 768 */ 769 emlxs_fw_load(hba, fw); 770 #endif /* MODFW_SUPPORT */ 771 772 if (fw->image && fw->size) { 773 uint32_t rc; 774 775 rc = emlxs_fw_download(hba, 776 (char *)fw->image, fw->size, 0); 777 if ((rc != FC_SUCCESS) && 778 (rc != EMLXS_REBOOT_REQUIRED)) { 779 EMLXS_MSGF(EMLXS_CONTEXT, 780 &emlxs_init_msg, 781 "Firmware update failed."); 782 hba->fw_flag |= 783 FW_UPDATE_NEEDED; 784 } 785 #ifdef MODFW_SUPPORT 786 /* 787 * Unload the firmware image from 788 * kernel memory 789 */ 790 emlxs_fw_unload(hba, fw); 791 #endif /* MODFW_SUPPORT */ 792 793 fw_check = 0; 794 795 goto reset; 796 } 797 798 hba->fw_flag |= FW_UPDATE_NEEDED; 799 800 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 801 "Firmware image unavailable."); 802 } else { 803 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 804 "Firmware update not needed."); 805 } 806 } else { 807 /* This should not happen */ 808 809 /* 810 * This means either the adapter database is not 811 * correct or a firmware image is missing from the 812 * compile 813 */ 814 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 815 "Firmware image unavailable. 
id=%d fw=%d", 816 hba->model_info.id, hba->model_info.fwid); 817 } 818 } 819 820 /* 821 * Add our interrupt routine to kernel's interrupt chain & enable it 822 * If MSI is enabled this will cause Solaris to program the MSI address 823 * and data registers in PCI config space 824 */ 825 if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) { 826 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 827 "Unable to add interrupt(s)."); 828 829 rval = EIO; 830 goto failed; 831 } 832 833 EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT); 834 835 /* Reuse mbq from previous mbox */ 836 bzero(mbq, sizeof (MAILBOXQ)); 837 838 (void) emlxs_mb_config_port(hba, mbq, sli_mode, key); 839 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) { 840 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 841 "Unable to configure port. " 842 "Mailbox cmd=%x status=%x slimode=%d key=%x", 843 mb->mbxCommand, mb->mbxStatus, sli_mode, key); 844 845 for (sli_mode--; sli_mode > 0; sli_mode--) { 846 /* Check if sli_mode is supported by this adapter */ 847 if (hba->model_info.sli_mask & 848 EMLXS_SLI_MASK(sli_mode)) { 849 sli_mode_mask = EMLXS_SLI_MASK(sli_mode); 850 break; 851 } 852 } 853 854 if (sli_mode) { 855 fw_check = 0; 856 857 goto reset; 858 } 859 860 hba->flag &= ~FC_SLIM2_MODE; 861 862 rval = EIO; 863 goto failed; 864 } 865 866 /* Check if SLI3 mode was achieved */ 867 if (mb->un.varCfgPort.rMA && 868 (mb->un.varCfgPort.sli_mode == EMLXS_HBA_SLI3_MODE)) { 869 870 if (mb->un.varCfgPort.vpi_max > 1) { 871 hba->flag |= FC_NPIV_ENABLED; 872 873 if (hba->model_info.chip >= EMLXS_SATURN_CHIP) { 874 hba->vpi_max = 875 min(mb->un.varCfgPort.vpi_max, 876 MAX_VPORTS - 1); 877 } else { 878 hba->vpi_max = 879 min(mb->un.varCfgPort.vpi_max, 880 MAX_VPORTS_LIMITED - 1); 881 } 882 } 883 884 #if (EMLXS_MODREV >= EMLXS_MODREV5) 885 hba->fca_tran->fca_num_npivports = 886 (cfg[CFG_NPIV_ENABLE].current) ? 
hba->vpi_max : 0; 887 #endif /* >= EMLXS_MODREV5 */ 888 889 if (mb->un.varCfgPort.gerbm && mb->un.varCfgPort.max_hbq) { 890 hba->flag |= FC_HBQ_ENABLED; 891 } 892 893 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 894 "SLI3 mode: flag=%x vpi_max=%d", hba->flag, hba->vpi_max); 895 } else { 896 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 897 "SLI2 mode: flag=%x", hba->flag); 898 sli_mode = EMLXS_HBA_SLI2_MODE; 899 sli_mode_mask = EMLXS_SLI2_MASK; 900 hba->sli_mode = sli_mode; 901 #if (EMLXS_MODREV >= EMLXS_MODREV5) 902 hba->fca_tran->fca_num_npivports = 0; 903 #endif /* >= EMLXS_MODREV5 */ 904 905 } 906 907 /* Get and save the current firmware version (based on sli_mode) */ 908 emlxs_decode_firmware_rev(hba, vpd); 909 910 emlxs_pcix_mxr_update(hba, 0); 911 912 /* Reuse mbq from previous mbox */ 913 bzero(mbq, sizeof (MAILBOXQ)); 914 915 emlxs_mb_read_config(hba, mbq); 916 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) { 917 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 918 "Unable to read configuration. 
Mailbox cmd=%x status=%x", 919 mb->mbxCommand, mb->mbxStatus); 920 921 rval = EIO; 922 goto failed; 923 } 924 925 /* Save the link speed capabilities */ 926 vpd->link_speed = (uint16_t)mb->un.varRdConfig.lmt; 927 emlxs_process_link_speed(hba); 928 929 /* Set the max node count */ 930 if (cfg[CFG_NUM_NODES].current > 0) { 931 hba->max_nodes = 932 min(cfg[CFG_NUM_NODES].current, 933 mb->un.varRdConfig.max_rpi); 934 } else { 935 hba->max_nodes = mb->un.varRdConfig.max_rpi; 936 } 937 938 /* Set the io throttle */ 939 hba->io_throttle = mb->un.varRdConfig.max_xri - IO_THROTTLE_RESERVE; 940 941 /* Set max_iotag */ 942 if (cfg[CFG_NUM_IOTAGS].current) { 943 hba->max_iotag = (uint16_t)cfg[CFG_NUM_IOTAGS].current; 944 } else { 945 hba->max_iotag = mb->un.varRdConfig.max_xri; 946 } 947 948 /* Set out-of-range iotag base */ 949 hba->fc_oor_iotag = hba->max_iotag; 950 951 /* 952 * Allocate some memory for buffers 953 */ 954 if (emlxs_mem_alloc_buffer(hba) == 0) { 955 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 956 "Unable to allocate memory buffers."); 957 958 EMLXS_STATE_CHANGE(hba, FC_ERROR); 959 return (ENOMEM); 960 } 961 962 /* 963 * Setup and issue mailbox RUN BIU DIAG command Setup test buffers 964 */ 965 if (((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) || 966 ((mp1 = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0)) { 967 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 968 "Unable to allocate diag buffers."); 969 970 rval = ENOMEM; 971 goto failed; 972 } 973 974 bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt, 975 MEM_ELSBUF_SIZE); 976 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, MEM_ELSBUF_SIZE, 977 DDI_DMA_SYNC_FORDEV); 978 979 bzero(mp1->virt, MEM_ELSBUF_SIZE); 980 EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE, 981 DDI_DMA_SYNC_FORDEV); 982 983 /* Reuse mbq from previous mbox */ 984 bzero(mbq, sizeof (MAILBOXQ)); 985 986 (void) emlxs_mb_run_biu_diag(hba, mbq, mp->phys, mp1->phys); 987 988 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != 
MBX_SUCCESS) { 989 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 990 "Unable to run BIU diag. Mailbox cmd=%x status=%x", 991 mb->mbxCommand, mb->mbxStatus); 992 993 rval = EIO; 994 goto failed; 995 } 996 997 EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE, 998 DDI_DMA_SYNC_FORKERNEL); 999 1000 #ifdef FMA_SUPPORT 1001 if (mp->dma_handle) { 1002 if (emlxs_fm_check_dma_handle(hba, mp->dma_handle) 1003 != DDI_FM_OK) { 1004 EMLXS_MSGF(EMLXS_CONTEXT, 1005 &emlxs_invalid_dma_handle_msg, 1006 "sli3_online: hdl=%p", 1007 mp->dma_handle); 1008 rval = EIO; 1009 goto failed; 1010 } 1011 } 1012 1013 if (mp1->dma_handle) { 1014 if (emlxs_fm_check_dma_handle(hba, mp1->dma_handle) 1015 != DDI_FM_OK) { 1016 EMLXS_MSGF(EMLXS_CONTEXT, 1017 &emlxs_invalid_dma_handle_msg, 1018 "sli3_online: hdl=%p", 1019 mp1->dma_handle); 1020 rval = EIO; 1021 goto failed; 1022 } 1023 } 1024 #endif /* FMA_SUPPORT */ 1025 1026 outptr = mp->virt; 1027 inptr = mp1->virt; 1028 1029 for (i = 0; i < MEM_ELSBUF_SIZE; i++) { 1030 if (*outptr++ != *inptr++) { 1031 outptr--; 1032 inptr--; 1033 1034 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 1035 "BIU diagnostic failed. 
" 1036 "offset %x value %x should be %x.", 1037 i, (uint32_t)*inptr, (uint32_t)*outptr); 1038 1039 rval = EIO; 1040 goto failed; 1041 } 1042 } 1043 1044 /* Free the buffers since we were polling */ 1045 emlxs_mem_put(hba, MEM_BUF, (void *)mp); 1046 mp = NULL; 1047 emlxs_mem_put(hba, MEM_BUF, (void *)mp1); 1048 mp1 = NULL; 1049 1050 hba->channel_fcp = FC_FCP_RING; 1051 hba->channel_els = FC_ELS_RING; 1052 hba->channel_ip = FC_IP_RING; 1053 hba->channel_ct = FC_CT_RING; 1054 hba->sli.sli3.ring_count = MAX_RINGS; 1055 1056 hba->channel_tx_count = 0; 1057 hba->io_count = 0; 1058 hba->fc_iotag = 1; 1059 1060 for (i = 0; i < hba->chan_count; i++) { 1061 cp = &hba->chan[i]; 1062 1063 /* 1 to 1 mapping between ring and channel */ 1064 cp->iopath = (void *)&hba->sli.sli3.ring[i]; 1065 1066 cp->hba = hba; 1067 cp->channelno = i; 1068 } 1069 1070 /* 1071 * Setup and issue mailbox CONFIGURE RING command 1072 */ 1073 for (i = 0; i < (uint32_t)hba->sli.sli3.ring_count; i++) { 1074 /* 1075 * Initialize cmd/rsp ring pointers 1076 */ 1077 rp = &hba->sli.sli3.ring[i]; 1078 1079 /* 1 to 1 mapping between ring and channel */ 1080 rp->channelp = &hba->chan[i]; 1081 1082 rp->hba = hba; 1083 rp->ringno = (uint8_t)i; 1084 1085 rp->fc_cmdidx = 0; 1086 rp->fc_rspidx = 0; 1087 EMLXS_STATE_CHANGE(hba, FC_INIT_CFGRING); 1088 1089 /* Reuse mbq from previous mbox */ 1090 bzero(mbq, sizeof (MAILBOXQ)); 1091 1092 emlxs_mb_config_ring(hba, i, mbq); 1093 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != 1094 MBX_SUCCESS) { 1095 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 1096 "Unable to configure ring. 
" 1097 "Mailbox cmd=%x status=%x", 1098 mb->mbxCommand, mb->mbxStatus); 1099 1100 rval = EIO; 1101 goto failed; 1102 } 1103 } 1104 1105 /* 1106 * Setup link timers 1107 */ 1108 EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK); 1109 1110 /* Reuse mbq from previous mbox */ 1111 bzero(mbq, sizeof (MAILBOXQ)); 1112 1113 emlxs_mb_config_link(hba, mbq); 1114 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) { 1115 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 1116 "Unable to configure link. Mailbox cmd=%x status=%x", 1117 mb->mbxCommand, mb->mbxStatus); 1118 1119 rval = EIO; 1120 goto failed; 1121 } 1122 1123 #ifdef MAX_RRDY_SUPPORT 1124 /* Set MAX_RRDY if one is provided */ 1125 if (cfg[CFG_MAX_RRDY].current) { 1126 1127 /* Reuse mbq from previous mbox */ 1128 bzero(mbq, sizeof (MAILBOXQ)); 1129 1130 emlxs_mb_set_var(hba, (MAILBOX *)mbq, 0x00060412, 1131 cfg[CFG_MAX_RRDY].current); 1132 1133 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != 1134 MBX_SUCCESS) { 1135 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 1136 "MAX_RRDY: Unable to set. status=%x " \ 1137 "value=%d", 1138 mb->mbxStatus, cfg[CFG_MAX_RRDY].current); 1139 } else { 1140 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 1141 "MAX_RRDY: %d", cfg[CFG_MAX_RRDY].current); 1142 } 1143 } 1144 #endif /* MAX_RRDY_SUPPORT */ 1145 1146 /* Reuse mbq from previous mbox */ 1147 bzero(mbq, sizeof (MAILBOXQ)); 1148 1149 /* 1150 * We need to get login parameters for NID 1151 */ 1152 (void) emlxs_mb_read_sparam(hba, mbq); 1153 mp = (MATCHMAP *)mbq->bp; 1154 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) { 1155 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 1156 "Unable to read parameters. 
Mailbox cmd=%x status=%x", 1157 mb->mbxCommand, mb->mbxStatus); 1158 1159 rval = EIO; 1160 goto failed; 1161 } 1162 1163 /* Free the buffer since we were polling */ 1164 emlxs_mem_put(hba, MEM_BUF, (void *)mp); 1165 mp = NULL; 1166 1167 /* If no serial number in VPD data, then use the WWPN */ 1168 if (vpd->serial_num[0] == 0) { 1169 outptr = (uint8_t *)&hba->wwpn.IEEE[0]; 1170 for (i = 0; i < 12; i++) { 1171 status = *outptr++; 1172 j = ((status & 0xf0) >> 4); 1173 if (j <= 9) { 1174 vpd->serial_num[i] = 1175 (char)((uint8_t)'0' + (uint8_t)j); 1176 } else { 1177 vpd->serial_num[i] = 1178 (char)((uint8_t)'A' + (uint8_t)(j - 10)); 1179 } 1180 1181 i++; 1182 j = (status & 0xf); 1183 if (j <= 9) { 1184 vpd->serial_num[i] = 1185 (char)((uint8_t)'0' + (uint8_t)j); 1186 } else { 1187 vpd->serial_num[i] = 1188 (char)((uint8_t)'A' + (uint8_t)(j - 10)); 1189 } 1190 } 1191 1192 /* 1193 * Set port number and port index to zero 1194 * The WWN's are unique to each port and therefore port_num 1195 * must equal zero. 
This effects the hba_fru_details structure 1196 * in fca_bind_port() 1197 */ 1198 vpd->port_num[0] = 0; 1199 vpd->port_index = 0; 1200 } 1201 1202 /* 1203 * Make first attempt to set a port index 1204 * Check if this is a multifunction adapter 1205 */ 1206 if ((vpd->port_index == (uint32_t)-1) && 1207 (hba->model_info.chip >= EMLXS_THOR_CHIP)) { 1208 char *buffer; 1209 int32_t i; 1210 1211 /* 1212 * The port address looks like this: 1213 * 1 - for port index 0 1214 * 1,1 - for port index 1 1215 * 1,2 - for port index 2 1216 */ 1217 buffer = ddi_get_name_addr(hba->dip); 1218 1219 if (buffer) { 1220 vpd->port_index = 0; 1221 1222 /* Reverse scan for a comma */ 1223 for (i = strlen(buffer) - 1; i > 0; i--) { 1224 if (buffer[i] == ',') { 1225 /* Comma found - set index now */ 1226 vpd->port_index = 1227 emlxs_strtol(&buffer[i + 1], 10); 1228 break; 1229 } 1230 } 1231 } 1232 } 1233 1234 /* Make final attempt to set a port index */ 1235 if (vpd->port_index == (uint32_t)-1) { 1236 dev_info_t *p_dip; 1237 dev_info_t *c_dip; 1238 1239 p_dip = ddi_get_parent(hba->dip); 1240 c_dip = ddi_get_child(p_dip); 1241 1242 vpd->port_index = 0; 1243 while (c_dip && (hba->dip != c_dip)) { 1244 c_dip = ddi_get_next_sibling(c_dip); 1245 vpd->port_index++; 1246 } 1247 } 1248 1249 if (vpd->port_num[0] == 0) { 1250 if (hba->model_info.channels == EMLXS_MULTI_CHANNEL) { 1251 (void) snprintf(vpd->port_num, 1252 (sizeof (vpd->port_num)-1), 1253 "%d", vpd->port_index); 1254 } 1255 } 1256 1257 if (vpd->id[0] == 0) { 1258 (void) strncpy(vpd->id, hba->model_info.model_desc, 1259 (sizeof (vpd->id)-1)); 1260 } 1261 1262 if (vpd->manufacturer[0] == 0) { 1263 (void) strncpy(vpd->manufacturer, hba->model_info.manufacturer, 1264 (sizeof (vpd->manufacturer)-1)); 1265 } 1266 1267 if (vpd->part_num[0] == 0) { 1268 (void) strncpy(vpd->part_num, hba->model_info.model, 1269 (sizeof (vpd->part_num)-1)); 1270 } 1271 1272 if (vpd->model_desc[0] == 0) { 1273 (void) strncpy(vpd->model_desc, 
hba->model_info.model_desc, 1274 (sizeof (vpd->model_desc)-1)); 1275 } 1276 1277 if (vpd->model[0] == 0) { 1278 (void) strncpy(vpd->model, hba->model_info.model, 1279 (sizeof (vpd->model)-1)); 1280 } 1281 1282 if (vpd->prog_types[0] == 0) { 1283 emlxs_build_prog_types(hba, vpd); 1284 } 1285 1286 /* Create the symbolic names */ 1287 (void) snprintf(hba->snn, (sizeof (hba->snn)-1), 1288 "Emulex %s FV%s DV%s %s", 1289 hba->model_info.model, hba->vpd.fw_version, emlxs_version, 1290 (char *)utsname.nodename); 1291 1292 (void) snprintf(hba->spn, (sizeof (hba->spn)-1), 1293 "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", 1294 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, 1295 hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], 1296 hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]); 1297 1298 if (cfg[CFG_NETWORK_ON].current) { 1299 if ((hba->sparam.portName.nameType != NAME_IEEE) || 1300 (hba->sparam.portName.IEEEextMsn != 0) || 1301 (hba->sparam.portName.IEEEextLsb != 0)) { 1302 1303 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 1304 "WWPN doesn't conform to IP profile: " 1305 "nameType=%x. Disabling networking.", 1306 hba->sparam.portName.nameType); 1307 1308 cfg[CFG_NETWORK_ON].current = 0; 1309 } 1310 } 1311 1312 if (cfg[CFG_NETWORK_ON].current) { 1313 /* Reuse mbq from previous mbox */ 1314 bzero(mbq, sizeof (MAILBOXQ)); 1315 1316 /* Issue CONFIG FARP */ 1317 emlxs_mb_config_farp(hba, mbq); 1318 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != 1319 MBX_SUCCESS) { 1320 /* 1321 * Let it go through even if failed. 1322 */ 1323 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 1324 "Unable to configure FARP. 
" 1325 "Mailbox cmd=%x status=%x", 1326 mb->mbxCommand, mb->mbxStatus); 1327 } 1328 } 1329 #ifdef MSI_SUPPORT 1330 /* Configure MSI map if required */ 1331 if (hba->intr_count > 1) { 1332 1333 if (hba->intr_type == DDI_INTR_TYPE_MSIX) { 1334 /* always start from 0 */ 1335 hba->last_msiid = 0; 1336 } 1337 1338 /* Reuse mbq from previous mbox */ 1339 bzero(mbq, sizeof (MAILBOXQ)); 1340 1341 emlxs_mb_config_msix(hba, mbq, hba->intr_map, hba->intr_count); 1342 1343 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) == 1344 MBX_SUCCESS) { 1345 goto msi_configured; 1346 } 1347 1348 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 1349 "Unable to config MSIX. Mailbox cmd=0x%x status=0x%x", 1350 mb->mbxCommand, mb->mbxStatus); 1351 1352 /* Reuse mbq from previous mbox */ 1353 bzero(mbq, sizeof (MAILBOXQ)); 1354 1355 emlxs_mb_config_msi(hba, mbq, hba->intr_map, hba->intr_count); 1356 1357 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) == 1358 MBX_SUCCESS) { 1359 goto msi_configured; 1360 } 1361 1362 1363 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 1364 "Unable to config MSI. Mailbox cmd=0x%x status=0x%x", 1365 mb->mbxCommand, mb->mbxStatus); 1366 1367 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 1368 "Attempting single interrupt mode..."); 1369 1370 /* First cleanup old interrupts */ 1371 (void) emlxs_msi_remove(hba); 1372 (void) emlxs_msi_uninit(hba); 1373 1374 status = emlxs_msi_init(hba, 1); 1375 1376 if (status != DDI_SUCCESS) { 1377 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 1378 "Unable to initialize interrupt. 
status=%d", 1379 status); 1380 1381 rval = EIO; 1382 goto failed; 1383 } 1384 1385 /* 1386 * Reset adapter - The adapter needs to be reset because 1387 * the bus cannot handle the MSI change without handshaking 1388 * with the adapter again 1389 */ 1390 1391 (void) emlxs_mem_free_buffer(hba); 1392 fw_check = 0; 1393 goto reset; 1394 } 1395 1396 msi_configured: 1397 1398 1399 if ((hba->intr_count >= 1) && 1400 (hba->sli_mode == EMLXS_HBA_SLI3_MODE)) { 1401 /* intr_count is a sequence of msi id */ 1402 /* Setup msi2chan[msi_id] */ 1403 for (i = 0; i < hba->intr_count; i ++) { 1404 hba->msi2chan[i] = i; 1405 if (i >= hba->chan_count) 1406 hba->msi2chan[i] = (i - hba->chan_count); 1407 } 1408 } 1409 #endif /* MSI_SUPPORT */ 1410 1411 /* 1412 * We always disable the firmware traffic cop feature 1413 */ 1414 if (emlxs_disable_traffic_cop) { 1415 /* Reuse mbq from previous mbox */ 1416 bzero(mbq, sizeof (MAILBOXQ)); 1417 1418 emlxs_disable_tc(hba, mbq); 1419 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != 1420 MBX_SUCCESS) { 1421 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 1422 "Unable to disable traffic cop. " 1423 "Mailbox cmd=%x status=%x", 1424 mb->mbxCommand, mb->mbxStatus); 1425 1426 rval = EIO; 1427 goto failed; 1428 } 1429 } 1430 1431 1432 /* Reuse mbq from previous mbox */ 1433 bzero(mbq, sizeof (MAILBOXQ)); 1434 1435 /* Register for async events */ 1436 emlxs_mb_async_event(hba, mbq); 1437 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) { 1438 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 1439 "Async events disabled. 
Mailbox status=%x", 1440 mb->mbxStatus); 1441 } else { 1442 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 1443 "Async events enabled."); 1444 hba->flag |= FC_ASYNC_EVENTS; 1445 } 1446 1447 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN); 1448 1449 emlxs_sli3_enable_intr(hba); 1450 1451 if (hba->flag & FC_HBQ_ENABLED) { 1452 if (port->flag & EMLXS_TGT_ENABLED) { 1453 if (emlxs_hbq_setup(hba, EMLXS_FCT_HBQ_ID)) { 1454 EMLXS_MSGF(EMLXS_CONTEXT, 1455 &emlxs_init_failed_msg, 1456 "Unable to setup FCT HBQ."); 1457 1458 rval = ENOMEM; 1459 1460 #ifdef SFCT_SUPPORT 1461 /* Check if we can fall back to just */ 1462 /* initiator mode */ 1463 if ((hba->pm_state == EMLXS_PM_IN_ATTACH) && 1464 (port->flag & EMLXS_INI_ENABLED) && 1465 (cfg[CFG_DTM_ENABLE].current == 1) && 1466 (cfg[CFG_TARGET_MODE].current == 0)) { 1467 1468 cfg[CFG_DTM_ENABLE].current = 0; 1469 1470 EMLXS_MSGF(EMLXS_CONTEXT, 1471 &emlxs_init_failed_msg, 1472 "Disabling dynamic target mode. " 1473 "Enabling initiator mode only."); 1474 1475 /* This will trigger the driver to */ 1476 /* reattach */ 1477 rval = EAGAIN; 1478 } 1479 #endif /* SFCT_SUPPORT */ 1480 goto failed; 1481 } 1482 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 1483 "FCT Ring: Posted %d buffers.", MEM_FCTBUF_COUNT); 1484 } 1485 1486 if (cfg[CFG_NETWORK_ON].current) { 1487 if (emlxs_hbq_setup(hba, EMLXS_IP_HBQ_ID)) { 1488 EMLXS_MSGF(EMLXS_CONTEXT, 1489 &emlxs_init_failed_msg, 1490 "Unable to setup IP HBQ."); 1491 1492 rval = ENOMEM; 1493 goto failed; 1494 } 1495 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 1496 "IP Ring: Posted %d buffers.", MEM_IPBUF_COUNT); 1497 } 1498 1499 if (emlxs_hbq_setup(hba, EMLXS_ELS_HBQ_ID)) { 1500 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 1501 "Unable to setup ELS HBQ."); 1502 rval = ENOMEM; 1503 goto failed; 1504 } 1505 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 1506 "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT); 1507 1508 if (emlxs_hbq_setup(hba, EMLXS_CT_HBQ_ID)) { 1509 EMLXS_MSGF(EMLXS_CONTEXT, 
&emlxs_init_failed_msg, 1510 "Unable to setup CT HBQ."); 1511 1512 rval = ENOMEM; 1513 goto failed; 1514 } 1515 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 1516 "CT Ring: Posted %d buffers.", MEM_CTBUF_COUNT); 1517 } else { 1518 if (port->flag & EMLXS_TGT_ENABLED) { 1519 /* Post the FCT unsol buffers */ 1520 rp = &hba->sli.sli3.ring[FC_FCT_RING]; 1521 for (j = 0; j < MEM_FCTBUF_COUNT; j += 2) { 1522 (void) emlxs_post_buffer(hba, rp, 2); 1523 } 1524 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 1525 "FCP Ring: Posted %d buffers.", MEM_FCTBUF_COUNT); 1526 } 1527 1528 if (cfg[CFG_NETWORK_ON].current) { 1529 /* Post the IP unsol buffers */ 1530 rp = &hba->sli.sli3.ring[FC_IP_RING]; 1531 for (j = 0; j < MEM_IPBUF_COUNT; j += 2) { 1532 (void) emlxs_post_buffer(hba, rp, 2); 1533 } 1534 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 1535 "IP Ring: Posted %d buffers.", MEM_IPBUF_COUNT); 1536 } 1537 1538 /* Post the ELS unsol buffers */ 1539 rp = &hba->sli.sli3.ring[FC_ELS_RING]; 1540 for (j = 0; j < MEM_ELSBUF_COUNT; j += 2) { 1541 (void) emlxs_post_buffer(hba, rp, 2); 1542 } 1543 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 1544 "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT); 1545 1546 1547 /* Post the CT unsol buffers */ 1548 rp = &hba->sli.sli3.ring[FC_CT_RING]; 1549 for (j = 0; j < MEM_CTBUF_COUNT; j += 2) { 1550 (void) emlxs_post_buffer(hba, rp, 2); 1551 } 1552 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 1553 "CT Ring: Posted %d buffers.", MEM_CTBUF_COUNT); 1554 } 1555 1556 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ)); 1557 1558 /* Check persist-linkdown */ 1559 if (cfg[CFG_PERSIST_LINKDOWN].current) { 1560 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST); 1561 return (0); 1562 } 1563 1564 #ifdef SFCT_SUPPORT 1565 if ((port->mode == MODE_TARGET) && 1566 !(port->fct_flags & FCT_STATE_PORT_ONLINE)) { 1567 emlxs_enable_latt(hba); 1568 return (0); 1569 } 1570 #endif /* SFCT_SUPPORT */ 1571 1572 /* 1573 * Setup and issue mailbox INITIALIZE LINK 
command 1574 * At this point, the interrupt will be generated by the HW 1575 */ 1576 mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX); 1577 if (mbq == NULL) { 1578 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 1579 "Unable to allocate mailbox buffer."); 1580 1581 rval = EIO; 1582 goto failed; 1583 } 1584 mb = (MAILBOX *)mbq; 1585 1586 emlxs_mb_init_link(hba, mbq, cfg[CFG_TOPOLOGY].current, 1587 cfg[CFG_LINK_SPEED].current); 1588 1589 rval = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0); 1590 if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) { 1591 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, 1592 "Unable to initialize link. " \ 1593 "Mailbox cmd=%x status=%x", 1594 mb->mbxCommand, mb->mbxStatus); 1595 1596 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq); 1597 mbq = NULL; 1598 rval = EIO; 1599 goto failed; 1600 } 1601 1602 /* 1603 * Enable link attention interrupt 1604 */ 1605 emlxs_enable_latt(hba); 1606 1607 /* Wait for link to come up */ 1608 i = cfg[CFG_LINKUP_DELAY].current; 1609 while (i && (hba->state < FC_LINK_UP)) { 1610 /* Check for hardware error */ 1611 if (hba->state == FC_ERROR) { 1612 EMLXS_MSGF(EMLXS_CONTEXT, 1613 &emlxs_init_failed_msg, 1614 "Adapter error."); 1615 1616 mbq = NULL; 1617 rval = EIO; 1618 goto failed; 1619 } 1620 1621 BUSYWAIT_MS(1000); 1622 i--; 1623 } 1624 1625 /* 1626 * The leadvile driver will now handle the FLOGI at the driver level 1627 */ 1628 1629 return (0); 1630 1631 failed: 1632 1633 EMLXS_STATE_CHANGE(hba, FC_ERROR); 1634 1635 if (hba->intr_flags & EMLXS_MSI_ADDED) { 1636 (void) EMLXS_INTR_REMOVE(hba); 1637 } 1638 1639 if (mp) { 1640 emlxs_mem_put(hba, MEM_BUF, (void *)mp); 1641 mp = NULL; 1642 } 1643 1644 if (mp1) { 1645 emlxs_mem_put(hba, MEM_BUF, (void *)mp1); 1646 mp1 = NULL; 1647 } 1648 1649 (void) emlxs_mem_free_buffer(hba); 1650 1651 if (mbq) { 1652 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ)); 1653 mbq = NULL; 1654 mb = NULL; 1655 } 1656 1657 if (rval == 0) { 1658 rval = EIO; 1659 } 1660 1661 return 
(rval);

} /* emlxs_sli3_online() */


/*
 * Take the adapter offline: kill the hardware, then release the
 * driver's shared memory.  reset_requested is accepted for interface
 * symmetry but is not used here (hence ARGSUSED).
 */
/*ARGSUSED*/
static void
emlxs_sli3_offline(emlxs_hba_t *hba, uint32_t reset_requested)
{
	/* Reverse emlxs_sli3_online */

	/* Kill the adapter */
	emlxs_sli3_hba_kill(hba);

	/* Free driver shared memory */
	(void) emlxs_mem_free_buffer(hba);

} /* emlxs_sli3_offline() */


/*
 * Map the adapter's register spaces and allocate the SLIM2 DMA buffer.
 *
 * SBUS adapters map five ranges (SLIM, DFLY CSR, Fcode flash, TITAN
 * core and TITAN CSR); PCI adapters map only SLIM and the memory-mapped
 * CSR range.  Each mapping is skipped when its access handle is already
 * set, so the routine tolerates being called again after a partial
 * setup.  After mapping, the CSR shortcut pointers (HA/CA/HS/HC/BC and,
 * on SBUS, the TITAN SHC/SHS/SHU registers) are derived from the mapped
 * bases.  Returns 0 on success; on any failure everything mapped so far
 * is undone via emlxs_sli3_unmap_hdw() and ENOMEM is returned.
 */
static int
emlxs_sli3_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t		*port = &PPORT;
	dev_info_t		*dip;
	ddi_device_acc_attr_t	dev_attr;
	int			status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	if (hba->bus_type == SBUS_FC) {

		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup SLIM failed. "
				    "status=%x", status);
				goto failed;
			}
		}
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup DFLY CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		if (hba->sli.sli3.sbus_flash_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_FLASH_RDWR,
			    (caddr_t *)&hba->sli.sli3.sbus_flash_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_flash_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup Fcode Flash "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		if (hba->sli.sli3.sbus_core_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CORE_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_core_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_core_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CORE "
				    "failed. status=%x", status);
				goto failed;
			}
		}

		if (hba->sli.sli3.sbus_csr_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.sbus_csr_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
	} else {	/* ****** PCI ****** */

		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup SLIM failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli3.slim_addr, &dev_attr,
				    &hba->sli.sli3.slim_acc_handle);
				goto failed;
			}
		}

		/*
		 * Map in control registers, using memory-mapped version of
		 * the registers rather than the I/O space-mapped registers.
		 */
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "ddi_regs_map_setup CSR failed. status=%x",
				    status);
				goto failed;
			}
		}
	}

	/* Allocate and zero the host-memory SLIM2 DMA buffer */
	if (hba->sli.sli3.slim2.virt == 0) {
		MBUF_INFO *buf_info;
		MBUF_INFO bufinfo;

		buf_info = &bufinfo;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = SLI_SLIM2_SIZE;
		buf_info->flags =
		    FC_MBUF_DMA | FC_MBUF_SNGLSG;
		buf_info->align = ddi_ptob(dip, 1L);	/* one page */

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			goto failed;
		}

		hba->sli.sli3.slim2.virt = buf_info->virt;
		hba->sli.sli3.slim2.phys = buf_info->phys;
		hba->sli.sli3.slim2.size = SLI_SLIM2_SIZE;
		hba->sli.sli3.slim2.data_handle = buf_info->data_handle;
		hba->sli.sli3.slim2.dma_handle = buf_info->dma_handle;
		bzero((char *)hba->sli.sli3.slim2.virt, SLI_SLIM2_SIZE);
	}

	/* offset from beginning of register space */
	hba->sli.sli3.ha_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HA_REG_OFFSET));
	hba->sli.sli3.ca_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * CA_REG_OFFSET));
	hba->sli.sli3.hs_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HS_REG_OFFSET));
	hba->sli.sli3.hc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HC_REG_OFFSET));
	hba->sli.sli3.bc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * BC_REG_OFFSET));

	if (hba->bus_type == SBUS_FC) {
		/* offset from beginning of register space */
		/* for TITAN registers */
		hba->sli.sli3.shc_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_CTRL_REG_OFFSET));
		hba->sli.sli3.shs_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_STAT_REG_OFFSET));
		hba->sli.sli3.shu_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_UPDATE_REG_OFFSET));
	}
	hba->chan_count = MAX_RINGS;

	return (0);

failed:

	/* Tear down any mappings made before the failure */
	emlxs_sli3_unmap_hdw(hba);
	return (ENOMEM);

} /* emlxs_sli3_map_hdw() */


/*
 * Undo emlxs_sli3_map_hdw(): release every register mapping that was
 * set up and free the SLIM2 DMA buffer.  Every teardown step is guarded
 * by its handle/pointer being non-zero, so this is safe to call after a
 * partial emlxs_sli3_map_hdw() failure.
 */
static void
emlxs_sli3_unmap_hdw(emlxs_hba_t *hba)
{
	MBUF_INFO bufinfo;
	MBUF_INFO *buf_info = &bufinfo;

	if (hba->sli.sli3.csr_acc_handle) {
		ddi_regs_map_free(&hba->sli.sli3.csr_acc_handle);
		hba->sli.sli3.csr_acc_handle = 0;
	}

	if (hba->sli.sli3.slim_acc_handle) {
		ddi_regs_map_free(&hba->sli.sli3.slim_acc_handle);
		hba->sli.sli3.slim_acc_handle = 0;
	}

	if (hba->sli.sli3.sbus_flash_acc_handle) {
		ddi_regs_map_free(&hba->sli.sli3.sbus_flash_acc_handle);
		hba->sli.sli3.sbus_flash_acc_handle = 0;
	}

	if (hba->sli.sli3.sbus_core_acc_handle) {
		ddi_regs_map_free(&hba->sli.sli3.sbus_core_acc_handle);
		hba->sli.sli3.sbus_core_acc_handle = 0;
	}

	if (hba->sli.sli3.sbus_csr_handle) {
		ddi_regs_map_free(&hba->sli.sli3.sbus_csr_handle);
		hba->sli.sli3.sbus_csr_handle = 0;
	}

	/* Free the SLIM2 DMA buffer, if it was allocated */
	if (hba->sli.sli3.slim2.virt) {
		bzero(buf_info, sizeof (MBUF_INFO));

		if (hba->sli.sli3.slim2.phys) {
			buf_info->phys = hba->sli.sli3.slim2.phys;
			buf_info->data_handle = hba->sli.sli3.slim2.data_handle;
			buf_info->dma_handle = hba->sli.sli3.slim2.dma_handle;
			buf_info->flags = FC_MBUF_DMA;
		}

		buf_info->virt = hba->sli.sli3.slim2.virt;
		buf_info->size = hba->sli.sli3.slim2.size;
		emlxs_mem_free(hba, buf_info);

		hba->sli.sli3.slim2.virt = NULL;
	}


	return;

} /* emlxs_sli3_unmap_hdw() */


/*
 * Restart the adapter and initialize the SLI3 ring configuration,
 * the per-vport VPI objects and the max_nodes default.
 * Returns 0 on success, 1 on failure.
 */
static uint32_t
emlxs_sli3_hba_init(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	emlxs_config_t *cfg;
	uint16_t i;
	VPIobj_t *vpip;

	cfg = &CFG;
	i = 0;

	/* Restart the adapter */
	if (emlxs_sli3_hba_reset(hba, 1, 0, 0)) {
		return (1);
	}

	hba->channel_fcp = FC_FCP_RING;
	hba->channel_els = FC_ELS_RING;
	hba->channel_ip = FC_IP_RING;
	hba->channel_ct = FC_CT_RING;
	hba->chan_count = MAX_RINGS;
	hba->sli.sli3.ring_count = MAX_RINGS;

	/*
	 * WARNING: There is a max of 6 ring masks allowed
	 */
	/* RING 0 - FCP */
	if (port->flag & EMLXS_TGT_ENABLED) {
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_FCP_CMND;
		hba->sli.sli3.ring_rmask[i] = 0;
		hba->sli.sli3.ring_tval[i] = FC_TYPE_SCSI_FCP;
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_FCP_RING].fc_numCiocb = SLIM_IOCB_CMD_R0_ENTRIES;
	hba->sli.sli3.ring[FC_FCP_RING].fc_numRiocb = SLIM_IOCB_RSP_R0_ENTRIES;

	/* RING 1 - IP */
	if (cfg[CFG_NETWORK_ON].current) {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_UNSOL_DATA; /* Unsol Data */
		hba->sli.sli3.ring_rmask[i] = 0xFF;
		hba->sli.sli3.ring_tval[i] = FC_TYPE_IS8802_SNAP; /* LLC/SNAP */
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_IP_RING].fc_numCiocb = SLIM_IOCB_CMD_R1_ENTRIES;
	hba->sli.sli3.ring[FC_IP_RING].fc_numRiocb = SLIM_IOCB_RSP_R1_ENTRIES;

	/* RING 2 - ELS */
	hba->sli.sli3.ring_masks[FC_ELS_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_ELS_REQ;	/* ELS request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_TYPE_EXTENDED_LS;	/* ELS */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_ELS_RING].fc_numCiocb = SLIM_IOCB_CMD_R2_ENTRIES;
	hba->sli.sli3.ring[FC_ELS_RING].fc_numRiocb = SLIM_IOCB_RSP_R2_ENTRIES;

	/* RING 3 - CT */
	hba->sli.sli3.ring_masks[FC_CT_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_UNSOL_CTL;	/* CT request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_TYPE_FC_SERVICES;	/* CT */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_CT_RING].fc_numCiocb = SLIM_IOCB_CMD_R3_ENTRIES;
	hba->sli.sli3.ring[FC_CT_RING].fc_numRiocb = SLIM_IOCB_RSP_R3_ENTRIES;

	if (i > 6) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "hba_init: Too many ring masks defined. cnt=%d", i);
		return (1);
	}

	/* Initialize all the port objects */
	hba->vpi_max = 0;
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->hba = hba;
		vport->vpi = i;

		vpip = &vport->VPIobj;
		vpip->index = i;
		vpip->VPI = i;
		vpip->port = vport;
		vpip->state = VPI_STATE_OFFLINE;
		vport->vpip = vpip;
	}

	/*
	 * Initialize the max_node count to a default value if needed
	 * This determines how many node objects we preallocate in the pool
	 * The actual max_nodes will be set later based on adapter info
	 */
	if (hba->max_nodes == 0) {
		if (cfg[CFG_NUM_NODES].current > 0) {
			hba->max_nodes = cfg[CFG_NUM_NODES].current;
		} else if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
			hba->max_nodes = 4096;
		} else {
			hba->max_nodes = 512;
		}
	}

	return (0);

} /* emlxs_sli3_hba_init() */


/*
 * Reset (or restart) the adapter.
 *
 * restart:   nonzero writes an MBX_RESTART command into SLIM-1 and waits
 *            for both HS_FFRDY and HS_MBRDY; zero performs a warm reset
 *            and waits for HS_MBRDY only.
 * skip_post: written into the second SLIM mailbox word of the restart
 *            command; only set after emlxs_sli3_online has completed.
 * 0: quiesce indicates the call is not from quiesce routine.
 * 1: quiesce indicates the call is from quiesce routine.
 *
 * Returns 0 on success, 1 on failure (state set to FC_ERROR).
 */
static uint32_t
emlxs_sli3_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
    uint32_t quiesce)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX swpmb;
	MAILBOX *mb;
	uint32_t *word0;
	uint16_t cfg_value;
	uint32_t status = 0;
	uint32_t status1;
	uint32_t status2;
	uint32_t i;
	uint32_t ready;
	emlxs_port_t *vport;
	RING *rp;
	emlxs_config_t *cfg = &CFG;

	if (!cfg[CFG_RESET_ENABLE].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Adapter reset disabled.");
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		return (1);
	}

	/* Kill the adapter first */
	if (quiesce == 0) {
		emlxs_sli3_hba_kill(hba);
	} else {
		emlxs_sli3_hba_kill4quiesce(hba);
	}

	if (restart) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Restarting.");
		EMLXS_STATE_CHANGE(hba, FC_INIT_START);

		ready = (HS_FFRDY | HS_MBRDY);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Resetting.");
		EMLXS_STATE_CHANGE(hba, FC_WARM_START);

		ready = HS_MBRDY;
	}

	hba->flag &= ~(FC_SLIM2_MODE | FC_HARDWARE_ERROR);

	mb = FC_SLIM1_MAILBOX(hba);
	word0 = (uint32_t *)&swpmb;

	/* Retried from below if the board is not ready after 15 seconds */
reset:

	i = 0;	/* counts elapsed seconds while polling */

	/* Save reset time */
	HBASTATS.ResetTime = hba->timer_tics;

	if (restart) {
		/* First put restart command in mailbox */
		*word0 = 0;
		swpmb.mbxCommand = MBX_RESTART;
		swpmb.mbxHc = 1;
		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb), *word0);

		/* Only skip post after emlxs_sli3_online is completed */
		if (skip_post) {
			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
			    1);
		} else {
			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
			    0);
		}

	}

	/*
	 * Turn off SERR, PERR in PCI cmd register
	 */
	cfg_value = ddi_get16(hba->pci_acc_handle,
	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER));

	ddi_put16(hba->pci_acc_handle,
	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
	    (uint16_t)(cfg_value & ~(CMD_PARITY_CHK | CMD_SERR_ENBL)));

	hba->sli.sli3.hc_copy = HC_INITFF;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);

	/* Wait 1 msec before restoring PCI config */
	BUSYWAIT_MS(1);

	/* Restore PCI cmd register */
	ddi_put16(hba->pci_acc_handle,
	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
	    (uint16_t)cfg_value);

	/* Wait 3 seconds before checking */
	BUSYWAIT_MS(3000);
	i += 3;

	/* Wait for reset completion */
	while (i < 30) {
		/* Check status register to see what current state is */
		status = READ_CSR_REG(hba, FC_HS_REG(hba));

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			status1 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
			    hba->sli.sli3.slim_addr + 0xa8));
			status2 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
			    hba->sli.sli3.slim_addr + 0xac));

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
			    "HS_FFERM: status=0x%x status1=0x%x status2=0x%x",
			    status, status1, status2);

			EMLXS_STATE_CHANGE(hba, FC_ERROR);
			return (1);
		}

		if ((status & ready) == ready) {
			/* Reset Done !! */
			goto done;
		}

		/*
		 * Check every 1 second for 15 seconds, then reset board
		 * again (w/post), then check every 1 second for 15
		 * seconds.
		 */
		BUSYWAIT_MS(1000);
		i++;

		/* Reset again (w/post) at 15 seconds */
		if (i == 15) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "Reset failed. Retrying...");

			goto reset;
		}
	}

#ifdef FMA_SUPPORT
reset_fail:
#endif /* FMA_SUPPORT */

	/* Timeout occurred */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
	    "Timeout: status=0x%x", status);
	EMLXS_STATE_CHANGE(hba, FC_ERROR);

	/* Log a dump event */
	emlxs_log_dump_event(port, NULL, 0);

	return (1);

done:

	/* Initialize hc_copy */
	hba->sli.sli3.hc_copy = READ_CSR_REG(hba, FC_HC_REG(hba));

#ifdef FMA_SUPPORT
	/* Access handle validation */
	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
	    != DDI_FM_OK) ||
	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
	    != DDI_FM_OK) ||
	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
	    != DDI_FM_OK)) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_access_handle_msg, NULL);
		goto reset_fail;
	}
#endif /* FMA_SUPPORT */

	/* Reset the hba structure */
	hba->flag &= FC_RESET_MASK;
	hba->channel_tx_count = 0;
	hba->io_count = 0;
	hba->iodone_count = 0;
	hba->topology = 0;
	hba->linkspeed = 0;
	hba->heartbeat_active = 0;
	hba->discovery_timer = 0;
	hba->linkup_timer = 0;
	hba->loopback_tics = 0;

	/* Reset the ring objects */
	for (i = 0; i < MAX_RINGS; i++) {
		rp = &hba->sli.sli3.ring[i];
		rp->fc_mpon = 0;
		rp->fc_mpoff = 0;
	}

	/* Reset the port objects */
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		vport->flag &= EMLXS_PORT_RESET_MASK;
		vport->did = 0;
		vport->prev_did = 0;
		vport->lip_type = 0;
		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
		bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));

		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
		vport->node_base.nlp_Rpi = 0;
		vport->node_base.nlp_DID = 0xffffff;
		vport->node_base.nlp_list_next = NULL;
		vport->node_base.nlp_list_prev = NULL;
		vport->node_base.nlp_active = 1;
		vport->node_count = 0;

		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
		}
	}

	return (0);

} /* emlxs_sli3_hba_reset */


/* bpl_type argument values for emlxs_pkt_to_bpl() */
#define	BPL_CMD		0
#define	BPL_RESP	1
#define	BPL_DATA	2

/*
 * Translate one payload of an fc_packet_t (command, response or data,
 * selected by bpl_type) into ULP_BDE64 scatter/gather entries at 'bpl',
 * byte-swapped for the adapter.  Returns the address just past the last
 * entry written, or NULL for an unknown bpl_type.  With
 * EMLXS_MODREV >= 3 the packet carries DMA cookie arrays; older
 * revisions expose a single cookie per payload.
 */
static ULP_BDE64 *
emlxs_pkt_to_bpl(fc_packet_t *pkt, ULP_BDE64 *bpl, uint32_t bpl_type)
{
	ddi_dma_cookie_t *cp;
	uint_t i;
	int32_t size;
	uint_t cookie_cnt;
	uint8_t bdeFlags;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	switch (bpl_type) {
	case BPL_CMD:
		cp = pkt->pkt_cmd_cookie;
		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
		size = (int32_t)pkt->pkt_cmdlen;
		bdeFlags = 0;
		break;

	case BPL_RESP:
		cp = pkt->pkt_resp_cookie;
		cookie_cnt = pkt->pkt_resp_cookie_cnt;
		size = (int32_t)pkt->pkt_rsplen;
		bdeFlags = BUFF_USE_RCV;
		break;


	case BPL_DATA:
		cp = pkt->pkt_data_cookie;
		cookie_cnt = pkt->pkt_data_cookie_cnt;
		size = (int32_t)pkt->pkt_datalen;
		/* Receive-direction flag only for FCP reads */
		bdeFlags = (pkt->pkt_tran_type == FC_PKT_FCP_READ) ?
		    BUFF_USE_RCV : 0;
		break;

	default:
		return (NULL);
	}

#else
	switch (bpl_type) {
	case BPL_CMD:
		cp = &pkt->pkt_cmd_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_cmdlen;
		bdeFlags = 0;
		break;

	case BPL_RESP:
		cp = &pkt->pkt_resp_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_rsplen;
		bdeFlags = BUFF_USE_RCV;
		break;


	case BPL_DATA:
		cp = &pkt->pkt_data_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_datalen;
		bdeFlags = (pkt->pkt_tran_type == FC_PKT_FCP_READ) ?
2317 BUFF_USE_RCV : 0; 2318 break; 2319 2320 default: 2321 return (NULL); 2322 } 2323 #endif /* >= EMLXS_MODREV3 */ 2324 2325 for (i = 0; i < cookie_cnt && size > 0; i++, cp++) { 2326 bpl->addrHigh = 2327 BE_SWAP32(PADDR_HI(cp->dmac_laddress)); 2328 bpl->addrLow = 2329 BE_SWAP32(PADDR_LO(cp->dmac_laddress)); 2330 bpl->tus.f.bdeSize = MIN(size, cp->dmac_size); 2331 bpl->tus.f.bdeFlags = bdeFlags; 2332 bpl->tus.w = BE_SWAP32(bpl->tus.w); 2333 2334 bpl++; 2335 size -= cp->dmac_size; 2336 } 2337 2338 return (bpl); 2339 2340 } /* emlxs_pkt_to_bpl */ 2341 2342 2343 static uint32_t 2344 emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp) 2345 { 2346 emlxs_hba_t *hba = HBA; 2347 fc_packet_t *pkt; 2348 MATCHMAP *bmp; 2349 ULP_BDE64 *bpl; 2350 uint64_t bp; 2351 IOCB *iocb; 2352 IOCBQ *iocbq; 2353 CHANNEL *cp; 2354 uint32_t data_cookie_cnt; 2355 uint32_t channelno; 2356 2357 cp = sbp->channel; 2358 iocb = (IOCB *) & sbp->iocbq; 2359 pkt = PRIV2PKT(sbp); 2360 2361 if (hba->sli.sli3.bpl_table) { 2362 bmp = hba->sli.sli3.bpl_table[sbp->iotag]; 2363 } else { 2364 bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL); 2365 } 2366 2367 if (!bmp) { 2368 return (1); 2369 } 2370 2371 sbp->bmp = bmp; 2372 bpl = (ULP_BDE64 *)bmp->virt; 2373 bp = bmp->phys; 2374 2375 #if (EMLXS_MODREV >= EMLXS_MODREV3) 2376 data_cookie_cnt = pkt->pkt_data_cookie_cnt; 2377 #else 2378 data_cookie_cnt = 1; 2379 #endif /* >= EMLXS_MODREV3 */ 2380 2381 iocbq = &sbp->iocbq; 2382 2383 channelno = (iocbq->flag & IOCB_FCP_CMD)? FC_FCP_RING:cp->channelno; 2384 switch (channelno) { 2385 case FC_FCP_RING: 2386 2387 /* CMD payload */ 2388 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD); 2389 if (! bpl) { 2390 return (1); 2391 } 2392 2393 /* Check if response & data payloads are needed */ 2394 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) { 2395 break; 2396 } 2397 2398 /* RSP payload */ 2399 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP); 2400 if (! 
bpl) { 2401 return (1); 2402 } 2403 2404 /* Check if data payload is needed */ 2405 if ((pkt->pkt_datalen == 0) || 2406 (data_cookie_cnt == 0)) { 2407 break; 2408 } 2409 2410 /* DATA payload */ 2411 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_DATA); 2412 if (! bpl) { 2413 return (1); 2414 } 2415 break; 2416 2417 case FC_IP_RING: 2418 2419 /* CMD payload */ 2420 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD); 2421 if (! bpl) { 2422 return (1); 2423 } 2424 break; 2425 2426 case FC_ELS_RING: 2427 2428 /* CMD payload */ 2429 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD); 2430 if (! bpl) { 2431 return (1); 2432 } 2433 2434 /* Check if response payload is needed */ 2435 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) { 2436 break; 2437 } 2438 2439 /* RSP payload */ 2440 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP); 2441 if (! bpl) { 2442 return (1); 2443 } 2444 break; 2445 2446 case FC_CT_RING: 2447 2448 /* CMD payload */ 2449 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD); 2450 if (! bpl) { 2451 return (1); 2452 } 2453 2454 /* Check if response payload is needed */ 2455 if ((pkt->pkt_tran_type == FC_PKT_OUTBOUND) && 2456 (pkt->pkt_cmd_fhdr.type != EMLXS_MENLO_TYPE)) { 2457 break; 2458 } 2459 2460 /* RSP payload */ 2461 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP); 2462 if (! 
bpl) {
			return (1);
		}
		break;

	}

	/*
	 * NOTE(review): on the return (1) paths above, bmp obtained from
	 * emlxs_mem_get() is not released here; presumably it is freed
	 * via sbp->bmp when the packet is cleaned up — verify against
	 * the packet completion path.
	 */

	/* Point the IOCB's BDL at the BPL just built; the size is the */
	/* number of bytes of BDE entries actually written */
	iocb->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->un.genreq64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.genreq64.bdl.addrLow = PADDR_LO(bp);
	iocb->un.genreq64.bdl.bdeSize =
	    (uint32_t)(((uintptr_t)bpl - (uintptr_t)bmp->virt) & 0xFFFFFFFF);
	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;

	return (0);

} /* emlxs_sli2_bde_setup */


/*
 * SLI-3 style buffer setup: place the BDEs directly inside the extended
 * IOCB (ebde1/ebde2...) instead of an external BPL.  Falls back to
 * emlxs_sli2_bde_setup() when the cookie counts exceed what the
 * extended IOCB can hold (more than one cmd/resp cookie, or more than
 * SLI3_MAX_BDE total).  Returns 0 on success, 1 on failure.
 */
static uint32_t
emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_resp;
	ddi_dma_cookie_t *cp_data;
	fc_packet_t	*pkt;
	ULP_BDE64	*bde;
	int		data_cookie_cnt;
	uint32_t	i;
	uint32_t	channelno;
	IOCB		*iocb;
	IOCBQ		*iocbq;
	CHANNEL		*cp;

	pkt = PRIV2PKT(sbp);
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	/* Too many cookies for embedded BDEs: use the SLI-2 BPL path */
	if ((pkt->pkt_cmd_cookie_cnt > 1) ||
	    (pkt->pkt_resp_cookie_cnt > 1) ||
	    ((pkt->pkt_cmd_cookie_cnt + pkt->pkt_resp_cookie_cnt +
	    pkt->pkt_data_cookie_cnt) > SLI3_MAX_BDE)) {
		i = emlxs_sli2_bde_setup(port, sbp);
		return (i);
	}

	cp_cmd = pkt->pkt_cmd_cookie;
	cp_resp = pkt->pkt_resp_cookie;
	cp_data = pkt->pkt_data_cookie;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	cp_cmd = &pkt->pkt_cmd_cookie;
	cp_resp = &pkt->pkt_resp_cookie;
	cp_data = &pkt->pkt_data_cookie;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	cp = sbp->channel;
	iocbq = &sbp->iocbq;
	iocb = (IOCB *)iocbq;
	iocb->unsli3.ext_iocb.ebde_count = 0;

	/* FCP commands may be queued to any channel; treat them as FCP */
	channelno = (iocbq->flag & IOCB_FCP_CMD)? FC_FCP_RING:cp->channelno;
	switch (channelno) {
	case FC_FCP_RING:
		/* CMD payload */
		iocb->un.fcpi64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.bdeSize = pkt->pkt_cmdlen;
		iocb->un.fcpi64.bdl.bdeFlags = 0;

		/* Check if a response & data payload are needed */
		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			break;
		}

		/* RSP payload */
		iocb->unsli3.ext_iocb.ebde1.addrHigh =
		    PADDR_HI(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.addrLow =
		    PADDR_LO(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = 0;
		iocb->unsli3.ext_iocb.ebde_count = 1;

		/* Check if a data payload is needed */
		if ((pkt->pkt_datalen == 0) ||
		    (data_cookie_cnt == 0)) {
			break;
		}

		/* DATA payload: one embedded BDE per data cookie */
		bde = (ULP_BDE64 *)&iocb->unsli3.ext_iocb.ebde2;
		for (i = 0; i < data_cookie_cnt; i++) {
			bde->addrHigh = PADDR_HI(cp_data->dmac_laddress);
			bde->addrLow = PADDR_LO(cp_data->dmac_laddress);
			bde->tus.f.bdeSize = cp_data->dmac_size;
			bde->tus.f.bdeFlags = 0;
			cp_data++;
			bde++;
		}
		iocb->unsli3.ext_iocb.ebde_count += data_cookie_cnt;

		break;

	case FC_IP_RING:
		/* CMD payload */
		iocb->un.xseq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.bdeSize = pkt->pkt_cmdlen;
		iocb->un.xseq64.bdl.bdeFlags = 0;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		iocb->un.elsreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.bdeSize = pkt->pkt_cmdlen;
		iocb->un.elsreq64.bdl.bdeFlags = 0;

		/* Check if a response payload is needed */
		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			break;
		}

		/* RSP payload */
		iocb->unsli3.ext_iocb.ebde1.addrHigh =
		    PADDR_HI(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.addrLow =
		    PADDR_LO(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = BUFF_USE_RCV;
		iocb->unsli3.ext_iocb.ebde_count = 1;
		break;

	case FC_CT_RING:

		/* CMD payload */
		iocb->un.genreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.bdeSize = pkt->pkt_cmdlen;
		iocb->un.genreq64.bdl.bdeFlags = 0;

		/* Check if a response payload is needed */
		/* (Menlo-type CT frames always carry a response) */
		if ((pkt->pkt_tran_type == FC_PKT_OUTBOUND) &&
		    (pkt->pkt_cmd_fhdr.type != EMLXS_MENLO_TYPE)) {
			break;
		}

		/* RSP payload */
		iocb->unsli3.ext_iocb.ebde1.addrHigh =
		    PADDR_HI(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.addrLow =
		    PADDR_LO(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = BUFF_USE_RCV;
		iocb->unsli3.ext_iocb.ebde_count = 1;
		break;
	}

	/* SLI-3 embedded BDEs: no external BDL, so no BDE count / LE bit */
	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_bde_setup */


/* Only used for FCP Data xfers */
#ifdef SFCT_SUPPORT
/*ARGSUSED*/
/*
 * SLI-2 target-mode (FCT) buffer setup: build a single-entry BPL
 * describing the FCT data buffer and point the FCP_TRECEIVE/TSEND
 * IOCB's BDL at it.  With no fct_buf attached the BDL is zeroed.
 * Returns 0 on success, 1 if no BPL buffer could be obtained.
 */
static uint32_t
emlxs_sli2_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	scsi_task_t *fct_task;
	MATCHMAP *bmp;
	ULP_BDE64 *bpl;
	uint64_t bp;
	uint8_t bdeFlags;
	IOCB *iocb;
	uint32_t size;
	MATCHMAP *mp;

	iocb = (IOCB *)&sbp->iocbq.iocb;
	sbp->bmp = NULL;

	/* No data buffer: emit an empty, zeroed BDL */
	if (!sbp->fct_buf) {
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (0);
	}

	/* Use the preallocated per-iotag BPL if one exists */
	if (hba->sli.sli3.bpl_table) {
		bmp = hba->sli.sli3.bpl_table[sbp->iotag];
	} else {
		bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL);
	}

	if (!bmp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "fct_sli2_bde_setup: Unable to BPL buffer. iotag=%d",
		    sbp->iotag);

		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (1);
	}

	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;

	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;

	size = sbp->fct_buf->db_data_size;
	mp = (MATCHMAP *)sbp->fct_buf->db_port_private;

	/* TF_WRITE_DATA means the adapter receives into this buffer */
	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;

	/* Init the buffer list */
	bpl->addrHigh = BE_SWAP32(PADDR_HI(mp->phys));
	bpl->addrLow = BE_SWAP32(PADDR_LO(mp->phys));
	bpl->tus.f.bdeSize = size;
	bpl->tus.f.bdeFlags = bdeFlags;
	bpl->tus.w = BE_SWAP32(bpl->tus.w);

	/* Init the IOCB */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bp);
	iocb->un.fcpt64.bdl.bdeSize = sizeof (ULP_BDE64);
	iocb->un.fcpt64.bdl.bdeFlags = BUFF_TYPE_BDL;

	iocb->un.fcpt64.fcpt_Length =
	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;
	sbp->bmp = bmp;

	return (0);

} /* emlxs_sli2_fct_bde_setup */
#endif /* SFCT_SUPPORT */


#ifdef SFCT_SUPPORT
/*ARGSUSED*/
/*
 * SLI-3 target-mode (FCT) buffer setup: embed the single data BDE
 * directly in the IOCB (no external BPL needed).  With no fct_buf
 * attached the BDL is zeroed.  Always returns 0.
 */
static uint32_t
emlxs_sli3_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	scsi_task_t *fct_task;
	IOCB *iocb;
	MATCHMAP *mp;
	uint32_t bdeFlags;
	uint32_t size;

	iocb = (IOCB *)&sbp->iocbq;

	/* No data buffer: emit an empty, zeroed BDL */
	if (!sbp->fct_buf) {
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 0;
		iocb->unsli3.ext_iocb.ebde_count = 0;
		return (0);
	}

	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;

	size = sbp->fct_buf->db_data_size;
	mp = (MATCHMAP *)sbp->fct_buf->db_port_private;

	/* TF_WRITE_DATA means the adapter receives into this buffer */
	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;

	/* Init first BDE */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(mp->phys);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(mp->phys);
	iocb->un.fcpt64.bdl.bdeSize = size;
	iocb->un.fcpt64.bdl.bdeFlags = bdeFlags;

	iocb->unsli3.ext_iocb.ebde_count = 0;
	iocb->un.fcpt64.fcpt_Length =
	    (fct_task->task_flags & TF_WRITE_DATA) ?
	    size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_fct_bde_setup */
#endif /* SFCT_SUPPORT */


/*
 * Push IOCBs onto a command ring and notify the adapter.
 *
 * iocbq may be NULL, in which case only queued transmit IOCBs are
 * drained.  The routine acquires the per-channel CMD_RING lock,
 * checks adapter/node throttles and ring space, copies each IOCB
 * into SLIM via emlxs_sli3_issue_iocb(), and finally rings the chip
 * attention (CA) register with the updated put index.  When the ring
 * or adapter is busy, the IOCB is requeued with emlxs_tx_put() and
 * R0CE_REQ is set so the chip interrupts us when space frees up.
 */
static void
emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif	/* FMA_SUPPORT */
	PGP *pgp;
	emlxs_buf_t *sbp;
	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
	RING *rp;
	uint32_t nextIdx;
	uint32_t status;
	void *ioa2;
	off_t offset;
	uint32_t count = 0;
	uint32_t flag;
	uint32_t channelno;
	int32_t throttle;
#ifdef NODE_THROTTLE_SUPPORT
	int32_t node_throttle;
	NODELIST *marked_node = NULL;
#endif /* NODE_THROTTLE_SUPPORT */

	channelno = cp->channelno;
	rp = (RING *)cp->iopath;

	throttle = 0;

	/* Check if FCP ring and adapter is not ready */
	/* We may use any ring for FCP_CMD */
	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
		    (((emlxs_port_t *)iocbq->port)->mode == MODE_INITIATOR)) {
			emlxs_tx_put(iocbq, 1);
			return;
		}
	}

	/* Attempt to acquire CMD_RING lock */
	if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
		/* Queue it for later */
		if (iocbq) {
			/*
			 * With enough IOCBs already in flight, the lock
			 * holder will drain the tx queue for us; only
			 * block on the lock when the pipeline is shallow.
			 */
			if ((hba->io_count -
			    hba->channel_tx_count) > 10) {
				emlxs_tx_put(iocbq, 1);
				return;
			} else {

				/*
				 * EMLXS_MSGF(EMLXS_CONTEXT,
				 * &emlxs_ring_watchdog_msg,
				 * "%s host=%d port=%d cnt=%d,%d RACE
				 * CONDITION3 DETECTED.",
				 * emlxs_ring_xlate(channelno),
				 * rp->fc_cmdidx, rp->fc_port_cmdidx,
				 * hba->channel_tx_count,
				 * hba->io_count);
				 */
				mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
			}
		} else {
			return;
		}
	}
	/* CMD_RING_LOCK acquired */

	/* Throttle check only applies to non special iocb */
	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
		/* Check if HBA is full */
		throttle = hba->io_throttle - hba->io_active;
		if (throttle <= 0) {
			/* Hitting adapter throttle limit */
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/* Read adapter's get index (DMA-synced from the shared PGP area) */
	pgp = (PGP *)
	    &((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[channelno];
	offset =
	    (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
	    DDI_DMA_SYNC_FORKERNEL);
	rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

	/* Calculate the next put index */
	nextIdx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;

	/* Check if ring is full */
	if (nextIdx == rp->fc_port_cmdidx) {
		/* Try one more time */
		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
		    DDI_DMA_SYNC_FORKERNEL);
		rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

		if (nextIdx == rp->fc_port_cmdidx) {
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/*
	 * We have a command ring slot available
	 * Make sure we have an iocb to send
	 */
	if (iocbq) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

		/* Check if the ring already has iocb's waiting */
		if (cp->nodeq.q_first != NULL) {
			/* Put the current iocbq on the tx queue */
			emlxs_tx_put(iocbq, 0);

			/*
			 * Attempt to replace it with the next iocbq
			 * in the tx queue
			 */
			iocbq = emlxs_tx_get(cp, 0);
		}

		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	} else {
		/* Try to get the next iocb on the tx queue */
		iocbq = emlxs_tx_get(cp, 1);
	}

sendit:
	count = 0;

	/* Process each iocbq */
	while (iocbq) {
		sbp = iocbq->sbp;

#ifdef NODE_THROTTLE_SUPPORT
		if (sbp && sbp->node && sbp->node->io_throttle) {
			node_throttle = sbp->node->io_throttle -
			    sbp->node->io_active;
			if (node_throttle <= 0) {
				/* Node is busy */
				/* Queue this iocb and get next iocb from */
				/* channel */

				/*
				 * Remember the first busy node seen; if we
				 * cycle back around to it, every queued
				 * node is throttled and we bail out.
				 */
				if (!marked_node) {
					marked_node = sbp->node;
				}

				mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
				emlxs_tx_put(iocbq, 0);

				if (cp->nodeq.q_first == marked_node) {
					mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
					goto busy;
				}

				iocbq = emlxs_tx_get(cp, 0);
				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
				continue;
			}
		}
		/* 0 acts as NULL here (marked_node is a NODELIST *) */
		marked_node = 0;
#endif /* NODE_THROTTLE_SUPPORT */

		if (sbp && (sbp->pkt_flags & PACKET_DELAY_REQUIRED)) {
			/*
			 * Update adapter if needed, since we are about to
			 * delay here
			 */
			if (count) {
				count = 0;

				/* Update the adapter's cmd put index */
				if (hba->bus_type == SBUS_FC) {
					slim2p->mbx.us.s2.host[channelno].
					    cmdPutInx =
					    BE_SWAP32(rp->fc_cmdidx);

					/* DMA sync the index for the adapter */
					offset = (off_t)
					    ((uint64_t)
					    ((unsigned long)&(slim2p->mbx.us.
					    s2.host[channelno].cmdPutInx)) -
					    (uint64_t)((unsigned long)slim2p));
					EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.
					    dma_handle, offset, 4,
					    DDI_DMA_SYNC_FORDEV);
				} else {
					ioa2 = (void *)
					    ((char *)hba->sli.sli3.slim_addr +
					    hba->sli.sli3.hgp_ring_offset +
					    ((channelno * 2) *
					    sizeof (uint32_t)));
					WRITE_SLIM_ADDR(hba,
					    (volatile uint32_t *)ioa2,
					    rp->fc_cmdidx);
				}

				status = (CA_R0ATT << (channelno * 4));
				WRITE_CSR_REG(hba, FC_CA_REG(hba),
				    (volatile uint32_t)status);

			}
			/* Perform delay */
			if ((channelno == FC_ELS_RING) &&
			    !(iocbq->flag & IOCB_FCP_CMD)) {
				drv_usecwait(100000);
			} else {
				drv_usecwait(20000);
			}
		}

		/*
		 * At this point, we have a command ring slot available
		 * and an iocb to send
		 */
		flag = iocbq->flag;

		/* Send the iocb */
		emlxs_sli3_issue_iocb(hba, rp, iocbq);
		/*
		 * After this, the sbp / iocb should not be
		 * accessed in the xmit path.
		 */

		count++;
		if (iocbq && (!(flag & IOCB_SPECIAL))) {
			/* Check if HBA is full */
			throttle = hba->io_throttle - hba->io_active;
			if (throttle <= 0) {
				goto busy;
			}
		}

		/* Calculate the next put index */
		nextIdx =
		    (rp->fc_cmdidx + 1 >=
		    rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;

		/* Check if ring is full */
		if (nextIdx == rp->fc_port_cmdidx) {
			/* Try one more time */
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORKERNEL);
			rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

			if (nextIdx == rp->fc_port_cmdidx) {
				goto busy;
			}
		}

		/* Get the next iocb from the tx queue if there is one */
		iocbq = emlxs_tx_get(cp, 1);
	}

	if (count) {
		/* Update the adapter's cmd put index */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channelno].
			    cmdPutInx = BE_SWAP32(rp->fc_cmdidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channelno].cmdPutInx)) -
			    (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset +
			    ((channelno * 2) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_cmdidx);
		}

		status = (CA_R0ATT << (channelno * 4));
		WRITE_CSR_REG(hba, FC_CA_REG(hba),
		    (volatile uint32_t)status);

		/* Check tx queue one more time before releasing */
		if ((iocbq = emlxs_tx_get(cp, 1))) {
			/*
			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_watchdog_msg,
			 * "%s host=%d port=%d RACE CONDITION1
			 * DETECTED.", emlxs_ring_xlate(channelno),
			 * rp->fc_cmdidx, rp->fc_port_cmdidx);
			 */
			goto sendit;
		}
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));

	return;

busy:

	/*
	 * Set ring to SET R0CE_REQ in Chip Att register.
	 * Chip will tell us when an entry is freed.
	 */
	if (count) {
		/* Update the adapter's cmd put index */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channelno].cmdPutInx =
			    BE_SWAP32(rp->fc_cmdidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channelno].cmdPutInx)) -
			    (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset +
			    ((channelno * 2) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_cmdidx);
		}
	}

	status = ((CA_R0ATT | CA_R0CE_REQ) << (channelno * 4));
	WRITE_CSR_REG(hba, FC_CA_REG(hba), (volatile uint32_t)status);

	if (throttle <= 0) {
		HBASTATS.IocbThrottled++;
	} else {
		HBASTATS.IocbRingFull[channelno]++;
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));

	return;

} /* emlxs_sli3_issue_iocb_cmd() */


/* MBX_NOWAIT - returns MBX_BUSY or MBX_SUCCESS or MBX_HARDWARE_ERROR */
/* MBX_WAIT - returns MBX_TIMEOUT or mailbox_status */
/* MBX_SLEEP - returns MBX_TIMEOUT or mailbox_status */
/* MBX_POLL - returns MBX_TIMEOUT or mailbox_status */

static uint32_t
emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
    uint32_t tmo)
{
	emlxs_port_t		*port;
	SLIM2			*slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
	MAILBOX			*mbox;
	MAILBOX			*mb;
	uint32_t		*word0;
	volatile uint32_t	ldata;
	off_t			offset;
	MATCHMAP		*mbox_bp;
	uint32_t		tmo_local;
	MAILBOX			swpmb;

	if (!mbq->port) {
		mbq->port = &PPORT;
	}

	port = (emlxs_port_t *)mbq->port;

	mb = (MAILBOX *)mbq;
	/* word0 aliases swpmb so the polled status can be byte-swapped */
	word0 = (uint32_t *)&swpmb;

	mb->mbxStatus = MBX_SUCCESS;

	/* Check for minimum timeouts */
	switch (mb->mbxCommand) {
	/* Mailbox commands that erase/write flash */
3179 case MBX_DOWN_LOAD: 3180 case MBX_UPDATE_CFG: 3181 case MBX_LOAD_AREA: 3182 case MBX_LOAD_EXP_ROM: 3183 case MBX_WRITE_NV: 3184 case MBX_FLASH_WR_ULA: 3185 case MBX_DEL_LD_ENTRY: 3186 case MBX_LOAD_SM: 3187 if (tmo < 300) { 3188 tmo = 300; 3189 } 3190 break; 3191 3192 default: 3193 if (tmo < 30) { 3194 tmo = 30; 3195 } 3196 break; 3197 } 3198 3199 /* Convert tmo seconds to 10 millisecond tics */ 3200 tmo_local = tmo * 100; 3201 3202 /* Adjust wait flag */ 3203 if (flag != MBX_NOWAIT) { 3204 /* If interrupt is enabled, use sleep, otherwise poll */ 3205 if (hba->sli.sli3.hc_copy & HC_MBINT_ENA) { 3206 flag = MBX_SLEEP; 3207 } else { 3208 flag = MBX_POLL; 3209 } 3210 } 3211 3212 mutex_enter(&EMLXS_PORT_LOCK); 3213 3214 /* Check for hardware error */ 3215 if (hba->flag & FC_HARDWARE_ERROR) { 3216 mb->mbxStatus = (hba->flag & FC_OVERTEMP_EVENT) ? 3217 MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR; 3218 3219 mutex_exit(&EMLXS_PORT_LOCK); 3220 3221 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg, 3222 "Hardware error reported. %s failed. status=%x mb=%p", 3223 emlxs_mb_cmd_xlate(mb->mbxCommand), mb->mbxStatus, mb); 3224 3225 return (MBX_HARDWARE_ERROR); 3226 } 3227 3228 if (hba->mbox_queue_flag) { 3229 /* If we are not polling, then queue it for later */ 3230 if (flag == MBX_NOWAIT) { 3231 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg, 3232 "Busy. %s: mb=%p NoWait.", 3233 emlxs_mb_cmd_xlate(mb->mbxCommand), mb); 3234 3235 emlxs_mb_put(hba, mbq); 3236 3237 HBASTATS.MboxBusy++; 3238 3239 mutex_exit(&EMLXS_PORT_LOCK); 3240 3241 return (MBX_BUSY); 3242 } 3243 3244 while (hba->mbox_queue_flag) { 3245 mutex_exit(&EMLXS_PORT_LOCK); 3246 3247 if (tmo_local-- == 0) { 3248 EMLXS_MSGF(EMLXS_CONTEXT, 3249 &emlxs_mbox_event_msg, 3250 "Timeout. 
%s: mb=%p tmo=%d Waiting.", 3251 emlxs_mb_cmd_xlate(mb->mbxCommand), mb, 3252 tmo); 3253 3254 /* Non-lethalStatus mailbox timeout */ 3255 /* Does not indicate a hardware error */ 3256 mb->mbxStatus = MBX_TIMEOUT; 3257 return (MBX_TIMEOUT); 3258 } 3259 3260 BUSYWAIT_MS(10); 3261 mutex_enter(&EMLXS_PORT_LOCK); 3262 3263 /* Check for hardware error */ 3264 if (hba->flag & FC_HARDWARE_ERROR) { 3265 mb->mbxStatus = 3266 (hba->flag & FC_OVERTEMP_EVENT) ? 3267 MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR; 3268 3269 mutex_exit(&EMLXS_PORT_LOCK); 3270 3271 EMLXS_MSGF(EMLXS_CONTEXT, 3272 &emlxs_mbox_detail_msg, 3273 "Hardware error reported. %s failed. " 3274 "status=%x mb=%p", 3275 emlxs_mb_cmd_xlate(mb->mbxCommand), 3276 mb->mbxStatus, mb); 3277 3278 return (MBX_HARDWARE_ERROR); 3279 } 3280 } 3281 } 3282 3283 /* Initialize mailbox area */ 3284 emlxs_mb_init(hba, mbq, flag, tmo); 3285 3286 switch (flag) { 3287 case MBX_NOWAIT: 3288 3289 if (mb->mbxCommand != MBX_HEARTBEAT) { 3290 if (mb->mbxCommand != MBX_DOWN_LOAD && 3291 mb->mbxCommand != MBX_DUMP_MEMORY) { 3292 EMLXS_MSGF(EMLXS_CONTEXT, 3293 &emlxs_mbox_detail_msg, 3294 "Sending. %s: mb=%p NoWait.", 3295 emlxs_mb_cmd_xlate(mb->mbxCommand), mb); 3296 } 3297 } 3298 3299 break; 3300 3301 case MBX_SLEEP: 3302 if (mb->mbxCommand != MBX_DOWN_LOAD && 3303 mb->mbxCommand != MBX_DUMP_MEMORY) { 3304 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg, 3305 "Sending. %s: mb=%p Sleep.", 3306 emlxs_mb_cmd_xlate(mb->mbxCommand), mb); 3307 } 3308 3309 break; 3310 3311 case MBX_POLL: 3312 if (mb->mbxCommand != MBX_DOWN_LOAD && 3313 mb->mbxCommand != MBX_DUMP_MEMORY) { 3314 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg, 3315 "Sending. 
%s: mb=%p Polled.", 3316 emlxs_mb_cmd_xlate(mb->mbxCommand), mb); 3317 } 3318 break; 3319 } 3320 3321 mb->mbxOwner = OWN_CHIP; 3322 3323 /* Clear the attention bit */ 3324 WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT); 3325 3326 if (hba->flag & FC_SLIM2_MODE) { 3327 /* First copy command data */ 3328 mbox = FC_SLIM2_MAILBOX(hba); 3329 offset = 3330 (off_t)((uint64_t)((unsigned long)mbox) 3331 - (uint64_t)((unsigned long)slim2p)); 3332 3333 #ifdef MBOX_EXT_SUPPORT 3334 if (mbq->extbuf) { 3335 uint32_t *mbox_ext = 3336 (uint32_t *)((uint8_t *)mbox + 3337 MBOX_EXTENSION_OFFSET); 3338 off_t offset_ext = offset + MBOX_EXTENSION_OFFSET; 3339 3340 BE_SWAP32_BCOPY((uint8_t *)mbq->extbuf, 3341 (uint8_t *)mbox_ext, mbq->extsize); 3342 3343 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, 3344 offset_ext, mbq->extsize, 3345 DDI_DMA_SYNC_FORDEV); 3346 } 3347 #endif /* MBOX_EXT_SUPPORT */ 3348 3349 BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox, 3350 MAILBOX_CMD_BSIZE); 3351 3352 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 3353 MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV); 3354 } else { /* SLIM 1 */ 3355 3356 mbox = FC_SLIM1_MAILBOX(hba); 3357 3358 #ifdef MBOX_EXT_SUPPORT 3359 if (mbq->extbuf) { 3360 uint32_t *mbox_ext = 3361 (uint32_t *)((uint8_t *)mbox + 3362 MBOX_EXTENSION_OFFSET); 3363 WRITE_SLIM_COPY(hba, (uint32_t *)mbq->extbuf, 3364 mbox_ext, (mbq->extsize / 4)); 3365 } 3366 #endif /* MBOX_EXT_SUPPORT */ 3367 3368 /* First copy command data */ 3369 WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords, 3370 (MAILBOX_CMD_WSIZE - 1)); 3371 3372 /* copy over last word, with mbxOwner set */ 3373 ldata = *((volatile uint32_t *)mb); 3374 WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata); 3375 } 3376 3377 /* Interrupt board to do it right away */ 3378 WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT); 3379 3380 mutex_exit(&EMLXS_PORT_LOCK); 3381 3382 #ifdef FMA_SUPPORT 3383 /* Access handle validation */ 3384 if ((emlxs_fm_check_acc_handle(hba, 
hba->sli.sli3.slim_acc_handle) 3385 != DDI_FM_OK) || 3386 (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle) 3387 != DDI_FM_OK)) { 3388 EMLXS_MSGF(EMLXS_CONTEXT, 3389 &emlxs_invalid_access_handle_msg, NULL); 3390 return (MBX_HARDWARE_ERROR); 3391 } 3392 #endif /* FMA_SUPPORT */ 3393 3394 switch (flag) { 3395 case MBX_NOWAIT: 3396 return (MBX_SUCCESS); 3397 3398 case MBX_SLEEP: 3399 3400 /* Wait for completion */ 3401 /* The driver clock is timing the mailbox. */ 3402 /* emlxs_mb_fini() will be called externally. */ 3403 3404 mutex_enter(&EMLXS_MBOX_LOCK); 3405 while (!(mbq->flag & MBQ_COMPLETED)) { 3406 cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK); 3407 } 3408 mutex_exit(&EMLXS_MBOX_LOCK); 3409 3410 if (mb->mbxStatus == MBX_TIMEOUT) { 3411 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg, 3412 "Timeout. %s: mb=%p tmo=%d. Sleep.", 3413 emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo); 3414 } else { 3415 if (mb->mbxCommand != MBX_DOWN_LOAD && 3416 mb->mbxCommand != MBX_DUMP_MEMORY) { 3417 EMLXS_MSGF(EMLXS_CONTEXT, 3418 &emlxs_mbox_detail_msg, 3419 "Completed. 
%s: mb=%p status=%x Sleep.", 3420 emlxs_mb_cmd_xlate(mb->mbxCommand), mb, 3421 mb->mbxStatus); 3422 } 3423 } 3424 3425 break; 3426 3427 case MBX_POLL: 3428 3429 /* Convert tmo seconds to 500 usec tics */ 3430 tmo_local = tmo * 2000; 3431 3432 /* Get first word of mailbox */ 3433 if (hba->flag & FC_SLIM2_MODE) { 3434 mbox = FC_SLIM2_MAILBOX(hba); 3435 offset = (off_t)((uint64_t)((unsigned long)mbox) - 3436 (uint64_t)((unsigned long)slim2p)); 3437 3438 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, 3439 offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL); 3440 *word0 = *((volatile uint32_t *)mbox); 3441 *word0 = BE_SWAP32(*word0); 3442 } else { 3443 mbox = FC_SLIM1_MAILBOX(hba); 3444 *word0 = 3445 READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox)); 3446 } 3447 3448 /* Wait for command to complete */ 3449 while ((swpmb.mbxOwner == OWN_CHIP) && 3450 !(mbq->flag & MBQ_COMPLETED)) { 3451 if (!hba->timer_id && (tmo_local-- == 0)) { 3452 /* self time */ 3453 EMLXS_MSGF(EMLXS_CONTEXT, 3454 &emlxs_mbox_timeout_msg, 3455 "%s: mb=%p tmo=%d Polled.", 3456 emlxs_mb_cmd_xlate(mb->mbxCommand), 3457 mb, tmo); 3458 3459 hba->flag |= FC_MBOX_TIMEOUT; 3460 EMLXS_STATE_CHANGE(hba, FC_ERROR); 3461 emlxs_mb_fini(hba, NULL, MBX_TIMEOUT); 3462 3463 break; 3464 } 3465 3466 BUSYWAIT_US(500); 3467 3468 /* Get first word of mailbox */ 3469 if (hba->flag & FC_SLIM2_MODE) { 3470 EMLXS_MPDATA_SYNC( 3471 hba->sli.sli3.slim2.dma_handle, offset, 3472 sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL); 3473 *word0 = *((volatile uint32_t *)mbox); 3474 *word0 = BE_SWAP32(*word0); 3475 } else { 3476 *word0 = 3477 READ_SLIM_ADDR(hba, 3478 ((volatile uint32_t *)mbox)); 3479 } 3480 3481 } /* while */ 3482 3483 if (mb->mbxStatus == MBX_TIMEOUT) { 3484 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg, 3485 "Timeout. %s: mb=%p tmo=%d. 
Polled.", 3486 emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo); 3487 3488 break; 3489 } 3490 3491 /* Check for config port command */ 3492 if ((swpmb.mbxCommand == MBX_CONFIG_PORT) && 3493 (swpmb.mbxStatus == MBX_SUCCESS)) { 3494 /* Setup host mbox for cmpl */ 3495 mbox = FC_SLIM2_MAILBOX(hba); 3496 offset = (off_t)((uint64_t)((unsigned long)mbox) 3497 - (uint64_t)((unsigned long)slim2p)); 3498 3499 hba->flag |= FC_SLIM2_MODE; 3500 } 3501 3502 /* copy results back to user */ 3503 if (hba->flag & FC_SLIM2_MODE) { 3504 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, 3505 offset, MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL); 3506 3507 BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb, 3508 MAILBOX_CMD_BSIZE); 3509 } else { 3510 READ_SLIM_COPY(hba, (uint32_t *)mb, 3511 (uint32_t *)mbox, MAILBOX_CMD_WSIZE); 3512 } 3513 3514 #ifdef MBOX_EXT_SUPPORT 3515 if (mbq->extbuf) { 3516 uint32_t *mbox_ext = 3517 (uint32_t *)((uint8_t *)mbox + 3518 MBOX_EXTENSION_OFFSET); 3519 off_t offset_ext = offset + MBOX_EXTENSION_OFFSET; 3520 3521 if (hba->flag & FC_SLIM2_MODE) { 3522 EMLXS_MPDATA_SYNC( 3523 hba->sli.sli3.slim2.dma_handle, offset_ext, 3524 mbq->extsize, DDI_DMA_SYNC_FORKERNEL); 3525 3526 BE_SWAP32_BCOPY((uint8_t *)mbox_ext, 3527 (uint8_t *)mbq->extbuf, mbq->extsize); 3528 } else { 3529 READ_SLIM_COPY(hba, 3530 (uint32_t *)mbq->extbuf, mbox_ext, 3531 (mbq->extsize / 4)); 3532 } 3533 } 3534 #endif /* MBOX_EXT_SUPPORT */ 3535 3536 /* Sync the memory buffer */ 3537 if (mbq->bp) { 3538 mbox_bp = (MATCHMAP *)mbq->bp; 3539 EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, 3540 mbox_bp->size, DDI_DMA_SYNC_FORKERNEL); 3541 } 3542 3543 if (mb->mbxCommand != MBX_DOWN_LOAD && 3544 mb->mbxCommand != MBX_DUMP_MEMORY) { 3545 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg, 3546 "Completed. 
%s: mb=%p status=%x Polled.",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
		    mb->mbxStatus);
		}

		/* Process the result */
		if (!(mbq->flag & MBQ_PASSTHRU)) {
			if (mbq->mbox_cmpl) {
				(void) (mbq->mbox_cmpl)(hba, mbq);
			}
		}

		/* Clear the attention bit */
		WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);

		/* Clean up the mailbox area */
		emlxs_mb_fini(hba, NULL, mb->mbxStatus);

		break;

	} /* switch (flag) */

	return (mb->mbxStatus);

} /* emlxs_sli3_issue_mbox_cmd() */


#ifdef SFCT_SUPPORT
/*
 * emlxs_sli3_prep_fct_iocb()
 *
 * Prepare a target-mode (COMSTAR/FCT) IOCB for issue.  Three cases are
 * handled: an exchange abort/close, an FCP response (TRSP64), and a
 * data transfer (TRECEIVE64/TSEND64).  Returns IOERR_SUCCESS, or
 * IOERR_NO_RESOURCES / IOERR_INTERNAL_ERROR on failure before the
 * point of no return.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli3_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp,
    int channel)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fct_cmd_t *fct_cmd;
	stmf_data_buf_t *dbuf;
	scsi_task_t *fct_task;
	fc_packet_t *pkt;
	uint32_t did;
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t timeout;
	uint32_t iotag;
	emlxs_node_t *ndlp;
	CHANNEL *cp;
	ddi_dma_cookie_t *cp_cmd;

	pkt = PRIV2PKT(cmd_sbp);

	cp = (CHANNEL *)cmd_sbp->channel;

	iocbq = &cmd_sbp->iocbq;
	iocb = &iocbq->iocb;


	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, cmd_sbp);

	if (!iotag) {
		/* No more command slots available, retry later */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag. did=0x%x",
		    cmd_sbp->did);

		return (IOERR_NO_RESOURCES);
	}


	/* Point of no return */

	if (iocb->ULPCOMMAND == CMD_ABORT_XRI_CX) {

		ndlp = cmd_sbp->node;
		cp->ulpSendCmd++;

		/* Initalize iocbq */
		iocbq->port = (void *)port;
		iocbq->node = (void *)ndlp;
		iocbq->channel = (void *)cp;

		/*
		 * Don't give the abort priority, we want the IOCB
		 * we are aborting to be processed first.
		 */
		iocbq->flag |= IOCB_SPECIAL;

		iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
		iocb->ULPIOTAG = (uint16_t)iotag;
		iocb->ULPLE = 1;
		iocb->ULPCLASS = cmd_sbp->class;
		iocb->ULPOWNER = OWN_CHIP;

		if (hba->state >= FC_LINK_UP) {
			/* Create the abort IOCB */
			iocb->un.acxri.abortType = ABORT_TYPE_ABTS;
			iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;

		} else {
			/* Create the close IOCB */
			iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;

		}

		/* Timeouts over 255 seconds do not fit in ULPRSVDBYTE */
		iocb->ULPRSVDBYTE =
		    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
		/* Set the pkt timer */
		cmd_sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
		    ((pkt->pkt_timeout > 0xff) ? 0 : 10);

		return (IOERR_SUCCESS);

	} else if (iocb->ULPCOMMAND == CMD_FCP_TRSP64_CX) {

		ndlp = cmd_sbp->node;
		cp->ulpSendCmd++;

		/* Initalize iocbq */
		iocbq->port = (void *)port;
		iocbq->node = (void *)ndlp;
		iocbq->channel = (void *)cp;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
		cp_cmd = pkt->pkt_cmd_cookie;
#else
		cp_cmd = &pkt->pkt_cmd_cookie;
#endif	/* >= EMLXS_MODREV3 */

		/* Response payload is described by a single 64-bit BDE */
		iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.fcpt64.bdl.addrLow = PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen;
		iocb->un.fcpt64.bdl.bdeFlags = 0;

		if (hba->sli_mode < 3) {
			iocb->ULPBDECOUNT = 1;
			iocb->ULPLE = 1;
		} else {	/* SLI3 */

			iocb->ULPBDECOUNT = 0;
			iocb->ULPLE = 0;
			iocb->unsli3.ext_iocb.ebde_count = 0;
		}

		/* Initalize iocb */
		iocb->ULPCONTEXT = (uint16_t)pkt->pkt_cmd_fhdr.rx_id;
		iocb->ULPIOTAG = (uint16_t)iotag;
		iocb->ULPRSVDBYTE =
		    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
		iocb->ULPOWNER = OWN_CHIP;
		iocb->ULPCLASS = cmd_sbp->class;
		iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;

		/* Set the pkt timer */
		cmd_sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
		    ((pkt->pkt_timeout > 0xff) ? 0 : 10);

		if (pkt->pkt_cmdlen) {
			/* Make the response payload visible to the HBA */
			EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
			    DDI_DMA_SYNC_FORDEV);
		}

		return (IOERR_SUCCESS);
	}

	/* Data transfer (TRECEIVE64/TSEND64) case */
	dbuf = cmd_sbp->fct_buf;
	fct_cmd = cmd_sbp->fct_cmd;
	fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
	ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
	did = fct_cmd->cmd_rportid;

	iocbq->channel = (void *)cmd_sbp->channel;

	if (emlxs_fct_bde_setup(port, cmd_sbp)) {
		/* Unregister the packet */
		(void) emlxs_unregister_pkt(cmd_sbp->channel, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. did=%x", did);

		return (IOERR_INTERNAL_ERROR);
	}

	/* 0x80000000 effectively disables the driver-side timeout */
	if (cfg[CFG_TIMEOUT_ENABLE].current) {
		timeout =
		    ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
	} else {
		timeout = 0x80000000;
	}

	cmd_sbp->ticks =
	    hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);

	/* Initalize iocbq */
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;

	/* Initalize iocb */
	iocb->ULPCONTEXT = (uint16_t)fct_cmd->cmd_rxid;
	iocb->ULPIOTAG = (uint16_t)iotag;
	iocb->ULPRSVDBYTE = ((timeout > 0xff) ? 0 : timeout);
	iocb->ULPOWNER = OWN_CHIP;
	iocb->ULPCLASS = cmd_sbp->class;

	iocb->ULPPU = 1;	/* Wd4 is relative offset */
	iocb->un.fcpt64.fcpt_Offset = dbuf->db_relative_offset;

	if (fct_task->task_flags & TF_WRITE_DATA) {
		iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
	} else {	/* TF_READ_DATA */

		iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;

		if ((hba->sli_mode == EMLXS_HBA_SLI3_MODE) &&
		    (dbuf->db_data_size >=
		    fct_task->task_expected_xfer_length)) {
			iocb->ULPCT = 0x1;
			/* enable auto-rsp AP feature */
		}
	}

	return (IOERR_SUCCESS);

} /* emlxs_sli3_prep_fct_iocb() */
#endif /* SFCT_SUPPORT */

/*
 * emlxs_sli3_prep_fcp_iocb()
 *
 * Prepare an initiator-mode FCP command IOCB on the FCP ring.
 * Returns FC_SUCCESS or FC_TRAN_BUSY.
 */
/* ARGSUSED */
static uint32_t
emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	CHANNEL *cp;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *ndlp;
	uint16_t iotag;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
	cp = &hba->chan[FC_FCP_RING];

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	/* Find target node object */
	ndlp = (NODELIST *)iocbq->node;

	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, sbp);

	if (!iotag) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* Initalize iocbq */
	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	/* Indicate this is a FCP cmd */
	iocbq->flag |= IOCB_FCP_CMD;

	if (emlxs_bde_setup(port, sbp)) {
		/* Unregister the packet */
		(void) emlxs_unregister_pkt(cp, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. 
Unable to setup buffer list. did=%x", did); 3820 3821 return (FC_TRAN_BUSY); 3822 } 3823 /* Point of no return */ 3824 3825 /* Initalize iocb */ 3826 iocb->ULPCONTEXT = ndlp->nlp_Rpi; 3827 iocb->ULPIOTAG = iotag; 3828 iocb->ULPRSVDBYTE = 3829 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout); 3830 iocb->ULPOWNER = OWN_CHIP; 3831 3832 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) { 3833 case FC_TRAN_CLASS1: 3834 iocb->ULPCLASS = CLASS1; 3835 break; 3836 case FC_TRAN_CLASS2: 3837 iocb->ULPCLASS = CLASS2; 3838 /* iocb->ULPCLASS = CLASS3; */ 3839 break; 3840 case FC_TRAN_CLASS3: 3841 default: 3842 iocb->ULPCLASS = CLASS3; 3843 break; 3844 } 3845 3846 /* if device is FCP-2 device, set the following bit */ 3847 /* that says to run the FC-TAPE protocol. */ 3848 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { 3849 iocb->ULPFCP2RCVY = 1; 3850 } 3851 3852 if (pkt->pkt_datalen == 0) { 3853 iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR; 3854 } else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) { 3855 iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR; 3856 iocb->ULPPU = PARM_XFER_CHECK; 3857 iocb->un.fcpi64.fcpi_parm = pkt->pkt_datalen; 3858 } else { 3859 iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR; 3860 } 3861 3862 return (FC_SUCCESS); 3863 3864 } /* emlxs_sli3_prep_fcp_iocb() */ 3865 3866 3867 static uint32_t 3868 emlxs_sli3_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp) 3869 { 3870 emlxs_hba_t *hba = HBA; 3871 fc_packet_t *pkt; 3872 IOCBQ *iocbq; 3873 IOCB *iocb; 3874 CHANNEL *cp; 3875 NODELIST *ndlp; 3876 uint16_t iotag; 3877 uint32_t did; 3878 3879 pkt = PRIV2PKT(sbp); 3880 cp = &hba->chan[FC_IP_RING]; 3881 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id); 3882 3883 iocbq = &sbp->iocbq; 3884 iocb = &iocbq->iocb; 3885 ndlp = (NODELIST *)iocbq->node; 3886 3887 /* Get the iotag by registering the packet */ 3888 iotag = emlxs_register_pkt(cp, sbp); 3889 3890 if (!iotag) { 3891 /* 3892 * No more command slots available, retry later 3893 */ 3894 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 3895 "Adapter Busy. 
Unable to allocate iotag: did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* Initalize iocbq */
	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	if (emlxs_bde_setup(port, sbp)) {
		/* Unregister the packet */
		(void) emlxs_unregister_pkt(cp, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. did=%x", did);

		return (FC_TRAN_BUSY);
	}
	/* Point of no return */

	/* Initalize iocb */
	iocb->un.xseq64.w5.hcsw.Fctl = 0;

	/* Carry first-sequence and sequence-initiative F_CTL bits over */
	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_FIRST_SEQ) {
		iocb->un.xseq64.w5.hcsw.Fctl |= FSEQ;
	}
	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
	}

	/* network headers */
	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;

	iocb->ULPIOTAG = iotag;
	/* Timeouts over 255 seconds do not fit in ULPRSVDBYTE */
	iocb->ULPRSVDBYTE =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
	iocb->ULPOWNER = OWN_CHIP;

	if (pkt->pkt_tran_type == FC_PKT_BROADCAST) {
		HBASTATS.IpBcastIssued++;

		iocb->ULPCOMMAND = CMD_XMIT_BCAST64_CN;
		iocb->ULPCONTEXT = 0;

		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
			if (hba->topology != TOPOLOGY_LOOP) {
				iocb->ULPCT = 0x1;
			}
			/* SLI3 uses the VPI as the broadcast context */
			iocb->ULPCONTEXT = port->vpi;
		}
	} else {
		HBASTATS.IpSeqIssued++;

		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
		iocb->ULPCONTEXT = ndlp->nlp_Xri;
	}

	/* Map the transport class to the adapter service class */
	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS1:
		iocb->ULPCLASS = CLASS1;
		break;
	case FC_TRAN_CLASS2:
		iocb->ULPCLASS = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		iocb->ULPCLASS = CLASS3;
		break;
	}

	return (FC_SUCCESS);

} /* emlxs_sli3_prep_ip_iocb() */


/*
 * emlxs_sli3_prep_els_iocb()
 *
 * Prepare an ELS request or response IOCB on the ELS ring.
 * Returns FC_SUCCESS or FC_TRAN_BUSY.
 */
static uint32_t
emlxs_sli3_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	uint16_t iotag;
	uint32_t did;
	uint32_t cmd;

	pkt = PRIV2PKT(sbp);
	cp = &hba->chan[FC_ELS_RING];
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;


	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, sbp);

	if (!iotag) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}
	/* Initalize iocbq */
	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	if (emlxs_bde_setup(port, sbp)) {
		/* Unregister the packet */
		(void) emlxs_unregister_pkt(cp, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. 
Unable to setup buffer list. did=%x", did);

		return (FC_TRAN_BUSY);
	}
	/* Point of no return */

	/* Initalize iocb */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* ELS Response */
		iocb->ULPCONTEXT = (volatile uint16_t) pkt->pkt_cmd_fhdr.rx_id;
		iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
	} else {
		/* ELS Request */
		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
		iocb->ULPCONTEXT =
		    (did == BCAST_DID) ? pkt->pkt_cmd_fhdr.seq_id : 0;
		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;

		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
			if (hba->topology != TOPOLOGY_LOOP) {
				/* Peek at the ELS command code in the payload */
				cmd = *((uint32_t *)pkt->pkt_cmd);
				cmd &= ELS_CMD_MASK;

				/* Fabric logins use a different CT value */
				if ((cmd == ELS_CMD_FLOGI) ||
				    (cmd == ELS_CMD_FDISC)) {
					iocb->ULPCT = 0x2;
				} else {
					iocb->ULPCT = 0x1;
				}
			}
			iocb->ULPCONTEXT = port->vpi;
		}
	}
	iocb->ULPIOTAG = iotag;
	/* Timeouts over 255 seconds do not fit in ULPRSVDBYTE */
	iocb->ULPRSVDBYTE =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
	iocb->ULPOWNER = OWN_CHIP;

	/* Map the transport class to the adapter service class */
	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS1:
		iocb->ULPCLASS = CLASS1;
		break;
	case FC_TRAN_CLASS2:
		iocb->ULPCLASS = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		iocb->ULPCLASS = CLASS3;
		break;
	}
	/* Remember the class for a possible later abort */
	sbp->class = iocb->ULPCLASS;

	return (FC_SUCCESS);

} /* emlxs_sli3_prep_els_iocb() */


/*
 * emlxs_sli3_prep_ct_iocb()
 *
 * Prepare a CT (common transport / nameserver) IOCB on the CT ring.
 * Returns FC_SUCCESS or FC_TRAN_BUSY.
 */
static uint32_t
emlxs_sli3_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	NODELIST *ndlp;
	uint16_t iotag;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
	cp = &hba->chan[FC_CT_RING];

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;
	ndlp = (NODELIST *)iocbq->node;

	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, sbp);

	if (!iotag) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	if (emlxs_bde_setup(port, sbp)) {
		/* Unregister the packet */
		(void) emlxs_unregister_pkt(cp, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. 
did=%x", did); 4109 4110 return (FC_TRAN_BUSY); 4111 } 4112 4113 /* Point of no return */ 4114 4115 /* Initalize iocbq */ 4116 iocbq->port = (void *) port; 4117 iocbq->channel = (void *) cp; 4118 4119 /* Fill in rest of iocb */ 4120 iocb->un.genreq64.w5.hcsw.Fctl = LA; 4121 4122 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) { 4123 iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ; 4124 } 4125 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) { 4126 iocb->un.genreq64.w5.hcsw.Fctl |= SI; 4127 } 4128 4129 /* Initalize iocb */ 4130 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) { 4131 /* CT Response */ 4132 iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX; 4133 iocb->un.genreq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl; 4134 iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id; 4135 } else { 4136 /* CT Request */ 4137 iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CR; 4138 iocb->un.genreq64.w5.hcsw.Dfctl = 0; 4139 iocb->ULPCONTEXT = ndlp->nlp_Rpi; 4140 } 4141 4142 iocb->un.genreq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl; 4143 iocb->un.genreq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type; 4144 4145 iocb->ULPIOTAG = iotag; 4146 iocb->ULPRSVDBYTE = 4147 ((pkt->pkt_timeout > 0xff) ? 
0 : pkt->pkt_timeout); 4148 iocb->ULPOWNER = OWN_CHIP; 4149 4150 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) { 4151 case FC_TRAN_CLASS1: 4152 iocb->ULPCLASS = CLASS1; 4153 break; 4154 case FC_TRAN_CLASS2: 4155 iocb->ULPCLASS = CLASS2; 4156 break; 4157 case FC_TRAN_CLASS3: 4158 default: 4159 iocb->ULPCLASS = CLASS3; 4160 break; 4161 } 4162 4163 return (FC_SUCCESS); 4164 4165 } /* emlxs_sli3_prep_ct_iocb() */ 4166 4167 4168 #ifdef SFCT_SUPPORT 4169 static uint32_t 4170 emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp) 4171 { 4172 emlxs_hba_t *hba = HBA; 4173 uint32_t rval; 4174 4175 if (sbp->fct_buf->db_sglist_length != 1) { 4176 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg, 4177 "fct_bde_setup: Only 1 sglist entry supported: %d", 4178 sbp->fct_buf->db_sglist_length); 4179 return (1); 4180 } 4181 4182 if (hba->sli_mode < EMLXS_HBA_SLI3_MODE) { 4183 rval = emlxs_sli2_fct_bde_setup(port, sbp); 4184 } else { 4185 rval = emlxs_sli3_fct_bde_setup(port, sbp); 4186 } 4187 4188 return (rval); 4189 4190 } /* emlxs_fct_bde_setup() */ 4191 #endif /* SFCT_SUPPORT */ 4192 4193 4194 static uint32_t 4195 emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp) 4196 { 4197 uint32_t rval; 4198 emlxs_hba_t *hba = HBA; 4199 4200 if (hba->sli_mode < EMLXS_HBA_SLI3_MODE) { 4201 rval = emlxs_sli2_bde_setup(port, sbp); 4202 } else { 4203 rval = emlxs_sli3_bde_setup(port, sbp); 4204 } 4205 4206 return (rval); 4207 4208 } /* emlxs_bde_setup() */ 4209 4210 4211 static void 4212 emlxs_sli3_poll_intr(emlxs_hba_t *hba) 4213 { 4214 uint32_t ha_copy; 4215 4216 /* Check attention bits once and process if required */ 4217 4218 ha_copy = emlxs_check_attention(hba); 4219 4220 if (ha_copy == 0) { 4221 return; 4222 } 4223 4224 mutex_enter(&EMLXS_PORT_LOCK); 4225 ha_copy = emlxs_get_attention(hba, -1); 4226 mutex_exit(&EMLXS_PORT_LOCK); 4227 4228 emlxs_proc_attention(hba, ha_copy); 4229 4230 return; 4231 4232 } /* emlxs_sli3_poll_intr() */ 4233 4234 4235 #ifdef MSI_SUPPORT 4236 static uint32_t 
4237 emlxs_sli3_msi_intr(char *arg1, char *arg2) 4238 { 4239 emlxs_hba_t *hba = (emlxs_hba_t *)arg1; 4240 #ifdef FMA_SUPPORT 4241 emlxs_port_t *port = &PPORT; 4242 #endif /* FMA_SUPPORT */ 4243 uint16_t msgid; 4244 uint32_t hc_copy; 4245 uint32_t ha_copy; 4246 uint32_t restore = 0; 4247 4248 /* 4249 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 4250 * "sli3_msi_intr: arg1=%p arg2=%p", arg1, arg2); 4251 */ 4252 4253 /* Check for legacy interrupt handling */ 4254 if (hba->intr_type == DDI_INTR_TYPE_FIXED) { 4255 mutex_enter(&EMLXS_PORT_LOCK); 4256 4257 if (hba->flag & FC_OFFLINE_MODE) { 4258 mutex_exit(&EMLXS_PORT_LOCK); 4259 4260 if (hba->bus_type == SBUS_FC) { 4261 return (DDI_INTR_CLAIMED); 4262 } else { 4263 return (DDI_INTR_UNCLAIMED); 4264 } 4265 } 4266 4267 /* Get host attention bits */ 4268 ha_copy = emlxs_get_attention(hba, -1); 4269 4270 if (ha_copy == 0) { 4271 if (hba->intr_unclaimed) { 4272 mutex_exit(&EMLXS_PORT_LOCK); 4273 return (DDI_INTR_UNCLAIMED); 4274 } 4275 4276 hba->intr_unclaimed = 1; 4277 } else { 4278 hba->intr_unclaimed = 0; 4279 } 4280 4281 mutex_exit(&EMLXS_PORT_LOCK); 4282 4283 /* Process the interrupt */ 4284 emlxs_proc_attention(hba, ha_copy); 4285 4286 return (DDI_INTR_CLAIMED); 4287 } 4288 4289 /* DDI_INTR_TYPE_MSI */ 4290 /* DDI_INTR_TYPE_MSIX */ 4291 4292 /* Get MSI message id */ 4293 msgid = (uint16_t)((unsigned long)arg2); 4294 4295 /* Validate the message id */ 4296 if (msgid >= hba->intr_count) { 4297 msgid = 0; 4298 } 4299 4300 mutex_enter(&EMLXS_INTR_LOCK(msgid)); 4301 4302 mutex_enter(&EMLXS_PORT_LOCK); 4303 4304 /* Check if adapter is offline */ 4305 if (hba->flag & FC_OFFLINE_MODE) { 4306 mutex_exit(&EMLXS_PORT_LOCK); 4307 mutex_exit(&EMLXS_INTR_LOCK(msgid)); 4308 4309 /* Always claim an MSI interrupt */ 4310 return (DDI_INTR_CLAIMED); 4311 } 4312 4313 /* Disable interrupts associated with this msgid */ 4314 if (msgid == 0 && (hba->model_info.chip == EMLXS_ZEPHYR_CHIP)) { 4315 hc_copy = hba->sli.sli3.hc_copy & 
~hba->intr_mask; 4316 WRITE_CSR_REG(hba, FC_HC_REG(hba), hc_copy); 4317 restore = 1; 4318 } 4319 4320 /* Get host attention bits */ 4321 ha_copy = emlxs_get_attention(hba, msgid); 4322 4323 mutex_exit(&EMLXS_PORT_LOCK); 4324 4325 /* Process the interrupt */ 4326 emlxs_proc_attention(hba, ha_copy); 4327 4328 /* Restore interrupts */ 4329 if (restore) { 4330 mutex_enter(&EMLXS_PORT_LOCK); 4331 WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy); 4332 #ifdef FMA_SUPPORT 4333 /* Access handle validation */ 4334 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle); 4335 #endif /* FMA_SUPPORT */ 4336 mutex_exit(&EMLXS_PORT_LOCK); 4337 } 4338 4339 mutex_exit(&EMLXS_INTR_LOCK(msgid)); 4340 4341 return (DDI_INTR_CLAIMED); 4342 4343 } /* emlxs_sli3_msi_intr() */ 4344 #endif /* MSI_SUPPORT */ 4345 4346 4347 static int 4348 emlxs_sli3_intx_intr(char *arg) 4349 { 4350 emlxs_hba_t *hba = (emlxs_hba_t *)arg; 4351 uint32_t ha_copy = 0; 4352 4353 mutex_enter(&EMLXS_PORT_LOCK); 4354 4355 if (hba->flag & FC_OFFLINE_MODE) { 4356 mutex_exit(&EMLXS_PORT_LOCK); 4357 4358 if (hba->bus_type == SBUS_FC) { 4359 return (DDI_INTR_CLAIMED); 4360 } else { 4361 return (DDI_INTR_UNCLAIMED); 4362 } 4363 } 4364 4365 /* Get host attention bits */ 4366 ha_copy = emlxs_get_attention(hba, -1); 4367 4368 if (ha_copy == 0) { 4369 if (hba->intr_unclaimed) { 4370 mutex_exit(&EMLXS_PORT_LOCK); 4371 return (DDI_INTR_UNCLAIMED); 4372 } 4373 4374 hba->intr_unclaimed = 1; 4375 } else { 4376 hba->intr_unclaimed = 0; 4377 } 4378 4379 mutex_exit(&EMLXS_PORT_LOCK); 4380 4381 /* Process the interrupt */ 4382 emlxs_proc_attention(hba, ha_copy); 4383 4384 return (DDI_INTR_CLAIMED); 4385 4386 } /* emlxs_sli3_intx_intr() */ 4387 4388 4389 /* EMLXS_PORT_LOCK must be held when call this routine */ 4390 static uint32_t 4391 emlxs_get_attention(emlxs_hba_t *hba, int32_t msgid) 4392 { 4393 #ifdef FMA_SUPPORT 4394 emlxs_port_t *port = &PPORT; 4395 #endif /* FMA_SUPPORT */ 4396 uint32_t ha_copy = 0; 4397 uint32_t 
ha_copy2;
	uint32_t mask = hba->sli.sli3.hc_copy;

#ifdef MSI_SUPPORT

read_ha_register:

	/* Check for default MSI interrupt */
	if (msgid == 0) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

		/* Filter out MSI non-default attention bits */
		ha_copy2 &= ~(hba->intr_cond);
	}

	/* Check for polled or fixed type interrupt */
	else if (msgid == -1) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
	}

	/* Otherwise, assume a mapped MSI interrupt */
	else {
		/* Convert MSI msgid to mapped attention bits */
		ha_copy2 = hba->intr_map[msgid];
	}

#else /* !MSI_SUPPORT */

	/* Read host attention register to determine interrupt source */
	ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

#endif /* MSI_SUPPORT */

	/* Drop each attention bit whose interrupt is not enabled in HC */

	/* Check if Hardware error interrupt is enabled */
	if ((ha_copy2 & HA_ERATT) && !(mask & HC_ERINT_ENA)) {
		ha_copy2 &= ~HA_ERATT;
	}

	/* Check if link interrupt is enabled */
	if ((ha_copy2 & HA_LATT) && !(mask & HC_LAINT_ENA)) {
		ha_copy2 &= ~HA_LATT;
	}

	/* Check if Mailbox interrupt is enabled */
	if ((ha_copy2 & HA_MBATT) && !(mask & HC_MBINT_ENA)) {
		ha_copy2 &= ~HA_MBATT;
	}

	/* Check if ring0 interrupt is enabled */
	if ((ha_copy2 & HA_R0ATT) && !(mask & HC_R0INT_ENA)) {
		ha_copy2 &= ~HA_R0ATT;
	}

	/* Check if ring1 interrupt is enabled */
	if ((ha_copy2 & HA_R1ATT) && !(mask & HC_R1INT_ENA)) {
		ha_copy2 &= ~HA_R1ATT;
	}

	/* Check if ring2 interrupt is enabled */
	if ((ha_copy2 & HA_R2ATT) && !(mask & HC_R2INT_ENA)) {
		ha_copy2 &= ~HA_R2ATT;
	}

	/* Check if ring3 interrupt is enabled */
	if ((ha_copy2 & HA_R3ATT) && !(mask & HC_R3INT_ENA)) {
		ha_copy2 &= ~HA_R3ATT;
	}

	/* Accumulate attention bits */
	ha_copy |= ha_copy2;

	/* Clear attentions except for error, link, and autoclear(MSIX) */
	ha_copy2 &= ~(HA_ERATT | HA_LATT);	/* | hba->intr_autoClear */

	if (ha_copy2) {
		WRITE_CSR_REG(hba, FC_HA_REG(hba), ha_copy2);
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif	/* FMA_SUPPORT */

	return (ha_copy);

} /* emlxs_get_attention() */


/*
 * Dispatch pre-filtered host attention bits to the appropriate
 * handlers: adapter error, mailbox, link, and the four ring events.
 */
static void
emlxs_proc_attention(emlxs_hba_t *hba, uint32_t ha_copy)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif	/* FMA_SUPPORT */

	/* ha_copy should be pre-filtered */

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	 * "proc_attention: ha_copy=%x", ha_copy);
	 */

	/* Ignore attention until the adapter is at least warm-started */
	if (hba->state < FC_WARM_START) {
		return;
	}

	if (!ha_copy) {
		return;
	}

	if (hba->bus_type == SBUS_FC) {
		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
	}

	/* Adapter error */
	if (ha_copy & HA_ERATT) {
		HBASTATS.IntrEvent[6]++;
		emlxs_handle_ff_error(hba);
		return;
	}

	/* Mailbox interrupt */
	if (ha_copy & HA_MBATT) {
		HBASTATS.IntrEvent[5]++;
		(void) emlxs_handle_mb_event(hba);
	}

	/* Link Attention interrupt */
	if (ha_copy & HA_LATT) {
		HBASTATS.IntrEvent[4]++;
		emlxs_sli3_handle_link_event(hba);
	}

	/* event on ring 0 - FCP Ring */
	if (ha_copy & HA_R0ATT) {
		HBASTATS.IntrEvent[0]++;
		emlxs_sli3_handle_ring_event(hba, 0, ha_copy);
	}

	/* event on ring 1 - IP Ring */
	if (ha_copy & HA_R1ATT) {
		HBASTATS.IntrEvent[1]++;
		emlxs_sli3_handle_ring_event(hba, 1, ha_copy);
	}

	/* event on ring 2 - ELS Ring */
	if (ha_copy & HA_R2ATT) {
		HBASTATS.IntrEvent[2]++;
emlxs_sli3_handle_ring_event(hba, 2, ha_copy); 4548 } 4549 4550 /* event on ring 3 - CT Ring */ 4551 if (ha_copy & HA_R3ATT) { 4552 HBASTATS.IntrEvent[3]++; 4553 emlxs_sli3_handle_ring_event(hba, 3, ha_copy); 4554 } 4555 4556 if (hba->bus_type == SBUS_FC) { 4557 WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), SBUS_STAT_IP); 4558 } 4559 4560 /* Set heartbeat flag to show activity */ 4561 hba->heartbeat_flag = 1; 4562 4563 #ifdef FMA_SUPPORT 4564 if (hba->bus_type == SBUS_FC) { 4565 /* Access handle validation */ 4566 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle); 4567 } 4568 #endif /* FMA_SUPPORT */ 4569 4570 return; 4571 4572 } /* emlxs_proc_attention() */ 4573 4574 4575 /* 4576 * emlxs_handle_ff_error() 4577 * 4578 * Description: Processes a FireFly error 4579 * Runs at Interrupt level 4580 */ 4581 static void 4582 emlxs_handle_ff_error(emlxs_hba_t *hba) 4583 { 4584 emlxs_port_t *port = &PPORT; 4585 uint32_t status; 4586 uint32_t status1; 4587 uint32_t status2; 4588 int i = 0; 4589 4590 /* do what needs to be done, get error from STATUS REGISTER */ 4591 status = READ_CSR_REG(hba, FC_HS_REG(hba)); 4592 4593 /* Clear Chip error bit */ 4594 WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_ERATT); 4595 4596 /* If HS_FFER1 is set, then wait until the HS_FFER1 bit clears */ 4597 if (status & HS_FFER1) { 4598 4599 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg, 4600 "HS_FFER1 received"); 4601 EMLXS_STATE_CHANGE(hba, FC_ERROR); 4602 (void) emlxs_offline(hba, 1); 4603 while ((status & HS_FFER1) && (i < 300)) { 4604 status = 4605 READ_CSR_REG(hba, FC_HS_REG(hba)); 4606 BUSYWAIT_MS(1000); 4607 i++; 4608 } 4609 } 4610 4611 if (i == 300) { 4612 /* 5 minutes is up, shutdown HBA */ 4613 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg, 4614 "HS_FFER1 clear timeout"); 4615 4616 EMLXS_STATE_CHANGE(hba, FC_ERROR); 4617 emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL); 4618 4619 goto done; 4620 } 4621 4622 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg, 4623 
"HS_FFER1 cleared"); 4624 4625 if (status & HS_OVERTEMP) { 4626 status1 = 4627 READ_SLIM_ADDR(hba, 4628 ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xb0)); 4629 4630 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg, 4631 "Maximum adapter temperature exceeded (%d �C).", status1); 4632 4633 hba->temperature = status1; 4634 hba->flag |= FC_OVERTEMP_EVENT; 4635 4636 EMLXS_STATE_CHANGE(hba, FC_ERROR); 4637 emlxs_thread_spawn(hba, emlxs_shutdown_thread, 4638 NULL, NULL); 4639 4640 } else { 4641 status1 = 4642 READ_SLIM_ADDR(hba, 4643 ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xa8)); 4644 status2 = 4645 READ_SLIM_ADDR(hba, 4646 ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xac)); 4647 4648 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg, 4649 "Host Error Attention: " 4650 "status=0x%x status1=0x%x status2=0x%x", 4651 status, status1, status2); 4652 4653 EMLXS_STATE_CHANGE(hba, FC_ERROR); 4654 4655 if (status & HS_FFER6) { 4656 emlxs_thread_spawn(hba, emlxs_restart_thread, 4657 NULL, NULL); 4658 } else { 4659 emlxs_thread_spawn(hba, emlxs_shutdown_thread, 4660 NULL, NULL); 4661 } 4662 } 4663 4664 done: 4665 #ifdef FMA_SUPPORT 4666 /* Access handle validation */ 4667 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle); 4668 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle); 4669 #endif /* FMA_SUPPORT */ 4670 4671 return; 4672 4673 } /* emlxs_handle_ff_error() */ 4674 4675 4676 /* 4677 * emlxs_sli3_handle_link_event() 4678 * 4679 * Description: Process a Link Attention. 
4680 */ 4681 static void 4682 emlxs_sli3_handle_link_event(emlxs_hba_t *hba) 4683 { 4684 emlxs_port_t *port = &PPORT; 4685 MAILBOXQ *mbq; 4686 int rc; 4687 4688 HBASTATS.LinkEvent++; 4689 4690 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_event_msg, "event=%x", 4691 HBASTATS.LinkEvent); 4692 4693 /* Make sure link is declared down */ 4694 emlxs_linkdown(hba); 4695 4696 /* Get a buffer which will be used for mailbox commands */ 4697 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) { 4698 /* Get link attention message */ 4699 if (emlxs_mb_read_la(hba, mbq) == 0) { 4700 rc = emlxs_sli3_issue_mbox_cmd(hba, mbq, 4701 MBX_NOWAIT, 0); 4702 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 4703 emlxs_mem_put(hba, MEM_MBOX, 4704 (void *)mbq); 4705 } 4706 4707 mutex_enter(&EMLXS_PORT_LOCK); 4708 4709 /* 4710 * Clear Link Attention in HA REG 4711 */ 4712 WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_LATT); 4713 4714 #ifdef FMA_SUPPORT 4715 /* Access handle validation */ 4716 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle); 4717 #endif /* FMA_SUPPORT */ 4718 4719 mutex_exit(&EMLXS_PORT_LOCK); 4720 } else { 4721 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq); 4722 } 4723 } 4724 4725 } /* emlxs_sli3_handle_link_event() */ 4726 4727 4728 /* 4729 * emlxs_sli3_handle_ring_event() 4730 * 4731 * Description: Process a Ring Attention. 
 */
static void
emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
    uint32_t ha_copy)
{
	emlxs_port_t *port = &PPORT;
	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
	CHANNEL *cp;
	RING *rp;
	IOCB *entry;
	IOCBQ *iocbq;
	IOCBQ local_iocbq;	/* scratch iocbq for unregistered entries */
	PGP *pgp;
	uint32_t count;
	volatile uint32_t chipatt;
	void *ioa2;
	uint32_t reg;
	uint32_t channel_no;
	off_t offset;
	IOCBQ *rsp_head = NULL;	/* local list of deferred completions */
	IOCBQ *rsp_tail = NULL;
	emlxs_buf_t *sbp = NULL;

	count = 0;
	rp = &hba->sli.sli3.ring[ring_no];
	cp = rp->channelp;
	channel_no = cp->channelno;

	/*
	 * Isolate this ring's host attention bits
	 * This makes all ring attention bits equal
	 * to Ring0 attention bits
	 */
	reg = (ha_copy >> (ring_no * 4)) & 0x0f;

	/*
	 * Gather iocb entries off response ring.
	 * Ensure entry is owned by the host.
	 */
	pgp = (PGP *)&slim2p->mbx.us.s2.port[ring_no];
	offset =
	    (off_t)((uint64_t)((unsigned long)&(pgp->rspPutInx)) -
	    (uint64_t)((unsigned long)slim2p));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
	    DDI_DMA_SYNC_FORKERNEL);
	rp->fc_port_rspidx = BE_SWAP32(pgp->rspPutInx);

	/* While ring is not empty */
	while (rp->fc_rspidx != rp->fc_port_rspidx) {
		HBASTATS.IocbReceived[channel_no]++;

		/* Get the next response ring iocb */
		entry =
		    (IOCB *)(((char *)rp->fc_rspringaddr +
		    (rp->fc_rspidx * hba->sli.sli3.iocb_rsp_size)));

		/* DMA sync the response ring iocb for the adapter */
		offset = (off_t)((uint64_t)((unsigned long)entry)
		    - (uint64_t)((unsigned long)slim2p));
		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
		    hba->sli.sli3.iocb_rsp_size, DDI_DMA_SYNC_FORKERNEL);

		count++;

		/*
		 * Copy word6 and word7 to local iocb for now.
		 * Only these two words (LE/IOTAG/COMMAND/STATUS fields)
		 * are needed to classify the entry before a full copy.
		 */
		iocbq = &local_iocbq;

		BE_SWAP32_BCOPY((uint8_t *)entry + (sizeof (uint32_t) * 6),
		    (uint8_t *)iocbq + (sizeof (uint32_t) * 6),
		    (sizeof (uint32_t) * 2));

		/* when LE is not set, entire Command has not been received */
		if (!iocbq->iocb.ULPLE) {
			/* This should never happen */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_error_msg,
			    "ulpLE is not set. "
			    "ring=%d iotag=%d cmd=%x status=%x",
			    channel_no, iocbq->iocb.ULPIOTAG,
			    iocbq->iocb.ULPCOMMAND, iocbq->iocb.ULPSTATUS);

			goto next;
		}

		sbp = NULL;
		switch (iocbq->iocb.ULPCOMMAND) {
#ifdef SFCT_SUPPORT
		case CMD_CLOSE_XRI_CX:
		case CMD_CLOSE_XRI_CN:
		case CMD_ABORT_XRI_CX:
			if (port->mode == MODE_TARGET) {
				sbp = emlxs_unregister_pkt(cp,
				    iocbq->iocb.ULPIOTAG, 0);
			}
			break;
#endif /* SFCT_SUPPORT */

		/* Ring 0 registered commands */
		case CMD_FCP_ICMND_CR:
		case CMD_FCP_ICMND_CX:
		case CMD_FCP_IREAD_CR:
		case CMD_FCP_IREAD_CX:
		case CMD_FCP_IWRITE_CR:
		case CMD_FCP_IWRITE_CX:
		case CMD_FCP_ICMND64_CR:
		case CMD_FCP_ICMND64_CX:
		case CMD_FCP_IREAD64_CR:
		case CMD_FCP_IREAD64_CX:
		case CMD_FCP_IWRITE64_CR:
		case CMD_FCP_IWRITE64_CX:
#ifdef SFCT_SUPPORT
		case CMD_FCP_TSEND_CX:
		case CMD_FCP_TSEND64_CX:
		case CMD_FCP_TRECEIVE_CX:
		case CMD_FCP_TRECEIVE64_CX:
		case CMD_FCP_TRSP_CX:
		case CMD_FCP_TRSP64_CX:
#endif /* SFCT_SUPPORT */

		/* Ring 1 registered commands */
		case CMD_XMIT_BCAST_CN:
		case CMD_XMIT_BCAST_CX:
		case CMD_XMIT_SEQUENCE_CX:
		case CMD_XMIT_SEQUENCE_CR:
		case CMD_XMIT_BCAST64_CN:
		case CMD_XMIT_BCAST64_CX:
		case CMD_XMIT_SEQUENCE64_CX:
		case CMD_XMIT_SEQUENCE64_CR:
		case CMD_CREATE_XRI_CR:
		case CMD_CREATE_XRI_CX:

		/* Ring 2 registered commands */
		case CMD_ELS_REQUEST_CR:
		case CMD_ELS_REQUEST_CX:
		case CMD_XMIT_ELS_RSP_CX:
		case CMD_ELS_REQUEST64_CR:
		case CMD_ELS_REQUEST64_CX:
		case CMD_XMIT_ELS_RSP64_CX:

		/* Ring 3 registered commands */
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:

			sbp =
			    emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
			break;
		}

		/* If packet is stale, then drop it. */
		if (sbp == STALE_PACKET) {
			cp->hbaCmplCmd_sbp++;
			/* Copy entry to the local iocbq */
			BE_SWAP32_BCOPY((uint8_t *)entry,
			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_stale_msg,
			    "channelno=%d iocb=%p cmd=%x status=%x "
			    "error=%x iotag=%d context=%x info=%x",
			    channel_no, iocbq, (uint8_t)iocbq->iocb.ULPCOMMAND,
			    iocbq->iocb.ULPSTATUS,
			    (uint8_t)iocbq->iocb.un.grsp.perr.statLocalError,
			    (uint16_t)iocbq->iocb.ULPIOTAG,
			    (uint16_t)iocbq->iocb.ULPCONTEXT,
			    (uint8_t)iocbq->iocb.ULPRSVDBYTE);

			goto next;
		}

		/*
		 * If a packet was found, then queue the packet's
		 * iocb for deferred processing
		 */
		else if (sbp) {
#ifdef SFCT_SUPPORT
			fct_cmd_t *fct_cmd;
			emlxs_buf_t *cmd_sbp;

			fct_cmd = sbp->fct_cmd;
			if (fct_cmd) {
				cmd_sbp =
				    (emlxs_buf_t *)fct_cmd->cmd_fca_private;
				mutex_enter(&cmd_sbp->fct_mtx);
				EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp,
				    EMLXS_FCT_IOCB_COMPLETE);
				mutex_exit(&cmd_sbp->fct_mtx);
			}
#endif /* SFCT_SUPPORT */
			cp->hbaCmplCmd_sbp++;
			atomic_dec_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
			if (sbp->node) {
				atomic_dec_32(&sbp->node->io_active);
			}
#endif /* NODE_THROTTLE_SUPPORT */

			/* Copy entry to sbp's iocbq */
			iocbq = &sbp->iocbq;
			BE_SWAP32_BCOPY((uint8_t *)entry,
			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);

			iocbq->next = NULL;

			/*
			 * If this is NOT a polled command completion
			 * or a driver allocated pkt, then defer pkt
			 * completion.
			 */
			if (!(sbp->pkt_flags &
			    (PACKET_POLLED | PACKET_ALLOCATED))) {
				/* Add the IOCB to the local list */
				if (!rsp_head) {
					rsp_head = iocbq;
				} else {
					rsp_tail->next = iocbq;
				}

				rsp_tail = iocbq;

				goto next;
			}
		} else {
			cp->hbaCmplCmd++;
			/* Copy entry to the local iocbq */
			BE_SWAP32_BCOPY((uint8_t *)entry,
			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);

			iocbq->next = NULL;
			iocbq->bp = NULL;
			iocbq->port = &PPORT;
			iocbq->channel = cp;
			iocbq->node = NULL;
			iocbq->sbp = NULL;
			iocbq->flag = 0;
		}

		/* process the channel event now */
		emlxs_proc_channel_event(hba, cp, iocbq);

next:
		/* Increment the driver's local response get index */
		if (++rp->fc_rspidx >= rp->fc_numRiocb) {
			rp->fc_rspidx = 0;
		}

	} /* while (TRUE) */

	/* Hand any deferred completions to the channel's intr thread */
	if (rsp_head) {
		mutex_enter(&cp->rsp_lock);
		if (cp->rsp_head == NULL) {
			cp->rsp_head = rsp_head;
			cp->rsp_tail = rsp_tail;
		} else {
			cp->rsp_tail->next = rsp_head;
			cp->rsp_tail = rsp_tail;
		}
		mutex_exit(&cp->rsp_lock);

		emlxs_thread_trigger2(&cp->intr_thread, emlxs_proc_channel, cp);
	}

	/* Check if at least one response entry was processed */
	if (count) {
		/* Update response get index for the adapter */
		if (hba->bus_type == SBUS_FC) {
			/* SBUS adapters: get index lives in host memory */
			slim2p->mbx.us.s2.host[channel_no].rspGetInx
			    = BE_SWAP32(rp->fc_rspidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channel_no].rspGetInx))
			    - (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			/* PCI adapters: get index is written to SLIM */
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset + (((channel_no * 2) +
			    1) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_rspidx);
#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba,
			    hba->sli.sli3.slim_acc_handle);
#endif /* FMA_SUPPORT */
		}

		if (reg & HA_R0RE_REQ) {
			/* HBASTATS.chipRingFree++; */

			mutex_enter(&EMLXS_PORT_LOCK);

			/* Tell the adapter we serviced the ring */
			chipatt = ((CA_R0ATT | CA_R0RE_RSP) <<
			    (channel_no * 4));
			WRITE_CSR_REG(hba, FC_CA_REG(hba), chipatt);

#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */

			mutex_exit(&EMLXS_PORT_LOCK);
		}
	}

	if ((reg & HA_R0CE_RSP) || hba->channel_tx_count) {
		/* HBASTATS.hostRingFree++; */

		/* Cmd ring may be available.
		   Try sending more iocbs */
		emlxs_sli3_issue_iocb_cmd(hba, cp, 0);
	}

	/* HBASTATS.ringEvent++; */

	return;

} /* emlxs_sli3_handle_ring_event() */


/*
 * Process one unsolicited receive-sequence IOCB from a ring.
 * Resolves the posted receive buffer (HBQ or BDE addressed), then
 * dispatches to the per-ring unsolicited handler.  Returns 0 on
 * normal completion (including dropped frames) and 1 for an
 * unrecognized channel.
 */
extern int
emlxs_handle_rcv_seq(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
	emlxs_port_t *port = &PPORT;
	IOCB *iocb;
	RING *rp;
	MATCHMAP *mp = NULL;
	uint64_t bdeAddr;
	uint32_t vpi = 0;
	uint32_t channelno;
	uint32_t size = 0;
	uint32_t *RcvError;
	uint32_t *RcvDropped;
	uint32_t *UbPosted;
	emlxs_msg_t *dropped_msg;
	char error_str[64];
	uint32_t buf_type;
	uint32_t *word;

	channelno = cp->channelno;
	rp = &hba->sli.sli3.ring[channelno];

	iocb = &iocbq->iocb;
	word = (uint32_t *)iocb;

	/*
	 * Select the per-ring statistics counters, drop-message id and
	 * buffer pool before touching the payload.
	 */
	switch (channelno) {
#ifdef SFCT_SUPPORT
	case FC_FCT_RING:
		HBASTATS.FctRingEvent++;
		RcvError = &HBASTATS.FctRingError;
		RcvDropped = &HBASTATS.FctRingDropped;
		UbPosted = &HBASTATS.FctUbPosted;
		dropped_msg = &emlxs_fct_detail_msg;
		buf_type = MEM_FCTBUF;
		break;
#endif /* SFCT_SUPPORT */

	case FC_IP_RING:
		HBASTATS.IpRcvEvent++;
		RcvError = &HBASTATS.IpDropped;
		RcvDropped = &HBASTATS.IpDropped;
		UbPosted = &HBASTATS.IpUbPosted;
		dropped_msg = &emlxs_unsol_ip_dropped_msg;
		buf_type = MEM_IPBUF;
		break;

	case FC_ELS_RING:
		HBASTATS.ElsRcvEvent++;
		RcvError = &HBASTATS.ElsRcvError;
		RcvDropped = &HBASTATS.ElsRcvDropped;
		UbPosted = &HBASTATS.ElsUbPosted;
		dropped_msg = &emlxs_unsol_els_dropped_msg;
		buf_type = MEM_ELSBUF;
		break;

	case FC_CT_RING:
		HBASTATS.CtRcvEvent++;
		RcvError = &HBASTATS.CtRcvError;
		RcvDropped = &HBASTATS.CtRcvDropped;
		UbPosted = &HBASTATS.CtUbPosted;
		dropped_msg = &emlxs_unsol_ct_dropped_msg;
		buf_type = MEM_CTBUF;
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
		    "channel=%d cmd=%x %s %x %x %x %x",
		    channelno, iocb->ULPCOMMAND,
		    emlxs_state_xlate(iocb->ULPSTATUS), word[4], word[5],
		    word[6], word[7]);
		return (1);
	}

	if (iocb->ULPSTATUS) {
		/*
		 * ULPBDECOUNT is zeroed on the no-buffer cases so that the
		 * done-path does not repost/advance for a buffer that was
		 * never consumed.
		 */
		if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
		    (iocb->un.grsp.perr.statLocalError ==
		    IOERR_RCV_BUFFER_TIMEOUT)) {
			(void) strlcpy(error_str, "Out of posted buffers:",
			    sizeof (error_str));
			iocb->ULPBDECOUNT = 0;
		} else if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
		    (iocb->un.grsp.perr.statLocalError ==
		    IOERR_RCV_BUFFER_WAITING)) {
			(void) strlcpy(error_str, "Buffer waiting:",
			    sizeof (error_str));
			iocb->ULPBDECOUNT = 0;
			goto done;
		} else if (iocb->ULPSTATUS == IOSTAT_NEED_BUFF_ENTRY) {
			(void) strlcpy(error_str, "Need Buffer Entry:",
			    sizeof (error_str));
			iocb->ULPBDECOUNT = 0;
			goto done;
		} else {
			(void) strlcpy(error_str, "General error:",
			    sizeof (error_str));
		}

		goto failed;
	}

	if (hba->flag & FC_HBQ_ENABLED) {
		/* HBQ mode: buffer is identified by HBQ id + entry tag */
		HBQ_INIT_t *hbq;
		HBQE_t *hbqE;
		uint32_t hbqe_tag;
		uint32_t hbq_id;

		(*UbPosted)--;

		hbqE = (HBQE_t *)iocb;
		hbq_id = hbqE->unt.ext.HBQ_tag;
		hbqe_tag = hbqE->unt.ext.HBQE_tag;

		hbq = &hba->sli.sli3.hbq_table[hbq_id];

		if (hbqe_tag >= hbq->HBQ_numEntries) {
			(void) snprintf(error_str, sizeof (error_str),
			    "Invalid HBQE iotag=%d:", hbqe_tag);
			goto dropped;
		}

		mp = hba->sli.sli3.hbq_table[hbq_id].HBQ_PostBufs[hbqe_tag];

		size = iocb->unsli3.ext_rcv.seq_len;
	} else {
		/* Non-HBQ mode: buffer is identified by its DMA address */
		bdeAddr =
		    PADDR(iocb->un.cont64[0].addrHigh,
		    iocb->un.cont64[0].addrLow);

		/* Check for invalid buffer */
		if (iocb->un.cont64[0].tus.f.bdeFlags & BUFF_TYPE_INVALID) {
			(void) strlcpy(error_str, "Invalid buffer:",
			    sizeof (error_str));
			goto dropped;
		}

		mp = emlxs_mem_get_vaddr(hba, rp, bdeAddr);

		size = iocb->un.rcvseq64.rcvBde.tus.f.bdeSize;
	}

	if (!mp) {
		(void) strlcpy(error_str, "Buffer not mapped:",
		    sizeof (error_str));
		goto dropped;
	}

#ifdef FMA_SUPPORT
	if (mp->dma_handle) {
		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "handle_rcv_seq: hdl=%p",
			    mp->dma_handle);
			goto dropped;
		}
	}
#endif /* FMA_SUPPORT */

	if (!size) {
		(void) strlcpy(error_str, "Buffer empty:", sizeof (error_str));
		goto dropped;
	}

	/* To avoid we drop the broadcast packets */
	if (channelno != FC_IP_RING) {
		/* Get virtual port */
		if (hba->flag & FC_NPIV_ENABLED) {
			vpi = iocb->unsli3.ext_rcv.vpi;
			if (vpi >= hba->vpi_max) {
				(void) snprintf(error_str, sizeof (error_str),
				    "Invalid VPI=%d:", vpi);
				goto dropped;
			}

			port = &VPORT(vpi);
		}
	}

	/* Process request */
	switch (channelno) {
	case FC_FCT_RING:
		if (port->mode == MODE_INITIATOR) {
			(void) strlcpy(error_str, "Target mode disabled:",
			    sizeof (error_str));
			goto dropped;
#ifdef SFCT_SUPPORT
		} else if (port->mode == MODE_TARGET) {
			(void) emlxs_fct_handle_unsol_req(port, cp, iocbq, mp,
			    size);
#endif /* SFCT_SUPPORT */
		} else {
			(void) snprintf(error_str, sizeof (error_str),
			    "Invalid mode=%x:", port->mode);
			goto dropped;
		}
		break;

	case FC_IP_RING:
		if (port->mode == MODE_INITIATOR) {
			(void) emlxs_ip_handle_unsol_req(port, cp, iocbq,
			    mp, size);
#ifdef SFCT_SUPPORT
		} else if (port->mode == MODE_TARGET) {
			(void) strlcpy(error_str, "Initiator mode disabled:",
			    sizeof (error_str));
			goto dropped;
#endif /* SFCT_SUPPORT */
		} else {
			(void) snprintf(error_str, sizeof (error_str),
			    "Invalid mode=%x:", port->mode);
			goto dropped;
		}
		break;

	case FC_ELS_RING:
		if (port->mode == MODE_INITIATOR) {
			(void) emlxs_els_handle_unsol_req(port, cp, iocbq, mp,
			    size);
#ifdef SFCT_SUPPORT
		} else if (port->mode == MODE_TARGET) {
			(void) emlxs_fct_handle_unsol_els(port, cp, iocbq, mp,
			    size);
#endif /* SFCT_SUPPORT */
		} else {
			(void) snprintf(error_str, sizeof (error_str),
			    "Invalid mode=%x:", port->mode);
			goto dropped;
		}
		break;

	case FC_CT_RING:
		(void) emlxs_ct_handle_unsol_req(port, cp, iocbq, mp, size);
		break;
	}

	goto done;

dropped:
	(*RcvDropped)++;

	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
	    "%s: cmd=%x %s %x %x %x %x",
	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
	    word[4], word[5], word[6], word[7]);

	if (channelno == FC_FCT_RING) {
		uint32_t sid;

		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
			emlxs_node_t *ndlp;
			ndlp = emlxs_node_find_rpi(port, iocb->ULPIOTAG);
			if (!ndlp) {
				goto done;
			}
			sid = ndlp->nlp_DID;
		} else {
			sid = iocb->un.ulpWord[4] & 0xFFFFFF;
		}

		/* Tell the remote initiator to log out and retry */
		emlxs_send_logo(port, sid);
	}

	goto done;

failed:
	(*RcvError)++;

	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
	    "%s: cmd=%x %s %x %x %x %x hba:%x %x",
	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
	    word[4], word[5], word[6], word[7], hba->state, hba->flag);

done:

	/* Return the consumed receive buffer to the adapter/pool */
	if (hba->flag & FC_HBQ_ENABLED) {
		if (iocb->ULPBDECOUNT) {
			HBQE_t *hbqE;
			uint32_t hbq_id;

			hbqE = (HBQE_t *)iocb;
			hbq_id = hbqE->unt.ext.HBQ_tag;

			emlxs_update_HBQ_index(hba, hbq_id);
		}
	} else {
		if (mp) {
			emlxs_mem_put(hba, buf_type, (void *)mp);
		}

		if (iocb->ULPBDECOUNT) {
			(void) emlxs_post_buffer(hba, rp, 1);
		}
	}

	return (0);

} /* emlxs_handle_rcv_seq() */


/* EMLXS_CMD_RING_LOCK must be held when calling this function */
static void
emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
{
	emlxs_port_t *port;
	IOCB *icmd;
	IOCB *iocb;
	emlxs_buf_t *sbp;
	off_t offset;
	uint32_t ringno;

	ringno = rp->ringno;
	sbp = iocbq->sbp;
	icmd = &iocbq->iocb;
	port = iocbq->port;

	HBASTATS.IocbIssued[ringno]++;

	/* Check for ULP pkt request */
	if (sbp) {
		mutex_enter(&sbp->mtx);

		if (sbp->node == NULL) {
			/* Set node to base node by default */
			iocbq->node = (void *)&port->node_base;
			sbp->node = (void *)&port->node_base;
		}

		sbp->pkt_flags |= PACKET_IN_CHIPQ;
		mutex_exit(&sbp->mtx);

		atomic_inc_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
		if (sbp->node) {
			atomic_inc_32(&sbp->node->io_active);
		}
#endif /* NODE_THROTTLE_SUPPORT */

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_ISSUED);
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    icmd->ULPCOMMAND);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		rp->channelp->hbaSendCmd_sbp++;
		iocbq->channel = rp->channelp;
	} else {
		rp->channelp->hbaSendCmd++;
	}

	/* get the next available command ring iocb */
	iocb =
	    (IOCB *)(((char *)rp->fc_cmdringaddr +
	    (rp->fc_cmdidx * hba->sli.sli3.iocb_cmd_size)));

	/* Copy the local iocb to the command ring iocb */
	BE_SWAP32_BCOPY((uint8_t *)icmd, (uint8_t *)iocb,
	    hba->sli.sli3.iocb_cmd_size);

	/* DMA sync the command ring iocb for the adapter */
	offset = (off_t)((uint64_t)((unsigned long)iocb)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
	    hba->sli.sli3.iocb_cmd_size, DDI_DMA_SYNC_FORDEV);

	/*
	 * After this, the sbp / iocb should not be
	 * accessed in the xmit path.
	 */

	/* Free the local iocb if there is no sbp tracking it */
	if (!sbp) {
		emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
	}

	/* update local ring index to next available ring index (with wrap) */
	rp->fc_cmdidx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;


	return;

} /* emlxs_sli3_issue_iocb() */


/*
 * Kill the adapter via the mailbox interlock protocol: the host writes
 * 0x55555555 into mailbox word 1 and a MBX_KILL_BOARD command word, and
 * the chip acknowledges by rewriting the word as 0xAAAAAAAA.  SLIM2
 * (host-memory) mode is attempted first, falling back to SLIM1 (on-chip).
 */
static void
emlxs_sli3_hba_kill(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX swpmb;
	MAILBOX *mb2;
	MAILBOX *mb1;
	uint32_t *word0;
	uint32_t j;
	uint32_t interlock_failed;
	uint32_t ha_copy;
	uint32_t value;
	off_t offset;
	uint32_t size;

	/* Perform adapter interlock to kill adapter */
	interlock_failed = 0;

	mutex_enter(&EMLXS_PORT_LOCK);
	if (hba->flag & FC_INTERLOCKED) {
		/* Already interlocked; just record the state change */
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

		mutex_exit(&EMLXS_PORT_LOCK);

		return;
	}

	/* Wait (up to ~1s) for any active mailbox command to drain */
	j = 0;
	while (j++ < 10000) {
		if (hba->mbox_queue_flag == 0) {
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);
		BUSYWAIT_US(100);
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	if (hba->mbox_queue_flag != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Mailbox busy.");
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	hba->flag |= FC_INTERLOCKED;
	hba->mbox_queue_flag = 1;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	word0 = (uint32_t *)&swpmb;

	if (!(hba->flag & FC_SLIM2_MODE)) {
		goto mode_B;
	}

mode_A:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM2 Interlock...");

interlock_A:

	value = 0x55555555;
	*word0 = 0;
	swpmb.mbxCommand = MBX_KILL_BOARD;
	swpmb.mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), *word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}

		BUSYWAIT_US(50);
	}

	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			*word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb.mbxOwner == 0)
			{
				break;
			}

			BUSYWAIT_US(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */
	interlock_failed = 1;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "Interlock failed.");

mode_B:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM1 Interlock...");

interlock_B:

	value = 0x55555555;
	*word0 = 0;
	swpmb.mbxCommand = MBX_KILL_BOARD;
	swpmb.mbxOwner = OWN_CHIP;

	/* Write KILL BOARD to mailbox */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb1), *word0);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}

		BUSYWAIT_US(50);
	}

	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			*word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb.mbxOwner == 0) {
				break;
			}

			BUSYWAIT_US(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */

	/* If this is the first time then try again */
	if (interlock_failed == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Retrying...");

		/* Try again */
		interlock_failed = 1;
		goto interlock_B;
	}

	/*
	 * Now check for error attention to indicate the board has
	 * been killed
	 */
	j = 0;
	while (j++ < 10000) {
		ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));

		if (ha_copy & HA_ERATT) {
			break;
		}

		BUSYWAIT_US(50);
	}

	if (ha_copy & HA_ERATT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board killed.");
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board not killed.");
	}

done:

	hba->mbox_queue_flag = 0;

	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_sli3_hba_kill() */


/*
 * Quiesce-time variant of emlxs_sli3_hba_kill(): performs only the
 * SLIM2 kill-board interlock, without the mailbox-drain wait, retries,
 * or EMLXS_PORT_LOCK handling of the full version.
 */
static void
emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX swpmb;
	MAILBOX *mb2;
	MAILBOX *mb1;
	uint32_t *word0;
	off_t offset;
	uint32_t j;
	uint32_t value;
	uint32_t size;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	word0 = (uint32_t *)&swpmb;

	value = 0x55555555;
	*word0 = 0;
	swpmb.mbxCommand = MBX_KILL_BOARD;
	swpmb.mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), *word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance (chip echoes 0xAAAAAAAA) */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}
		BUSYWAIT_US(50);
	}
	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			*word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));
			if (swpmb.mbxOwner == 0) {
				break;
			}
			BUSYWAIT_US(50);
		}
		/* NOTE: falls through to done either way */
		goto done;
	}

done:
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */
	return;

} /* emlxs_sli3_hba_kill4quiesce */




/*
 * emlxs_handle_mb_event
 *
 * Description: Process a Mailbox Attention.
5768 * Called from host_interrupt to process MBATT 5769 * 5770 * Returns: 5771 * 5772 */ 5773 static uint32_t 5774 emlxs_handle_mb_event(emlxs_hba_t *hba) 5775 { 5776 emlxs_port_t *port = &PPORT; 5777 MAILBOX *mb; 5778 MAILBOX swpmb; 5779 MAILBOX *mbox; 5780 MAILBOXQ *mbq = NULL; 5781 uint32_t *word0; 5782 MATCHMAP *mbox_bp; 5783 off_t offset; 5784 uint32_t i; 5785 int rc; 5786 5787 word0 = (uint32_t *)&swpmb; 5788 5789 mutex_enter(&EMLXS_PORT_LOCK); 5790 switch (hba->mbox_queue_flag) { 5791 case 0: 5792 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg, 5793 "No mailbox active."); 5794 5795 mutex_exit(&EMLXS_PORT_LOCK); 5796 return (0); 5797 5798 case MBX_POLL: 5799 5800 /* Mark mailbox complete, this should wake up any polling */ 5801 /* threads. This can happen if interrupts are enabled while */ 5802 /* a polled mailbox command is outstanding. If we don't set */ 5803 /* MBQ_COMPLETED here, the polling thread may wait until */ 5804 /* timeout error occurs */ 5805 5806 mutex_enter(&EMLXS_MBOX_LOCK); 5807 mbq = (MAILBOXQ *)hba->mbox_mbq; 5808 if (mbq) { 5809 port = (emlxs_port_t *)mbq->port; 5810 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg, 5811 "Mailbox event. Completing Polled command."); 5812 mbq->flag |= MBQ_COMPLETED; 5813 } 5814 mutex_exit(&EMLXS_MBOX_LOCK); 5815 5816 mutex_exit(&EMLXS_PORT_LOCK); 5817 return (0); 5818 5819 case MBX_SLEEP: 5820 case MBX_NOWAIT: 5821 /* Check mbox_timer, it acts as a service flag too */ 5822 /* The first to service the mbox queue will clear the timer */ 5823 if (hba->mbox_timer) { 5824 hba->mbox_timer = 0; 5825 5826 mutex_enter(&EMLXS_MBOX_LOCK); 5827 mbq = (MAILBOXQ *)hba->mbox_mbq; 5828 mutex_exit(&EMLXS_MBOX_LOCK); 5829 } 5830 5831 if (!mbq) { 5832 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg, 5833 "Mailbox event. 
No service required."); 5834 mutex_exit(&EMLXS_PORT_LOCK); 5835 return (0); 5836 } 5837 5838 mb = (MAILBOX *)mbq; 5839 mutex_exit(&EMLXS_PORT_LOCK); 5840 break; 5841 5842 default: 5843 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg, 5844 "Invalid Mailbox flag (%x)."); 5845 5846 mutex_exit(&EMLXS_PORT_LOCK); 5847 return (0); 5848 } 5849 5850 /* Set port context */ 5851 port = (emlxs_port_t *)mbq->port; 5852 5853 /* Get first word of mailbox */ 5854 if (hba->flag & FC_SLIM2_MODE) { 5855 mbox = FC_SLIM2_MAILBOX(hba); 5856 offset = (off_t)((uint64_t)((unsigned long)mbox) 5857 - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt)); 5858 5859 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 5860 sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL); 5861 *word0 = *((volatile uint32_t *)mbox); 5862 *word0 = BE_SWAP32(*word0); 5863 } else { 5864 mbox = FC_SLIM1_MAILBOX(hba); 5865 *word0 = READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox)); 5866 } 5867 5868 i = 0; 5869 while (swpmb.mbxOwner == OWN_CHIP) { 5870 if (i++ > 10000) { 5871 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg, 5872 "OWN_CHIP: %s: status=%x", 5873 emlxs_mb_cmd_xlate(swpmb.mbxCommand), 5874 swpmb.mbxStatus); 5875 5876 return (1); 5877 } 5878 5879 /* Get first word of mailbox */ 5880 if (hba->flag & FC_SLIM2_MODE) { 5881 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, 5882 offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL); 5883 *word0 = *((volatile uint32_t *)mbox); 5884 *word0 = BE_SWAP32(*word0); 5885 } else { 5886 *word0 = 5887 READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox)); 5888 } 5889 } 5890 5891 /* Now that we are the owner, DMA Sync entire mailbox if needed */ 5892 if (hba->flag & FC_SLIM2_MODE) { 5893 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 5894 MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL); 5895 5896 BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb, 5897 MAILBOX_CMD_BSIZE); 5898 } else { 5899 READ_SLIM_COPY(hba, (uint32_t *)mb, (uint32_t *)mbox, 5900 
MAILBOX_CMD_WSIZE); 5901 } 5902 5903 #ifdef MBOX_EXT_SUPPORT 5904 if (mbq->extbuf) { 5905 uint32_t *mbox_ext = 5906 (uint32_t *)((uint8_t *)mbox + MBOX_EXTENSION_OFFSET); 5907 off_t offset_ext = offset + MBOX_EXTENSION_OFFSET; 5908 5909 if (hba->flag & FC_SLIM2_MODE) { 5910 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, 5911 offset_ext, mbq->extsize, 5912 DDI_DMA_SYNC_FORKERNEL); 5913 BE_SWAP32_BCOPY((uint8_t *)mbox_ext, 5914 (uint8_t *)mbq->extbuf, mbq->extsize); 5915 } else { 5916 READ_SLIM_COPY(hba, (uint32_t *)mbq->extbuf, 5917 mbox_ext, (mbq->extsize / 4)); 5918 } 5919 } 5920 #endif /* MBOX_EXT_SUPPORT */ 5921 5922 #ifdef FMA_SUPPORT 5923 if (!(hba->flag & FC_SLIM2_MODE)) { 5924 /* Access handle validation */ 5925 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle); 5926 } 5927 #endif /* FMA_SUPPORT */ 5928 5929 /* Now sync the memory buffer if one was used */ 5930 if (mbq->bp) { 5931 mbox_bp = (MATCHMAP *)mbq->bp; 5932 EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size, 5933 DDI_DMA_SYNC_FORKERNEL); 5934 } 5935 5936 /* Mailbox has been completely received at this point */ 5937 5938 if (mb->mbxCommand == MBX_HEARTBEAT) { 5939 hba->heartbeat_active = 0; 5940 goto done; 5941 } 5942 5943 if (hba->mbox_queue_flag == MBX_SLEEP) { 5944 if (swpmb.mbxCommand != MBX_DOWN_LOAD && 5945 swpmb.mbxCommand != MBX_DUMP_MEMORY) { 5946 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg, 5947 "Received. %s: status=%x Sleep.", 5948 emlxs_mb_cmd_xlate(swpmb.mbxCommand), 5949 swpmb.mbxStatus); 5950 } 5951 } else { 5952 if (swpmb.mbxCommand != MBX_DOWN_LOAD && 5953 swpmb.mbxCommand != MBX_DUMP_MEMORY) { 5954 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg, 5955 "Completed. 
%s: status=%x", 5956 emlxs_mb_cmd_xlate(swpmb.mbxCommand), 5957 swpmb.mbxStatus); 5958 } 5959 } 5960 5961 /* Filter out passthru mailbox */ 5962 if (mbq->flag & MBQ_PASSTHRU) { 5963 goto done; 5964 } 5965 5966 if (mb->mbxStatus) { 5967 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg, 5968 "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand), 5969 (uint32_t)mb->mbxStatus); 5970 } 5971 5972 if (mbq->mbox_cmpl) { 5973 rc = (mbq->mbox_cmpl)(hba, mbq); 5974 /* If mbox was retried, return immediately */ 5975 if (rc) { 5976 return (0); 5977 } 5978 } 5979 5980 done: 5981 5982 /* Clean up the mailbox area */ 5983 emlxs_mb_fini(hba, mb, mb->mbxStatus); 5984 5985 mbq = (MAILBOXQ *)emlxs_mb_get(hba); 5986 if (mbq) { 5987 /* Attempt to send pending mailboxes */ 5988 rc = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0); 5989 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 5990 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq); 5991 } 5992 } 5993 return (0); 5994 5995 } /* emlxs_handle_mb_event() */ 5996 5997 5998 static void 5999 emlxs_sli3_timer(emlxs_hba_t *hba) 6000 { 6001 /* Perform SLI3 level timer checks */ 6002 6003 emlxs_sli3_timer_check_mbox(hba); 6004 6005 } /* emlxs_sli3_timer() */ 6006 6007 6008 static void 6009 emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba) 6010 { 6011 emlxs_port_t *port = &PPORT; 6012 emlxs_config_t *cfg = &CFG; 6013 MAILBOX *mb = NULL; 6014 MAILBOX swpmb; 6015 uint32_t *word0; 6016 uint32_t offset; 6017 uint32_t ha_copy = 0; 6018 6019 if (!cfg[CFG_TIMEOUT_ENABLE].current) { 6020 return; 6021 } 6022 6023 mutex_enter(&EMLXS_PORT_LOCK); 6024 6025 /* Return if timer hasn't expired */ 6026 if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) { 6027 mutex_exit(&EMLXS_PORT_LOCK); 6028 return; 6029 } 6030 6031 /* Mailbox timed out, first check for error attention */ 6032 ha_copy = emlxs_check_attention(hba); 6033 6034 if (ha_copy & HA_ERATT) { 6035 hba->mbox_timer = 0; 6036 mutex_exit(&EMLXS_PORT_LOCK); 6037 emlxs_handle_ff_error(hba); 6038 return; 
6039 } 6040 6041 word0 = (uint32_t *)&swpmb; 6042 6043 if (hba->mbox_queue_flag) { 6044 /* Get first word of mailbox */ 6045 if (hba->flag & FC_SLIM2_MODE) { 6046 mb = FC_SLIM2_MAILBOX(hba); 6047 offset = 6048 (off_t)((uint64_t)((unsigned long)mb) - (uint64_t) 6049 ((unsigned long)hba->sli.sli3.slim2.virt)); 6050 6051 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, 6052 offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL); 6053 *word0 = *((volatile uint32_t *)mb); 6054 *word0 = BE_SWAP32(*word0); 6055 } else { 6056 mb = FC_SLIM1_MAILBOX(hba); 6057 *word0 = 6058 READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb)); 6059 #ifdef FMA_SUPPORT 6060 /* Access handle validation */ 6061 EMLXS_CHK_ACC_HANDLE(hba, 6062 hba->sli.sli3.slim_acc_handle); 6063 #endif /* FMA_SUPPORT */ 6064 } 6065 6066 mb = &swpmb; 6067 6068 /* Check if mailbox has actually completed */ 6069 if (mb->mbxOwner == OWN_HOST) { 6070 /* Read host attention register to determine */ 6071 /* interrupt source */ 6072 uint32_t ha_copy = emlxs_check_attention(hba); 6073 6074 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg, 6075 "Mailbox attention missed: %s. Forcing event. 
" 6076 "hc=%x ha=%x", emlxs_mb_cmd_xlate(mb->mbxCommand), 6077 hba->sli.sli3.hc_copy, ha_copy); 6078 6079 mutex_exit(&EMLXS_PORT_LOCK); 6080 6081 (void) emlxs_handle_mb_event(hba); 6082 6083 return; 6084 } 6085 6086 /* The first to service the mbox queue will clear the timer */ 6087 /* We will service the mailbox here */ 6088 hba->mbox_timer = 0; 6089 6090 mutex_enter(&EMLXS_MBOX_LOCK); 6091 mb = (MAILBOX *)hba->mbox_mbq; 6092 mutex_exit(&EMLXS_MBOX_LOCK); 6093 } 6094 6095 if (mb) { 6096 switch (hba->mbox_queue_flag) { 6097 case MBX_NOWAIT: 6098 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, 6099 "%s: Nowait.", 6100 emlxs_mb_cmd_xlate(mb->mbxCommand)); 6101 break; 6102 6103 case MBX_SLEEP: 6104 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, 6105 "%s: mb=%p Sleep.", 6106 emlxs_mb_cmd_xlate(mb->mbxCommand), 6107 mb); 6108 break; 6109 6110 case MBX_POLL: 6111 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, 6112 "%s: mb=%p Polled.", 6113 emlxs_mb_cmd_xlate(mb->mbxCommand), 6114 mb); 6115 break; 6116 6117 default: 6118 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, 6119 "%s: mb=%p (%d).", 6120 emlxs_mb_cmd_xlate(mb->mbxCommand), 6121 mb, hba->mbox_queue_flag); 6122 break; 6123 } 6124 } else { 6125 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL); 6126 } 6127 6128 hba->flag |= FC_MBOX_TIMEOUT; 6129 EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR); 6130 6131 mutex_exit(&EMLXS_PORT_LOCK); 6132 6133 /* Perform mailbox cleanup */ 6134 /* This will wake any sleeping or polling threads */ 6135 emlxs_mb_fini(hba, NULL, MBX_TIMEOUT); 6136 6137 /* Trigger adapter shutdown */ 6138 emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL); 6139 6140 return; 6141 6142 } /* emlxs_sli3_timer_check_mbox() */ 6143 6144 6145 /* 6146 * emlxs_mb_config_port Issue a CONFIG_PORT mailbox command 6147 */ 6148 static uint32_t 6149 emlxs_mb_config_port(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t sli_mode, 6150 uint32_t hbainit) 6151 { 6152 MAILBOX *mb = (MAILBOX *)mbq; 6153 
/*
 * emlxs_mb_config_port  Issue a CONFIG_PORT mailbox command
 *
 * Builds the CONFIG_PORT mailbox command in 'mbq' and initializes the
 * host-side SLIM2 Port Control Block (PCB) that the command points at:
 * mailbox/host-group/port-group pointer addresses and the per-ring
 * command/response IOCB descriptors.  Also records the assumed SLI mode
 * settings (ring offsets, IOCB entry sizes, vpi_max/NPIV flags) in the
 * hba softstate; callers update them again once the command actually
 * completes.
 *
 *   sli_mode - requested SLI mode (EMLXS_HBA_SLI2_MODE/SLI3_MODE)
 *   hbainit  - value programmed into varCfgPort.hbainit[0]
 *
 * Always returns 0 (the command is only built here, not issued).
 *
 * NOTE(review): 'slim' and 'mbox' stay NULL throughout; expressions
 * like &(slim->pcb) are the classic offsetof idiom used to compute
 * structure-member offsets which are then added to slim2.phys.
 */
static uint32_t
emlxs_mb_config_port(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t sli_mode,
    uint32_t hbainit)
{
	MAILBOX *mb = (MAILBOX *)mbq;
	emlxs_vpd_t *vpd = &VPD;
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg;
	RING *rp;
	uint64_t pcb;
	uint64_t mbx;
	uint64_t hgp;
	uint64_t pgp;
	uint64_t rgp;
	MAILBOX *mbox;
	SLIM2 *slim;
	SLI2_RDSC *rdsc;
	uint64_t offset;
	uint32_t Laddr;
	uint32_t i;

	cfg = &CFG;
	bzero((void *)mb, MAILBOX_CMD_BSIZE);
	mbox = NULL;	/* NULL base pointers used only for offset math */
	slim = NULL;

	mb->mbxCommand = MBX_CONFIG_PORT;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL;

	mb->un.varCfgPort.pcbLen = sizeof (PCB);
	mb->un.varCfgPort.hbainit[0] = hbainit;

	/* Physical address of the PCB inside the SLIM2 DMA area */
	pcb = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(slim->pcb));
	mb->un.varCfgPort.pcbLow = PADDR_LO(pcb);
	mb->un.varCfgPort.pcbHigh = PADDR_HI(pcb);

	/* Set Host pointers in SLIM flag */
	mb->un.varCfgPort.hps = 1;

	/* Initialize hba structure for assumed default SLI2 mode */
	/* If config port succeeds, then we will update it then */
	hba->sli_mode = sli_mode;
	hba->vpi_max = 0;
	hba->flag &= ~FC_NPIV_ENABLED;

	if (sli_mode == EMLXS_HBA_SLI3_MODE) {
		mb->un.varCfgPort.sli_mode = EMLXS_HBA_SLI3_MODE;
		mb->un.varCfgPort.cerbm = 1;	/* enable ERBM (HBQs) */
		mb->un.varCfgPort.max_hbq = EMLXS_NUM_HBQ;

		/* NPIV requires firmware feature level >= 0x09 */
		if (cfg[CFG_NPIV_ENABLE].current) {
			if (vpd->feaLevelHigh >= 0x09) {
				/* Saturn-class chips support more vports */
				if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
					mb->un.varCfgPort.vpi_max =
					    MAX_VPORTS - 1;
				} else {
					mb->un.varCfgPort.vpi_max =
					    MAX_VPORTS_LIMITED - 1;
				}

				mb->un.varCfgPort.cmv = 1;
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_debug_msg,
				    "CFGPORT: Firmware does not support NPIV. "
				    "level=%d", vpd->feaLevelHigh);
			}

		}
	}

	/*
	 * Now setup pcb
	 */
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.type = TYPE_NATIVE_SLI2;
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.feature = FEATURE_INITIAL_SLI2;
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.maxRing =
	    (hba->sli.sli3.ring_count - 1);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mailBoxSize =
	    sizeof (MAILBOX) + MBOX_EXTENSION_SIZE;

	/* Physical address of the mailbox inside the SLIM2 DMA area */
	mbx = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(slim->mbx));
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrHigh = PADDR_HI(mbx);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrLow = PADDR_LO(mbx);


	/*
	 * Set up HGP - Port Memory
	 *
	 * CR0Put   - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
	 * RR0Get                     0xc4              0x84
	 * CR1Put                     0xc8              0x88
	 * RR1Get                     0xcc              0x8c
	 * CR2Put                     0xd0              0x90
	 * RR2Get                     0xd4              0x94
	 * CR3Put                     0xd8              0x98
	 * RR3Get                     0xdc              0x9c
	 *
	 * Reserved                   0xa0-0xbf
	 *
	 * If HBQs configured:
	 * HBQ 0 Put ptr  0xc0
	 * HBQ 1 Put ptr  0xc4
	 * HBQ 2 Put ptr  0xc8
	 * ...
	 * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
	 */

	if (sli_mode >= EMLXS_HBA_SLI3_MODE) {
		/* ERBM is enabled */
		hba->sli.sli3.hgp_ring_offset = 0x80;
		hba->sli.sli3.hgp_hbq_offset = 0xC0;

		hba->sli.sli3.iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		hba->sli.sli3.iocb_rsp_size = SLI3_IOCB_RSP_SIZE;

	} else { /* SLI2 */
		/* ERBM is disabled */
		hba->sli.sli3.hgp_ring_offset = 0xC0;
		hba->sli.sli3.hgp_hbq_offset = 0;

		hba->sli.sli3.iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		hba->sli.sli3.iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
	}

	/* The Sbus card uses Host Memory. The PCI card uses SLIM POINTER */
	if (hba->bus_type == SBUS_FC) {
		/* Host group pointers live in host DMA memory */
		hgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(mbox->us.s2.host));
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
		    PADDR_HI(hgp);
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
		    PADDR_LO(hgp);
	} else {
		/* Host group pointers live in the adapter's SLIM BAR; */
		/* read the BAR addresses from PCI config space */
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
		    (uint32_t)ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCI_BAR_1_REGISTER));

		Laddr =
		    ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCI_BAR_0_REGISTER));
		Laddr &= ~0x4;	/* clear the BAR type/indicator bits */
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
		    (uint32_t)(Laddr + hba->sli.sli3.hgp_ring_offset);

#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif	/* FMA_SUPPORT */

	}

	/* Port group pointers always live in host DMA memory */
	pgp = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(mbox->us.s2.port));
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrHigh =
	    PADDR_HI(pgp);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrLow =
	    PADDR_LO(pgp);

	/* Lay out command and response IOCB arrays for each ring, */
	/* packed back-to-back in the SLIM2 IOCBs area */
	offset = 0;
	for (i = 0; i < 4; i++) {
		rp = &hba->sli.sli3.ring[i];
		rdsc = &((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.rdsc[i];

		/* Setup command ring */
		rgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
		rdsc->cmdAddrHigh = PADDR_HI(rgp);
		rdsc->cmdAddrLow = PADDR_LO(rgp);
		rdsc->cmdEntries = rp->fc_numCiocb;

		rp->fc_cmdringaddr =
		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
		offset += rdsc->cmdEntries * hba->sli.sli3.iocb_cmd_size;

		/* Setup response ring */
		rgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
		rdsc->rspAddrHigh = PADDR_HI(rgp);
		rdsc->rspAddrLow = PADDR_LO(rgp);
		rdsc->rspEntries = rp->fc_numRiocb;

		rp->fc_rspringaddr =
		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
		offset += rdsc->rspEntries * hba->sli.sli3.iocb_rsp_size;
	}

	/* Byte-swap the PCB in place for the big-endian adapter */
	BE_SWAP32_BCOPY((uint8_t *)
	    (&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
	    (uint8_t *)(&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
	    sizeof (PCB));

	/* Flush the PCB out to the device before the command is issued */
	offset = ((uint64_t)((unsigned long)
	    &(((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb)) -
	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, (off_t)offset,
	    sizeof (PCB), DDI_DMA_SYNC_FORDEV);

	return (0);

} /* emlxs_mb_config_port() */
(%x)", hbq_id); 6408 return (1); 6409 } 6410 6411 /* Configure HBQ */ 6412 hbq = &hba->sli.sli3.hbq_table[hbq_id]; 6413 hbq->HBQ_numEntries = count; 6414 6415 /* Get a Mailbox buffer to setup mailbox commands for CONFIG_HBQ */ 6416 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == 0) { 6417 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg, 6418 "hbq_setup: Unable to get mailbox."); 6419 return (1); 6420 } 6421 mb = (MAILBOX *)mbq; 6422 6423 /* Allocate HBQ Host buffer and Initialize the HBQEs */ 6424 if (emlxs_hbq_alloc(hba, hbq_id)) { 6425 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg, 6426 "hbq_setup: Unable to allocate HBQ."); 6427 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq); 6428 return (1); 6429 } 6430 6431 hbq->HBQ_recvNotify = 1; 6432 hbq->HBQ_num_mask = 0; /* Bind to ring */ 6433 hbq->HBQ_profile = 0; /* Selection profile */ 6434 /* 0=all, 7=logentry */ 6435 hbq->HBQ_ringMask = 1 << ringno; /* b0100 * ringno - Binds */ 6436 /* HBQ to a ring */ 6437 /* Ring0=b0001, Ring1=b0010, */ 6438 /* Ring2=b0100 */ 6439 hbq->HBQ_headerLen = 0; /* 0 if not profile 4 or 5 */ 6440 hbq->HBQ_logEntry = 0; /* Set to 1 if this HBQ will */ 6441 /* be used for */ 6442 hbq->HBQ_id = hbq_id; 6443 hbq->HBQ_PutIdx_next = 0; 6444 hbq->HBQ_PutIdx = hbq->HBQ_numEntries - 1; 6445 hbq->HBQ_GetIdx = 0; 6446 hbq->HBQ_PostBufCnt = hbq->HBQ_numEntries; 6447 bzero(hbq->HBQ_PostBufs, sizeof (hbq->HBQ_PostBufs)); 6448 6449 /* Fill in POST BUFFERs in HBQE */ 6450 hbqE = (HBQE_t *)hbq->HBQ_host_buf.virt; 6451 for (j = 0; j < hbq->HBQ_numEntries; j++, hbqE++) { 6452 /* Allocate buffer to post */ 6453 if ((mp = (MATCHMAP *)emlxs_mem_get(hba, 6454 seg)) == 0) { 6455 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg, 6456 "hbq_setup: Unable to allocate HBQ buffer. 
" 6457 "cnt=%d", j); 6458 emlxs_hbq_free_all(hba, hbq_id); 6459 return (1); 6460 } 6461 6462 hbq->HBQ_PostBufs[j] = mp; 6463 6464 hbqE->unt.ext.HBQ_tag = hbq_id; 6465 hbqE->unt.ext.HBQE_tag = j; 6466 hbqE->bde.tus.f.bdeSize = size; 6467 hbqE->bde.tus.f.bdeFlags = 0; 6468 hbqE->unt.w = BE_SWAP32(hbqE->unt.w); 6469 hbqE->bde.tus.w = BE_SWAP32(hbqE->bde.tus.w); 6470 hbqE->bde.addrLow = 6471 BE_SWAP32(PADDR_LO(mp->phys)); 6472 hbqE->bde.addrHigh = 6473 BE_SWAP32(PADDR_HI(mp->phys)); 6474 } 6475 6476 /* Issue CONFIG_HBQ */ 6477 emlxs_mb_config_hbq(hba, mbq, hbq_id); 6478 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) { 6479 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, 6480 "hbq_setup: Unable to config HBQ. cmd=%x status=%x", 6481 mb->mbxCommand, mb->mbxStatus); 6482 6483 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq); 6484 emlxs_hbq_free_all(hba, hbq_id); 6485 return (1); 6486 } 6487 6488 /* Setup HBQ Get/Put indexes */ 6489 ioa2 = (void *)((char *)hba->sli.sli3.slim_addr + 6490 (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t)))); 6491 WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, hbq->HBQ_PutIdx); 6492 6493 hba->sli.sli3.hbq_count++; 6494 6495 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq); 6496 6497 #ifdef FMA_SUPPORT 6498 /* Access handle validation */ 6499 if (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle) 6500 != DDI_FM_OK) { 6501 EMLXS_MSGF(EMLXS_CONTEXT, 6502 &emlxs_invalid_access_handle_msg, NULL); 6503 emlxs_hbq_free_all(hba, hbq_id); 6504 return (1); 6505 } 6506 #endif /* FMA_SUPPORT */ 6507 6508 return (0); 6509 6510 } /* emlxs_hbq_setup() */ 6511 6512 6513 extern void 6514 emlxs_hbq_free_all(emlxs_hba_t *hba, uint32_t hbq_id) 6515 { 6516 HBQ_INIT_t *hbq; 6517 MBUF_INFO *buf_info; 6518 MBUF_INFO bufinfo; 6519 uint32_t seg; 6520 uint32_t j; 6521 6522 switch (hbq_id) { 6523 case EMLXS_ELS_HBQ_ID: 6524 seg = MEM_ELSBUF; 6525 HBASTATS.ElsUbPosted = 0; 6526 break; 6527 6528 case EMLXS_IP_HBQ_ID: 6529 seg = MEM_IPBUF; 
6530 HBASTATS.IpUbPosted = 0; 6531 break; 6532 6533 case EMLXS_CT_HBQ_ID: 6534 seg = MEM_CTBUF; 6535 HBASTATS.CtUbPosted = 0; 6536 break; 6537 6538 #ifdef SFCT_SUPPORT 6539 case EMLXS_FCT_HBQ_ID: 6540 seg = MEM_FCTBUF; 6541 HBASTATS.FctUbPosted = 0; 6542 break; 6543 #endif /* SFCT_SUPPORT */ 6544 6545 default: 6546 return; 6547 } 6548 6549 6550 hbq = &hba->sli.sli3.hbq_table[hbq_id]; 6551 6552 if (hbq->HBQ_host_buf.virt != 0) { 6553 for (j = 0; j < hbq->HBQ_PostBufCnt; j++) { 6554 emlxs_mem_put(hba, seg, 6555 (void *)hbq->HBQ_PostBufs[j]); 6556 hbq->HBQ_PostBufs[j] = NULL; 6557 } 6558 hbq->HBQ_PostBufCnt = 0; 6559 6560 buf_info = &bufinfo; 6561 bzero(buf_info, sizeof (MBUF_INFO)); 6562 6563 buf_info->size = hbq->HBQ_host_buf.size; 6564 buf_info->virt = hbq->HBQ_host_buf.virt; 6565 buf_info->phys = hbq->HBQ_host_buf.phys; 6566 buf_info->dma_handle = hbq->HBQ_host_buf.dma_handle; 6567 buf_info->data_handle = hbq->HBQ_host_buf.data_handle; 6568 buf_info->flags = FC_MBUF_DMA; 6569 6570 emlxs_mem_free(hba, buf_info); 6571 6572 hbq->HBQ_host_buf.virt = NULL; 6573 } 6574 6575 return; 6576 6577 } /* emlxs_hbq_free_all() */ 6578 6579 6580 extern void 6581 emlxs_update_HBQ_index(emlxs_hba_t *hba, uint32_t hbq_id) 6582 { 6583 #ifdef FMA_SUPPORT 6584 emlxs_port_t *port = &PPORT; 6585 #endif /* FMA_SUPPORT */ 6586 void *ioa2; 6587 uint32_t status; 6588 uint32_t HBQ_PortGetIdx; 6589 HBQ_INIT_t *hbq; 6590 6591 switch (hbq_id) { 6592 case EMLXS_ELS_HBQ_ID: 6593 HBASTATS.ElsUbPosted++; 6594 break; 6595 6596 case EMLXS_IP_HBQ_ID: 6597 HBASTATS.IpUbPosted++; 6598 break; 6599 6600 case EMLXS_CT_HBQ_ID: 6601 HBASTATS.CtUbPosted++; 6602 break; 6603 6604 #ifdef SFCT_SUPPORT 6605 case EMLXS_FCT_HBQ_ID: 6606 HBASTATS.FctUbPosted++; 6607 break; 6608 #endif /* SFCT_SUPPORT */ 6609 6610 default: 6611 return; 6612 } 6613 6614 hbq = &hba->sli.sli3.hbq_table[hbq_id]; 6615 6616 hbq->HBQ_PutIdx = 6617 (hbq->HBQ_PutIdx + 1 >= 6618 hbq->HBQ_numEntries) ? 
/*
 * emlxs_update_HBQ_index
 *
 * Advance the host put index of an HBQ after a buffer has been
 * (re)posted and publish the new index to the adapter through SLIM.
 * If the ring appears full (put would catch up with get), the
 * adapter's current get index is re-read from the SLIM2 DMA area
 * first; if the ring is genuinely full the SLIM write is skipped.
 * Also bumps the per-HBQ posted-buffer statistic.
 */
extern void
emlxs_update_HBQ_index(emlxs_hba_t *hba, uint32_t hbq_id)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	void *ioa2;
	uint32_t status;
	uint32_t HBQ_PortGetIdx;
	HBQ_INIT_t *hbq;

	/* Count the post; unknown ids are ignored */
	switch (hbq_id) {
	case EMLXS_ELS_HBQ_ID:
		HBASTATS.ElsUbPosted++;
		break;

	case EMLXS_IP_HBQ_ID:
		HBASTATS.IpUbPosted++;
		break;

	case EMLXS_CT_HBQ_ID:
		HBASTATS.CtUbPosted++;
		break;

#ifdef SFCT_SUPPORT
	case EMLXS_FCT_HBQ_ID:
		HBASTATS.FctUbPosted++;
		break;
#endif /* SFCT_SUPPORT */

	default:
		return;
	}

	hbq = &hba->sli.sli3.hbq_table[hbq_id];

	/* Advance the put index with wraparound */
	hbq->HBQ_PutIdx =
	    (hbq->HBQ_PutIdx + 1 >=
	    hbq->HBQ_numEntries) ? 0 : hbq->HBQ_PutIdx + 1;

	if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
		/* Ring looks full; refresh the adapter's get index */
		/* from the shared SLIM2 area (adapter writes it BE) */
		HBQ_PortGetIdx =
		    BE_SWAP32(((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.
		    HBQ_PortGetIdx[hbq_id]);

		hbq->HBQ_GetIdx = HBQ_PortGetIdx;

		if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
			/* Genuinely full; do not publish the new index */
			return;
		}
	}

	/* Publish the new put index to the adapter via SLIM */
	ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
	    (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
	status = hbq->HBQ_PutIdx;
	WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, status);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
#endif  /* FMA_SUPPORT */

	return;

} /* emlxs_update_HBQ_index() */


/*
 * emlxs_sli3_enable_intr
 *
 * Enable mailbox and error attention interrupts plus one ring
 * interrupt per configured ring, and cache the value in
 * sli3.hc_copy before writing it to the Host Control register.
 */
static void
emlxs_sli3_enable_intr(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint32_t status;

	/* Enable mailbox, error attention interrupts */
	status = (uint32_t)(HC_MBINT_ENA);

	/* Enable ring interrupts */
	if (hba->sli.sli3.ring_count >= 4) {
		status |=
		    (HC_R3INT_ENA | HC_R2INT_ENA | HC_R1INT_ENA |
		    HC_R0INT_ENA);
	} else if (hba->sli.sli3.ring_count == 3) {
		status |= (HC_R2INT_ENA | HC_R1INT_ENA | HC_R0INT_ENA);
	} else if (hba->sli.sli3.ring_count == 2) {
		status |= (HC_R1INT_ENA | HC_R0INT_ENA);
	} else if (hba->sli.sli3.ring_count == 1) {
		status |= (HC_R0INT_ENA);
	}

	hba->sli.sli3.hc_copy = status;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_sli3_enable_intr() */
/*
 * emlxs_enable_latt
 *
 * Additionally enable the link attention interrupt in the cached Host
 * Control value and write it to the HC register, under EMLXS_PORT_LOCK
 * (hc_copy is shared state).
 */
static void
emlxs_enable_latt(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
	mutex_exit(&EMLXS_PORT_LOCK);

} /* emlxs_enable_latt() */


/*
 * emlxs_sli3_disable_intr
 *
 * Replace the Host Control interrupt mask with 'att' (0 disables all
 * adapter interrupts) and write it to the HC register.
 * NOTE(review): no lock is taken here, unlike emlxs_enable_latt();
 * callers presumably serialize — confirm before changing.
 */
static void
emlxs_sli3_disable_intr(emlxs_hba_t *hba, uint32_t att)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	/* Disable all adapter interrupts */
	hba->sli.sli3.hc_copy = att;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_sli3_disable_intr() */


/*
 * emlxs_check_attention
 *
 * Read and return the current Host Attention register value.
 */
static uint32_t
emlxs_check_attention(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint32_t ha_copy;

	ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
	return (ha_copy);

} /* emlxs_check_attention() */


/*
 * emlxs_sli3_poll_erratt
 *
 * Poll the Host Attention register for an error attention (HA_ERATT)
 * and, if one is pending, count it and run the firmware-error handler.
 */
static void
emlxs_sli3_poll_erratt(emlxs_hba_t *hba)
{
	uint32_t ha_copy;

	ha_copy = emlxs_check_attention(hba);

	/* Adapter error */
	if (ha_copy & HA_ERATT) {
		HBASTATS.IntrEvent[6]++;
		emlxs_handle_ff_error(hba);
	}

} /* emlxs_sli3_poll_erratt() */
/*
 * emlxs_sli3_reg_did_mbcmpl
 *
 * Mailbox completion handler for REG_LOGIN64 commands issued by
 * emlxs_sli3_reg_did().  On success, creates (or updates) the node for
 * the registered DID/RPI on the port identified by the returned VPI,
 * performs FLOGI-completion bookkeeping for the fabric DID, and kicks
 * off DHCHAP authentication when configured.  On failure, either
 * retries the mailbox or cleans up.
 *
 * Returns 1 if the mailbox was requeued for retry (caller must not
 * finalize it), 0 otherwise.
 *
 * NOTE(review): mbq->iocbq == (uint8_t *)1 is used as a sentinel, not
 * a pointer; per the original comments it marks a vport PLOGI /
 * restricted-login registration.
 */
static uint32_t
emlxs_sli3_reg_did_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
	MAILBOXQ *mboxq;
	MAILBOX *mb;
	MATCHMAP *mp;
	NODELIST *ndlp;
	emlxs_port_t *vport;
	SERV_PARM *sp;
	int32_t i;
	uint32_t control;
	uint32_t ldata;
	uint32_t ldid;
	uint16_t lrpi;
	uint16_t lvpi;
	uint32_t rval;

	mb = (MAILBOX *)mbq;

	if (mb->mbxStatus) {
		if (mb->mbxStatus == MBXERR_NO_RESOURCES) {
			control = mb->un.varRegLogin.un.sp.bdeSize;
			if (control == 0) {
				/* Special handle for vport PLOGI */
				if (mbq->iocbq == (uint8_t *)1) {
					mbq->iocbq = NULL;
				}
				return (0);
			}
			/* Requeue; tell the caller not to finalize */
			emlxs_mb_retry(hba, mbq);
			return (1);
		}
		if (mb->mbxStatus == MBXERR_RPI_FULL) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_node_create_failed_msg,
			    "Limit reached. count=%d", port->node_count);
		}

		/* Special handle for vport PLOGI */
		if (mbq->iocbq == (uint8_t *)1) {
			mbq->iocbq = NULL;
		}

		return (0);
	}

	/* The service-parameter buffer attached by emlxs_sli3_reg_did() */
	mp = (MATCHMAP *)mbq->bp;
	if (!mp) {
		return (0);
	}

	/* Re-resolve the port: the completion carries the VPI it is for */
	ldata = mb->un.varWords[5];
	lvpi = (ldata & 0xffff);
	port = &VPORT(lvpi);

	/* First copy command data */
	ldata = mb->un.varWords[0];	/* get rpi */
	lrpi = ldata & 0xffff;

	ldata = mb->un.varWords[1];	/* get did */
	ldid = ldata & MASK_DID;

	sp = (SERV_PARM *)mp->virt;

	/* Create or update the node */
	ndlp = emlxs_node_create(port, ldid, lrpi, sp);

	if (ndlp == NULL) {
		emlxs_ub_priv_t *ub_priv;

		/*
		 * Fake a mailbox error, so the mbox_fini
		 * can take appropriate action
		 */
		mb->mbxStatus = MBXERR_RPI_FULL;
		if (mbq->ubp) {
			ub_priv = ((fc_unsol_buf_t *)mbq->ubp)->ub_fca_private;
			ub_priv->flags |= EMLXS_UB_REPLY;
		}

		/* This must be (0xFFFFFE) which was registered by vport */
		if (lrpi == 0) {
			return (0);
		}

		/* Node creation failed: unregister the now-orphaned RPI */
		if (!(mboxq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "reg_did_mbcmpl:failed. Unable to allocate mbox");
			return (0);
		}

		mb = (MAILBOX *)mboxq->mbox;
		mb->un.varUnregLogin.rpi = lrpi;
		mb->un.varUnregLogin.vpi = lvpi;

		mb->mbxCommand = MBX_UNREG_LOGIN;
		mb->mbxOwner = OWN_HOST;
		mboxq->sbp = NULL;
		mboxq->ubp = NULL;
		mboxq->iocbq = NULL;
		mboxq->mbox_cmpl = NULL;
		mboxq->context = NULL;
		mboxq->port = (void *)port;

		rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mboxq, MBX_NOWAIT, 0);
		if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "reg_did_mbcmpl:failed. Unable to send request.");

			emlxs_mem_put(hba, MEM_MBOX, (void *)mboxq);
			return (0);
		}

		return (0);
	}

	if (ndlp->nlp_DID == FABRIC_DID) {
		/* FLOGI/FDISC successfully completed on this port */
		mutex_enter(&EMLXS_PORT_LOCK);
		port->flag |= EMLXS_PORT_FLOGI_CMPL;
		mutex_exit(&EMLXS_PORT_LOCK);

		/* If CLEAR_LA has been sent, then attempt to */
		/* register the vpi now */
		if (hba->state == FC_READY) {
			(void) emlxs_mb_reg_vpi(port, NULL);
		}

		/*
		 * If NPIV Fabric support has just been established on
		 * the physical port, then notify the vports of the
		 * link up
		 */
		if ((lvpi == 0) &&
		    (hba->flag & FC_NPIV_ENABLED) &&
		    (hba->flag & FC_NPIV_SUPPORTED)) {
			/* Skip the physical port */
			for (i = 1; i < MAX_VPORTS; i++) {
				vport = &VPORT(i);

				if (!(vport->flag & EMLXS_PORT_BOUND) ||
				    !(vport->flag &
				    EMLXS_PORT_ENABLED)) {
					continue;
				}

				emlxs_port_online(vport);
			}
		}
	}

	/* Check for special restricted login flag */
	if (mbq->iocbq == (uint8_t *)1) {
		mbq->iocbq = NULL;
		(void) EMLXS_SLI_UNREG_NODE(port, ndlp, NULL, NULL, NULL);
		return (0);
	}

	/* Needed for FCT trigger in emlxs_mb_deferred_cmpl */
	if (mbq->sbp) {
		((emlxs_buf_t *)mbq->sbp)->node = ndlp;
	}

#ifdef DHCHAP_SUPPORT
	if (mbq->sbp || mbq->ubp) {
		if (emlxs_dhc_auth_start(port, ndlp, mbq->sbp,
		    mbq->ubp) == 0) {
			/* Auth started - auth completion will */
			/* handle sbp and ubp now */
			mbq->sbp = NULL;
			mbq->ubp = NULL;
		}
	}
#endif  /* DHCHAP_SUPPORT */

	return (0);

} /* emlxs_sli3_reg_did_mbcmpl() */
did=%x", did); 6985 return (1); 6986 } 6987 bcopy((void *)param, (void *)mp->virt, sizeof (SERV_PARM)); 6988 6989 mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (SERV_PARM); 6990 mb->un.varRegLogin.un.sp64.addrHigh = PADDR_HI(mp->phys); 6991 mb->un.varRegLogin.un.sp64.addrLow = PADDR_LO(mp->phys); 6992 mb->un.varRegLogin.did = did; 6993 mb->un.varWords[30] = 0; /* flags */ 6994 mb->mbxCommand = MBX_REG_LOGIN64; 6995 mb->mbxOwner = OWN_HOST; 6996 mb->un.varRegLogin.vpi = port->vpi; 6997 mb->un.varRegLogin.rpi = 0; 6998 6999 mbq->sbp = (void *)sbp; 7000 mbq->ubp = (void *)ubp; 7001 mbq->iocbq = (void *)iocbq; 7002 mbq->bp = (void *)mp; 7003 mbq->mbox_cmpl = emlxs_sli3_reg_did_mbcmpl; 7004 mbq->context = NULL; 7005 mbq->port = (void *)port; 7006 7007 rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0); 7008 if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) { 7009 emlxs_mem_put(hba, MEM_BUF, (void *)mp); 7010 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq); 7011 7012 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg, 7013 "Unable to send mbox. did=%x", did); 7014 return (1); 7015 } 7016 7017 return (0); 7018 7019 } /* emlxs_sli3_reg_did() */ 7020 7021 7022 /*ARGSUSED*/ 7023 static uint32_t 7024 emlxs_sli3_unreg_node_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq) 7025 { 7026 emlxs_port_t *port = (emlxs_port_t *)mbq->port; 7027 MAILBOX *mb; 7028 NODELIST *node; 7029 uint16_t rpi; 7030 7031 node = (NODELIST *)mbq->context; 7032 mb = (MAILBOX *)mbq; 7033 rpi = (node)? node->nlp_Rpi:0xffff; 7034 7035 if (mb->mbxStatus) { 7036 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg, 7037 "unreg_node_mbcmpl:failed. 
/*
 * emlxs_sli3_unreg_node_mbcmpl
 *
 * Mailbox completion handler for UNREG_LOGIN commands issued by
 * emlxs_sli3_unreg_node().  On success, removes the node passed via
 * mbq->context, or destroys all nodes on the port when context is NULL
 * (the unreg-all case).  On mailbox failure the node is left intact.
 * Always returns 0 (never requeues the mailbox).
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli3_unreg_node_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
	MAILBOX *mb;
	NODELIST *node;
	uint16_t rpi;

	node = (NODELIST *)mbq->context;
	mb = (MAILBOX *)mbq;
	/* rpi only used for logging; 0xffff means "all nodes" */
	rpi = (node)? node->nlp_Rpi:0xffff;

	if (mb->mbxStatus) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "unreg_node_mbcmpl:failed. node=%p rpi=%d status=%x",
		    node, rpi, mb->mbxStatus);

		return (0);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "unreg_node_mbcmpl: node=%p rpi=%d",
	    node, rpi);

	if (node) {
		emlxs_node_rm(port, node);

	} else {  /* All nodes */
		emlxs_node_destroy_all(port);
	}

	return (0);

} /* emlxs_sli3_unreg_node_mbcmpl */


/*
 * emlxs_sli3_unreg_node
 *
 * Issue an UNREG_LOGIN mailbox command to unregister one node (or all
 * nodes when 'node' is NULL, via rpi 0xffff).  sbp/ubp/iocbq are passed
 * through to the completion for deferred processing.
 *
 * Special cases handled without a mailbox:
 *  - the port's base node is only flushed locally (never registered
 *    with the adapter), returns 1;
 *  - a node with rpi 0 (registered by a vport, per the original
 *    comment) is simply removed, returns 0.
 *
 * Returns 0 if the request was issued (or handled), 1 on failure or
 * for the base-node case.
 */
static uint32_t
emlxs_sli3_unreg_node(emlxs_port_t *port, NODELIST *node, emlxs_buf_t *sbp,
    fc_unsol_buf_t *ubp, IOCBQ *iocbq)
{
	emlxs_hba_t *hba = HBA;
	MAILBOXQ *mbq;
	MAILBOX *mb;
	uint16_t rpi;
	uint32_t rval;

	if (node) {
		/* Check for base node */
		if (node == &port->node_base) {
			/* just flush base node */
			(void) emlxs_tx_node_flush(port, &port->node_base,
			    0, 0, 0);
			(void) emlxs_chipq_node_flush(port, 0,
			    &port->node_base, 0);

			port->did = 0;

			/* Return now */
			return (1);
		}

		rpi = (uint16_t)node->nlp_Rpi;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "unreg_node:%p rpi=%d", node, rpi);

		/* This node must be (0xFFFFFE) which registered by vport */
		if (rpi == 0) {
			emlxs_node_rm(port, node);
			return (0);
		}

	} else {  /* Unreg all nodes */
		rpi = 0xffff;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "unreg_node: All");
	}

	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "unreg_node:failed. Unable to allocate mbox");
		return (1);
	}

	mb = (MAILBOX *)mbq->mbox;
	mb->un.varUnregLogin.rpi = rpi;
	mb->un.varUnregLogin.vpi = port->vpip->VPI;

	mb->mbxCommand = MBX_UNREG_LOGIN;
	mb->mbxOwner = OWN_HOST;
	mbq->sbp = (void *)sbp;
	mbq->ubp = (void *)ubp;
	mbq->iocbq = (void *)iocbq;
	mbq->mbox_cmpl = emlxs_sli3_unreg_node_mbcmpl;
	mbq->context = (void *)node;
	mbq->port = (void *)port;

	rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
	if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "unreg_node:failed. Unable to send request.");

		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
		return (1);
	}

	return (0);

} /* emlxs_sli3_unreg_node() */