1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at 9 * http://www.opensource.org/licenses/cddl1.txt. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2004-2012 Emulex. All rights reserved. 24 * Use is subject to license terms. 25 * Copyright 2020 RackTop Systems, Inc. 26 */ 27 28 #include <emlxs.h> 29 30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */ 31 EMLXS_MSG_DEF(EMLXS_MBOX_C); 32 33 34 emlxs_table_t emlxs_mb_status_table[] = { 35 {MBX_SUCCESS, "SUCCESS"}, 36 {MBX_FAILURE, "FAILURE"}, 37 {MBXERR_NUM_IOCBS, "NUM_IOCBS"}, 38 {MBXERR_IOCBS_EXCEEDED, "IOCBS_EXCEEDED"}, 39 {MBXERR_BAD_RING_NUMBER, "BAD_RING_NUMBER"}, 40 {MBXERR_MASK_ENTRIES_RANGE, "MASK_ENTRIES_RANGE"}, 41 {MBXERR_MASKS_EXCEEDED, "MASKS_EXCEEDED"}, 42 {MBXERR_BAD_PROFILE, "BAD_PROFILE"}, 43 {MBXERR_BAD_DEF_CLASS, "BAD_DEF_CLASS"}, 44 {MBXERR_BAD_MAX_RESPONDER, "BAD_MAX_RESPONDER"}, 45 {MBXERR_BAD_MAX_ORIGINATOR, "BAD_MAX_ORIGINATOR"}, 46 {MBXERR_RPI_REGISTERED, "RPI_REGISTERED"}, 47 {MBXERR_RPI_FULL, "RPI_FULL"}, 48 {MBXERR_NO_RESOURCES, "NO_RESOURCES"}, 49 {MBXERR_BAD_RCV_LENGTH, "BAD_RCV_LENGTH"}, 50 {MBXERR_DMA_ERROR, "DMA_ERROR"}, 51 {MBXERR_NOT_SUPPORTED, "NOT_SUPPORTED"}, 52 {MBXERR_UNSUPPORTED_FEATURE, "UNSUPPORTED_FEATURE"}, 53 {MBXERR_UNKNOWN_COMMAND, "UNKNOWN_COMMAND"}, 54 {MBXERR_BAD_IP_BIT, 
"BAD_IP_BIT"}, 55 {MBXERR_BAD_PCB_ALIGN, "BAD_PCB_ALIGN"}, 56 {MBXERR_BAD_HBQ_ID, "BAD_HBQ_ID"}, 57 {MBXERR_BAD_HBQ_STATE, "BAD_HBQ_STATE"}, 58 {MBXERR_BAD_HBQ_MASK_NUM, "BAD_HBQ_MASK_NUM"}, 59 {MBXERR_BAD_HBQ_MASK_SUBSET, "BAD_HBQ_MASK_SUBSET"}, 60 {MBXERR_HBQ_CREATE_FAIL, "HBQ_CREATE_FAIL"}, 61 {MBXERR_HBQ_EXISTING, "HBQ_EXISTING"}, 62 {MBXERR_HBQ_RSPRING_FULL, "HBQ_RSPRING_FULL"}, 63 {MBXERR_HBQ_DUP_MASK, "HBQ_DUP_MASK"}, 64 {MBXERR_HBQ_INVAL_GET_PTR, "HBQ_INVAL_GET_PTR"}, 65 {MBXERR_BAD_HBQ_SIZE, "BAD_HBQ_SIZE"}, 66 {MBXERR_BAD_HBQ_ORDER, "BAD_HBQ_ORDER"}, 67 {MBXERR_INVALID_ID, "INVALID_ID"}, 68 {MBXERR_INVALID_VFI, "INVALID_VFI"}, 69 {MBXERR_FLASH_WRITE_FAILED, "FLASH_WRITE_FAILED"}, 70 {MBXERR_INVALID_LINKSPEED, "INVALID_LINKSPEED"}, 71 {MBXERR_BAD_REDIRECT, "BAD_REDIRECT"}, 72 {MBXERR_RING_ALREADY_CONFIG, "RING_ALREADY_CONFIG"}, 73 {MBXERR_RING_INACTIVE, "RING_INACTIVE"}, 74 {MBXERR_RPI_INACTIVE, "RPI_INACTIVE"}, 75 {MBXERR_NO_ACTIVE_XRI, "NO_ACTIVE_XRI"}, 76 {MBXERR_XRI_NOT_ACTIVE, "XRI_NOT_ACTIVE"}, 77 {MBXERR_RPI_INUSE, "RPI_INUSE"}, 78 {MBXERR_NO_LINK_ATTENTION, "NO_LINK_ATTENTION"}, 79 {MBXERR_INVALID_SLI_MODE, "INVALID_SLI_MODE"}, 80 {MBXERR_INVALID_HOST_PTR, "INVALID_HOST_PTR"}, 81 {MBXERR_CANT_CFG_SLI_MODE, "CANT_CFG_SLI_MODE"}, 82 {MBXERR_BAD_OVERLAY, "BAD_OVERLAY"}, 83 {MBXERR_INVALID_FEAT_REQ, "INVALID_FEAT_REQ"}, 84 {MBXERR_CONFIG_CANT_COMPLETE, "CONFIG_CANT_COMPLETE"}, 85 {MBXERR_DID_ALREADY_REGISTERED, "DID_ALREADY_REGISTERED"}, 86 {MBXERR_DID_INCONSISTENT, "DID_INCONSISTENT"}, 87 {MBXERR_VPI_TOO_LARGE, "VPI_TOO_LARGE"}, 88 {MBXERR_STILL_ASSOCIATED, "STILL_ASSOCIATED"}, 89 {MBXERR_INVALID_VF_STATE, "INVALID_VF_STATE"}, 90 {MBXERR_VFI_ALREADY_REGISTERED, "VFI_ALREADY_REGISTERED"}, 91 {MBXERR_VFI_TOO_LARGE, "VFI_TOO_LARGE"}, 92 {MBXERR_LOAD_FW_FAILED, "LOAD_FW_FAILED"}, 93 {MBXERR_FIND_FW_FAILED, "FIND_FW_FAILED"}, 94 }; 95 96 emlxs_table_t emlxs_mb_cmd_table[] = { 97 {MBX_SHUTDOWN, "SHUTDOWN"}, 98 {MBX_LOAD_SM, "LOAD_SM"}, 99 {MBX_READ_NV, 
"READ_NV"}, 100 {MBX_WRITE_NV, "WRITE_NV"}, 101 {MBX_RUN_BIU_DIAG, "RUN_BIU_DIAG"}, 102 {MBX_INIT_LINK, "INIT_LINK"}, 103 {MBX_DOWN_LINK, "DOWN_LINK"}, 104 {MBX_CONFIG_LINK, "CONFIG_LINK"}, 105 {MBX_PART_SLIM, "PART_SLIM"}, 106 {MBX_CONFIG_RING, "CONFIG_RING"}, 107 {MBX_RESET_RING, "RESET_RING"}, 108 {MBX_READ_CONFIG, "READ_CONFIG"}, 109 {MBX_READ_RCONFIG, "READ_RCONFIG"}, 110 {MBX_READ_SPARM, "READ_SPARM"}, 111 {MBX_READ_STATUS, "READ_STATUS"}, 112 {MBX_READ_RPI, "READ_RPI"}, 113 {MBX_READ_XRI, "READ_XRI"}, 114 {MBX_READ_REV, "READ_REV"}, 115 {MBX_READ_LNK_STAT, "READ_LNK_STAT"}, 116 {MBX_REG_LOGIN, "REG_LOGIN"}, 117 {MBX_UNREG_LOGIN, "UNREG_RPI"}, 118 {MBX_READ_LA, "READ_LA"}, 119 {MBX_CLEAR_LA, "CLEAR_LA"}, 120 {MBX_DUMP_MEMORY, "DUMP_MEMORY"}, 121 {MBX_DUMP_CONTEXT, "DUMP_CONTEXT"}, 122 {MBX_RUN_DIAGS, "RUN_DIAGS"}, 123 {MBX_RESTART, "RESTART"}, 124 {MBX_UPDATE_CFG, "UPDATE_CFG"}, 125 {MBX_DOWN_LOAD, "DOWN_LOAD"}, 126 {MBX_DEL_LD_ENTRY, "DEL_LD_ENTRY"}, 127 {MBX_RUN_PROGRAM, "RUN_PROGRAM"}, 128 {MBX_SET_MASK, "SET_MASK"}, 129 {MBX_SET_VARIABLE, "SET_VARIABLE"}, 130 {MBX_UNREG_D_ID, "UNREG_D_ID"}, 131 {MBX_KILL_BOARD, "KILL_BOARD"}, 132 {MBX_CONFIG_FARP, "CONFIG_FARP"}, 133 {MBX_LOAD_AREA, "LOAD_AREA"}, 134 {MBX_RUN_BIU_DIAG64, "RUN_BIU_DIAG64"}, 135 {MBX_CONFIG_PORT, "CONFIG_PORT"}, 136 {MBX_READ_SPARM64, "READ_SPARM64"}, 137 {MBX_READ_RPI64, "READ_RPI64"}, 138 {MBX_CONFIG_MSI, "CONFIG_MSI"}, 139 {MBX_CONFIG_MSIX, "CONFIG_MSIX"}, 140 {MBX_REG_LOGIN64, "REG_RPI"}, 141 {MBX_READ_LA64, "READ_LA64"}, 142 {MBX_FLASH_WR_ULA, "FLASH_WR_ULA"}, 143 {MBX_SET_DEBUG, "SET_DEBUG"}, 144 {MBX_GET_DEBUG, "GET_DEBUG"}, 145 {MBX_LOAD_EXP_ROM, "LOAD_EXP_ROM"}, 146 {MBX_BEACON, "BEACON"}, 147 {MBX_CONFIG_HBQ, "CONFIG_HBQ"}, /* SLI3 */ 148 {MBX_REG_VPI, "REG_VPI"}, /* NPIV */ 149 {MBX_UNREG_VPI, "UNREG_VPI"}, /* NPIV */ 150 {MBX_ASYNC_EVENT, "ASYNC_EVENT"}, 151 {MBX_HEARTBEAT, "HEARTBEAT"}, 152 {MBX_READ_EVENT_LOG_STATUS, "READ_EVENT_LOG_STATUS"}, 153 {MBX_READ_EVENT_LOG, 
"READ_EVENT_LOG"}, 154 {MBX_WRITE_EVENT_LOG, "WRITE_EVENT_LOG"}, 155 {MBX_NV_LOG, "NV_LOG"}, 156 {MBX_PORT_CAPABILITIES, "PORT_CAPABILITIES"}, 157 {MBX_IOV_CONTROL, "IOV_CONTROL"}, 158 {MBX_IOV_MBX, "IOV_MBX"}, 159 {MBX_SLI_CONFIG, "SLI_CONFIG"}, 160 {MBX_REQUEST_FEATURES, "REQUEST_FEATURES"}, 161 {MBX_RESUME_RPI, "RESUME_RPI"}, 162 {MBX_REG_VFI, "REG_VFI"}, 163 {MBX_REG_FCFI, "REG_FCFI"}, 164 {MBX_UNREG_VFI, "UNREG_VFI"}, 165 {MBX_UNREG_FCFI, "UNREG_FCFI"}, 166 {MBX_INIT_VFI, "INIT_VFI"}, 167 {MBX_INIT_VPI, "INIT_VPI"}, 168 {MBX_WRITE_VPARMS, "WRITE_VPARMS"}, 169 {MBX_ACCESS_VDATA, "ACCESS_VDATA"} 170 }; /* emlxs_mb_cmd_table */ 171 172 173 emlxs_table_t emlxs_request_feature_table[] = { 174 {SLI4_FEATURE_INHIBIT_AUTO_ABTS, "IAA "}, /* Bit 0 */ 175 {SLI4_FEATURE_NPIV, "NPIV "}, /* Bit 1 */ 176 {SLI4_FEATURE_DIF, "DIF "}, /* Bit 2 */ 177 {SLI4_FEATURE_VIRTUAL_FABRICS, "VF "}, /* Bit 3 */ 178 {SLI4_FEATURE_FCP_INITIATOR, "FCPI "}, /* Bit 4 */ 179 {SLI4_FEATURE_FCP_TARGET, "FCPT "}, /* Bit 5 */ 180 {SLI4_FEATURE_FCP_COMBO, "FCPC "}, /* Bit 6 */ 181 {SLI4_FEATURE_RSVD1, "RSVD1 "}, /* Bit 7 */ 182 {SLI4_FEATURE_RQD, "RQD "}, /* Bit 8 */ 183 {SLI4_FEATURE_INHIBIT_AUTO_ABTS_R, "IAAR "}, /* Bit 9 */ 184 {SLI4_FEATURE_HIGH_LOGIN_MODE, "HLM "}, /* Bit 10 */ 185 {SLI4_FEATURE_PERF_HINT, "PERFH "} /* Bit 11 */ 186 }; /* emlxs_request_feature_table */ 187 188 189 extern char * 190 emlxs_mb_xlate_status(uint32_t status) 191 { 192 static char buffer[32]; 193 uint32_t i; 194 uint32_t count; 195 196 count = sizeof (emlxs_mb_status_table) / sizeof (emlxs_table_t); 197 for (i = 0; i < count; i++) { 198 if (status == emlxs_mb_status_table[i].code) { 199 return (emlxs_mb_status_table[i].string); 200 } 201 } 202 203 (void) snprintf(buffer, sizeof (buffer), "status=%x", status); 204 return (buffer); 205 206 } /* emlxs_mb_xlate_status() */ 207 208 209 /* SLI4 */ 210 /*ARGSUSED*/ 211 extern void 212 emlxs_mb_resetport(emlxs_hba_t *hba, MAILBOXQ *mbq) 213 { 214 MAILBOX4 *mb4 = (MAILBOX4 
*)mbq; 215 216 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE); 217 mbq->nonembed = NULL; 218 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 219 mbq->port = (void *)&PPORT; 220 221 /* 222 * Signifies an embedded command 223 */ 224 mb4->un.varSLIConfig.be.embedded = 1; 225 226 mb4->mbxCommand = MBX_SLI_CONFIG; 227 mb4->mbxOwner = OWN_HOST; 228 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ; 229 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem = 230 IOCTL_SUBSYSTEM_COMMON; 231 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_RESET; 232 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0; 233 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 0; 234 235 return; 236 237 } /* emlxs_mb_resetport() */ 238 239 240 /* SLI4 */ 241 /*ARGSUSED*/ 242 extern void 243 emlxs_mb_request_features(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t mask) 244 { 245 MAILBOX4 *mb4 = (MAILBOX4 *)mbq; 246 247 hba->flag &= ~FC_NPIV_ENABLED; 248 hba->sli.sli4.flag &= ~(EMLXS_SLI4_PHON | EMLXS_SLI4_PHWQ); 249 250 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE); 251 mbq->nonembed = NULL; 252 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 253 mbq->port = (void *)&PPORT; 254 255 mb4->mbxCommand = MBX_REQUEST_FEATURES; 256 mb4->mbxOwner = OWN_HOST; 257 258 mb4->un.varReqFeatures.featuresRequested = mask; 259 return; 260 261 } /* emlxs_mb_request_features() */ 262 263 264 /* SLI4 */ 265 /*ARGSUSED*/ 266 extern void 267 emlxs_mb_noop(emlxs_hba_t *hba, MAILBOXQ *mbq) 268 { 269 MAILBOX4 *mb4 = (MAILBOX4 *)mbq; 270 IOCTL_COMMON_NOP *nop; 271 272 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE); 273 mbq->nonembed = NULL; 274 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 275 mbq->port = (void *)&PPORT; 276 277 /* 278 * Signifies an embedded command 279 */ 280 mb4->un.varSLIConfig.be.embedded = 1; 281 282 mb4->mbxCommand = MBX_SLI_CONFIG; 283 mb4->mbxOwner = OWN_HOST; 284 mb4->un.varSLIConfig.be.payload_length = sizeof (IOCTL_COMMON_NOP) + 285 IOCTL_HEADER_SZ; 286 
mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem = 287 IOCTL_SUBSYSTEM_COMMON; 288 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_NOP; 289 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0; 290 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 291 sizeof (IOCTL_COMMON_NOP); 292 nop = (IOCTL_COMMON_NOP *)&mb4->un.varSLIConfig.payload; 293 nop->params.request.context = -1; 294 295 return; 296 297 } /* emlxs_mb_noop() */ 298 299 300 /* SLI4 */ 301 /*ARGSUSED*/ 302 extern int 303 emlxs_mbext_noop(emlxs_hba_t *hba, MAILBOXQ *mbq) 304 { 305 MAILBOX4 *mb4 = (MAILBOX4 *)mbq; 306 IOCTL_COMMON_NOP *nop; 307 MATCHMAP *mp; 308 mbox_req_hdr_t *hdr_req; 309 310 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE); 311 312 if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) { 313 return (1); 314 } 315 /* 316 * Save address for completion 317 * Signifies a non-embedded command 318 */ 319 mb4->un.varSLIConfig.be.embedded = 0; 320 mbq->nonembed = (void *)mp; 321 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 322 mbq->port = (void *)&PPORT; 323 324 mb4->mbxCommand = MBX_SLI_CONFIG; 325 mb4->mbxOwner = OWN_HOST; 326 327 hdr_req = (mbox_req_hdr_t *)mp->virt; 328 hdr_req->subsystem = IOCTL_SUBSYSTEM_COMMON; 329 hdr_req->opcode = COMMON_OPCODE_NOP; 330 hdr_req->timeout = 0; 331 hdr_req->req_length = sizeof (IOCTL_COMMON_NOP); 332 nop = (IOCTL_COMMON_NOP *)(hdr_req + 1); 333 nop->params.request.context = -1; 334 335 return (0); 336 337 } /* emlxs_mbext_noop() */ 338 339 340 /* SLI4 */ 341 /*ARGSUSED*/ 342 extern void 343 emlxs_mb_eq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num) 344 { 345 MAILBOX4 *mb4 = (MAILBOX4 *)mbq; 346 IOCTL_COMMON_EQ_CREATE *qp; 347 uint64_t addr; 348 349 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE); 350 mbq->nonembed = NULL; 351 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 352 mbq->port = (void *)&PPORT; 353 354 /* 355 * Signifies an embedded command 356 */ 357 mb4->un.varSLIConfig.be.embedded = 1; 358 359 mb4->mbxCommand = MBX_SLI_CONFIG; 360 
mb4->mbxOwner = OWN_HOST; 361 mb4->un.varSLIConfig.be.payload_length = 362 sizeof (IOCTL_COMMON_EQ_CREATE) + IOCTL_HEADER_SZ; 363 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem = 364 IOCTL_SUBSYSTEM_COMMON; 365 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_EQ_CREATE; 366 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0; 367 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 368 sizeof (IOCTL_COMMON_EQ_CREATE); 369 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0; 370 371 qp = (IOCTL_COMMON_EQ_CREATE *)&mb4->un.varSLIConfig.payload; 372 373 /* 1024 * 4 bytes = 4K */ 374 qp->params.request.EQContext.Count = EQ_ELEMENT_COUNT_1024; 375 qp->params.request.EQContext.Valid = 1; 376 qp->params.request.EQContext.DelayMult = EQ_DELAY_MULT; 377 378 addr = hba->sli.sli4.eq[num].addr.phys; 379 qp->params.request.NumPages = 1; 380 qp->params.request.Pages[0].addrLow = PADDR_LO(addr); 381 qp->params.request.Pages[0].addrHigh = PADDR_HI(addr); 382 383 return; 384 385 } /* emlxs_mb_eq_create() */ 386 387 388 /* SLI4 */ 389 /*ARGSUSED*/ 390 extern void 391 emlxs_mb_cq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num) 392 { 393 MAILBOX4 *mb4 = (MAILBOX4 *)mbq; 394 IOCTL_COMMON_CQ_CREATE *qp; 395 IOCTL_COMMON_CQ_CREATE_V2 *qp2; 396 uint64_t addr; 397 uint32_t i; 398 399 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE); 400 mbq->nonembed = NULL; 401 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 402 mbq->port = (void *)&PPORT; 403 404 /* 405 * Signifies an embedded command 406 */ 407 mb4->un.varSLIConfig.be.embedded = 1; 408 409 mb4->mbxCommand = MBX_SLI_CONFIG; 410 mb4->mbxOwner = OWN_HOST; 411 412 switch (hba->sli.sli4.param.CQV) { 413 case 0: 414 mb4->un.varSLIConfig.be.payload_length = 415 sizeof (IOCTL_COMMON_CQ_CREATE) + IOCTL_HEADER_SZ; 416 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem = 417 IOCTL_SUBSYSTEM_COMMON; 418 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = 419 COMMON_OPCODE_CQ_CREATE; 420 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0; 421 
mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 422 sizeof (IOCTL_COMMON_CQ_CREATE); 423 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0; 424 425 qp = (IOCTL_COMMON_CQ_CREATE *) 426 &mb4->un.varSLIConfig.payload; 427 428 /* 256 * 16 bytes = 4K */ 429 qp->params.request.CQContext.Count = CQ_ELEMENT_COUNT_256; 430 qp->params.request.CQContext.EQId = 431 (uint8_t)hba->sli.sli4.cq[num].eqid; 432 qp->params.request.CQContext.Valid = 1; 433 qp->params.request.CQContext.Eventable = 1; 434 qp->params.request.CQContext.NoDelay = 0; 435 qp->params.request.CQContext.CoalesceWM = 0; 436 437 addr = hba->sli.sli4.cq[num].addr.phys; 438 qp->params.request.NumPages = 1; 439 qp->params.request.Pages[0].addrLow = PADDR_LO(addr); 440 qp->params.request.Pages[0].addrHigh = PADDR_HI(addr); 441 442 break; 443 444 case 2: 445 default: 446 mb4->un.varSLIConfig.be.payload_length = 447 sizeof (IOCTL_COMMON_CQ_CREATE_V2) + IOCTL_HEADER_SZ; 448 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem = 449 IOCTL_SUBSYSTEM_COMMON; 450 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = 451 COMMON_OPCODE_CQ_CREATE; 452 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0; 453 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 454 sizeof (IOCTL_COMMON_CQ_CREATE_V2); 455 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 2; 456 457 qp2 = (IOCTL_COMMON_CQ_CREATE_V2 *) 458 &mb4->un.varSLIConfig.payload; 459 460 qp2->params.request.CQContext.CqeCnt = CQ_ELEMENT_COUNT_1024; 461 qp2->params.request.CQContext.CqeSize = CQE_SIZE_16_BYTES; 462 qp2->params.request.CQContext.EQId = hba->sli.sli4.cq[num].eqid; 463 qp2->params.request.CQContext.Valid = 1; 464 qp2->params.request.CQContext.AutoValid = 0; 465 qp2->params.request.CQContext.Eventable = 1; 466 qp2->params.request.CQContext.NoDelay = 0; 467 qp2->params.request.CQContext.Count1 = 0; 468 qp2->params.request.CQContext.CoalesceWM = 0; 469 470 addr = hba->sli.sli4.cq[num].addr.phys; 471 qp2->params.request.PageSize = CQ_PAGE_SIZE_4K; 472 
qp2->params.request.NumPages = EMLXS_NUM_CQ_PAGES_V2; 473 474 for (i = 0; i < EMLXS_NUM_CQ_PAGES_V2; i++) { 475 qp2->params.request.Pages[i].addrLow = PADDR_LO(addr); 476 qp2->params.request.Pages[i].addrHigh = PADDR_HI(addr); 477 addr += 4096; 478 } 479 480 break; 481 } 482 return; 483 484 } /* emlxs_mb_cq_create() */ 485 486 487 /* SLI4 */ 488 /*ARGSUSED*/ 489 extern void 490 emlxs_mb_get_port_name(emlxs_hba_t *hba, MAILBOXQ *mbq) 491 { 492 MAILBOX4 *mb4 = (MAILBOX4 *)mbq; 493 494 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE); 495 mbq->nonembed = NULL; 496 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 497 mbq->port = (void *)&PPORT; 498 499 mb4->un.varSLIConfig.be.embedded = 1; 500 mb4->mbxCommand = MBX_SLI_CONFIG; 501 mb4->mbxOwner = OWN_HOST; 502 503 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ; 504 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem = 505 IOCTL_SUBSYSTEM_COMMON; 506 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = 507 COMMON_OPCODE_GET_PORT_NAME; 508 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0; 509 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 0; 510 511 if (hba->model_info.chip & EMLXS_BE_CHIPS) { 512 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0; /* V0 */ 513 } else { 514 IOCTL_COMMON_GET_PORT_NAME_V1 *pn; 515 516 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 1; /* V1 */ 517 518 pn = (IOCTL_COMMON_GET_PORT_NAME_V1 *) 519 &mb4->un.varSLIConfig.payload; 520 pn->params.request.pt = PORT_TYPE_FC; 521 } 522 523 return; 524 525 } /* emlxs_mb_get_port_name() */ 526 527 528 /* SLI4 */ 529 /*ARGSUSED*/ 530 extern void 531 emlxs_mb_get_sli4_params(emlxs_hba_t *hba, MAILBOXQ *mbq) 532 { 533 MAILBOX4 *mb4 = (MAILBOX4 *)mbq; 534 535 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE); 536 mbq->nonembed = NULL; 537 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 538 mbq->port = (void *)&PPORT; 539 540 mb4->un.varSLIConfig.be.embedded = 1; 541 mb4->mbxCommand = MBX_SLI_CONFIG; 542 mb4->mbxOwner = OWN_HOST; 543 544 
mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ; 545 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem = 546 IOCTL_SUBSYSTEM_COMMON; 547 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = 548 COMMON_OPCODE_GET_SLI4_PARAMS; 549 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0; 550 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 0; 551 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0; /* V0 */ 552 553 return; 554 555 } /* emlxs_mb_get_sli4_params() */ 556 557 558 /* SLI4 */ 559 /*ARGSUSED*/ 560 extern void 561 emlxs_mb_get_extents_info(emlxs_hba_t *hba, MAILBOXQ *mbq, uint16_t type) 562 { 563 MAILBOX4 *mb4 = (MAILBOX4 *)mbq; 564 IOCTL_COMMON_EXTENTS *ep; 565 566 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE); 567 mbq->nonembed = NULL; 568 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 569 mbq->port = (void *)&PPORT; 570 571 mb4->un.varSLIConfig.be.embedded = 1; 572 mb4->mbxCommand = MBX_SLI_CONFIG; 573 mb4->mbxOwner = OWN_HOST; 574 575 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ; 576 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.subsystem = 577 IOCTL_SUBSYSTEM_COMMON; 578 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.opcode = 579 COMMON_OPCODE_GET_EXTENTS_INFO; 580 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.timeout = 0; 581 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.req_length = 582 sizeof (IOCTL_COMMON_EXTENTS); 583 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vf_number = 0; 584 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vh_number = 0; 585 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.pf_number = 0; 586 587 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.version = 0; /* V0 */ 588 589 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload; 590 ep->params.request.RscType = type; 591 592 return; 593 594 } /* emlxs_mb_get_extents_info() */ 595 596 597 /* SLI4 */ 598 /*ARGSUSED*/ 599 extern void 600 emlxs_mb_get_extents(emlxs_hba_t *hba, MAILBOXQ *mbq, uint16_t type) 601 { 602 MAILBOX4 *mb4 = (MAILBOX4 *)mbq; 603 IOCTL_COMMON_EXTENTS *ep; 604 605 bzero((void *) mb4, 
MAILBOX_CMD_SLI4_BSIZE); 606 mbq->nonembed = NULL; 607 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 608 mbq->port = (void *)&PPORT; 609 610 mb4->un.varSLIConfig.be.embedded = 1; 611 mb4->mbxCommand = MBX_SLI_CONFIG; 612 mb4->mbxOwner = OWN_HOST; 613 614 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ; 615 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.subsystem = 616 IOCTL_SUBSYSTEM_COMMON; 617 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.opcode = 618 COMMON_OPCODE_GET_EXTENTS; 619 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.timeout = 0; 620 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.req_length = 621 sizeof (IOCTL_COMMON_EXTENTS); 622 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vf_number = 0; 623 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vh_number = 0; 624 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.pf_number = 0; 625 626 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.version = 0; /* V0 */ 627 628 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload; 629 ep->params.request.RscType = type; 630 631 return; 632 633 } /* emlxs_mb_get_extents() */ 634 635 636 /* SLI4 */ 637 /*ARGSUSED*/ 638 extern void 639 emlxs_mb_alloc_extents(emlxs_hba_t *hba, MAILBOXQ *mbq, uint16_t type, 640 uint16_t count) 641 { 642 MAILBOX4 *mb4 = (MAILBOX4 *)mbq; 643 IOCTL_COMMON_EXTENTS *ep; 644 645 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE); 646 mbq->nonembed = NULL; 647 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 648 mbq->port = (void *)&PPORT; 649 650 mb4->un.varSLIConfig.be.embedded = 1; 651 mb4->mbxCommand = MBX_SLI_CONFIG; 652 mb4->mbxOwner = OWN_HOST; 653 654 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ; 655 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.subsystem = 656 IOCTL_SUBSYSTEM_COMMON; 657 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.opcode = 658 COMMON_OPCODE_ALLOC_EXTENTS; 659 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.timeout = 0; 660 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.req_length = 661 sizeof (IOCTL_COMMON_EXTENTS); 662 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vf_number = 0; 663 
mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vh_number = 0; 664 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.pf_number = 0; 665 666 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.version = 0; /* V0 */ 667 668 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload; 669 ep->params.request.RscType = type; 670 671 count = min(count, MAX_EXTENTS); 672 ep->params.request.RscCnt = count; 673 674 return; 675 676 } /* emlxs_mb_alloc_extents() */ 677 678 679 /* SLI4 */ 680 /*ARGSUSED*/ 681 extern void 682 emlxs_mb_dealloc_extents(emlxs_hba_t *hba, MAILBOXQ *mbq, uint16_t type) 683 { 684 MAILBOX4 *mb4 = (MAILBOX4 *)mbq; 685 IOCTL_COMMON_EXTENTS *ep; 686 687 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE); 688 mbq->nonembed = NULL; 689 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 690 mbq->port = (void *)&PPORT; 691 692 mb4->un.varSLIConfig.be.embedded = 1; 693 mb4->mbxCommand = MBX_SLI_CONFIG; 694 mb4->mbxOwner = OWN_HOST; 695 696 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ; 697 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.subsystem = 698 IOCTL_SUBSYSTEM_COMMON; 699 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.opcode = 700 COMMON_OPCODE_DEALLOC_EXTENTS; 701 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.timeout = 0; 702 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.req_length = 703 sizeof (IOCTL_COMMON_EXTENTS); 704 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vf_number = 0; 705 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vh_number = 0; 706 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.pf_number = 0; 707 708 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.version = 0; /* V0 */ 709 710 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload; 711 ep->params.request.RscType = type; 712 713 return; 714 715 } /* emlxs_mb_dealloc_extents() */ 716 717 718 /* SLI4 */ 719 /*ARGSUSED*/ 720 extern void 721 emlxs_mb_wq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num) 722 { 723 MAILBOX4 *mb4 = (MAILBOX4 *)mbq; 724 IOCTL_FCOE_WQ_CREATE *qp; 725 IOCTL_FCOE_WQ_CREATE_V1 *qp1; 726 uint64_t addr; 727 int i; 728 729 bzero((void *) mb4, 
MAILBOX_CMD_SLI4_BSIZE); 730 mbq->nonembed = NULL; 731 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 732 mbq->port = (void *)&PPORT; 733 734 /* 735 * Signifies an embedded command 736 */ 737 mb4->un.varSLIConfig.be.embedded = 1; 738 739 mb4->mbxCommand = MBX_SLI_CONFIG; 740 mb4->mbxOwner = OWN_HOST; 741 742 switch (hba->sli.sli4.param.WQV) { 743 case 0: 744 mb4->un.varSLIConfig.be.payload_length = 745 sizeof (IOCTL_FCOE_WQ_CREATE) + IOCTL_HEADER_SZ; 746 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem = 747 IOCTL_SUBSYSTEM_FCOE; 748 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = 749 FCOE_OPCODE_WQ_CREATE; 750 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0; 751 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 752 sizeof (IOCTL_FCOE_WQ_CREATE); 753 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0; 754 755 addr = hba->sli.sli4.wq[num].addr.phys; 756 qp = (IOCTL_FCOE_WQ_CREATE *)&mb4->un.varSLIConfig.payload; 757 758 qp->params.request.CQId = hba->sli.sli4.wq[num].cqid; 759 760 qp->params.request.NumPages = EMLXS_NUM_WQ_PAGES; 761 for (i = 0; i < EMLXS_NUM_WQ_PAGES; i++) { 762 qp->params.request.Pages[i].addrLow = PADDR_LO(addr); 763 qp->params.request.Pages[i].addrHigh = PADDR_HI(addr); 764 addr += 4096; 765 } 766 767 break; 768 769 case 1: 770 default: 771 mb4->un.varSLIConfig.be.payload_length = 772 sizeof (IOCTL_FCOE_WQ_CREATE_V1) + IOCTL_HEADER_SZ; 773 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem = 774 IOCTL_SUBSYSTEM_FCOE; 775 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = 776 FCOE_OPCODE_WQ_CREATE; 777 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0; 778 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 779 sizeof (IOCTL_FCOE_WQ_CREATE_V1); 780 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 1; 781 782 addr = hba->sli.sli4.wq[num].addr.phys; 783 qp1 = (IOCTL_FCOE_WQ_CREATE_V1 *)&mb4->un.varSLIConfig.payload; 784 785 qp1->params.request.CQId = hba->sli.sli4.wq[num].cqid; 786 qp1->params.request.NumPages = EMLXS_NUM_WQ_PAGES; 787 788 
qp1->params.request.WqeCnt = WQ_DEPTH; 789 qp1->params.request.WqeSize = WQE_SIZE_64_BYTES; 790 qp1->params.request.PageSize = WQ_PAGE_SIZE_4K; 791 792 for (i = 0; i < EMLXS_NUM_WQ_PAGES; i++) { 793 qp1->params.request.Pages[i].addrLow = PADDR_LO(addr); 794 qp1->params.request.Pages[i].addrHigh = PADDR_HI(addr); 795 addr += 4096; 796 } 797 798 break; 799 } 800 801 return; 802 803 } /* emlxs_mb_wq_create() */ 804 805 806 /* SLI4 */ 807 /*ARGSUSED*/ 808 extern void 809 emlxs_mb_rq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num) 810 { 811 MAILBOX4 *mb4 = (MAILBOX4 *)mbq; 812 IOCTL_FCOE_RQ_CREATE *qp; 813 IOCTL_FCOE_RQ_CREATE_V1 *qp1; 814 uint64_t addr; 815 816 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE); 817 mbq->nonembed = NULL; 818 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 819 mbq->port = (void *)&PPORT; 820 821 /* 822 * Signifies an embedded command 823 */ 824 mb4->un.varSLIConfig.be.embedded = 1; 825 826 mb4->mbxCommand = MBX_SLI_CONFIG; 827 mb4->mbxOwner = OWN_HOST; 828 829 switch (hba->sli.sli4.param.RQV) { 830 case 0: 831 mb4->un.varSLIConfig.be.payload_length = 832 sizeof (IOCTL_FCOE_RQ_CREATE) + IOCTL_HEADER_SZ; 833 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem = 834 IOCTL_SUBSYSTEM_FCOE; 835 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = 836 FCOE_OPCODE_RQ_CREATE; 837 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0; 838 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 839 sizeof (IOCTL_FCOE_RQ_CREATE); 840 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0; 841 842 addr = hba->sli.sli4.rq[num].addr.phys; 843 844 qp = (IOCTL_FCOE_RQ_CREATE *)&mb4->un.varSLIConfig.payload; 845 846 qp->params.request.RQContext.RqeCnt = RQ_DEPTH_EXPONENT; 847 qp->params.request.RQContext.BufferSize = RQB_DATA_SIZE; 848 qp->params.request.RQContext.CQId = 849 hba->sli.sli4.rq[num].cqid; 850 851 qp->params.request.NumPages = 1; 852 qp->params.request.Pages[0].addrLow = PADDR_LO(addr); 853 qp->params.request.Pages[0].addrHigh = PADDR_HI(addr); 854 855 break; 856 
857 case 1: 858 default: 859 mb4->un.varSLIConfig.be.payload_length = 860 sizeof (IOCTL_FCOE_RQ_CREATE_V1) + IOCTL_HEADER_SZ; 861 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem = 862 IOCTL_SUBSYSTEM_FCOE; 863 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = 864 FCOE_OPCODE_RQ_CREATE; 865 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0; 866 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 867 sizeof (IOCTL_FCOE_RQ_CREATE_V1); 868 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 1; 869 870 addr = hba->sli.sli4.rq[num].addr.phys; 871 872 qp1 = (IOCTL_FCOE_RQ_CREATE_V1 *)&mb4->un.varSLIConfig.payload; 873 874 qp1->params.request.RQContext.RqeCnt = RQ_DEPTH; 875 qp1->params.request.RQContext.RqeSize = RQE_SIZE_8_BYTES; 876 qp1->params.request.RQContext.PageSize = RQ_PAGE_SIZE_4K; 877 878 qp1->params.request.RQContext.BufferSize = RQB_DATA_SIZE; 879 qp1->params.request.RQContext.CQId = 880 hba->sli.sli4.rq[num].cqid; 881 882 qp1->params.request.NumPages = 1; 883 qp1->params.request.Pages[0].addrLow = PADDR_LO(addr); 884 qp1->params.request.Pages[0].addrHigh = PADDR_HI(addr); 885 886 break; 887 } 888 889 return; 890 891 } /* emlxs_mb_rq_create() */ 892 893 894 /* SLI4 */ 895 /*ARGSUSED*/ 896 extern void 897 emlxs_mb_mq_create(emlxs_hba_t *hba, MAILBOXQ *mbq) 898 { 899 MAILBOX4 *mb4 = (MAILBOX4 *)mbq; 900 IOCTL_COMMON_MQ_CREATE *qp; 901 uint64_t addr; 902 903 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE); 904 mbq->nonembed = NULL; 905 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 906 mbq->port = (void *)&PPORT; 907 908 /* 909 * Signifies an embedded command 910 */ 911 mb4->un.varSLIConfig.be.embedded = 1; 912 913 mb4->mbxCommand = MBX_SLI_CONFIG; 914 mb4->mbxOwner = OWN_HOST; 915 mb4->un.varSLIConfig.be.payload_length = 916 sizeof (IOCTL_COMMON_MQ_CREATE) + IOCTL_HEADER_SZ; 917 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem = 918 IOCTL_SUBSYSTEM_COMMON; 919 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_MQ_CREATE; 920 
mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0; 921 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 922 sizeof (IOCTL_COMMON_MQ_CREATE); 923 924 addr = hba->sli.sli4.mq.addr.phys; 925 qp = (IOCTL_COMMON_MQ_CREATE *)&mb4->un.varSLIConfig.payload; 926 927 qp->params.request.MQContext.Size = MQ_ELEMENT_COUNT_16; 928 qp->params.request.MQContext.Valid = 1; 929 qp->params.request.MQContext.CQId = hba->sli.sli4.mq.cqid; 930 931 qp->params.request.NumPages = 1; 932 qp->params.request.Pages[0].addrLow = PADDR_LO(addr); 933 qp->params.request.Pages[0].addrHigh = PADDR_HI(addr); 934 935 return; 936 937 } /* emlxs_mb_mq_create() */ 938 939 940 /* SLI4 */ 941 /*ARGSUSED*/ 942 extern void 943 emlxs_mb_mq_create_ext(emlxs_hba_t *hba, MAILBOXQ *mbq) 944 { 945 MAILBOX4 *mb4 = (MAILBOX4 *)mbq; 946 IOCTL_COMMON_MQ_CREATE_EXT *qp; 947 IOCTL_COMMON_MQ_CREATE_EXT_V1 *qp1; 948 uint64_t addr; 949 950 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE); 951 mbq->nonembed = NULL; 952 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 953 mbq->port = (void *)&PPORT; 954 955 /* 956 * Signifies an embedded command 957 */ 958 mb4->un.varSLIConfig.be.embedded = 1; 959 960 mb4->mbxCommand = MBX_SLI_CONFIG; 961 mb4->mbxOwner = OWN_HOST; 962 963 switch (hba->sli.sli4.param.MQV) { 964 case 0: 965 mb4->un.varSLIConfig.be.payload_length = 966 sizeof (IOCTL_COMMON_MQ_CREATE_EXT) + IOCTL_HEADER_SZ; 967 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem = 968 IOCTL_SUBSYSTEM_COMMON; 969 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = 970 COMMON_OPCODE_MQ_CREATE_EXT; 971 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0; 972 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 973 sizeof (IOCTL_COMMON_MQ_CREATE_EXT); 974 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0; 975 976 addr = hba->sli.sli4.mq.addr.phys; 977 qp = (IOCTL_COMMON_MQ_CREATE_EXT *) 978 &mb4->un.varSLIConfig.payload; 979 980 qp->params.request.num_pages = 1; 981 qp->params.request.async_event_bitmap = 982 ASYNC_LINK_EVENT | ASYNC_FCF_EVENT | 
ASYNC_GROUP5_EVENT; 983 qp->params.request.context.Size = MQ_ELEMENT_COUNT_16; 984 qp->params.request.context.Valid = 1; 985 qp->params.request.context.CQId = hba->sli.sli4.mq.cqid; 986 987 qp->params.request.pages[0].addrLow = PADDR_LO(addr); 988 qp->params.request.pages[0].addrHigh = PADDR_HI(addr); 989 990 break; 991 992 case 1: 993 default: 994 mb4->un.varSLIConfig.be.payload_length = 995 sizeof (IOCTL_COMMON_MQ_CREATE_EXT_V1) + IOCTL_HEADER_SZ; 996 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem = 997 IOCTL_SUBSYSTEM_COMMON; 998 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = 999 COMMON_OPCODE_MQ_CREATE_EXT; 1000 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0; 1001 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 1002 sizeof (IOCTL_COMMON_MQ_CREATE_EXT_V1); 1003 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 1; 1004 1005 addr = hba->sli.sli4.mq.addr.phys; 1006 qp1 = (IOCTL_COMMON_MQ_CREATE_EXT_V1 *) 1007 &mb4->un.varSLIConfig.payload; 1008 1009 qp1->params.request.num_pages = 1; 1010 qp1->params.request.async_event_bitmap = 1011 ASYNC_LINK_EVENT | ASYNC_FCF_EVENT | ASYNC_GROUP5_EVENT | 1012 ASYNC_FC_EVENT | ASYNC_PORT_EVENT; 1013 qp1->params.request.context.Size = MQ_ELEMENT_COUNT_16; 1014 qp1->params.request.context.Valid = 1; 1015 qp1->params.request.CQId = hba->sli.sli4.mq.cqid; 1016 1017 qp1->params.request.pages[0].addrLow = PADDR_LO(addr); 1018 qp1->params.request.pages[0].addrHigh = PADDR_HI(addr); 1019 1020 break; 1021 } 1022 1023 return; 1024 1025 } /* emlxs_mb_mq_create_ext() */ 1026 1027 1028 /*ARGSUSED*/ 1029 extern void 1030 emlxs_mb_async_event(emlxs_hba_t *hba, MAILBOXQ *mbq) 1031 { 1032 MAILBOX *mb = (MAILBOX *)mbq; 1033 1034 bzero((void *) mb, MAILBOX_CMD_BSIZE); 1035 1036 mb->mbxCommand = MBX_ASYNC_EVENT; 1037 mb->mbxOwner = OWN_HOST; 1038 mb->un.varWords[0] = hba->channel_els; 1039 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 1040 mbq->port = (void *)&PPORT; 1041 1042 return; 1043 1044 } /* emlxs_mb_async_event() */ 1045 1046 1047 
/*
 * emlxs_mb_heartbeat
 *
 * Build an MBX_HEARTBEAT mailbox command used to verify that the
 * adapter is still responding.  No completion handler is attached.
 */
/*ARGSUSED*/
extern void
emlxs_mb_heartbeat(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX *mb = (MAILBOX *)mbq;

	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_HEARTBEAT;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL; /* no cmpl needed for hbeat */
	mbq->port = (void *)&PPORT;

	return;

} /* emlxs_mb_heartbeat() */


/*
 * emlxs_mb_gpio_write
 *
 * Build an embedded SLI_CONFIG / LOWLEVEL_OPCODE_GPIO_RDWR mailbox
 * command that writes 'val' to logical GPIO pin 'pin'.  Unlike the
 * other builders here, this one zeroes the entire MAILBOXQ, not just
 * the mailbox area.
 */
/*ARGSUSED*/
extern void
emlxs_mb_gpio_write(emlxs_hba_t *hba, MAILBOXQ *mbq, uint8_t pin, uint8_t val)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX4 *mb4;
	be_req_hdr_t *be_req;
	mbox_req_hdr_t *hdr_req;
	IOCTL_LOWLEVEL_GPIO_RDWR *gpio;

	bzero((void *) mbq, sizeof (MAILBOXQ));

	mbq->port = port;

	mb4 = (MAILBOX4 *)mbq->mbox;
	mb4->mbxCommand = MBX_SLI_CONFIG;
	mb4->mbxOwner = OWN_HOST;

	/* Embedded (non-SGE) SLI_CONFIG request */
	be_req = (be_req_hdr_t *)&mb4->un.varSLIConfig.be;
	be_req->embedded = 1;
	be_req->payload_length = sizeof (mbox_req_hdr_t) +
	    sizeof (IOCTL_LOWLEVEL_GPIO_RDWR);

	hdr_req = &be_req->un_hdr.hdr_req;
	hdr_req->subsystem = IOCTL_SUBSYSTEM_LOWLEVEL;
	hdr_req->opcode = LOWLEVEL_OPCODE_GPIO_RDWR;
	hdr_req->timeout = 0;
	hdr_req->req_length = sizeof (IOCTL_LOWLEVEL_GPIO_RDWR);

	gpio = (IOCTL_LOWLEVEL_GPIO_RDWR *)&mb4->un.varSLIConfig.payload;
	gpio->params.request.GpioAction = LOWLEVEL_GPIO_ACT_WRITE;
	gpio->params.request.LogicalPin = pin;
	gpio->params.request.PinValue = val;
} /* emlxs_mb_gpio_write */

#ifdef MSI_SUPPORT

/*
 * emlxs_mb_config_msi
 *
 * Build an MBX_CONFIG_MSI mailbox command.  intr_map[] maps MSI
 * message index -> host-attention condition mask; index 0 is the
 * default message.  For each message 1..intr_count-1 the HA condition
 * bits are accumulated in attConditions and the per-bit message
 * numbers are recorded in messageNumberByHA[] (byte indices differ by
 * endianness, see below).
 */
/*ARGSUSED*/
extern void
emlxs_mb_config_msi(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t *intr_map,
	uint32_t intr_count)
{
	MAILBOX *mb = (MAILBOX *)mbq;
	uint16_t i;
	uint32_t mask;

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_CONFIG_MSI;

	/* Set the default message id to zero */
	mb->un.varCfgMSI.defaultPresent = 1;
	mb->un.varCfgMSI.defaultMessageNumber = 0;

	for (i = 1; i < intr_count; i++) {
		mask = intr_map[i];

		mb->un.varCfgMSI.attConditions |= mask;

#ifdef EMLXS_BIG_ENDIAN
		if (mask & HA_R0ATT) {
			mb->un.varCfgMSI.messageNumberByHA[3] = i;
		}
		if (mask & HA_R1ATT) {
			mb->un.varCfgMSI.messageNumberByHA[7] = i;
		}
		if (mask & HA_R2ATT) {
			mb->un.varCfgMSI.messageNumberByHA[11] = i;
		}
		if (mask & HA_R3ATT) {
			mb->un.varCfgMSI.messageNumberByHA[15] = i;
		}
		if (mask & HA_LATT) {
			mb->un.varCfgMSI.messageNumberByHA[29] = i;
		}
		if (mask & HA_MBATT) {
			mb->un.varCfgMSI.messageNumberByHA[30] = i;
		}
		if (mask & HA_ERATT) {
			mb->un.varCfgMSI.messageNumberByHA[31] = i;
		}
#endif	/* EMLXS_BIG_ENDIAN */

#ifdef EMLXS_LITTLE_ENDIAN
		/* Accounts for half word swap of LE architecture */
		if (mask & HA_R0ATT) {
			mb->un.varCfgMSI.messageNumberByHA[2] = i;
		}
		if (mask & HA_R1ATT) {
			mb->un.varCfgMSI.messageNumberByHA[6] = i;
		}
		if (mask & HA_R2ATT) {
			mb->un.varCfgMSI.messageNumberByHA[10] = i;
		}
		if (mask & HA_R3ATT) {
			mb->un.varCfgMSI.messageNumberByHA[14] = i;
		}
		if (mask & HA_LATT) {
			mb->un.varCfgMSI.messageNumberByHA[28] = i;
		}
		if (mask & HA_MBATT) {
			mb->un.varCfgMSI.messageNumberByHA[31] = i;
		}
		if (mask & HA_ERATT) {
			mb->un.varCfgMSI.messageNumberByHA[30] = i;
		}
#endif	/* EMLXS_LITTLE_ENDIAN */
	}

	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL; /* no cmpl needed */
	mbq->port = (void *)&PPORT;

	return;

} /* emlxs_mb_config_msi() */


/*
 * emlxs_mb_config_msix
 *
 * Build an MBX_CONFIG_MSIX mailbox command; same intr_map contract as
 * emlxs_mb_config_msi() above, but the little-endian byte indices use
 * a full word swap rather than a half-word swap.
 */
/*ARGSUSED*/
extern void
emlxs_mb_config_msix(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t *intr_map,
	uint32_t intr_count)
{
	MAILBOX *mb = (MAILBOX *)mbq;
	uint8_t i;
	uint32_t mask;

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_CONFIG_MSIX;

	/* Set the default message id to zero */
	mb->un.varCfgMSIX.defaultPresent = 1;
	mb->un.varCfgMSIX.defaultMessageNumber = 0;

	for (i = 1; i < intr_count; i++) {
		mask = intr_map[i];

		mb->un.varCfgMSIX.attConditions1 |= mask;

#ifdef EMLXS_BIG_ENDIAN
		if (mask & HA_R0ATT) {
			mb->un.varCfgMSIX.messageNumberByHA[3] = i;
		}
		if (mask & HA_R1ATT) {
			mb->un.varCfgMSIX.messageNumberByHA[7] = i;
		}
		if (mask & HA_R2ATT) {
			mb->un.varCfgMSIX.messageNumberByHA[11] = i;
		}
		if (mask & HA_R3ATT) {
			mb->un.varCfgMSIX.messageNumberByHA[15] = i;
		}
		if (mask & HA_LATT) {
			mb->un.varCfgMSIX.messageNumberByHA[29] = i;
		}
		if (mask & HA_MBATT) {
			mb->un.varCfgMSIX.messageNumberByHA[30] = i;
		}
		if (mask & HA_ERATT) {
			mb->un.varCfgMSIX.messageNumberByHA[31] = i;
		}
#endif	/* EMLXS_BIG_ENDIAN */

#ifdef EMLXS_LITTLE_ENDIAN
		/* Accounts for word swap of LE architecture */
		if (mask & HA_R0ATT) {
			mb->un.varCfgMSIX.messageNumberByHA[0] = i;
		}
		if (mask & HA_R1ATT) {
			mb->un.varCfgMSIX.messageNumberByHA[4] = i;
		}
		if (mask & HA_R2ATT) {
			mb->un.varCfgMSIX.messageNumberByHA[8] = i;
		}
		if (mask & HA_R3ATT) {
			mb->un.varCfgMSIX.messageNumberByHA[12] = i;
		}
		if (mask & HA_LATT) {
			mb->un.varCfgMSIX.messageNumberByHA[30] = i;
		}
		if (mask & HA_MBATT) {
			mb->un.varCfgMSIX.messageNumberByHA[29] = i;
		}
		if (mask & HA_ERATT) {
			mb->un.varCfgMSIX.messageNumberByHA[28] = i;
		}
#endif	/* EMLXS_LITTLE_ENDIAN */
	}

	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL; /* no cmpl needed */
	mbq->port = (void *)&PPORT;

	return;

} /* emlxs_mb_config_msix() */


#endif	/* MSI_SUPPORT */


/*
 * emlxs_mb_reset_ring
 *
 * Build an MBX_RESET_RING mailbox command for ring 'ringno'.
 * No completion handler is attached.
 */
/*ARGSUSED*/
extern void
emlxs_mb_reset_ring(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t ringno)
{
	MAILBOX *mb = (MAILBOX *)mbq;

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_RESET_RING;
	mb->un.varRstRing.ring_no = ringno;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL; /* no cmpl needed */
	mbq->port = (void *)&PPORT;

	return;

} /* emlxs_mb_reset_ring() */


/*
 * emlxs_mb_dump_vpd
 *
 * Build an MBX_DUMP_MEMORY mailbox command that dumps the VPD region
 * starting at 'offset'.  For SLI4 the response lands in the driver's
 * pre-allocated dump_region DMA buffer (cleared first); for SLI3 the
 * response is returned inline in the mailbox, limited to
 * DMP_VPD_DUMP_WCOUNT words.
 */
/*ARGSUSED*/
extern void
emlxs_mb_dump_vpd(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t offset)
{

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		MAILBOX4 *mb4 = (MAILBOX4 *)mbq;

		/* Clear the local dump_region */
		bzero(hba->sli.sli4.dump_region.virt,
		    hba->sli.sli4.dump_region.size);

		bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);

		mb4->mbxCommand = MBX_DUMP_MEMORY;
		mb4->un.varDmp4.type = DMP_NV_PARAMS;
		mb4->un.varDmp4.entry_index = offset;
		mb4->un.varDmp4.region_id = DMP_VPD_REGION;

		mb4->un.varDmp4.available_cnt = hba->sli.sli4.dump_region.size;
		mb4->un.varDmp4.addrHigh =
		    PADDR_HI(hba->sli.sli4.dump_region.phys);
		mb4->un.varDmp4.addrLow =
		    PADDR_LO(hba->sli.sli4.dump_region.phys);
		mb4->un.varDmp4.rsp_cnt = 0;

		mb4->mbxOwner = OWN_HOST;

	} else {
		MAILBOX *mb = (MAILBOX *)mbq;

		bzero((void *)mb, MAILBOX_CMD_BSIZE);

		mb->mbxCommand = MBX_DUMP_MEMORY;
		mb->un.varDmp.cv = 1;
		mb->un.varDmp.type = DMP_NV_PARAMS;
		mb->un.varDmp.entry_index = offset;
		mb->un.varDmp.region_id = DMP_VPD_REGION;

		/* limited by mailbox size */
		mb->un.varDmp.word_cnt = DMP_VPD_DUMP_WCOUNT;

		mb->un.varDmp.co = 0;
		mb->un.varDmp.resp_offset = 0;
		mb->mbxOwner = OWN_HOST;
	}

	mbq->mbox_cmpl = NULL; /* no cmpl needed */
	mbq->port = (void *)&PPORT;

} /* emlxs_mb_dump_vpd() */


/* SLI4 */
/*
 * emlxs_mb_dump_fcoe
 *
 * Build an MBX_DUMP_MEMORY mailbox command that dumps the FCoE region
 * starting at 'offset' into the driver's dump_region DMA buffer.
 * SLI4-only; a no-op on earlier SLI modes.
 */
/*ARGSUSED*/
extern void
emlxs_mb_dump_fcoe(emlxs_hba_t *hba, MAILBOXQ *mbq,
	uint32_t offset)
{
	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;

	/* FCoE region dump is only defined for SLI4 adapters */
	if (hba->sli_mode < EMLXS_HBA_SLI4_MODE) {
		return;
	}

	/* Clear the local dump_region */
	bzero(hba->sli.sli4.dump_region.virt,
	    hba->sli.sli4.dump_region.size);

	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);

	mb4->mbxCommand = MBX_DUMP_MEMORY;
	mb4->un.varDmp4.type = DMP_NV_PARAMS;
	mb4->un.varDmp4.entry_index = offset;
	mb4->un.varDmp4.region_id = DMP_FCOE_REGION;

	mb4->un.varDmp4.available_cnt = hba->sli.sli4.dump_region.size;
	mb4->un.varDmp4.addrHigh =
	    PADDR_HI(hba->sli.sli4.dump_region.phys);
	mb4->un.varDmp4.addrLow =
	    PADDR_LO(hba->sli.sli4.dump_region.phys);
	mb4->un.varDmp4.rsp_cnt = 0;

	mb4->mbxOwner = OWN_HOST;

	mbq->mbox_cmpl = NULL; /* no cmpl needed */
	mbq->port = (void *)&PPORT;

} /* emlxs_mb_dump_fcoe() */


/*
 * emlxs_mb_dump
 *
 * Build an MBX_DUMP_MEMORY mailbox command that dumps 'words' words of
 * adapter memory starting at 'offset'.  For SLI4 the response goes to
 * the dump_region DMA buffer (capped at the buffer size); for SLI3 it
 * is returned inline in the mailbox.
 */
/*ARGSUSED*/
extern void
emlxs_mb_dump(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t offset, uint32_t words)
{

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		MAILBOX4 *mb4 = (MAILBOX4 *)mbq;

		/* Clear the local dump_region */
		bzero(hba->sli.sli4.dump_region.virt,
		    hba->sli.sli4.dump_region.size);

		bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);

		mb4->mbxCommand = MBX_DUMP_MEMORY;
		mb4->un.varDmp4.type = DMP_MEM_REG;
		mb4->un.varDmp4.entry_index = offset;
		mb4->un.varDmp4.region_id = 0;

		/* words are requested; buffer is sized in bytes */
		mb4->un.varDmp4.available_cnt = min((words*4),
		    hba->sli.sli4.dump_region.size);
		mb4->un.varDmp4.addrHigh =
		    PADDR_HI(hba->sli.sli4.dump_region.phys);
		mb4->un.varDmp4.addrLow =
		    PADDR_LO(hba->sli.sli4.dump_region.phys);
		mb4->un.varDmp4.rsp_cnt = 0;

		mb4->mbxOwner = OWN_HOST;

	} else {

		MAILBOX *mb = (MAILBOX *)mbq;

		bzero((void *)mb, MAILBOX_CMD_BSIZE);

		mb->mbxCommand = MBX_DUMP_MEMORY;
		mb->un.varDmp.type = DMP_MEM_REG;
		mb->un.varDmp.word_cnt = words;
		mb->un.varDmp.base_adr = offset;

		mb->un.varDmp.co = 0;
		mb->un.varDmp.resp_offset = 0;
		mb->mbxOwner = OWN_HOST;
	}

	mbq->mbox_cmpl = NULL; /* no cmpl needed */
	mbq->port = (void *)&PPORT;

	return;

} /* emlxs_mb_dump() */


/*
 * emlxs_mb_read_nv	Issue a READ NVPARAM mailbox command
 */
/*ARGSUSED*/
extern void
emlxs_mb_read_nv(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX *mb = (MAILBOX *)mbq;

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_READ_NV;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL; /* no cmpl needed */
	mbq->port = (void *)&PPORT;

} /* emlxs_mb_read_nv() */


/*
 * emlxs_mb_read_rev	Issue a READ REV mailbox command
 *
 * 'v3' requests the extended (cv3) revision format on SLI3 adapters;
 * SLI4 uses the plain SLI4-sized mailbox with no inline flags.
 */
/*ARGSUSED*/
extern void
emlxs_mb_read_rev(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t v3)
{
	MAILBOX *mb = (MAILBOX *)mbq;

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
		mbq->nonembed = NULL;
	} else {
		bzero((void *)mb, MAILBOX_CMD_BSIZE);

		mb->un.varRdRev.cv = 1;

		if (v3) {
			mb->un.varRdRev.cv3 = 1;
		}
	}

	mb->mbxCommand = MBX_READ_REV;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL;
	mbq->port = (void *)&PPORT;

} /* emlxs_mb_read_rev() */


/*
 * emlxs_mb_run_biu_diag	Issue a RUN_BIU_DIAG mailbox command
 *
 * 'out' and 'in' are physical addresses of the transmit and receive
 * diagnostic buffers (each MEM_ELSBUF_SIZE bytes).
 */
/*ARGSUSED*/
extern uint32_t
emlxs_mb_run_biu_diag(emlxs_hba_t *hba, MAILBOXQ *mbq, uint64_t out,
	uint64_t in)
{
	MAILBOX *mb = (MAILBOX *)mbq;

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_RUN_BIU_DIAG64;
	mb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize = MEM_ELSBUF_SIZE;
	mb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh = PADDR_HI(out);

	mb->un.varBIUdiag.un.s2.xmit_bde64.addrLow = PADDR_LO(out);
	mb->un.varBIUdiag.un.s2.rcv_bde64.tus.f.bdeSize = MEM_ELSBUF_SIZE;
	mb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh = PADDR_HI(in);
	mb->un.varBIUdiag.un.s2.rcv_bde64.addrLow = PADDR_LO(in);
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL; /* no cmpl needed */
	mbq->port = (void *)&PPORT;

	return (0);
} /* emlxs_mb_run_biu_diag() */


/*
 * emlxs_mb_retry
 *
 * Re-issue a completed mailbox command: copy it to a freshly allocated
 * mailbox, account the completion in HBASTATS, release the mailbox
 * queue, and resubmit with MBX_NOWAIT.  If allocation fails the retry
 * is silently dropped.
 *
 * This should only be called with active MBX_NOWAIT mailboxes.
 */
void
emlxs_mb_retry(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX *mb;
	MAILBOX *mbox;
	int rc;

	mbox = (MAILBOX *)emlxs_mem_get(hba, MEM_MBOX);
	if (!mbox) {
		return;
	}
	mb = (MAILBOX *)mbq;
	bcopy((uint8_t *)mb, (uint8_t *)mbox, MAILBOX_CMD_BSIZE);
	mbox->mbxOwner = OWN_HOST;
	mbox->mbxStatus = 0;

	mutex_enter(&EMLXS_PORT_LOCK);

	HBASTATS.MboxCompleted++;

	if (mb->mbxStatus != 0) {
		HBASTATS.MboxError++;
	} else {
		HBASTATS.MboxGood++;
	}

	/* Release the active mailbox slot before resubmitting */
	hba->mbox_mbq = NULL;
	hba->mbox_queue_flag = 0;

	mutex_exit(&EMLXS_PORT_LOCK);

	rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbox, MBX_NOWAIT, 0);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
	}
	return;

} /* emlxs_mb_retry() */


/* SLI3 */
/*
 * emlxs_read_la_mbcmpl
 *
 * Completion handler for READ_LA64: decode the link attention,
 * update link state/speed/topology, copy the LILP ALPA map, and on
 * link-up queue follow-up mailbox commands (SET_VAR, READ_SPARAM,
 * CONFIG_LINK) before declaring the link up.  Returns 1 when the
 * command was re-queued via emlxs_mb_retry(), else 0.
 */
static uint32_t
emlxs_read_la_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
	MAILBOX *mb;
	MAILBOXQ *mbox;
	MATCHMAP *mp;
	READ_LA_VAR la;
	int i;
	uint32_t control;

	mb = (MAILBOX *)mbq;
	if (mb->mbxStatus) {
		if (mb->mbxStatus == MBXERR_NO_RESOURCES) {
			/* Rebuild the command if its BDE was lost */
			control = mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize;
			if (control == 0) {
				(void) emlxs_mb_read_la(hba, mbq);
			}
			emlxs_mb_retry(hba, mbq);
			return (1);
		}
		/* Enable Link Attention interrupts */
		mutex_enter(&EMLXS_PORT_LOCK);

		if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
			hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
			WRITE_CSR_REG(hba, FC_HC_REG(hba),
			    hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba,
			    hba->sli.sli3.csr_acc_handle);
#endif	/* FMA_SUPPORT */
		}

		mutex_exit(&EMLXS_PORT_LOCK);
		return (0);
	}
	bcopy((void *)&mb->un.varReadLA, (void *)&la, sizeof (READ_LA_VAR));

	/* Copy the LILP map delivered via the BDE buffer, if any */
	mp = (MATCHMAP *)mbq->bp;
	if (mp) {
		bcopy((caddr_t)mp->virt, (caddr_t)port->alpa_map, 128);
	} else {
		bzero((caddr_t)port->alpa_map, 128);
	}

	if (la.attType == AT_LINK_UP) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_linkup_atten_msg,
		    "tag=%d -> %d ALPA=%x",
		    (uint32_t)hba->link_event_tag,
		    (uint32_t)la.eventTag,
		    (uint32_t)la.granted_AL_PA);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_linkdown_atten_msg,
		    "tag=%d -> %d ALPA=%x",
		    (uint32_t)hba->link_event_tag,
		    (uint32_t)la.eventTag,
		    (uint32_t)la.granted_AL_PA);
	}

	if (la.pb) {
		hba->flag |= FC_BYPASSED_MODE;
	} else {
		hba->flag &= ~FC_BYPASSED_MODE;
	}

	/* A skipped event tag means we missed at least one attention */
	if (hba->link_event_tag == la.eventTag) {
		HBASTATS.LinkMultiEvent++;
	} else if (hba->link_event_tag + 1 < la.eventTag) {
		HBASTATS.LinkMultiEvent++;

		/* Make sure link is declared down */
		emlxs_linkdown(hba);
	}

	hba->link_event_tag = la.eventTag;
	port->lip_type = 0;

	/* If link not already up then declare it up now */
	if ((la.attType == AT_LINK_UP) && (hba->state < FC_LINK_UP)) {

#ifdef MENLO_SUPPORT
		if (hba->model_info.vendor_id == PCI_VENDOR_ID_EMULEX &&
		    hba->model_info.device_id == PCI_DEVICE_ID_HORNET &&
		    (hba->flag & (FC_ILB_MODE | FC_ELB_MODE))) {
			/* Force a private-loop view while in loopback */
			la.topology = TOPOLOGY_LOOP;
			la.granted_AL_PA = 0;
			port->alpa_map[0] = 1;
			port->alpa_map[1] = 0;
			la.lipType = LT_PORT_INIT;
		}
#endif /* MENLO_SUPPORT */
		/* Save the linkspeed */
		hba->linkspeed = la.UlnkSpeed;

		/* Check for old model adapters that only */
		/* supported 1Gb */
		if ((hba->linkspeed == 0) &&
		    (hba->model_info.chip & EMLXS_DRAGONFLY_CHIP)) {
			hba->linkspeed = LA_1GHZ_LINK;
		}

		if ((hba->topology = la.topology) == TOPOLOGY_LOOP) {
			port->granted_alpa = la.granted_AL_PA;
			port->did = port->granted_alpa;
			port->lip_type = la.lipType;
			if (hba->flag & FC_SLIM2_MODE) {
				i = la.un.lilpBde64.tus.f.bdeSize;
			} else {
				i = la.un.lilpBde.bdeSize;
			}

			if (i == 0) {
				port->alpa_map[0] = 0;
			} else {
				uint8_t *alpa_map;
				uint32_t j;

				/* Check number of devices in map */
				if (port->alpa_map[0] > 127) {
					port->alpa_map[0] = 127;
				}

				alpa_map = (uint8_t *)port->alpa_map;

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_link_atten_msg,
				    "alpa_map: %d device(s): "
				    "%02x %02x %02x %02x %02x %02x "
				    "%02x", alpa_map[0], alpa_map[1],
				    alpa_map[2], alpa_map[3],
				    alpa_map[4], alpa_map[5],
				    alpa_map[6], alpa_map[7]);

				for (j = 8; j <= alpa_map[0]; j += 8) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_link_atten_msg,
					    "alpa_map: "
					    "%02x %02x %02x %02x %02x "
					    "%02x %02x %02x",
					    alpa_map[j],
					    alpa_map[j + 1],
					    alpa_map[j + 2],
					    alpa_map[j + 3],
					    alpa_map[j + 4],
					    alpa_map[j + 5],
					    alpa_map[j + 6],
					    alpa_map[j + 7]);
				}
			}
		}
#ifdef MENLO_SUPPORT
		/* Check if Menlo maintenance mode is enabled */
		if (hba->model_info.vendor_id == PCI_VENDOR_ID_EMULEX &&
		    hba->model_info.device_id == PCI_DEVICE_ID_HORNET) {
			if (la.mm == 1) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_link_atten_msg,
				    "Maintenance Mode enabled.");

				mutex_enter(&EMLXS_PORT_LOCK);
				hba->flag |= FC_MENLO_MODE;
				mutex_exit(&EMLXS_PORT_LOCK);

				mutex_enter(&EMLXS_LINKUP_LOCK);
				cv_broadcast(&EMLXS_LINKUP_CV);
				mutex_exit(&EMLXS_LINKUP_LOCK);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_link_atten_msg,
				    "Maintenance Mode disabled.");
			}

			/* Check FCoE attention bit */
			if (la.fa == 1) {
				emlxs_thread_spawn(hba,
				    emlxs_fcoe_attention_thread,
				    0, 0);
			}
		}
#endif /* MENLO_SUPPORT */

		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
		    MEM_MBOX))) {
			/* This should turn on DELAYED ABTS for */
			/* ELS timeouts */
			emlxs_mb_set_var(hba, mbox, 0x00052198, 0x1);

			emlxs_mb_put(hba, mbox);
		}

		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
		    MEM_MBOX))) {
			/* If link not already down then */
			/* declare it down now */
			if (emlxs_mb_read_sparam(hba, mbox) == 0) {
				emlxs_mb_put(hba, mbox);
			} else {
				emlxs_mem_put(hba, MEM_MBOX,
				    (void *)mbox);
			}
		}

		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
		    MEM_MBOX))) {
			emlxs_mb_config_link(hba, mbox);

			emlxs_mb_put(hba, mbox);
		}

		/* Declare the linkup here */
		emlxs_linkup(hba);
	}

	/* If link not already down then declare it down now */
	else if (la.attType == AT_LINK_DOWN) {
		/* Make sure link is declared down */
		emlxs_linkdown(hba);
	}

	/* Enable Link attention interrupt */
	mutex_enter(&EMLXS_PORT_LOCK);

	if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
		hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif	/* FMA_SUPPORT */
	}

	mutex_exit(&EMLXS_PORT_LOCK);

	return (0);

} /* emlxs_read_la_mbcmpl() */


/*
 * emlxs_mb_read_la
 *
 * Build an MBX_READ_LA64 mailbox command.  Allocates a MEM_BUF for
 * the LILP map BDE and saves it in mbq->bp for the completion handler
 * (emlxs_read_la_mbcmpl).  Returns 1 if the buffer allocation failed,
 * else 0.
 */
extern uint32_t
emlxs_mb_read_la(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX *mb = (MAILBOX *)mbq;
	MATCHMAP *mp;

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	/* No LILP buffer available: still set the command so the */
	/* caller can identify it, but report failure */
	if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) {
		mb->mbxCommand = MBX_READ_LA64;

		return (1);
	}

	mb->mbxCommand = MBX_READ_LA64;
	mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize = 128;
	mb->un.varReadLA.un.lilpBde64.addrHigh = PADDR_HI(mp->phys);
	mb->un.varReadLA.un.lilpBde64.addrLow = PADDR_LO(mp->phys);
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = emlxs_read_la_mbcmpl;
	mbq->port = (void *)&PPORT;

	/*
	 * save address for completion
	 */
	mbq->bp = (void *)mp;

	return (0);

} /* emlxs_mb_read_la() */


/* SLI3 */
/*
 * emlxs_clear_la_mbcmpl
 *
 * Completion handler for CLEAR_LA.  On failure, optionally re-issues
 * READ_LA (status 0x1601) and re-arms or disables the link attention
 * interrupt accordingly.  On success, re-arms link attention,
 * transitions to FC_READY, registers VPIs for bound vports and kicks
 * pending FCP I/O.  Always returns 0.
 */
static uint32_t
emlxs_clear_la_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
	MAILBOX *mb;
	MAILBOXQ *mbox;
	emlxs_port_t *vport;
	uint32_t la_enable;
	int i, rc;

	mb = (MAILBOX *)mbq;
	if (mb->mbxStatus) {
		la_enable = 1;

		if (mb->mbxStatus == 0x1601) {
			/* Get a buffer which will be used for */
			/* mailbox commands */
			if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
			    MEM_MBOX))) {
				/* Get link attention message */
				if (emlxs_mb_read_la(hba, mbox) == 0) {
					rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
					    (MAILBOX *)mbox, MBX_NOWAIT, 0);
					if ((rc != MBX_BUSY) &&
					    (rc != MBX_SUCCESS)) {
						emlxs_mem_put(hba,
						    MEM_MBOX, (void *)mbox);
					}
					la_enable = 0;
				} else {
					emlxs_mem_put(hba, MEM_MBOX,
					    (void *)mbox);
				}
			}
		}

		mutex_enter(&EMLXS_PORT_LOCK);
		if (la_enable) {
			if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
				/* Enable Link Attention interrupts */
				hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
				WRITE_CSR_REG(hba, FC_HC_REG(hba),
				    hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
				/* Access handle validation */
				EMLXS_CHK_ACC_HANDLE(hba,
				    hba->sli.sli3.csr_acc_handle);
#endif	/* FMA_SUPPORT */
			}
		} else {
			if (hba->sli.sli3.hc_copy & HC_LAINT_ENA) {
				/* Disable Link Attention interrupts */
				hba->sli.sli3.hc_copy &= ~HC_LAINT_ENA;
				WRITE_CSR_REG(hba, FC_HC_REG(hba),
				    hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
				/* Access handle validation */
				EMLXS_CHK_ACC_HANDLE(hba,
				    hba->sli.sli3.csr_acc_handle);
#endif	/* FMA_SUPPORT */
			}
		}
		mutex_exit(&EMLXS_PORT_LOCK);

		return (0);
	}
	/* Enable on Link Attention interrupts */
	mutex_enter(&EMLXS_PORT_LOCK);

	if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
		hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif	/* FMA_SUPPORT */
	}

	if (hba->state >= FC_LINK_UP) {
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_READY);
	}

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Adapter is now ready for FCP traffic */
	if (hba->state == FC_READY) {

		/* Register vpi's for all ports that have did's */
		for (i = 0; i < MAX_VPORTS; i++) {
			vport = &VPORT(i);

			if (!(vport->flag & EMLXS_PORT_BOUND) ||
			    !(vport->did)) {
				continue;
			}

			(void) emlxs_mb_reg_vpi(vport, NULL);
		}

		/* Attempt to send any pending IO */
		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[hba->channel_fcp], 0);
	}
	return (0);

} /* emlxs_clear_la_mbcmpl() */


/* SLI3 */
/*
 * emlxs_mb_clear_la
 *
 * Build an MBX_CLEAR_LA mailbox command carrying the current link
 * event tag; completion is handled by emlxs_clear_la_mbcmpl().
 */
extern void
emlxs_mb_clear_la(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX *mb = (MAILBOX *)mbq;

#ifdef FC_RPI_CHECK
	emlxs_rpi_check(hba);
#endif	/* FC_RPI_CHECK */

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	mb->un.varClearLA.eventTag = hba->link_event_tag;

	mb->mbxCommand = MBX_CLEAR_LA;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = emlxs_clear_la_mbcmpl;
	mbq->port = (void *)&PPORT;

	return;

} /* emlxs_mb_clear_la() */


/*
 * emlxs_mb_read_status	Issue a READ STATUS mailbox command
 */
/*ARGSUSED*/
extern void
emlxs_mb_read_status(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX *mb = (MAILBOX *)mbq;

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_READ_STATUS;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL; /* no cmpl needed */
	mbq->port = (void *)&PPORT;

} /* fc_read_status() */


/*
 * emlxs_mb_read_lnk_stat	Issue a LINK STATUS mailbox command
 */
/*ARGSUSED*/
extern void
emlxs_mb_read_lnk_stat(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX *mb = (MAILBOX *)mbq;

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_READ_LNK_STAT;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL; /* no cmpl needed */
	mbq->port = (void *)&PPORT;

} /* emlxs_mb_read_lnk_stat() */


/*
 * emlxs_mb_config_ring	Issue a CONFIG RING mailbox command
 *
 * Loads the rctl/type match masks for ring 'ring' from the SLI3 ring
 * tables.  The rrRegs array holds at most 6 entries across all rings;
 * 'j' is the offset of this ring's entries within the flat tables.
 */
extern void
emlxs_mb_config_ring(emlxs_hba_t *hba, int32_t ring, MAILBOXQ *mbq)
{
	MAILBOX *mb = (MAILBOX *)mbq;
	int32_t i;
	int32_t j;

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	j = 0;
	for (i = 0; i < ring; i++) {
		j += hba->sli.sli3.ring_masks[i];
	}

	for (i = 0; i < hba->sli.sli3.ring_masks[ring]; i++) {
		if ((j + i) >= 6) {
			break;
		}

		mb->un.varCfgRing.rrRegs[i].rval =
		    hba->sli.sli3.ring_rval[j + i];
		mb->un.varCfgRing.rrRegs[i].rmask =
		    hba->sli.sli3.ring_rmask[j + i];
		mb->un.varCfgRing.rrRegs[i].tval =
		    hba->sli.sli3.ring_tval[j + i];
		mb->un.varCfgRing.rrRegs[i].tmask =
		    hba->sli.sli3.ring_tmask[j + i];
	}

	mb->un.varCfgRing.ring = ring;
	mb->un.varCfgRing.profile = 0;
	mb->un.varCfgRing.maxOrigXchg = 0;
	mb->un.varCfgRing.maxRespXchg = 0;
	mb->un.varCfgRing.recvNotify = 1;
	mb->un.varCfgRing.numMask = hba->sli.sli3.ring_masks[ring];
	mb->mbxCommand = MBX_CONFIG_RING;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL; /* no cmpl needed */
	mbq->port = (void *)&PPORT;

	return;

} /* emlxs_mb_config_ring() */


/*
 * emlxs_mb_config_link	Issue a CONFIG LINK mailbox command
 *
 * Loads the port DID, FC timeout values and (optionally) the SLI-2
 * coalescing-response and ACK0 features from the driver config.
 */
extern void
emlxs_mb_config_link(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX *mb = (MAILBOX *)mbq;
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	/*
	 * NEW_FEATURE SLI-2, Coalescing Response Feature.
	 */
	if (cfg[CFG_CR_DELAY].current) {
		mb->un.varCfgLnk.cr = 1;
		mb->un.varCfgLnk.ci = 1;
		mb->un.varCfgLnk.cr_delay = cfg[CFG_CR_DELAY].current;
		mb->un.varCfgLnk.cr_count = cfg[CFG_CR_COUNT].current;
	}

	if (cfg[CFG_ACK0].current) {
		mb->un.varCfgLnk.ack0_enable = 1;
	}

	mb->un.varCfgLnk.myId = port->did;
	mb->un.varCfgLnk.edtov = hba->fc_edtov;
	mb->un.varCfgLnk.arbtov = hba->fc_arbtov;
	mb->un.varCfgLnk.ratov = hba->fc_ratov;
	mb->un.varCfgLnk.rttov = hba->fc_rttov;
	mb->un.varCfgLnk.altov = hba->fc_altov;
	mb->un.varCfgLnk.crtov = hba->fc_crtov;
	mb->un.varCfgLnk.citov = hba->fc_citov;
	mb->mbxCommand = MBX_CONFIG_LINK;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL;
	mbq->port = (void *)port;

	return;

} /* emlxs_mb_config_link() */


/*
 * emlxs_init_link_mbcmpl
 *
 * Completion handler for INIT_LINK.  On certain failures of a
 * MBX_NOWAIT request with a forced link speed, clears the speed flag
 * (falling back to auto-speed) and retries.  Returns 1 when retried,
 * else 0.
 */
static uint32_t
emlxs_init_link_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb;

	mb = (MAILBOX *)mbq;
	if (mb->mbxStatus) {
		if
 ((hba->flag & FC_SLIM2_MODE) &&
		    (hba->mbox_queue_flag == MBX_NOWAIT)) {
			/* Retry only MBX_NOWAIT requests */

			if ((cfg[CFG_LINK_SPEED].current > 0) &&
			    ((mb->mbxStatus == 0x0011) ||
			    (mb->mbxStatus == 0x0500))) {

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_event_msg,
				    "Retrying.  %s: status=%x. Auto-speed set.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    (uint32_t)mb->mbxStatus);

				/* Drop the forced speed and retry */
				mb->un.varInitLnk.link_flags &=
				    ~FLAGS_LINK_SPEED;
				mb->un.varInitLnk.link_speed = 0;

				emlxs_mb_retry(hba, mbq);
				return (1);
			}
		}
	}
	return (0);

} /* emlxs_init_link_mbcmpl() */


/*
 * emlxs_mb_init_link	Issue an INIT LINK mailbox command
 *
 * 'topology' is one of the FLAGS_TOPOLOGY_MODE_* / FLAGS_LOCAL_LB
 * values; 'linkspeed' is the requested speed in Gb (0 = auto), which
 * is applied only if the adapter's VPD reports the matching LMT
 * capability.  On SLI4 FCoE ports the command carries no topology or
 * speed flags at all.
 */
extern void
emlxs_mb_init_link(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t topology,
	uint32_t linkspeed)
{
	MAILBOX *mb = (MAILBOX *)mbq;
	emlxs_vpd_t *vpd = &VPD;
	emlxs_config_t *cfg = &CFG;

	if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
	    (SLI4_FCOE_MODE)) {
		bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
		mbq->nonembed = NULL;
		mbq->mbox_cmpl = NULL; /* no cmpl needed */
		mbq->port = (void *)&PPORT;

		mb->mbxCommand = (volatile uint8_t) MBX_INIT_LINK;
		mb->mbxOwner = OWN_HOST;
		return;
	}

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	switch (topology) {
	case FLAGS_LOCAL_LB:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
		mb->un.varInitLnk.link_flags |= FLAGS_LOCAL_LB;
		break;
	case FLAGS_TOPOLOGY_MODE_LOOP_PT:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
		break;
	case FLAGS_TOPOLOGY_MODE_PT_PT:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
		break;
	case FLAGS_TOPOLOGY_MODE_LOOP:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
		break;
	case FLAGS_TOPOLOGY_MODE_PT_LOOP:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
		break;
	}

	if (cfg[CFG_LILP_ENABLE].current == 0) {
		/* Disable LIRP/LILP support */
		mb->un.varInitLnk.link_flags |= FLAGS_LIRP_LILP;
	}

	/*
	 * Setting up the link speed
	 */
	switch (linkspeed) {
	case 0:
		break;

	case 1:
		linkspeed = (vpd->link_speed & LMT_1GB_CAPABLE) == 0 ? 0 :
		    LINK_SPEED_1G;
		break;

	case 2:
		linkspeed = (vpd->link_speed & LMT_2GB_CAPABLE) == 0 ? 0 :
		    LINK_SPEED_2G;
		break;

	case 4:
		linkspeed = (vpd->link_speed & LMT_4GB_CAPABLE) == 0 ? 0 :
		    LINK_SPEED_4G;
		break;

	case 8:
		linkspeed = (vpd->link_speed & LMT_8GB_CAPABLE) == 0 ? 0 :
		    LINK_SPEED_8G;
		break;

	case 10:
		linkspeed = (vpd->link_speed & LMT_10GB_CAPABLE) == 0 ? 0 :
		    LINK_SPEED_10G;
		break;

	case 16:
		linkspeed = (vpd->link_speed & LMT_16GB_CAPABLE) == 0 ? 0 :
		    LINK_SPEED_16G;
		break;

	case 32:
		linkspeed = (vpd->link_speed & LMT_32GB_CAPABLE) == 0 ? 0 :
		    LINK_SPEED_32G;
		break;

	default:
		linkspeed = 0;
		break;

	}

	if ((linkspeed > 0) && (vpd->feaLevelHigh >= 0x02)) {
		mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
		mb->un.varInitLnk.link_speed = linkspeed;
	}

	mb->un.varInitLnk.link_flags |= FLAGS_PREABORT_RETURN;

	mb->un.varInitLnk.fabric_AL_PA =
	    (uint8_t)cfg[CFG_ASSIGN_ALPA].current;
	mb->mbxCommand = (volatile uint8_t) MBX_INIT_LINK;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = emlxs_init_link_mbcmpl;
	mbq->port = (void *)&PPORT;


	return;

} /* emlxs_mb_init_link() */


/*
 * emlxs_mb_down_link	Issue a DOWN LINK mailbox command
 */
/*ARGSUSED*/
extern void
emlxs_mb_down_link(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX *mb = (MAILBOX *)mbq;

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_DOWN_LINK;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL;
	mbq->port = (void *)&PPORT;

	return;

} /* emlxs_mb_down_link() */


/*
 * emlxs_read_sparam_mbcmpl
 *
 * Completion handler for READ_SPARM64: copies the service parameters
 * from the DMA buffer into hba->sparam, captures the adapter WWNN and
 * WWPN on first use (or re-applies driver-overridden names), and
 * propagates the parameters to the physical and bound virtual ports.
 * Returns 1 when retried on MBXERR_NO_RESOURCES, else 0.
 */
static uint32_t
emlxs_read_sparam_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *mb;
	MATCHMAP *mp;
	emlxs_port_t *vport;
	int32_t i;
	uint32_t control;
	uint8_t null_wwn[8];

	mb = (MAILBOX *)mbq;
	if (mb->mbxStatus) {
		if (mb->mbxStatus == MBXERR_NO_RESOURCES) {
			/* Rebuild the command if its BDE was lost */
			control = mb->un.varRdSparm.un.sp64.tus.f.bdeSize;
			if (control == 0) {
				(void) emlxs_mb_read_sparam(hba, mbq);
			}
			emlxs_mb_retry(hba, mbq);
			return (1);
		}
		return (0);
	}
	mp = (MATCHMAP *)mbq->bp;
	if (!mp) {
		return (0);
	}

	bcopy((caddr_t)mp->virt, (caddr_t)&hba->sparam, sizeof (SERV_PARM));

	/* Initialize the node name and port name only once */
	bzero(null_wwn, 8);
	if ((bcmp((caddr_t)&hba->wwnn, (caddr_t)null_wwn, 8) == 0) &&
2294 (bcmp((caddr_t)&hba->wwpn, (caddr_t)null_wwn, 8) == 0)) { 2295 bcopy((caddr_t)&hba->sparam.nodeName, 2296 (caddr_t)&hba->wwnn, sizeof (NAME_TYPE)); 2297 2298 bcopy((caddr_t)&hba->sparam.portName, 2299 (caddr_t)&hba->wwpn, sizeof (NAME_TYPE)); 2300 } else { 2301 bcopy((caddr_t)&hba->wwnn, 2302 (caddr_t)&hba->sparam.nodeName, sizeof (NAME_TYPE)); 2303 2304 bcopy((caddr_t)&hba->wwpn, 2305 (caddr_t)&hba->sparam.portName, sizeof (NAME_TYPE)); 2306 } 2307 2308 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg, 2309 "SPARAM: EDTOV hba=%x mbox_csp=%x BBC=%x", 2310 hba->fc_edtov, hba->sparam.cmn.e_d_tov, 2311 hba->sparam.cmn.bbCreditlsb); 2312 2313 /* Initialize the physical port */ 2314 bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam, 2315 sizeof (SERV_PARM)); 2316 bcopy((caddr_t)&hba->wwpn, (caddr_t)&port->wwpn, 2317 sizeof (NAME_TYPE)); 2318 bcopy((caddr_t)&hba->wwnn, (caddr_t)&port->wwnn, 2319 sizeof (NAME_TYPE)); 2320 2321 /* Initialize the virtual ports */ 2322 for (i = 1; i < MAX_VPORTS; i++) { 2323 vport = &VPORT(i); 2324 if (! 
(vport->flag & EMLXS_PORT_BOUND)) { 2325 continue; 2326 } 2327 2328 bcopy((caddr_t)&hba->sparam, 2329 (caddr_t)&vport->sparam, 2330 sizeof (SERV_PARM)); 2331 2332 bcopy((caddr_t)&vport->wwnn, 2333 (caddr_t)&vport->sparam.nodeName, 2334 sizeof (NAME_TYPE)); 2335 2336 bcopy((caddr_t)&vport->wwpn, 2337 (caddr_t)&vport->sparam.portName, 2338 sizeof (NAME_TYPE)); 2339 } 2340 2341 return (0); 2342 2343 } /* emlxs_read_sparam_mbcmpl() */ 2344 2345 2346 /* 2347 * emlxs_mb_read_sparam Issue a READ SPARAM mailbox command 2348 */ 2349 extern uint32_t 2350 emlxs_mb_read_sparam(emlxs_hba_t *hba, MAILBOXQ *mbq) 2351 { 2352 MAILBOX *mb = (MAILBOX *)mbq; 2353 MATCHMAP *mp; 2354 2355 bzero((void *)mb, MAILBOX_CMD_BSIZE); 2356 2357 if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) { 2358 mb->mbxCommand = MBX_READ_SPARM64; 2359 2360 return (1); 2361 } 2362 2363 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (SERV_PARM); 2364 mb->un.varRdSparm.un.sp64.addrHigh = PADDR_HI(mp->phys); 2365 mb->un.varRdSparm.un.sp64.addrLow = PADDR_LO(mp->phys); 2366 mb->mbxCommand = MBX_READ_SPARM64; 2367 mb->mbxOwner = OWN_HOST; 2368 mbq->mbox_cmpl = emlxs_read_sparam_mbcmpl; 2369 mbq->port = (void *)&PPORT; 2370 2371 /* 2372 * save address for completion 2373 */ 2374 mbq->bp = (void *)mp; 2375 2376 return (0); 2377 2378 } /* emlxs_mb_read_sparam() */ 2379 2380 2381 /* 2382 * emlxs_mb_read_rpi Issue a READ RPI mailbox command 2383 */ 2384 /*ARGSUSED*/ 2385 extern uint32_t 2386 emlxs_mb_read_rpi(emlxs_hba_t *hba, uint32_t rpi, MAILBOXQ *mbq, 2387 uint32_t flag) 2388 { 2389 MAILBOX *mb = (MAILBOX *)mbq; 2390 2391 bzero((void *)mb, MAILBOX_CMD_BSIZE); 2392 2393 /* 2394 * Set flag to issue action on cmpl 2395 */ 2396 mb->un.varWords[30] = flag; 2397 mb->un.varRdRPI.reqRpi = (volatile uint16_t) rpi; 2398 mb->mbxCommand = MBX_READ_RPI64; 2399 mb->mbxOwner = OWN_HOST; 2400 mbq->mbox_cmpl = NULL; /* no cmpl needed */ 2401 mbq->port = (void *)&PPORT; 2402 2403 return (0); 2404 } /* emlxs_mb_read_rpi() 
*/


/*
 * emlxs_mb_read_xri Issue a READ XRI mailbox command
 */
/*ARGSUSED*/
extern uint32_t
emlxs_mb_read_xri(emlxs_hba_t *hba, uint32_t xri, MAILBOXQ *mbq,
    uint32_t flag)
{
	MAILBOX *mb = (MAILBOX *)mbq;

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	/*
	 * Set flag to issue action on cmpl
	 */
	mb->un.varWords[30] = flag;
	mb->un.varRdXRI.reqXri = (volatile uint16_t)xri;
	mb->mbxCommand = MBX_READ_XRI;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL; /* no cmpl needed */
	mbq->port = (void *)&PPORT;

	return (0);
} /* emlxs_mb_read_xri() */


/*
 * Validate a remote node's service parameters.
 *
 * Returns 0 when the parameters are acceptable (fPort set, or names
 * valid), 1 when the port name is all-zero, 2 when the node name is
 * all-zero.  As a side effect, a zero class 2/3 receive data size is
 * replaced with the common service parameter (CSP) buffer-to-buffer
 * receive size.
 */
/*ARGSUSED*/
extern int32_t
emlxs_mb_check_sparm(emlxs_hba_t *hba, SERV_PARM *nsp)
{
	uint32_t nsp_value;
	uint32_t *iptr;

	/* Fabric ports are accepted without further checks */
	if (nsp->cmn.fPort) {
		return (0);
	}

	/* Validate the service parameters */
	iptr = (uint32_t *)&nsp->portName;
	if (iptr[0] == 0 && iptr[1] == 0) {
		return (1);
	}

	iptr = (uint32_t *)&nsp->nodeName;
	if (iptr[0] == 0 && iptr[1] == 0) {
		return (2);
	}

	if (nsp->cls2.classValid) {
		/* 12-bit receive size: 4 MSb bits + 8 LSb bits */
		nsp_value =
		    ((nsp->cls2.rcvDataSizeMsb & 0x0f) << 8) | nsp->cls2.
		    rcvDataSizeLsb;

		/* If the receive data length is zero then set it to */
		/* the CSP value */
		if (!nsp_value) {
			nsp->cls2.rcvDataSizeMsb = nsp->cmn.bbRcvSizeMsb;
			nsp->cls2.rcvDataSizeLsb = nsp->cmn.bbRcvSizeLsb;
			return (0);
		}
	}

	if (nsp->cls3.classValid) {
		nsp_value =
		    ((nsp->cls3.rcvDataSizeMsb & 0x0f) << 8) | nsp->cls3.
		    rcvDataSizeLsb;

		/* If the receive data length is zero then set it to */
		/* the CSP value */
		if (!nsp_value) {
			nsp->cls3.rcvDataSizeMsb = nsp->cmn.bbRcvSizeMsb;
			nsp->cls3.rcvDataSizeLsb = nsp->cmn.bbRcvSizeLsb;
			return (0);
		}
	}

	return (0);

} /* emlxs_mb_check_sparm() */




/*
 * emlxs_mb_set_var Issue a special debug mbox command to write slim
 */
/*ARGSUSED*/
extern void
emlxs_mb_set_var(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t addr,
    uint32_t value)
{
	MAILBOX *mb = (MAILBOX *)mbq;

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	/* addr = 0x090597 is AUTO ABTS disable for ELS commands */
	/* addr = 0x052198 is DELAYED ABTS enable for ELS commands */
	/* addr = 0x100506 is for setting PCI MAX READ value */

	/*
	 * Always turn on DELAYED ABTS for ELS timeouts
	 */
	if ((addr == 0x052198) && (value == 0)) {
		value = 1;
	}

	mb->un.varWords[0] = addr;
	mb->un.varWords[1] = value;
	mb->mbxCommand = MBX_SET_VARIABLE;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL; /* no cmpl needed */
	mbq->port = (void *)&PPORT;

} /* emlxs_mb_set_var() */


/*
 * Disable Traffic Cop
 *
 * NOTE(review): the address/mask constants below are firmware-defined
 * magic values for the SET_VARIABLE command — meaning taken on faith
 * from the function name; confirm against Emulex firmware docs.
 */
/*ARGSUSED*/
extern void
emlxs_disable_tc(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX *mb = (MAILBOX *)mbq;

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	mb->un.varWords[0] = 0x50797;
	mb->un.varWords[1] = 0;
	mb->un.varWords[2] = 0xfffffffe;
	mb->mbxCommand = MBX_SET_VARIABLE;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL; /* no cmpl needed */
	mbq->port = (void *)&PPORT;

} /* emlxs_disable_tc() */


/*
 * Build a CONFIG_HBQ mailbox command from the SLI3 host buffer queue
 * table entry for hbq_id, including per-profile data and any
 * rctl/type masks.
 */
extern void
emlxs_mb_config_hbq(emlxs_hba_t *hba, MAILBOXQ *mbq, int hbq_id)
{
	HBQ_INIT_t *hbq;
	MAILBOX *mb = (MAILBOX *)mbq;
	int i;

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	hbq = &hba->sli.sli3.hbq_table[hbq_id];

	mb->un.varCfgHbq.hbqId = hbq_id;
	mb->un.varCfgHbq.numEntries = hbq->HBQ_numEntries;
	mb->un.varCfgHbq.recvNotify = hbq->HBQ_recvNotify;
	mb->un.varCfgHbq.numMask = hbq->HBQ_num_mask;
	mb->un.varCfgHbq.profile = hbq->HBQ_profile;
	mb->un.varCfgHbq.ringMask = hbq->HBQ_ringMask;
	mb->un.varCfgHbq.headerLen = hbq->HBQ_headerLen;
	mb->un.varCfgHbq.logEntry = hbq->HBQ_logEntry;
	mb->un.varCfgHbq.hbqaddrLow = PADDR_LO(hbq->HBQ_host_buf.phys);
	mb->un.varCfgHbq.hbqaddrHigh = PADDR_HI(hbq->HBQ_host_buf.phys);
	mb->mbxCommand = MBX_CONFIG_HBQ;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL;
	mbq->port = (void *)&PPORT;

	/* Copy info for profiles 2,3,5. Other profiles this area is reserved */
	if ((hbq->HBQ_profile == 2) || (hbq->HBQ_profile == 3) ||
	    (hbq->HBQ_profile == 5)) {
		bcopy(&hbq->profiles.allprofiles,
		    (void *)&mb->un.varCfgHbq.profiles.allprofiles,
		    sizeof (hbq->profiles));
	}

	/* Return if no rctl / type masks for this HBQ */
	if (!hbq->HBQ_num_mask) {
		return;
	}

	/* Otherwise we setup specific rctl / type masks for this HBQ */
	for (i = 0; i < hbq->HBQ_num_mask; i++) {
		mb->un.varCfgHbq.hbqMasks[i].tmatch =
		    hbq->HBQ_Masks[i].tmatch;
		mb->un.varCfgHbq.hbqMasks[i].tmask = hbq->HBQ_Masks[i].tmask;
		mb->un.varCfgHbq.hbqMasks[i].rctlmatch =
		    hbq->HBQ_Masks[i].rctlmatch;
		mb->un.varCfgHbq.hbqMasks[i].rctlmask =
		    hbq->HBQ_Masks[i].rctlmask;
	}

	return;

} /* emlxs_mb_config_hbq() */


/* SLI3 */
/*
 * Completion handler for MBX_REG_VPI: updates the port's VPI
 * registration flags under EMLXS_PORT_LOCK.  Always returns 0.
 */
static uint32_t
emlxs_reg_vpi_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
	MAILBOX *mb;

	mb = (MAILBOX *)mbq;

	mutex_enter(&EMLXS_PORT_LOCK);

	if (mb->mbxStatus != MBX_SUCCESS) {
		/* Registration failed: clear the pending-VPI flag */
		port->flag &= ~EMLXS_PORT_REG_VPI;
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "cmpl_reg_vpi:%d failed. status=%x",
		    port->vpi, mb->mbxStatus);
		return (0);
	}

	port->flag |= EMLXS_PORT_REG_VPI_CMPL;

	mutex_exit(&EMLXS_PORT_LOCK);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "cmpl_reg_vpi:%d ",
	    port->vpi);

	return (0);

} /* emlxs_reg_vpi_mbcmpl */


/* SLI3 */
/*
 * Register a virtual port identifier (NPIV) with the firmware.
 *
 * Preconditions checked: SLI mode <= 3, NPIV enabled, not already
 * registered, HBA in FC_READY state, and a non-zero port DID.
 * Returns 0 when the REG_VPI command was issued (or the port is
 * already registered), 1 on any failure.  On failure after the
 * flag was set, the mailbox is returned to the MEM_MBOX pool.
 */
extern uint32_t
emlxs_mb_reg_vpi(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	MAILBOXQ *mbq;
	MAILBOX *mb;
	int rval;

	if (hba->sli_mode > EMLXS_HBA_SLI3_MODE) {
		return (1);
	}

	if (!(hba->flag & FC_NPIV_ENABLED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "reg_vpi:%d failed. NPIV disabled.",
		    port->vpi);
		return (1);
	}

	if (port->flag & EMLXS_PORT_REG_VPI) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "reg_vpi:%d failed. Already registered.",
		    port->vpi);
		return (0);
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Can't reg vpi until ClearLA is sent */
	if (hba->state != FC_READY) {
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "reg_vpi:%d failed. HBA state not READY",
		    port->vpi);
		return (1);
	}

	/* Must have port id */
	if (!port->did) {
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "reg_vpi:%d failed. Port did=0",
		    port->vpi);
		return (1);
	}

	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "reg_vpi:%d failed. Unable to allocate mbox.",
		    port->vpi);
		return (1);
	}

	/* Mark registration in progress; cleared by cmpl on failure */
	port->flag |= EMLXS_PORT_REG_VPI;

	mutex_exit(&EMLXS_PORT_LOCK);

	mb = (MAILBOX *)mbq->mbox;
	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "reg_vpi:%d", port->vpi);

	mb->un.varRegVpi.vpi = port->vpi;
	mb->un.varRegVpi.sid = port->did;
	mb->mbxCommand = MBX_REG_VPI;
	mb->mbxOwner = OWN_HOST;

	mbq->sbp = (void *)sbp;
	mbq->mbox_cmpl = emlxs_reg_vpi_mbcmpl;
	mbq->context = NULL;
	mbq->port = (void *)port;

	rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
	if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "reg_vpi:%d failed. Unable to send request.",
		    port->vpi);

		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
		return (1);
	}

	return (0);

} /* emlxs_mb_reg_vpi() */


/* SLI3 */
/*
 * Completion handler for MBX_UNREG_VPI: on success, clears the
 * registration-complete flag under EMLXS_PORT_LOCK.  Always returns 0.
 */
static uint32_t
emlxs_unreg_vpi_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
	MAILBOX *mb;

	mb = (MAILBOX *)mbq->mbox;

	if (mb->mbxStatus != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "unreg_vpi_mbcmpl:%d failed. status=%x",
		    port->vpi, mb->mbxStatus);
		return (0);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "unreg_vpi_mbcmpl:%d", port->vpi);

	mutex_enter(&EMLXS_PORT_LOCK);
	port->flag &= ~EMLXS_PORT_REG_VPI_CMPL;
	mutex_exit(&EMLXS_PORT_LOCK);

	return (0);

} /* emlxs_unreg_vpi_mbcmpl() */


/* SLI3 */
/*
 * Unregister a virtual port identifier.  Requires both REG_VPI and
 * REG_VPI_CMPL flags set (registration fully completed).  Returns 0
 * if not registered or the command was issued, 1 on failure.
 */
extern uint32_t
emlxs_mb_unreg_vpi(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	MAILBOXQ *mbq;
	MAILBOX *mb;
	int rval;

	if (hba->sli_mode > EMLXS_HBA_SLI3_MODE) {
		return (1);
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	if (!(port->flag & EMLXS_PORT_REG_VPI) ||
	    !(port->flag & EMLXS_PORT_REG_VPI_CMPL)) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "unreg_vpi:%d failed. Not registered. flag=%x",
		    port->vpi, port->flag);

		mutex_exit(&EMLXS_PORT_LOCK);
		return (0);
	}

	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "unreg_vpi:%d failed. Unable to allocate mbox.",
		    port->vpi);

		mutex_exit(&EMLXS_PORT_LOCK);
		return (1);
	}

	port->flag &= ~EMLXS_PORT_REG_VPI;

	mutex_exit(&EMLXS_PORT_LOCK);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "unreg_vpi:%d", port->vpi);

	mb = (MAILBOX *)mbq->mbox;
	bzero((void *)mb, MAILBOX_CMD_BSIZE);
	mb->un.varUnregVpi.vpi = port->vpi;
	mb->mbxCommand = MBX_UNREG_VPI;
	mb->mbxOwner = OWN_HOST;

	mbq->mbox_cmpl = emlxs_unreg_vpi_mbcmpl;
	mbq->context = NULL;
	mbq->port = (void *)port;

	rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
	if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "unreg_vpi:%d failed. Unable to send request.",
		    port->vpi);

		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
		return (1);
	}

	return (0);

} /* emlxs_mb_unreg_vpi() */


/*
 * emlxs_mb_config_farp Issue a CONFIG FARP mailbox command
 *
 * NOTE(review): both the portname and nodename fields are copied from
 * hba->wwpn — nodename is NOT taken from hba->wwnn.  Looks like a
 * possible copy/paste slip; confirm intent before changing, since the
 * matching is also controlled by the portName/nodeName filter bits.
 */
extern void
emlxs_mb_config_farp(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX *mb = (MAILBOX *)mbq;

	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	bcopy((uint8_t *)&hba->wwpn,
	    (uint8_t *)&mb->un.varCfgFarp.portname, sizeof (NAME_TYPE));

	bcopy((uint8_t *)&hba->wwpn,
	    (uint8_t *)&mb->un.varCfgFarp.nodename, sizeof (NAME_TYPE));

	mb->un.varCfgFarp.filterEnable = 1;
	mb->un.varCfgFarp.portName = 1;
	mb->un.varCfgFarp.nodeName = 1;
	mb->mbxCommand = MBX_CONFIG_FARP;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL; /* no cmpl needed */
	mbq->port = (void *)&PPORT;

} /* emlxs_mb_config_farp() */


/*
 * emlxs_mb_read_nv Issue a READ CONFIG mailbox command
 *
 * SLI4 uses the larger SLI4 mailbox size and clears any non-embedded
 * payload pointer; SLI3 and earlier use the standard size.
 */
/*ARGSUSED*/
extern void
emlxs_mb_read_config(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX *mb = (MAILBOX *)mbq;

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
		mbq->nonembed = NULL;
	} else {
		bzero((void *)mb, MAILBOX_CMD_BSIZE);
	}

	mb->mbxCommand = MBX_READ_CONFIG;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL; /* no cmpl needed */
	mbq->port = (void *)&PPORT;

} /* emlxs_mb_read_config() */


/*
 * NAME: emlxs_mb_put
 *
 * FUNCTION: put mailbox cmd onto the mailbox queue.
 *
 * EXECUTION ENVIRONMENT: process and interrupt level.
 *
 * NOTES:
 *
 * CALLED FROM: EMLXS_SLI_ISSUE_MBOX_CMD
 *
 * INPUT: hba - pointer to the device info area
 *		mbp - pointer to mailbox queue entry of mailbox cmd
 *
 * RETURNS: NULL - command queued
 */
extern void
emlxs_mb_put(emlxs_hba_t *hba, MAILBOXQ *mbq)
{

	mutex_enter(&EMLXS_MBOX_LOCK);

	if (hba->mbox_queue.q_first) {

		/*
		 * queue command to end of list
		 */
		((MAILBOXQ *)hba->mbox_queue.q_last)->next = mbq;
		hba->mbox_queue.q_last = (uint8_t *)mbq;
		hba->mbox_queue.q_cnt++;
	} else {

		/*
		 * add command to empty list
		 */
		hba->mbox_queue.q_first = (uint8_t *)mbq;
		hba->mbox_queue.q_last = (uint8_t *)mbq;
		hba->mbox_queue.q_cnt = 1;
	}

	mbq->next = NULL;

	mutex_exit(&EMLXS_MBOX_LOCK);
} /* emlxs_mb_put() */


/*
 * NAME: emlxs_mb_get
 *
 * FUNCTION: get a mailbox command from mailbox command queue
 *
 * EXECUTION ENVIRONMENT: interrupt level.
 *
 * NOTES:
 *
 * CALLED FROM: emlxs_handle_mb_event
 *
 * INPUT: hba - pointer to the device info area
 *
 * RETURNS: NULL - no match found mb pointer - pointer to a mailbox command
 */
extern MAILBOXQ *
emlxs_mb_get(emlxs_hba_t *hba)
{
	MAILBOXQ *p_first = NULL;

	mutex_enter(&EMLXS_MBOX_LOCK);

	if (hba->mbox_queue.q_first) {
		/* Unlink the head entry */
		p_first = (MAILBOXQ *)hba->mbox_queue.q_first;
		hba->mbox_queue.q_first = (uint8_t *)p_first->next;

		if (hba->mbox_queue.q_first == NULL) {
			hba->mbox_queue.q_last = NULL;
			hba->mbox_queue.q_cnt = 0;
		} else {
			hba->mbox_queue.q_cnt--;
		}

		p_first->next = NULL;
	}

	mutex_exit(&EMLXS_MBOX_LOCK);

	return (p_first);

} /* emlxs_mb_get() */


/* EMLXS_PORT_LOCK must be held when calling this */
/*
 * Mark mbq as the active mailbox command: starts the mailbox timer,
 * records the issue mode in hba->mbox_queue_flag, and syncs any
 * attached DMA buffers (nonembed payload and bp) for the device.
 */
void
emlxs_mb_init(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t flag, uint32_t tmo)
{
	MATCHMAP *mp;

	HBASTATS.MboxIssued++;
	hba->mbox_queue_flag = flag;

	/* Set the Mailbox timer */
	if (hba->timer_tics) {
		hba->mbox_timer = hba->timer_tics + tmo;
	} else {
		hba->mbox_timer = DRV_TIME + tmo;
	}

	/* Initialize mailbox */
	mbq->flag &= MBQ_INIT_MASK;
	mbq->next = 0;

	mutex_enter(&EMLXS_MBOX_LOCK);
	hba->mbox_mbq = (void *)mbq;
	mutex_exit(&EMLXS_MBOX_LOCK);

	if (mbq->nonembed) {
		mp = (MATCHMAP *) mbq->nonembed;
		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORDEV);
	}

	if (mbq->bp) {
		mp = (MATCHMAP *) mbq->bp;
		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORDEV);
	}
	return;

} /* emlxs_mb_init() */


/*
 * Complete (or flush) the active mailbox command.
 *
 * Snapshots and clears the active-mailbox state under the locks,
 * updates stats, then performs mode-dependent cleanup: MBX_NOWAIT
 * frees the deferred buffers and the mailbox itself; MBX_WAIT copies
 * the result back into the caller's mailbox, sets mbxStatus, marks it
 * MBQ_COMPLETED and wakes any MBX_SLEEP waiter.  Finally runs the
 * deferred pkt/unsol-buffer/iocb completions.  mb may be NULL (flush).
 */
extern void
emlxs_mb_fini(emlxs_hba_t *hba, MAILBOX *mb, uint32_t mbxStatus)
{
	emlxs_port_t *port = &PPORT;
	MATCHMAP *mbox_nonembed;
	MATCHMAP *mbox_bp;
	emlxs_buf_t *mbox_sbp;
	fc_unsol_buf_t *mbox_ubp;
	IOCBQ *mbox_iocbq;
	MAILBOXQ *mbox_mbq;
	MAILBOX *mbox;
	uint32_t mbox_queue_flag;

	mutex_enter(&EMLXS_PORT_LOCK);

	if (hba->mbox_queue_flag) {
		HBASTATS.MboxCompleted++;

		if (mbxStatus != MBX_SUCCESS) {
			HBASTATS.MboxError++;
		} else {
			HBASTATS.MboxGood++;
		}
	}

	/* Snapshot the active mailbox state, then clear it */
	mutex_enter(&EMLXS_MBOX_LOCK);
	mbox_queue_flag = hba->mbox_queue_flag;
	mbox_mbq = (MAILBOXQ *)hba->mbox_mbq;

	if (mbox_mbq) {
		mbox_nonembed = (MATCHMAP *)mbox_mbq->nonembed;
		mbox_bp = (MATCHMAP *)mbox_mbq->bp;
		mbox_sbp = (emlxs_buf_t *)mbox_mbq->sbp;
		mbox_ubp = (fc_unsol_buf_t *)mbox_mbq->ubp;
		mbox_iocbq = (IOCBQ *)mbox_mbq->iocbq;
	} else {
		mbox_nonembed = NULL;
		mbox_bp = NULL;
		mbox_sbp = NULL;
		mbox_ubp = NULL;
		mbox_iocbq = NULL;
	}

	hba->mbox_mbq = NULL;
	hba->mbox_queue_flag = 0;
	hba->mbox_timer = 0;
	mutex_exit(&EMLXS_MBOX_LOCK);

	mutex_exit(&EMLXS_PORT_LOCK);

#ifdef SFCT_SUPPORT
	if (mb && mbox_sbp && mbox_sbp->fct_cmd) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_detail_msg,
		    "FCT mailbox: %s: status=%x",
		    emlxs_mb_cmd_xlate(mb->mbxCommand),
		    mb->mbxStatus);
	}
#endif /* SFCT_SUPPORT */

	if (mbox_queue_flag == MBX_NOWAIT) {
		/* Check for deferred MBUF cleanup */
		if (mbox_bp) {
			emlxs_mem_put(hba, MEM_BUF, (void *)mbox_bp);
		}
		if (mbox_nonembed) {
			emlxs_mem_put(hba, MEM_BUF,
			    (void *)mbox_nonembed);
		}
		if (mbox_mbq) {
			emlxs_mem_put(hba, MEM_MBOX,
			    (void *)mbox_mbq);
		}
	} else {	/* MBX_WAIT */
		if (mbox_mbq) {
			if (mb) {
				/* Copy the local mailbox provided back into */
				/* the original mailbox */
				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
					bcopy((uint32_t *)mb,
					    (uint32_t *)mbox_mbq,
					    MAILBOX_CMD_SLI4_BSIZE);
				} else {
					bcopy((uint32_t *)mb,
					    (uint32_t *)mbox_mbq,
					    MAILBOX_CMD_BSIZE);
				}
			}

			mbox = (MAILBOX *)mbox_mbq;
			mbox->mbxStatus = (uint16_t)mbxStatus;

			/* Mark mailbox complete */
			mbox_mbq->flag |= MBQ_COMPLETED;
		}

		/* Wake up the sleeping thread */
		if (mbox_queue_flag == MBX_SLEEP) {
			mutex_enter(&EMLXS_MBOX_LOCK);
			cv_broadcast(&EMLXS_MBOX_CV);
			mutex_exit(&EMLXS_MBOX_LOCK);
		}
	}

	emlxs_mb_deferred_cmpl(port, mbxStatus, mbox_sbp, mbox_ubp, mbox_iocbq);

	return;

} /* emlxs_mb_fini() */


/*
 * Run completions that were deferred until the mailbox finished:
 * a pending packet (sbp), an unsolicited buffer (ubp), and/or a
 * deferred IOCB transmit (iocbq).  A sentinel iocbq value of 1 means
 * "restricted login" and is treated as no iocb.
 */
extern void
emlxs_mb_deferred_cmpl(emlxs_port_t *port, uint32_t mbxStatus, emlxs_buf_t *sbp,
    fc_unsol_buf_t *ubp, IOCBQ *iocbq)
{
	emlxs_hba_t *hba = HBA;
	emlxs_ub_priv_t *ub_priv;

#ifdef SFCT_SUPPORT
	/* FCT registration-pending pkts only need the flag + wakeup */
	if (sbp && sbp->fct_cmd && (sbp->fct_state == EMLXS_FCT_REG_PENDING)) {
		mutex_enter(&EMLXS_PKT_LOCK);
		sbp->fct_flags |= EMLXS_FCT_REGISTERED;
		cv_broadcast(&EMLXS_PKT_CV);
		mutex_exit(&EMLXS_PKT_LOCK);

		sbp = NULL;
	}
#endif /* SFCT_SUPPORT */

	/* Check for deferred pkt completion */
	if (sbp) {
		if (mbxStatus != MBX_SUCCESS) {
			/* Set error status */
			sbp->pkt_flags &= ~PACKET_STATE_VALID;
			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_RESOURCES, 1);
		}

		emlxs_pkt_complete(sbp, -1, 0, 1);
	}

	/* Check for deferred ub completion */
	if (ubp) {
		ub_priv = ubp->ub_fca_private;

		if (mbxStatus == MBX_SUCCESS) {
			emlxs_ub_callback(ub_priv->port, ubp);
		} else {
			(void) emlxs_fca_ub_release(ub_priv->port, 1,
			    &ubp->ub_token);
		}
	}

	/* Special handling for restricted login */
	if (iocbq == (IOCBQ *)1) {
		iocbq = NULL;
	}

	/* Check for deferred iocb tx */
	if (iocbq) {
		/* Check for driver special codes */
		/* These indicate the mailbox is being flushed */
		if (mbxStatus >= MBX_DRIVER_RESERVED) {
			/* Set the error status and return it */
			iocbq->iocb.ULPSTATUS = IOSTAT_LOCAL_REJECT;
			iocbq->iocb.un.grsp.perr.statLocalError =
			    IOERR_ABORT_REQUESTED;

			emlxs_proc_channel_event(hba, iocbq->channel,
			    iocbq);
		} else {
			EMLXS_SLI_ISSUE_IOCB_CMD(hba, iocbq->channel,
			    iocbq);
		}
	}

	return;

} /* emlxs_mb_deferred_cmpl() */


/*
 * Flush the active mailbox command and every queued one, completing
 * each with MBX_HARDWARE_ERROR (if the HBA errored) or
 * MBX_NOT_FINISHED.
 */
extern void
emlxs_mb_flush(emlxs_hba_t *hba)
{
	MAILBOXQ *mbq;
	uint32_t mbxStatus;

	mbxStatus = (hba->flag & FC_HARDWARE_ERROR) ?
	    MBX_HARDWARE_ERROR : MBX_NOT_FINISHED;

	/* Flush out the active mbox command */
	emlxs_mb_fini(hba, NULL, mbxStatus);

	/* Flush out the queued mbox commands */
	while (mbq = (MAILBOXQ *)emlxs_mb_get(hba)) {
		/* Make each queued cmd look active so fini cleans it up */
		mutex_enter(&EMLXS_MBOX_LOCK);
		hba->mbox_queue_flag = MBX_NOWAIT;
		hba->mbox_mbq = (void *)mbq;
		mutex_exit(&EMLXS_MBOX_LOCK);

		emlxs_mb_fini(hba, NULL, mbxStatus);
	}

	return;

} /* emlxs_mb_flush */


/*
 * Translate a mailbox command code to a printable name.
 *
 * Unknown codes are formatted into a static buffer, so the returned
 * pointer may be overwritten by a later call with another unknown
 * code — not reentrant; intended for log messages only.
 */
extern char *
emlxs_mb_cmd_xlate(uint8_t cmd)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_mb_cmd_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (cmd == emlxs_mb_cmd_table[i].code) {
			return (emlxs_mb_cmd_table[i].string);
		}
	}

	(void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", cmd);
	return (buffer);

} /* emlxs_mb_cmd_xlate() */

/*
 * Translate a request-features bit mask into a concatenated string of
 * feature names.  Uses a static buffer (not reentrant).
 * NOTE(review): iterates bits 0..11 — assumes
 * emlxs_request_feature_table has at least 12 entries; confirm at the
 * table's definition.
 */
extern char *
emlxs_request_feature_xlate(uint32_t mask)
{
	static char buffer[64];
	uint32_t i;

	bzero((char *)&buffer[0], 64);
	for (i = 0; i < 12; i++) {
		if (mask & (1<<i)) {
			(void) strlcat(buffer,
			    emlxs_request_feature_table[i].string,
			    sizeof (buffer));
		}
	}
	return (buffer);
}