1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Emulex. All rights reserved. 24 * Use is subject to License terms. 25 */ 26 27 #define DEF_ICFG 1 28 29 #include <emlxs.h> 30 #include <emlxs_version.h> 31 32 char emlxs_revision[] = EMLXS_REVISION; 33 char emlxs_version[] = EMLXS_VERSION; 34 char emlxs_name[] = EMLXS_NAME; 35 char emlxs_label[] = EMLXS_LABEL; 36 37 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */ 38 EMLXS_MSG_DEF(EMLXS_SOLARIS_C); 39 40 #ifdef MENLO_SUPPORT 41 static int32_t emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp); 42 #endif /* MENLO_SUPPORT */ 43 44 static void emlxs_fca_attach(emlxs_hba_t *hba); 45 static void emlxs_fca_detach(emlxs_hba_t *hba); 46 static void emlxs_drv_banner(emlxs_hba_t *hba); 47 48 static int32_t emlxs_get_props(emlxs_hba_t *hba); 49 static int32_t emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp); 50 static int32_t emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp); 51 static int32_t emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp); 52 static int32_t emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp); 53 static int32_t emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp); 54 static int32_t emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp); 55 static int32_t emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp); 56 static int32_t emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp); 57 static uint32_t emlxs_add_instance(int32_t ddiinst); 58 static void emlxs_iodone(emlxs_buf_t *sbp); 59 static int emlxs_pm_lower_power(dev_info_t *dip); 60 static int emlxs_pm_raise_power(dev_info_t *dip); 61 static void emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, 62 uint32_t failed); 63 static void emlxs_iodone_server(void *arg1, void *arg2, void *arg3); 64 static uint32_t emlxs_integrity_check(emlxs_hba_t *hba); 65 static uint32_t emlxs_test(emlxs_hba_t *hba, uint32_t test_code, 66 uint32_t args, uint32_t *arg); 67 68 #ifdef SLI3_SUPPORT 69 static void emlxs_read_vport_prop(emlxs_hba_t *hba); 70 #endif /* SLI3_SUPPORT */ 71 72 73 /* 74 * Driver Entry Routines. 75 */ 76 static int32_t emlxs_detach(dev_info_t *, ddi_detach_cmd_t); 77 static int32_t emlxs_attach(dev_info_t *, ddi_attach_cmd_t); 78 static int32_t emlxs_open(dev_t *, int32_t, int32_t, cred_t *); 79 static int32_t emlxs_close(dev_t, int32_t, int32_t, cred_t *); 80 static int32_t emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t, 81 cred_t *, int32_t *); 82 static int32_t emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **); 83 84 85 /* 86 * FC_AL Transport Functions. 
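 * Most of these are exported to the Leadville (fctl) transport through the
 * emlxs_fca_tran structure defined later in this file.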
87 */ 88 static opaque_t emlxs_bind_port(dev_info_t *, fc_fca_port_info_t *, 89 fc_fca_bind_info_t *); 90 static void emlxs_unbind_port(opaque_t); 91 static void emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *); 92 static int32_t emlxs_get_cap(opaque_t, char *, void *); 93 static int32_t emlxs_set_cap(opaque_t, char *, void *); 94 static int32_t emlxs_get_map(opaque_t, fc_lilpmap_t *); 95 static int32_t emlxs_ub_alloc(opaque_t, uint64_t *, uint32_t, 96 uint32_t *, uint32_t); 97 static int32_t emlxs_ub_free(opaque_t, uint32_t, uint64_t *); 98 99 static opaque_t emlxs_get_device(opaque_t, fc_portid_t); 100 static int32_t emlxs_notify(opaque_t, uint32_t); 101 static void emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *); 102 103 /* 104 * Driver Internal Functions. 105 */ 106 107 static void emlxs_poll(emlxs_port_t *, emlxs_buf_t *); 108 static int32_t emlxs_power(dev_info_t *, int32_t, int32_t); 109 #ifdef EMLXS_I386 110 #ifdef S11 111 static int32_t emlxs_quiesce(dev_info_t *); 112 #endif 113 #endif 114 static int32_t emlxs_hba_resume(dev_info_t *); 115 static int32_t emlxs_hba_suspend(dev_info_t *); 116 static int32_t emlxs_hba_detach(dev_info_t *); 117 static int32_t emlxs_hba_attach(dev_info_t *); 118 static void emlxs_lock_destroy(emlxs_hba_t *); 119 static void emlxs_lock_init(emlxs_hba_t *); 120 static ULP_BDE64 *emlxs_pkt_to_bpl(ULP_BDE64 *, fc_packet_t *, 121 uint32_t, uint8_t); 122 123 char *emlxs_pm_components[] = { 124 "NAME=emlxx000", 125 "0=Device D3 State", 126 "1=Device D0 State" 127 }; 128 129 130 /* 131 * Default emlx dma limits 132 */ 133 ddi_dma_lim_t emlxs_dma_lim = { 134 (uint32_t)0, /* dlim_addr_lo */ 135 (uint32_t)0xffffffff, /* dlim_addr_hi */ 136 (uint_t)0x00ffffff, /* dlim_cntr_max */ 137 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dlim_burstsizes */ 138 1, /* dlim_minxfer */ 139 0x00ffffff /* dlim_dmaspeed */ 140 }; 141 142 /* 143 * Be careful when using these attributes; the defaults listed below are 144 * (almost) the most general case, permitting allocation in almost any 145 * way supported by the LightPulse family. The sole exception is the 146 * alignment specified as requiring memory allocation on a 4-byte boundary; 147 * the Lightpulse can DMA memory on any byte boundary. 148 * 149 * The LightPulse family currently is limited to 16M transfers; 150 * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields. 
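 *
 * Several variants are defined below: emlxs_dma_attr (the general case),
 * emlxs_dma_attr_ro (adds DDI_DMA_RELAXED_ORDERING for FCP data buffers),
 * emlxs_dma_attr_1sg (restricts dma_attr_sgllen to a single segment), and
 * emlxs_dma_attr_fcip_rsp (used for FCIP responses on MODREV3 and later).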
151 */ 152 ddi_dma_attr_t emlxs_dma_attr = { 153 DMA_ATTR_V0, /* dma_attr_version */ 154 (uint64_t)0, /* dma_attr_addr_lo */ 155 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */ 156 (uint64_t)0x00ffffff, /* dma_attr_count_max */ 157 1, /* dma_attr_align */ 158 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */ 159 1, /* dma_attr_minxfer */ 160 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */ 161 (uint64_t)0xffffffff, /* dma_attr_seg */ 162 EMLXS_SGLLEN, /* dma_attr_sgllen */ 163 1, /* dma_attr_granular */ 164 0 /* dma_attr_flags */ 165 }; 166 167 ddi_dma_attr_t emlxs_dma_attr_ro = { 168 DMA_ATTR_V0, /* dma_attr_version */ 169 (uint64_t)0, /* dma_attr_addr_lo */ 170 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */ 171 (uint64_t)0x00ffffff, /* dma_attr_count_max */ 172 1, /* dma_attr_align */ 173 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */ 174 1, /* dma_attr_minxfer */ 175 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */ 176 (uint64_t)0xffffffff, /* dma_attr_seg */ 177 EMLXS_SGLLEN, /* dma_attr_sgllen */ 178 1, /* dma_attr_granular */ 179 DDI_DMA_RELAXED_ORDERING /* dma_attr_flags */ 180 }; 181 182 ddi_dma_attr_t emlxs_dma_attr_1sg = { 183 DMA_ATTR_V0, /* dma_attr_version */ 184 (uint64_t)0, /* dma_attr_addr_lo */ 185 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */ 186 (uint64_t)0x00ffffff, /* dma_attr_count_max */ 187 1, /* dma_attr_align */ 188 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */ 189 1, /* dma_attr_minxfer */ 190 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */ 191 (uint64_t)0xffffffff, /* dma_attr_seg */ 192 1, /* dma_attr_sgllen */ 193 1, /* dma_attr_granular */ 194 0 /* dma_attr_flags */ 195 }; 196 197 #if (EMLXS_MODREV >= EMLXS_MODREV3) 198 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = { 199 DMA_ATTR_V0, /* dma_attr_version */ 200 (uint64_t)0, /* dma_attr_addr_lo */ 201 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */ 202 (uint64_t)0x00ffffff, /* dma_attr_count_max */ 203 1, /* dma_attr_align */ 204 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */ 205 1, /* dma_attr_minxfer */ 206 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */ 207 (uint64_t)0xffffffff, /* dma_attr_seg */ 208 EMLXS_SGLLEN, /* dma_attr_sgllen */ 209 1, /* dma_attr_granular */ 210 0 /* dma_attr_flags */ 211 }; 212 #endif /* >= EMLXS_MODREV3 */ 213 214 /* 215 * DDI access attributes for device 216 */ 217 ddi_device_acc_attr_t emlxs_dev_acc_attr = { 218 DDI_DEVICE_ATTR_V1, /* devacc_attr_version */ 219 DDI_STRUCTURE_LE_ACC, /* PCI is Little Endian */ 220 DDI_STRICTORDER_ACC, /* devacc_attr_dataorder */ 221 DDI_DEFAULT_ACC /* devacc_attr_access */ 222 }; 223 224 /* 225 * DDI access attributes for data 226 */ 227 ddi_device_acc_attr_t emlxs_data_acc_attr = { 228 DDI_DEVICE_ATTR_V1, /* devacc_attr_version */ 229 DDI_NEVERSWAP_ACC, /* don't swap for Data */ 230 DDI_STRICTORDER_ACC, /* devacc_attr_dataorder */ 231 DDI_DEFAULT_ACC /* devacc_attr_access */ 232 }; 233 234 /* 235 * Fill in the FC Transport structure, 236 * as defined in the Fibre Channel Transport Programmming Guide. 
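 *
 * Exactly one emlxs_fca_tran initialization below is compiled in, selected
 * by the EMLXS_MODREV level the driver is built against.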
 */
#if (EMLXS_MODREV == EMLXS_MODREV5)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_5,		/* fca_version, with SUN NPIV support */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	0,				/* fca_num_npivports */
	{0, 0, 0, 0, 0, 0, 0, 0},	/* Physical port WWPN */
	emlxs_bind_port,
	emlxs_unbind_port,
	emlxs_pkt_init,
	emlxs_pkt_uninit,
	emlxs_transport,
	emlxs_get_cap,
	emlxs_set_cap,
	emlxs_get_map,
	emlxs_transport,
	emlxs_ub_alloc,
	emlxs_ub_free,
	emlxs_ub_release,
	emlxs_pkt_abort,
	emlxs_reset,
	emlxs_port_manage,
	emlxs_get_device,
	emlxs_notify
};
#endif	/* EMLXS_MODREV5 */


#if (EMLXS_MODREV == EMLXS_MODREV4)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_4,		/* fca_version */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	emlxs_bind_port,
	emlxs_unbind_port,
	emlxs_pkt_init,
	emlxs_pkt_uninit,
	emlxs_transport,
	emlxs_get_cap,
	emlxs_set_cap,
	emlxs_get_map,
	emlxs_transport,
	emlxs_ub_alloc,
	emlxs_ub_free,
	emlxs_ub_release,
	emlxs_pkt_abort,
	emlxs_reset,
	emlxs_port_manage,
	emlxs_get_device,
	emlxs_notify
};
#endif	/* EMLXS_MODREV4 */


#if (EMLXS_MODREV == EMLXS_MODREV3)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_3,		/* fca_version */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	emlxs_bind_port,
	emlxs_unbind_port,
	emlxs_pkt_init,
	emlxs_pkt_uninit,
	emlxs_transport,
	emlxs_get_cap,
	emlxs_set_cap,
	emlxs_get_map,
	emlxs_transport,
	emlxs_ub_alloc,
	emlxs_ub_free,
	emlxs_ub_release,
	emlxs_pkt_abort,
	emlxs_reset,
	emlxs_port_manage,
	emlxs_get_device,
	emlxs_notify
};
#endif	/* EMLXS_MODREV3 */


#if (EMLXS_MODREV == EMLXS_MODREV2)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_2,		/* fca_version */
	MAX_VPORTS,			/* number of ports */
	sizeof (emlxs_buf_t),		/* pkt size */
	2048,				/* max cmds */
	&emlxs_dma_lim,			/* DMA limits */
	0,				/* iblock, to be filled in later */
	&emlxs_dma_attr,		/* dma attributes */
	&emlxs_data_acc_attr,		/* access attributes */
	emlxs_bind_port,
	emlxs_unbind_port,
	emlxs_pkt_init,
	emlxs_pkt_uninit,
	emlxs_transport,
	emlxs_get_cap,
	emlxs_set_cap,
	emlxs_get_map,
	emlxs_transport,
	emlxs_ub_alloc,
	emlxs_ub_free,
	emlxs_ub_release,
	emlxs_pkt_abort,
	emlxs_reset,
	emlxs_port_manage,
	emlxs_get_device,
	emlxs_notify
};
#endif	/* EMLXS_MODREV2 */

/*
 * This is needed when the module gets loaded by the kernel
 * so ddi library calls get resolved.
 */
#ifndef MODSYM_SUPPORT
char _depends_on[] = "misc/fctl";
#endif /* MODSYM_SUPPORT */

/*
 * state pointer which the implementation uses as a place to
 * hang a set of per-driver structures;
 */
void *emlxs_soft_state = NULL;

/*
 * Driver Global variables.
 */
int32_t emlxs_scsi_reset_delay = 3000;	/* milliseconds */

emlxs_device_t emlxs_device;

uint32_t emlxs_instance[MAX_FC_BRDS];	/* uses emlxs_device.lock */
uint32_t emlxs_instance_count = 0;	/* uses emlxs_device.lock */


/*
 * Single private "global" lock used to gain access to
 * the hba_list and/or any other case where we need to be
 * single-threaded.
 */
uint32_t emlxs_diag_state;

/*
 * CB ops vector. Used for administration only.
 */
static struct cb_ops emlxs_cb_ops = {
	emlxs_open,	/* cb_open */
	emlxs_close,	/* cb_close */
	nodev,		/* cb_strategy */
	nodev,		/* cb_print */
	nodev,		/* cb_dump */
	nodev,		/* cb_read */
	nodev,		/* cb_write */
	emlxs_ioctl,	/* cb_ioctl */
	nodev,		/* cb_devmap */
	nodev,		/* cb_mmap */
	nodev,		/* cb_segmap */
	nochpoll,	/* cb_chpoll */
	ddi_prop_op,	/* cb_prop_op */
	0,		/* cb_stream */
#ifdef _LP64
	D_64BIT | D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
#else
	D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
#endif
	CB_REV,		/* rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};

static struct dev_ops emlxs_ops = {
	DEVO_REV,	/* rev */
	0,		/* refcnt */
	emlxs_info,	/* getinfo */
	nulldev,	/* identify */
	nulldev,	/* probe */
	emlxs_attach,	/* attach */
	emlxs_detach,	/* detach */
	nodev,		/* reset */
	&emlxs_cb_ops,	/* devo_cb_ops */
	NULL,		/* devo_bus_ops */
	emlxs_power,	/* power ops */
#ifdef EMLXS_I386
#ifdef S11
	emlxs_quiesce,	/* quiesce */
#endif
#endif
};

#include <sys/modctl.h>
extern struct mod_ops mod_driverops;

#ifdef SAN_DIAG_SUPPORT
extern kmutex_t		sd_bucket_mutex;
extern sd_bucket_info_t	sd_bucket;
#endif /* SAN_DIAG_SUPPORT */

/*
 * Module linkage information for the kernel.
474 */ 475 static struct modldrv emlxs_modldrv = { 476 &mod_driverops, /* module type - driver */ 477 emlxs_name, /* module name */ 478 &emlxs_ops, /* driver ops */ 479 }; 480 481 482 /* 483 * Driver module linkage structure 484 */ 485 static struct modlinkage emlxs_modlinkage = { 486 MODREV_1, /* ml_rev - must be MODREV_1 */ 487 &emlxs_modldrv, /* ml_linkage */ 488 NULL /* end of driver linkage */ 489 }; 490 491 492 /* We only need to add entries for non-default return codes. */ 493 /* Entries do not need to be in order. */ 494 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */ 495 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE */ 496 497 emlxs_xlat_err_t emlxs_iostat_tbl[] = { 498 /* {f/w code, pkt_state, pkt_reason, */ 499 /* pkt_expln, pkt_action} */ 500 501 /* 0x00 - Do not remove */ 502 {IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE, 503 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 504 505 /* 0x01 - Do not remove */ 506 {IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE, 507 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 508 509 /* 0x02 */ 510 {IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS, 511 FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE}, 512 513 /* 514 * This is a default entry. 515 * The real codes are written dynamically in emlxs_els.c 516 */ 517 /* 0x09 */ 518 {IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE, 519 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 520 521 /* Special error code */ 522 /* 0x10 */ 523 {IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN, 524 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 525 526 /* Special error code */ 527 /* 0x11 */ 528 {IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, 529 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 530 531 /* CLASS 2 only */ 532 /* 0x04 */ 533 {IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR, 534 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 535 536 /* CLASS 2 only */ 537 /* 0x05 */ 538 {IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR, 539 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 540 541 /* CLASS 2 only */ 542 /* 0x06 */ 543 {IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY, 544 FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY}, 545 546 /* CLASS 2 only */ 547 /* 0x07 */ 548 {IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY, 549 FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY}, 550 }; 551 552 #define IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t)) 553 554 555 /* We only need to add entries for non-default return codes. */ 556 /* Entries do not need to be in order. 
*/ 557 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */ 558 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE} */ 559 560 emlxs_xlat_err_t emlxs_ioerr_tbl[] = { 561 /* {f/w code, pkt_state, pkt_reason, */ 562 /* pkt_expln, pkt_action} */ 563 564 /* 0x01 */ 565 {IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN, 566 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 567 568 /* 0x02 */ 569 {IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT, 570 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 571 572 /* 0x04 */ 573 {IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 574 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 575 576 /* 0x05 */ 577 {IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED, 578 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 579 580 /* 0x06 */ 581 {IOERR_ILLEGAL_COMMAND, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ, 582 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 583 584 /* 0x07 */ 585 {IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED, 586 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 587 588 /* 0x08 */ 589 {IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ, 590 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 591 592 /* 0x0B */ 593 {IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM, 594 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 595 596 /* 0x0D */ 597 {IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR, 598 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 599 600 /* 0x0E */ 601 {IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR, 602 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 603 604 /* 0x0F */ 605 {IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_FRAME, 606 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 607 608 /* 0x11 */ 609 {IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM, 610 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 611 612 /* 0x13 */ 613 {IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH, 614 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 615 616 /* 0x14 */ 617 {IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED, 618 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 619 620 /* 0x15 */ 621 {IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED, 622 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 623 624 /* 0x16 */ 625 {IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED, 626 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 627 628 /* 0x17 */ 629 {IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT, 630 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 631 632 /* 0x18 */ 633 {IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL, 634 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 635 636 /* 0x1A */ 637 {IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 638 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 639 640 /* 0x21 */ 641 {IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID, 642 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 643 644 /* Occurs at link down */ 645 /* 0x28 */ 646 {IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 647 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 648 649 /* 0xF0 */ 650 {IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT, 651 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 652 }; 653 654 #define IOERR_MAX (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t)) 655 656 657 658 emlxs_table_t emlxs_error_table[] = { 659 {IOERR_SUCCESS, "No error."}, 660 {IOERR_MISSING_CONTINUE, "Missing continue."}, 661 {IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."}, 662 {IOERR_INTERNAL_ERROR, "Internal error."}, 663 {IOERR_INVALID_RPI, "Invalid RPI."}, 664 {IOERR_NO_XRI, "No XRI."}, 665 {IOERR_ILLEGAL_COMMAND, "Illegal command."}, 666 {IOERR_XCHG_DROPPED, "Exchange dropped."}, 667 
	{IOERR_ILLEGAL_FIELD, "Illegal field."},
	{IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
	{IOERR_TX_DMA_FAILED, "TX DMA failed."},
	{IOERR_RX_DMA_FAILED, "RX DMA failed."},
	{IOERR_ILLEGAL_FRAME, "Illegal frame."},
	{IOERR_NO_RESOURCES, "No resources."},
	{IOERR_ILLEGAL_LENGTH, "Illegal length."},
	{IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
	{IOERR_ABORT_IN_PROGRESS, "Abort in progress."},
	{IOERR_ABORT_REQUESTED, "Abort requested."},
	{IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
	{IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
	{IOERR_RING_RESET, "Ring reset."},
	{IOERR_LINK_DOWN, "Link down."},
	{IOERR_CORRUPTED_DATA, "Corrupted data."},
	{IOERR_CORRUPTED_RPI, "Corrupted RPI."},
	{IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
	{IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
	{IOERR_DUP_FRAME, "Duplicate frame."},
	{IOERR_LINK_CONTROL_FRAME, "Link control frame."},
	{IOERR_BAD_HOST_ADDRESS, "Bad host address."},
	{IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
	{IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
	{IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
	{IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
	{IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
	{IOERR_XRIBUF_WAITING, "XRI buffer shortage"},
	{IOERR_XRIBUF_MISSING, "XRI buffer missing"},
	{IOERR_ROFFSET_INVAL, "Relative offset invalid."},
	{IOERR_ROFFSET_MISSING, "Relative offset missing."},
	{IOERR_INSUF_BUFFER, "Buffer too small."},
	{IOERR_MISSING_SI, "ELS frame missing SI"},
	{IOERR_MISSING_ES, "Exhausted burst without ES"},
	{IOERR_INCOMP_XFER, "Transfer incomplete."},
	{IOERR_ABORT_TIMEOUT, "Abort timeout."}

}; /* emlxs_error_table */


emlxs_table_t emlxs_state_table[] = {
	{IOSTAT_SUCCESS, "Success."},
	{IOSTAT_FCP_RSP_ERROR, "FCP response error."},
	{IOSTAT_REMOTE_STOP, "Remote stop."},
	{IOSTAT_LOCAL_REJECT, "Local reject."},
	{IOSTAT_NPORT_RJT, "NPort reject."},
	{IOSTAT_FABRIC_RJT, "Fabric reject."},
	{IOSTAT_NPORT_BSY, "Nport busy."},
	{IOSTAT_FABRIC_BSY, "Fabric busy."},
	{IOSTAT_INTERMED_RSP, "Intermediate response."},
	{IOSTAT_LS_RJT, "LS reject."},
	{IOSTAT_CMD_REJECT, "Cmd reject."},
	{IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
	{IOSTAT_NEED_BUF_ENTRY, "Need buffer entry."},
	{IOSTAT_ILLEGAL_FRAME_RCVD, "Illegal frame."},
	{IOSTAT_DATA_UNDERRUN, "Data underrun."},
	{IOSTAT_DATA_OVERRUN, "Data overrun."},

}; /* emlxs_state_table */


#ifdef MENLO_SUPPORT
emlxs_table_t emlxs_menlo_cmd_table[] = {
	{MENLO_CMD_INITIALIZE, "MENLO_INIT"},
	{MENLO_CMD_FW_DOWNLOAD, "MENLO_FW_DOWNLOAD"},
	{MENLO_CMD_READ_MEMORY, "MENLO_READ_MEM"},
	{MENLO_CMD_WRITE_MEMORY, "MENLO_WRITE_MEM"},
	{MENLO_CMD_FTE_INSERT, "MENLO_FTE_INSERT"},
	{MENLO_CMD_FTE_DELETE, "MENLO_FTE_DELETE"},

	{MENLO_CMD_GET_INIT, "MENLO_GET_INIT"},
	{MENLO_CMD_GET_CONFIG, "MENLO_GET_CONFIG"},
	{MENLO_CMD_GET_PORT_STATS, "MENLO_GET_PORT_STATS"},
	{MENLO_CMD_GET_LIF_STATS, "MENLO_GET_LIF_STATS"},
	{MENLO_CMD_GET_ASIC_STATS, "MENLO_GET_ASIC_STATS"},
	{MENLO_CMD_GET_LOG_CONFIG, "MENLO_GET_LOG_CFG"},
	{MENLO_CMD_GET_LOG_DATA, "MENLO_GET_LOG_DATA"},
	{MENLO_CMD_GET_PANIC_LOG, "MENLO_GET_PANIC_LOG"},
	{MENLO_CMD_GET_LB_MODE, "MENLO_GET_LB_MODE"},

	{MENLO_CMD_SET_PAUSE, "MENLO_SET_PAUSE"},
	{MENLO_CMD_SET_FCOE_COS, "MENLO_SET_FCOE_COS"},
	{MENLO_CMD_SET_UIF_PORT_TYPE,
"MENLO_SET_UIF_TYPE"}, 749 750 {MENLO_CMD_DIAGNOSTICS, "MENLO_DIAGNOSTICS"}, 751 {MENLO_CMD_LOOPBACK, "MENLO_LOOPBACK"}, 752 753 {MENLO_CMD_RESET, "MENLO_RESET"}, 754 {MENLO_CMD_SET_MODE, "MENLO_SET_MODE"} 755 756 }; /* emlxs_menlo_cmd_table */ 757 758 emlxs_table_t emlxs_menlo_rsp_table[] = { 759 {MENLO_RSP_SUCCESS, "SUCCESS"}, 760 {MENLO_ERR_FAILED, "FAILED"}, 761 {MENLO_ERR_INVALID_CMD, "INVALID_CMD"}, 762 {MENLO_ERR_INVALID_CREDIT, "INVALID_CREDIT"}, 763 {MENLO_ERR_INVALID_SIZE, "INVALID_SIZE"}, 764 {MENLO_ERR_INVALID_ADDRESS, "INVALID_ADDRESS"}, 765 {MENLO_ERR_INVALID_CONTEXT, "INVALID_CONTEXT"}, 766 {MENLO_ERR_INVALID_LENGTH, "INVALID_LENGTH"}, 767 {MENLO_ERR_INVALID_TYPE, "INVALID_TYPE"}, 768 {MENLO_ERR_INVALID_DATA, "INVALID_DATA"}, 769 {MENLO_ERR_INVALID_VALUE1, "INVALID_VALUE1"}, 770 {MENLO_ERR_INVALID_VALUE2, "INVALID_VALUE2"}, 771 {MENLO_ERR_INVALID_MASK, "INVALID_MASK"}, 772 {MENLO_ERR_CHECKSUM, "CHECKSUM_ERROR"}, 773 {MENLO_ERR_UNKNOWN_FCID, "UNKNOWN_FCID"}, 774 {MENLO_ERR_UNKNOWN_WWN, "UNKNOWN_WWN"}, 775 {MENLO_ERR_BUSY, "BUSY"}, 776 777 }; /* emlxs_menlo_rsp_table */ 778 779 #endif /* MENLO_SUPPORT */ 780 781 782 emlxs_table_t emlxs_mscmd_table[] = { 783 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"}, 784 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"}, 785 {MS_GTIN, "MS_GTIN"}, 786 {MS_GIEL, "MS_GIEL"}, 787 {MS_GIET, "MS_GIET"}, 788 {MS_GDID, "MS_GDID"}, 789 {MS_GMID, "MS_GMID"}, 790 {MS_GFN, "MS_GFN"}, 791 {MS_GIELN, "MS_GIELN"}, 792 {MS_GMAL, "MS_GMAL"}, 793 {MS_GIEIL, "MS_GIEIL"}, 794 {MS_GPL, "MS_GPL"}, 795 {MS_GPT, "MS_GPT"}, 796 {MS_GPPN, "MS_GPPN"}, 797 {MS_GAPNL, "MS_GAPNL"}, 798 {MS_GPS, "MS_GPS"}, 799 {MS_GPSC, "MS_GPSC"}, 800 {MS_GATIN, "MS_GATIN"}, 801 {MS_GSES, "MS_GSES"}, 802 {MS_GPLNL, "MS_GPLNL"}, 803 {MS_GPLT, "MS_GPLT"}, 804 {MS_GPLML, "MS_GPLML"}, 805 {MS_GPAB, "MS_GPAB"}, 806 {MS_GNPL, "MS_GNPL"}, 807 {MS_GPNL, "MS_GPNL"}, 808 {MS_GPFCP, "MS_GPFCP"}, 809 {MS_GPLI, "MS_GPLI"}, 810 {MS_GNID, "MS_GNID"}, 811 {MS_RIELN, "MS_RIELN"}, 812 {MS_RPL, "MS_RPL"}, 813 {MS_RPLN, "MS_RPLN"}, 814 {MS_RPLT, "MS_RPLT"}, 815 {MS_RPLM, "MS_RPLM"}, 816 {MS_RPAB, "MS_RPAB"}, 817 {MS_RPFCP, "MS_RPFCP"}, 818 {MS_RPLI, "MS_RPLI"}, 819 {MS_DPL, "MS_DPL"}, 820 {MS_DPLN, "MS_DPLN"}, 821 {MS_DPLM, "MS_DPLM"}, 822 {MS_DPLML, "MS_DPLML"}, 823 {MS_DPLI, "MS_DPLI"}, 824 {MS_DPAB, "MS_DPAB"}, 825 {MS_DPALL, "MS_DPALL"} 826 827 }; /* emlxs_mscmd_table */ 828 829 830 emlxs_table_t emlxs_ctcmd_table[] = { 831 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"}, 832 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"}, 833 {SLI_CTNS_GA_NXT, "GA_NXT"}, 834 {SLI_CTNS_GPN_ID, "GPN_ID"}, 835 {SLI_CTNS_GNN_ID, "GNN_ID"}, 836 {SLI_CTNS_GCS_ID, "GCS_ID"}, 837 {SLI_CTNS_GFT_ID, "GFT_ID"}, 838 {SLI_CTNS_GSPN_ID, "GSPN_ID"}, 839 {SLI_CTNS_GPT_ID, "GPT_ID"}, 840 {SLI_CTNS_GID_PN, "GID_PN"}, 841 {SLI_CTNS_GID_NN, "GID_NN"}, 842 {SLI_CTNS_GIP_NN, "GIP_NN"}, 843 {SLI_CTNS_GIPA_NN, "GIPA_NN"}, 844 {SLI_CTNS_GSNN_NN, "GSNN_NN"}, 845 {SLI_CTNS_GNN_IP, "GNN_IP"}, 846 {SLI_CTNS_GIPA_IP, "GIPA_IP"}, 847 {SLI_CTNS_GID_FT, "GID_FT"}, 848 {SLI_CTNS_GID_PT, "GID_PT"}, 849 {SLI_CTNS_RPN_ID, "RPN_ID"}, 850 {SLI_CTNS_RNN_ID, "RNN_ID"}, 851 {SLI_CTNS_RCS_ID, "RCS_ID"}, 852 {SLI_CTNS_RFT_ID, "RFT_ID"}, 853 {SLI_CTNS_RSPN_ID, "RSPN_ID"}, 854 {SLI_CTNS_RPT_ID, "RPT_ID"}, 855 {SLI_CTNS_RIP_NN, "RIP_NN"}, 856 {SLI_CTNS_RIPA_NN, "RIPA_NN"}, 857 {SLI_CTNS_RSNN_NN, "RSNN_NN"}, 858 {SLI_CTNS_DA_ID, "DA_ID"}, 859 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */ 860 861 }; /* emlxs_ctcmd_table */ 862 863 864 865 emlxs_table_t emlxs_rmcmd_table[] = { 866 {SLI_CT_RESPONSE_FS_ACC, 
"CT_ACC"}, 867 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"}, 868 {CT_OP_GSAT, "RM_GSAT"}, 869 {CT_OP_GHAT, "RM_GHAT"}, 870 {CT_OP_GPAT, "RM_GPAT"}, 871 {CT_OP_GDAT, "RM_GDAT"}, 872 {CT_OP_GPST, "RM_GPST"}, 873 {CT_OP_GDP, "RM_GDP"}, 874 {CT_OP_GDPG, "RM_GDPG"}, 875 {CT_OP_GEPS, "RM_GEPS"}, 876 {CT_OP_GLAT, "RM_GLAT"}, 877 {CT_OP_SSAT, "RM_SSAT"}, 878 {CT_OP_SHAT, "RM_SHAT"}, 879 {CT_OP_SPAT, "RM_SPAT"}, 880 {CT_OP_SDAT, "RM_SDAT"}, 881 {CT_OP_SDP, "RM_SDP"}, 882 {CT_OP_SBBS, "RM_SBBS"}, 883 {CT_OP_RPST, "RM_RPST"}, 884 {CT_OP_VFW, "RM_VFW"}, 885 {CT_OP_DFW, "RM_DFW"}, 886 {CT_OP_RES, "RM_RES"}, 887 {CT_OP_RHD, "RM_RHD"}, 888 {CT_OP_UFW, "RM_UFW"}, 889 {CT_OP_RDP, "RM_RDP"}, 890 {CT_OP_GHDR, "RM_GHDR"}, 891 {CT_OP_CHD, "RM_CHD"}, 892 {CT_OP_SSR, "RM_SSR"}, 893 {CT_OP_RSAT, "RM_RSAT"}, 894 {CT_OP_WSAT, "RM_WSAT"}, 895 {CT_OP_RSAH, "RM_RSAH"}, 896 {CT_OP_WSAH, "RM_WSAH"}, 897 {CT_OP_RACT, "RM_RACT"}, 898 {CT_OP_WACT, "RM_WACT"}, 899 {CT_OP_RKT, "RM_RKT"}, 900 {CT_OP_WKT, "RM_WKT"}, 901 {CT_OP_SSC, "RM_SSC"}, 902 {CT_OP_QHBA, "RM_QHBA"}, 903 {CT_OP_GST, "RM_GST"}, 904 {CT_OP_GFTM, "RM_GFTM"}, 905 {CT_OP_SRL, "RM_SRL"}, 906 {CT_OP_SI, "RM_SI"}, 907 {CT_OP_SRC, "RM_SRC"}, 908 {CT_OP_GPB, "RM_GPB"}, 909 {CT_OP_SPB, "RM_SPB"}, 910 {CT_OP_RPB, "RM_RPB"}, 911 {CT_OP_RAPB, "RM_RAPB"}, 912 {CT_OP_GBC, "RM_GBC"}, 913 {CT_OP_GBS, "RM_GBS"}, 914 {CT_OP_SBS, "RM_SBS"}, 915 {CT_OP_GANI, "RM_GANI"}, 916 {CT_OP_GRV, "RM_GRV"}, 917 {CT_OP_GAPBS, "RM_GAPBS"}, 918 {CT_OP_APBC, "RM_APBC"}, 919 {CT_OP_GDT, "RM_GDT"}, 920 {CT_OP_GDLMI, "RM_GDLMI"}, 921 {CT_OP_GANA, "RM_GANA"}, 922 {CT_OP_GDLV, "RM_GDLV"}, 923 {CT_OP_GWUP, "RM_GWUP"}, 924 {CT_OP_GLM, "RM_GLM"}, 925 {CT_OP_GABS, "RM_GABS"}, 926 {CT_OP_SABS, "RM_SABS"}, 927 {CT_OP_RPR, "RM_RPR"}, 928 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */ 929 930 }; /* emlxs_rmcmd_table */ 931 932 933 emlxs_table_t emlxs_elscmd_table[] = { 934 {ELS_CMD_ACC, "ACC"}, 935 {ELS_CMD_LS_RJT, "LS_RJT"}, 936 {ELS_CMD_PLOGI, "PLOGI"}, 937 {ELS_CMD_FLOGI, "FLOGI"}, 938 {ELS_CMD_LOGO, "LOGO"}, 939 {ELS_CMD_ABTX, "ABTX"}, 940 {ELS_CMD_RCS, "RCS"}, 941 {ELS_CMD_RES, "RES"}, 942 {ELS_CMD_RSS, "RSS"}, 943 {ELS_CMD_RSI, "RSI"}, 944 {ELS_CMD_ESTS, "ESTS"}, 945 {ELS_CMD_ESTC, "ESTC"}, 946 {ELS_CMD_ADVC, "ADVC"}, 947 {ELS_CMD_RTV, "RTV"}, 948 {ELS_CMD_RLS, "RLS"}, 949 {ELS_CMD_ECHO, "ECHO"}, 950 {ELS_CMD_TEST, "TEST"}, 951 {ELS_CMD_RRQ, "RRQ"}, 952 {ELS_CMD_PRLI, "PRLI"}, 953 {ELS_CMD_PRLO, "PRLO"}, 954 {ELS_CMD_SCN, "SCN"}, 955 {ELS_CMD_TPLS, "TPLS"}, 956 {ELS_CMD_GPRLO, "GPRLO"}, 957 {ELS_CMD_GAID, "GAID"}, 958 {ELS_CMD_FACT, "FACT"}, 959 {ELS_CMD_FDACT, "FDACT"}, 960 {ELS_CMD_NACT, "NACT"}, 961 {ELS_CMD_NDACT, "NDACT"}, 962 {ELS_CMD_QoSR, "QoSR"}, 963 {ELS_CMD_RVCS, "RVCS"}, 964 {ELS_CMD_PDISC, "PDISC"}, 965 {ELS_CMD_FDISC, "FDISC"}, 966 {ELS_CMD_ADISC, "ADISC"}, 967 {ELS_CMD_FARP, "FARP"}, 968 {ELS_CMD_FARPR, "FARPR"}, 969 {ELS_CMD_FAN, "FAN"}, 970 {ELS_CMD_RSCN, "RSCN"}, 971 {ELS_CMD_SCR, "SCR"}, 972 {ELS_CMD_LINIT, "LINIT"}, 973 {ELS_CMD_RNID, "RNID"}, 974 {ELS_CMD_AUTH, "AUTH"} 975 976 }; /* emlxs_elscmd_table */ 977 978 979 /* 980 * 981 * Device Driver Entry Routines 982 * 983 */ 984 985 #ifdef MODSYM_SUPPORT 986 static void emlxs_fca_modclose(); 987 static int emlxs_fca_modopen(); 988 emlxs_modsym_t emlxs_modsym; 989 990 static int 991 emlxs_fca_modopen() 992 { 993 int err; 994 995 if (emlxs_modsym.mod_fctl) { 996 return (EEXIST); 997 } 998 999 /* Leadville (fctl) */ 1000 err = 0; 1001 emlxs_modsym.mod_fctl = 1002 ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err); 1003 if 
(!emlxs_modsym.mod_fctl) { 1004 cmn_err(CE_WARN, 1005 "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d", 1006 DRIVER_NAME, err); 1007 1008 goto failed; 1009 } 1010 1011 err = 0; 1012 /* Check if the fctl fc_fca_attach is present */ 1013 emlxs_modsym.fc_fca_attach = 1014 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach", 1015 &err); 1016 if ((void *)emlxs_modsym.fc_fca_attach == NULL) { 1017 cmn_err(CE_WARN, 1018 "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME); 1019 goto failed; 1020 } 1021 1022 err = 0; 1023 /* Check if the fctl fc_fca_detach is present */ 1024 emlxs_modsym.fc_fca_detach = 1025 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach", 1026 &err); 1027 if ((void *)emlxs_modsym.fc_fca_detach == NULL) { 1028 cmn_err(CE_WARN, 1029 "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME); 1030 goto failed; 1031 } 1032 1033 err = 0; 1034 /* Check if the fctl fc_fca_init is present */ 1035 emlxs_modsym.fc_fca_init = 1036 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err); 1037 if ((void *)emlxs_modsym.fc_fca_init == NULL) { 1038 cmn_err(CE_WARN, 1039 "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME); 1040 goto failed; 1041 } 1042 1043 return (0); 1044 1045 failed: 1046 1047 emlxs_fca_modclose(); 1048 1049 return (ENODEV); 1050 1051 1052 } /* emlxs_fca_modopen() */ 1053 1054 1055 static void 1056 emlxs_fca_modclose() 1057 { 1058 if (emlxs_modsym.mod_fctl) { 1059 (void) ddi_modclose(emlxs_modsym.mod_fctl); 1060 emlxs_modsym.mod_fctl = 0; 1061 } 1062 1063 emlxs_modsym.fc_fca_attach = NULL; 1064 emlxs_modsym.fc_fca_detach = NULL; 1065 emlxs_modsym.fc_fca_init = NULL; 1066 1067 return; 1068 1069 } /* emlxs_fca_modclose() */ 1070 1071 #endif /* MODSYM_SUPPORT */ 1072 1073 1074 1075 /* 1076 * Global driver initialization, called once when driver is loaded 1077 */ 1078 int 1079 _init(void) 1080 { 1081 int ret; 1082 char buf[64]; 1083 1084 /* 1085 * First init call for this driver, 1086 * so initialize the emlxs_dev_ctl structure. 1087 */ 1088 bzero(&emlxs_device, sizeof (emlxs_device)); 1089 1090 #ifdef MODSYM_SUPPORT 1091 bzero(&emlxs_modsym, sizeof (emlxs_modsym_t)); 1092 #endif /* MODSYM_SUPPORT */ 1093 1094 (void) sprintf(buf, "%s_device mutex", DRIVER_NAME); 1095 mutex_init(&emlxs_device.lock, buf, MUTEX_DRIVER, NULL); 1096 1097 (void) drv_getparm(LBOLT, &emlxs_device.log_timestamp); 1098 emlxs_device.drv_timestamp = ddi_get_time(); 1099 1100 for (ret = 0; ret < MAX_FC_BRDS; ret++) { 1101 emlxs_instance[ret] = (uint32_t)-1; 1102 } 1103 1104 /* 1105 * Provide for one ddiinst of the emlxs_dev_ctl structure 1106 * for each possible board in the system. 1107 */ 1108 if ((ret = ddi_soft_state_init(&emlxs_soft_state, 1109 sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) { 1110 cmn_err(CE_WARN, 1111 "?%s: _init: ddi_soft_state_init failed. 
rval=%x", 1112 DRIVER_NAME, ret); 1113 1114 return (ret); 1115 } 1116 1117 #ifdef MODSYM_SUPPORT 1118 /* Open SFS */ 1119 (void) emlxs_fca_modopen(); 1120 #ifdef SFCT_SUPPORT 1121 /* Open FCT */ 1122 (void) emlxs_fct_modopen(); 1123 #endif /* SFCT_SUPPORT */ 1124 #endif /* MODSYM_SUPPORT */ 1125 1126 /* Setup devops for SFS */ 1127 MODSYM(fc_fca_init)(&emlxs_ops); 1128 1129 if ((ret = mod_install(&emlxs_modlinkage)) != 0) { 1130 (void) ddi_soft_state_fini(&emlxs_soft_state); 1131 #ifdef MODSYM_SUPPORT 1132 /* Close SFS */ 1133 emlxs_fca_modclose(); 1134 #ifdef SFCT_SUPPORT 1135 /* Close FCT */ 1136 emlxs_fct_modclose(); 1137 #endif /* SFCT_SUPPORT */ 1138 #endif /* MODSYM_SUPPORT */ 1139 1140 return (ret); 1141 } 1142 1143 #ifdef SAN_DIAG_SUPPORT 1144 (void) sprintf(buf, "%s_sd_bucket mutex", DRIVER_NAME); 1145 mutex_init(&sd_bucket_mutex, buf, MUTEX_DRIVER, NULL); 1146 #endif /* SAN_DIAG_SUPPORT */ 1147 1148 return (ret); 1149 1150 } /* _init() */ 1151 1152 1153 /* 1154 * Called when driver is unloaded. 1155 */ 1156 int 1157 _fini(void) 1158 { 1159 int ret; 1160 1161 if ((ret = mod_remove(&emlxs_modlinkage)) != 0) { 1162 return (ret); 1163 } 1164 #ifdef MODSYM_SUPPORT 1165 /* Close SFS */ 1166 emlxs_fca_modclose(); 1167 #ifdef SFCT_SUPPORT 1168 /* Close FCT */ 1169 emlxs_fct_modclose(); 1170 #endif /* SFCT_SUPPORT */ 1171 #endif /* MODSYM_SUPPORT */ 1172 1173 /* 1174 * Destroy the soft state structure 1175 */ 1176 (void) ddi_soft_state_fini(&emlxs_soft_state); 1177 1178 /* Destroy the global device lock */ 1179 mutex_destroy(&emlxs_device.lock); 1180 1181 #ifdef SAN_DIAG_SUPPORT 1182 mutex_destroy(&sd_bucket_mutex); 1183 #endif /* SAN_DIAG_SUPPORT */ 1184 1185 return (ret); 1186 1187 } /* _fini() */ 1188 1189 1190 1191 int 1192 _info(struct modinfo *modinfop) 1193 { 1194 1195 return (mod_info(&emlxs_modlinkage, modinfop)); 1196 1197 } /* _info() */ 1198 1199 1200 /* 1201 * Attach an ddiinst of an emlx host adapter. 1202 * Allocate data structures, initialize the adapter and we're ready to fly. 1203 */ 1204 static int 1205 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 1206 { 1207 emlxs_hba_t *hba; 1208 int ddiinst; 1209 int emlxinst; 1210 int rval; 1211 1212 switch (cmd) { 1213 case DDI_ATTACH: 1214 /* If successful this will set EMLXS_PM_IN_ATTACH */ 1215 rval = emlxs_hba_attach(dip); 1216 break; 1217 1218 case DDI_PM_RESUME: 1219 /* This will resume the driver */ 1220 rval = emlxs_pm_raise_power(dip); 1221 break; 1222 1223 case DDI_RESUME: 1224 /* This will resume the driver */ 1225 rval = emlxs_hba_resume(dip); 1226 break; 1227 1228 default: 1229 rval = DDI_FAILURE; 1230 } 1231 1232 if (rval == DDI_SUCCESS) { 1233 ddiinst = ddi_get_instance(dip); 1234 emlxinst = emlxs_get_instance(ddiinst); 1235 hba = emlxs_device.hba[emlxinst]; 1236 1237 if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) { 1238 1239 /* Enable driver dump feature */ 1240 mutex_enter(&EMLXS_PORT_LOCK); 1241 hba->flag |= FC_DUMP_SAFE; 1242 mutex_exit(&EMLXS_PORT_LOCK); 1243 } 1244 } 1245 1246 return (rval); 1247 1248 } /* emlxs_attach() */ 1249 1250 1251 /* 1252 * Detach/prepare driver to unload (see detach(9E)). 
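 *
 * Handles DDI_DETACH, DDI_SUSPEND and DDI_PM_SUSPEND requests. The request
 * is rejected while a driver dump is active; otherwise the driver dump
 * feature is disabled (FC_DUMP_SAFE is cleared) for the duration and
 * re-enabled if the request fails.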
1253 */ 1254 static int 1255 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 1256 { 1257 emlxs_hba_t *hba; 1258 emlxs_port_t *port; 1259 int ddiinst; 1260 int emlxinst; 1261 int rval; 1262 1263 ddiinst = ddi_get_instance(dip); 1264 emlxinst = emlxs_get_instance(ddiinst); 1265 hba = emlxs_device.hba[emlxinst]; 1266 1267 if (hba == NULL) { 1268 cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME); 1269 1270 return (DDI_FAILURE); 1271 } 1272 1273 if (hba == (emlxs_hba_t *)-1) { 1274 cmn_err(CE_WARN, "?%s: Detach: Device attach failed.", 1275 DRIVER_NAME); 1276 1277 return (DDI_FAILURE); 1278 } 1279 1280 port = &PPORT; 1281 rval = DDI_SUCCESS; 1282 1283 /* Check driver dump */ 1284 mutex_enter(&EMLXS_PORT_LOCK); 1285 1286 if (hba->flag & FC_DUMP_ACTIVE) { 1287 mutex_exit(&EMLXS_PORT_LOCK); 1288 1289 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1290 "emlxs_detach: Driver busy. Driver dump active."); 1291 1292 return (DDI_FAILURE); 1293 } 1294 1295 hba->flag &= ~FC_DUMP_SAFE; 1296 mutex_exit(&EMLXS_PORT_LOCK); 1297 1298 switch (cmd) { 1299 case DDI_DETACH: 1300 1301 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, 1302 "DDI_DETACH"); 1303 1304 rval = emlxs_hba_detach(dip); 1305 1306 if (rval != DDI_SUCCESS) { 1307 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1308 "Unable to detach."); 1309 } 1310 break; 1311 1312 1313 case DDI_PM_SUSPEND: 1314 1315 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, 1316 "DDI_PM_SUSPEND"); 1317 1318 /* This will suspend the driver */ 1319 rval = emlxs_pm_lower_power(dip); 1320 1321 if (rval != DDI_SUCCESS) { 1322 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1323 "Unable to lower power."); 1324 } 1325 1326 break; 1327 1328 1329 case DDI_SUSPEND: 1330 1331 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, 1332 "DDI_SUSPEND"); 1333 1334 /* Suspend the driver */ 1335 rval = emlxs_hba_suspend(dip); 1336 1337 if (rval != DDI_SUCCESS) { 1338 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1339 "Unable to suspend driver."); 1340 } 1341 break; 1342 1343 1344 default: 1345 cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. 
cmd=%x", 1346 DRIVER_NAME, cmd); 1347 rval = DDI_FAILURE; 1348 } 1349 1350 if (rval == DDI_FAILURE) { 1351 /* Re-Enable driver dump feature */ 1352 mutex_enter(&EMLXS_PORT_LOCK); 1353 hba->flag |= FC_DUMP_SAFE; 1354 mutex_exit(&EMLXS_PORT_LOCK); 1355 } 1356 1357 return (rval); 1358 1359 } /* emlxs_detach() */ 1360 1361 1362 /* EMLXS_PORT_LOCK must be held when calling this */ 1363 extern void 1364 emlxs_port_init(emlxs_port_t *port) 1365 { 1366 emlxs_hba_t *hba = HBA; 1367 1368 /* Initialize the base node */ 1369 bzero((caddr_t)&port->node_base, sizeof (NODELIST)); 1370 port->node_base.nlp_Rpi = 0; 1371 port->node_base.nlp_DID = 0xffffff; 1372 port->node_base.nlp_list_next = NULL; 1373 port->node_base.nlp_list_prev = NULL; 1374 port->node_base.nlp_active = 1; 1375 port->node_base.nlp_base = 1; 1376 port->node_count = 0; 1377 1378 if (!(port->flag & EMLXS_PORT_ENABLE)) { 1379 uint8_t dummy_wwn[8] = 1380 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 1381 1382 bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn, 1383 sizeof (NAME_TYPE)); 1384 bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn, 1385 sizeof (NAME_TYPE)); 1386 } 1387 1388 if (!(port->flag & EMLXS_PORT_CONFIG)) { 1389 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 256); 1390 (void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn, 256); 1391 } 1392 1393 bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam, 1394 sizeof (SERV_PARM)); 1395 bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName, 1396 sizeof (NAME_TYPE)); 1397 bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName, 1398 sizeof (NAME_TYPE)); 1399 1400 return; 1401 1402 } /* emlxs_port_init() */ 1403 1404 1405 1406 /* 1407 * emlxs_bind_port 1408 * 1409 * Arguments: 1410 * 1411 * dip: the dev_info pointer for the ddiinst 1412 * port_info: pointer to info handed back to the transport 1413 * bind_info: pointer to info from the transport 1414 * 1415 * Return values: a port handle for this port, NULL for failure 1416 * 1417 */ 1418 static opaque_t 1419 emlxs_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info, 1420 fc_fca_bind_info_t *bind_info) 1421 { 1422 emlxs_hba_t *hba; 1423 emlxs_port_t *port; 1424 emlxs_port_t *vport; 1425 int ddiinst; 1426 emlxs_vpd_t *vpd; 1427 emlxs_config_t *cfg; 1428 char *dptr; 1429 char buffer[16]; 1430 uint32_t length; 1431 uint32_t len; 1432 char topology[32]; 1433 char linkspeed[32]; 1434 1435 ddiinst = ddi_get_instance(dip); 1436 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 1437 port = &PPORT; 1438 1439 ddiinst = hba->ddiinst; 1440 vpd = &VPD; 1441 cfg = &CFG; 1442 1443 mutex_enter(&EMLXS_PORT_LOCK); 1444 1445 if (bind_info->port_num > 0) { 1446 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1447 if (!(hba->flag & FC_NPIV_ENABLED) || 1448 !(bind_info->port_npiv) || 1449 (bind_info->port_num > hba->vpi_max)) 1450 #elif (EMLXS_MODREV >= EMLXS_MODREV3) 1451 if (!(hba->flag & FC_NPIV_ENABLED) || 1452 (bind_info->port_num > hba->vpi_high)) 1453 #endif 1454 { 1455 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1456 "emlxs_port_bind: Port %d not supported.", 1457 bind_info->port_num); 1458 1459 mutex_exit(&EMLXS_PORT_LOCK); 1460 1461 port_info->pi_error = FC_OUTOFBOUNDS; 1462 return (NULL); 1463 } 1464 } 1465 1466 /* Get true port pointer */ 1467 port = &VPORT(bind_info->port_num); 1468 1469 if (port->tgt_mode) { 1470 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1471 "emlxs_port_bind: Port %d is in target mode.", 1472 bind_info->port_num); 1473 1474 mutex_exit(&EMLXS_PORT_LOCK); 1475 1476 port_info->pi_error = FC_OUTOFBOUNDS; 1477 
return (NULL); 1478 } 1479 1480 if (!port->ini_mode) { 1481 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1482 "emlxs_port_bind: Port %d is not in initiator mode.", 1483 bind_info->port_num); 1484 1485 mutex_exit(&EMLXS_PORT_LOCK); 1486 1487 port_info->pi_error = FC_OUTOFBOUNDS; 1488 return (NULL); 1489 } 1490 1491 /* Make sure the port is not already bound to the transport */ 1492 if (port->flag & EMLXS_PORT_BOUND) { 1493 1494 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1495 "emlxs_port_bind: Port %d already bound. flag=%x", 1496 bind_info->port_num, port->flag); 1497 1498 mutex_exit(&EMLXS_PORT_LOCK); 1499 1500 port_info->pi_error = FC_ALREADY; 1501 return (NULL); 1502 } 1503 1504 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1505 "fca_bind_port: Port %d: port_info=%p bind_info=%p", 1506 bind_info->port_num, port_info, bind_info); 1507 1508 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1509 if (bind_info->port_npiv) { 1510 bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn, 1511 sizeof (NAME_TYPE)); 1512 bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn, 1513 sizeof (NAME_TYPE)); 1514 if (port->snn[0] == 0) { 1515 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 1516 256); 1517 } 1518 1519 if (port->spn[0] == 0) { 1520 (void) sprintf((caddr_t)port->spn, "%s VPort-%d", 1521 (caddr_t)hba->spn, port->vpi); 1522 } 1523 port->flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE); 1524 } 1525 #endif /* >= EMLXS_MODREV5 */ 1526 1527 /* 1528 * Restricted login should apply both physical and 1529 * virtual ports. 1530 */ 1531 if (cfg[CFG_VPORT_RESTRICTED].current) { 1532 port->flag |= EMLXS_PORT_RESTRICTED; 1533 } 1534 1535 /* Perform generic port initialization */ 1536 emlxs_port_init(port); 1537 1538 /* Perform SFS specific initialization */ 1539 port->ulp_handle = bind_info->port_handle; 1540 port->ulp_statec_cb = bind_info->port_statec_cb; 1541 port->ulp_unsol_cb = bind_info->port_unsol_cb; 1542 port->ub_count = EMLXS_UB_TOKEN_OFFSET; 1543 port->ub_pool = NULL; 1544 1545 /* Update the port info structure */ 1546 1547 /* Set the topology and state */ 1548 if ((hba->state < FC_LINK_UP) || 1549 ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLE) || 1550 !(hba->flag & FC_NPIV_SUPPORTED)))) { 1551 port_info->pi_port_state = FC_STATE_OFFLINE; 1552 port_info->pi_topology = FC_TOP_UNKNOWN; 1553 } 1554 #ifdef MENLO_SUPPORT 1555 else if (hba->flag & FC_MENLO_MODE) { 1556 port_info->pi_port_state = FC_STATE_OFFLINE; 1557 port_info->pi_topology = FC_TOP_UNKNOWN; 1558 } 1559 #endif /* MENLO_SUPPORT */ 1560 else { 1561 /* Check for loop topology */ 1562 if (hba->topology == TOPOLOGY_LOOP) { 1563 port_info->pi_port_state = FC_STATE_LOOP; 1564 (void) strcpy(topology, ", loop"); 1565 1566 if (hba->flag & FC_FABRIC_ATTACHED) { 1567 port_info->pi_topology = FC_TOP_PUBLIC_LOOP; 1568 } else { 1569 port_info->pi_topology = FC_TOP_PRIVATE_LOOP; 1570 } 1571 } else { 1572 port_info->pi_topology = FC_TOP_FABRIC; 1573 port_info->pi_port_state = FC_STATE_ONLINE; 1574 (void) strcpy(topology, ", fabric"); 1575 } 1576 1577 /* Set the link speed */ 1578 switch (hba->linkspeed) { 1579 case 0: 1580 (void) strcpy(linkspeed, "Gb"); 1581 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED; 1582 break; 1583 1584 case LA_1GHZ_LINK: 1585 (void) strcpy(linkspeed, "1Gb"); 1586 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED; 1587 break; 1588 case LA_2GHZ_LINK: 1589 (void) strcpy(linkspeed, "2Gb"); 1590 port_info->pi_port_state |= FC_STATE_2GBIT_SPEED; 1591 break; 1592 case LA_4GHZ_LINK: 1593 (void) strcpy(linkspeed, 
"4Gb"); 1594 port_info->pi_port_state |= FC_STATE_4GBIT_SPEED; 1595 break; 1596 case LA_8GHZ_LINK: 1597 (void) strcpy(linkspeed, "8Gb"); 1598 port_info->pi_port_state |= FC_STATE_8GBIT_SPEED; 1599 break; 1600 case LA_10GHZ_LINK: 1601 (void) strcpy(linkspeed, "10Gb"); 1602 port_info->pi_port_state |= FC_STATE_10GBIT_SPEED; 1603 break; 1604 default: 1605 (void) sprintf(linkspeed, "unknown(0x%x)", 1606 hba->linkspeed); 1607 break; 1608 } 1609 1610 /* Adjusting port context for link up messages */ 1611 vport = port; 1612 port = &PPORT; 1613 if (vport->vpi == 0) { 1614 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "%s%s", 1615 linkspeed, topology); 1616 } else if (!(hba->flag & FC_NPIV_LINKUP)) { 1617 hba->flag |= FC_NPIV_LINKUP; 1618 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_npiv_link_up_msg, 1619 "%s%s", linkspeed, topology); 1620 } 1621 port = vport; 1622 1623 } 1624 1625 /* Save initial state */ 1626 port->ulp_statec = port_info->pi_port_state; 1627 1628 /* 1629 * The transport needs a copy of the common service parameters 1630 * for this port. The transport can get any updates through 1631 * the getcap entry point. 1632 */ 1633 bcopy((void *) &port->sparam, 1634 (void *) &port_info->pi_login_params.common_service, 1635 sizeof (SERV_PARM)); 1636 1637 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 1638 /* Swap the service parameters for ULP */ 1639 emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params. 1640 common_service); 1641 #endif /* EMLXS_MODREV2X */ 1642 1643 port_info->pi_login_params.common_service.btob_credit = 0xffff; 1644 1645 bcopy((void *) &port->wwnn, 1646 (void *) &port_info->pi_login_params.node_ww_name, 1647 sizeof (NAME_TYPE)); 1648 1649 bcopy((void *) &port->wwpn, 1650 (void *) &port_info->pi_login_params.nport_ww_name, 1651 sizeof (NAME_TYPE)); 1652 1653 /* 1654 * We need to turn off CLASS2 support. 1655 * Otherwise, FC transport will use CLASS2 as default class 1656 * and never try with CLASS3. 
1657 */ 1658 #if (EMLXS_MODREV >= EMLXS_MODREV3) 1659 #if (EMLXS_MODREVX >= EMLXS_MODREV3X) 1660 if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) { 1661 port_info->pi_login_params.class_1.class_opt &= ~0x0080; 1662 } 1663 1664 if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) { 1665 port_info->pi_login_params.class_2.class_opt &= ~0x0080; 1666 } 1667 #else /* EMLXS_SPARC or EMLXS_MODREV2X */ 1668 if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) { 1669 port_info->pi_login_params.class_1.class_opt &= ~0x8000; 1670 } 1671 1672 if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) { 1673 port_info->pi_login_params.class_2.class_opt &= ~0x8000; 1674 } 1675 #endif /* >= EMLXS_MODREV3X */ 1676 #endif /* >= EMLXS_MODREV3 */ 1677 1678 1679 #if (EMLXS_MODREV <= EMLXS_MODREV2) 1680 if ((port_info->pi_login_params.class_1.data[0]) & 0x80) { 1681 port_info->pi_login_params.class_1.data[0] &= ~0x80; 1682 } 1683 1684 if ((port_info->pi_login_params.class_2.data[0]) & 0x80) { 1685 port_info->pi_login_params.class_2.data[0] &= ~0x80; 1686 } 1687 #endif /* <= EMLXS_MODREV2 */ 1688 1689 /* Additional parameters */ 1690 port_info->pi_s_id.port_id = port->did; 1691 port_info->pi_s_id.priv_lilp_posit = 0; 1692 port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current; 1693 1694 /* Initialize the RNID parameters */ 1695 bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params)); 1696 1697 (void) sprintf((char *)port_info->pi_rnid_params.params.global_id, 1698 "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType, 1699 hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0], 1700 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3], 1701 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]); 1702 1703 port_info->pi_rnid_params.params.unit_type = RNID_HBA; 1704 port_info->pi_rnid_params.params.port_id = port->did; 1705 port_info->pi_rnid_params.params.ip_version = RNID_IPV4; 1706 1707 /* Initialize the port attributes */ 1708 bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs)); 1709 1710 (void) strcpy(port_info->pi_attrs.manufacturer, "Emulex"); 1711 1712 port_info->pi_rnid_params.status = FC_SUCCESS; 1713 1714 (void) strcpy(port_info->pi_attrs.serial_number, vpd->serial_num); 1715 1716 (void) sprintf(port_info->pi_attrs.firmware_version, "%s (%s)", 1717 vpd->fw_version, vpd->fw_label); 1718 1719 #ifdef EMLXS_I386 1720 (void) sprintf(port_info->pi_attrs.option_rom_version, 1721 "Boot:%s", vpd->boot_version); 1722 #else /* EMLXS_SPARC */ 1723 (void) sprintf(port_info->pi_attrs.option_rom_version, 1724 "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version); 1725 #endif /* EMLXS_I386 */ 1726 1727 1728 (void) sprintf(port_info->pi_attrs.driver_version, "%s (%s)", 1729 emlxs_version, emlxs_revision); 1730 1731 (void) strcpy(port_info->pi_attrs.driver_name, DRIVER_NAME); 1732 1733 port_info->pi_attrs.vendor_specific_id = 1734 ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX); 1735 1736 port_info->pi_attrs.supported_cos = SWAP_DATA32(FC_NS_CLASS3); 1737 1738 port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE; 1739 1740 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1741 1742 port_info->pi_rnid_params.params.num_attached = 0; 1743 1744 /* 1745 * Copy the serial number string (right most 16 chars) into the right 1746 * justified local buffer 1747 */ 1748 bzero(buffer, sizeof (buffer)); 1749 length = strlen(vpd->serial_num); 1750 len = (length > 16) ? 
	    16 : length;
	bcopy(&vpd->serial_num[(length - len)],
	    &buffer[(sizeof (buffer) - len)], len);

	port_info->pi_attrs.hba_fru_details.port_index = vpd->port_index;

#endif	/* >= EMLXS_MODREV5 */

#if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))

	port_info->pi_rnid_params.params.num_attached = 0;

	if (hba->flag & FC_NPIV_ENABLED) {
		uint8_t		byte;
		uint8_t		*wwpn;
		uint32_t	i;
		uint32_t	j;

		/* Copy the WWPN as a string into the local buffer */
		wwpn = (uint8_t *)&hba->wwpn;
		for (i = 0; i < 16; i++) {
			byte = *wwpn++;
			j = ((byte & 0xf0) >> 4);
			if (j <= 9) {
				buffer[i] =
				    (char)((uint8_t)'0' + (uint8_t)j);
			} else {
				buffer[i] =
				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
			}

			i++;
			j = (byte & 0xf);
			if (j <= 9) {
				buffer[i] =
				    (char)((uint8_t)'0' + (uint8_t)j);
			} else {
				buffer[i] =
				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
			}
		}

		port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
	} else {
		/* Copy the serial number string (right most 16 chars) */
		/* into the right justified local buffer */
		bzero(buffer, sizeof (buffer));
		length = strlen(vpd->serial_num);
		len = (length > 16) ? 16 : length;
		bcopy(&vpd->serial_num[(length - len)],
		    &buffer[(sizeof (buffer) - len)], len);

		port_info->pi_attrs.hba_fru_details.port_index =
		    vpd->port_index;
	}

#endif	/* == EMLXS_MODREV3 || EMLXS_MODREV4 */

#if (EMLXS_MODREV >= EMLXS_MODREV3)

	dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
	dptr[0] = buffer[0];
	dptr[1] = buffer[1];
	dptr[2] = buffer[2];
	dptr[3] = buffer[3];
	dptr[4] = buffer[4];
	dptr[5] = buffer[5];
	dptr[6] = buffer[6];
	dptr[7] = buffer[7];
	port_info->pi_attrs.hba_fru_details.high =
	    SWAP_DATA64(port_info->pi_attrs.hba_fru_details.high);

	dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
	dptr[0] = buffer[8];
	dptr[1] = buffer[9];
	dptr[2] = buffer[10];
	dptr[3] = buffer[11];
	dptr[4] = buffer[12];
	dptr[5] = buffer[13];
	dptr[6] = buffer[14];
	dptr[7] = buffer[15];
	port_info->pi_attrs.hba_fru_details.low =
	    SWAP_DATA64(port_info->pi_attrs.hba_fru_details.low);

#endif	/* >= EMLXS_MODREV3 */

#if (EMLXS_MODREV >= EMLXS_MODREV4)
	(void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
	    (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
	(void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
	    (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
#endif	/* >= EMLXS_MODREV4 */

	(void) sprintf(port_info->pi_attrs.hardware_version, "%x", vpd->biuRev);

	/* Set the hba speed limit */
	if (vpd->link_speed & LMT_10GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |=
		    FC_HBA_PORTSPEED_10GBIT;
	}
	if (vpd->link_speed & LMT_8GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
	}
	if (vpd->link_speed & LMT_4GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
	}
	if (vpd->link_speed & LMT_2GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
	}
	if (vpd->link_speed & LMT_1GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
	}

	/* Set the hba model info */
	(void)
strcpy(port_info->pi_attrs.model, hba->model_info.model); 1867 (void) strcpy(port_info->pi_attrs.model_description, 1868 hba->model_info.model_desc); 1869 1870 1871 /* Log information */ 1872 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1873 "Bind info: port_num = %d", bind_info->port_num); 1874 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1875 "Bind info: port_handle = %p", bind_info->port_handle); 1876 1877 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1878 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1879 "Bind info: port_npiv = %d", bind_info->port_npiv); 1880 #endif /* >= EMLXS_MODREV5 */ 1881 1882 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1883 "Port info: pi_topology = %x", port_info->pi_topology); 1884 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1885 "Port info: pi_error = %x", port_info->pi_error); 1886 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1887 "Port info: pi_port_state = %x", port_info->pi_port_state); 1888 1889 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1890 "Port info: port_id = %x", port_info->pi_s_id.port_id); 1891 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1892 "Port info: priv_lilp_posit = %x", 1893 port_info->pi_s_id.priv_lilp_posit); 1894 1895 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1896 "Port info: hard_addr = %x", 1897 port_info->pi_hard_addr.hard_addr); 1898 1899 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1900 "Port info: rnid.status = %x", 1901 port_info->pi_rnid_params.status); 1902 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1903 "Port info: rnid.global_id = %16s", 1904 port_info->pi_rnid_params.params.global_id); 1905 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1906 "Port info: rnid.unit_type = %x", 1907 port_info->pi_rnid_params.params.unit_type); 1908 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1909 "Port info: rnid.port_id = %x", 1910 port_info->pi_rnid_params.params.port_id); 1911 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1912 "Port info: rnid.num_attached = %x", 1913 port_info->pi_rnid_params.params.num_attached); 1914 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1915 "Port info: rnid.ip_version = %x", 1916 port_info->pi_rnid_params.params.ip_version); 1917 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1918 "Port info: rnid.udp_port = %x", 1919 port_info->pi_rnid_params.params.udp_port); 1920 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1921 "Port info: rnid.ip_addr = %16s", 1922 port_info->pi_rnid_params.params.ip_addr); 1923 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1924 "Port info: rnid.spec_id_resv = %x", 1925 port_info->pi_rnid_params.params.specific_id_resv); 1926 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1927 "Port info: rnid.topo_flags = %x", 1928 port_info->pi_rnid_params.params.topo_flags); 1929 1930 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1931 "Port info: manufacturer = %s", 1932 port_info->pi_attrs.manufacturer); 1933 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1934 "Port info: serial_num = %s", 1935 port_info->pi_attrs.serial_number); 1936 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1937 "Port info: model = %s", port_info->pi_attrs.model); 1938 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1939 "Port info: model_description = %s", 1940 port_info->pi_attrs.model_description); 1941 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1942 "Port info: hardware_version = %s", 1943 port_info->pi_attrs.hardware_version); 1944 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1945 "Port 
info: driver_version = %s", 1946 port_info->pi_attrs.driver_version); 1947 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1948 "Port info: option_rom_version = %s", 1949 port_info->pi_attrs.option_rom_version); 1950 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1951 "Port info: firmware_version = %s", 1952 port_info->pi_attrs.firmware_version); 1953 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1954 "Port info: driver_name = %s", 1955 port_info->pi_attrs.driver_name); 1956 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1957 "Port info: vendor_specific_id = %x", 1958 port_info->pi_attrs.vendor_specific_id); 1959 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1960 "Port info: supported_cos = %x", 1961 port_info->pi_attrs.supported_cos); 1962 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1963 "Port info: supported_speed = %x", 1964 port_info->pi_attrs.supported_speed); 1965 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1966 "Port info: max_frame_size = %x", 1967 port_info->pi_attrs.max_frame_size); 1968 1969 #if (EMLXS_MODREV >= EMLXS_MODREV3) 1970 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1971 "Port info: fru_port_index = %x", 1972 port_info->pi_attrs.hba_fru_details.port_index); 1973 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1974 "Port info: fru_high = %llx", 1975 port_info->pi_attrs.hba_fru_details.high); 1976 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1977 "Port info: fru_low = %llx", 1978 port_info->pi_attrs.hba_fru_details.low); 1979 #endif /* >= EMLXS_MODREV3 */ 1980 1981 #if (EMLXS_MODREV >= EMLXS_MODREV4) 1982 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1983 "Port info: sym_node_name = %s", 1984 port_info->pi_attrs.sym_node_name); 1985 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1986 "Port info: sym_port_name = %s", 1987 port_info->pi_attrs.sym_port_name); 1988 #endif /* >= EMLXS_MODREV4 */ 1989 1990 /* Set the bound flag */ 1991 port->flag |= EMLXS_PORT_BOUND; 1992 hba->num_of_ports++; 1993 1994 mutex_exit(&EMLXS_PORT_LOCK); 1995 1996 return ((opaque_t)port); 1997 1998 } /* emlxs_bind_port() */ 1999 2000 2001 static void 2002 emlxs_unbind_port(opaque_t fca_port_handle) 2003 { 2004 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2005 emlxs_hba_t *hba = HBA; 2006 uint32_t count; 2007 2008 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2009 "fca_unbind_port: port=%p", port); 2010 2011 /* Check ub buffer pools */ 2012 if (port->ub_pool) { 2013 mutex_enter(&EMLXS_UB_LOCK); 2014 2015 /* Wait up to 10 seconds for all ub pools to be freed */ 2016 count = 10 * 2; 2017 while (port->ub_pool && count) { 2018 mutex_exit(&EMLXS_UB_LOCK); 2019 delay(drv_usectohz(500000)); /* half second wait */ 2020 count--; 2021 mutex_enter(&EMLXS_UB_LOCK); 2022 } 2023 2024 if (port->ub_pool) { 2025 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2026 "fca_unbind_port: Unsolicited buffers still " 2027 "active. port=%p. 
Destroying...", port); 2028 2029 /* Destroy all pools */ 2030 while (port->ub_pool) { 2031 emlxs_ub_destroy(port, port->ub_pool); 2032 } 2033 } 2034 2035 mutex_exit(&EMLXS_UB_LOCK); 2036 } 2037 2038 /* Destroy & flush all port nodes, if they exist */ 2039 if (port->node_count) { 2040 (void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0); 2041 } 2042 #if (EMLXS_MODREV >= EMLXS_MODREV5) 2043 if ((hba->flag & FC_NPIV_ENABLED) && 2044 (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE))) { 2045 (void) emlxs_mb_unreg_vpi(port); 2046 } 2047 #endif 2048 2049 mutex_enter(&EMLXS_PORT_LOCK); 2050 2051 if (!(port->flag & EMLXS_PORT_BOUND)) { 2052 mutex_exit(&EMLXS_PORT_LOCK); 2053 return; 2054 } 2055 2056 port->flag &= ~EMLXS_PORT_BOUND; 2057 hba->num_of_ports--; 2058 2059 port->ulp_handle = 0; 2060 port->ulp_statec = FC_STATE_OFFLINE; 2061 port->ulp_statec_cb = NULL; 2062 port->ulp_unsol_cb = NULL; 2063 2064 mutex_exit(&EMLXS_PORT_LOCK); 2065 2066 return; 2067 2068 } /* emlxs_unbind_port() */ 2069 2070 2071 /*ARGSUSED*/ 2072 extern int 2073 emlxs_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep) 2074 { 2075 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2076 emlxs_hba_t *hba = HBA; 2077 emlxs_buf_t *sbp = (emlxs_buf_t *)pkt->pkt_fca_private; 2078 2079 if (!sbp) { 2080 return (FC_FAILURE); 2081 } 2082 2083 bzero((void *)sbp, sizeof (emlxs_buf_t)); 2084 2085 mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, (void *)hba->intr_arg); 2086 sbp->pkt_flags = 2087 PACKET_VALID | PACKET_RETURNED; 2088 sbp->port = port; 2089 sbp->pkt = pkt; 2090 sbp->iocbq.sbp = sbp; 2091 2092 return (FC_SUCCESS); 2093 2094 } /* emlxs_pkt_init() */ 2095 2096 2097 2098 static void 2099 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp) 2100 { 2101 emlxs_hba_t *hba = HBA; 2102 emlxs_config_t *cfg = &CFG; 2103 fc_packet_t *pkt = PRIV2PKT(sbp); 2104 uint32_t *iptr; 2105 2106 mutex_enter(&sbp->mtx); 2107 2108 /* Reinitialize */ 2109 sbp->pkt = pkt; 2110 sbp->port = port; 2111 sbp->bmp = NULL; 2112 sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED); 2113 sbp->iotag = 0; 2114 sbp->ticks = 0; 2115 sbp->abort_attempts = 0; 2116 sbp->fpkt = NULL; 2117 sbp->flush_count = 0; 2118 sbp->next = NULL; 2119 2120 if (!port->tgt_mode) { 2121 sbp->node = NULL; 2122 sbp->did = 0; 2123 sbp->lun = 0; 2124 sbp->class = 0; 2125 sbp->ring = NULL; 2126 sbp->class = 0; 2127 } 2128 2129 bzero((void *)&sbp->iocbq, sizeof (IOCBQ)); 2130 sbp->iocbq.sbp = sbp; 2131 2132 if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp || 2133 ddi_in_panic()) { 2134 sbp->pkt_flags |= PACKET_POLLED; 2135 } 2136 2137 /* Prepare the fc packet */ 2138 pkt->pkt_state = FC_PKT_SUCCESS; 2139 pkt->pkt_reason = 0; 2140 pkt->pkt_action = 0; 2141 pkt->pkt_expln = 0; 2142 pkt->pkt_data_resid = 0; 2143 pkt->pkt_resp_resid = 0; 2144 2145 /* Make sure all pkt's have a proper timeout */ 2146 if (!cfg[CFG_TIMEOUT_ENABLE].current) { 2147 /* This disables all IOCB on chip timeouts */ 2148 pkt->pkt_timeout = 0x80000000; 2149 } else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) { 2150 pkt->pkt_timeout = 60; 2151 } 2152 2153 /* Clear the response buffer */ 2154 if (pkt->pkt_rsplen) { 2155 /* Check for FCP commands */ 2156 if ((pkt->pkt_tran_type == FC_PKT_FCP_READ) || 2157 (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) { 2158 iptr = (uint32_t *)pkt->pkt_resp; 2159 iptr[2] = 0; 2160 iptr[3] = 0; 2161 } else { 2162 bzero(pkt->pkt_resp, pkt->pkt_rsplen); 2163 } 2164 } 2165 2166 mutex_exit(&sbp->mtx); 2167 2168 return; 2169 2170 } /* emlxs_initialize_pkt() 
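 *
 * Timeout handling above: when CFG_TIMEOUT_ENABLE is off, pkt_timeout is
 * forced to 0x80000000, which effectively disables the on-chip IOCB
 * timeouts; otherwise a ULP-supplied value of 0 or 0xffffffff is
 * normalized to a 60 second default, so every packet carries a usable
 * timeout before it is queued.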
*/ 2171 2172 2173 2174 /* 2175 * We may not need this routine 2176 */ 2177 /*ARGSUSED*/ 2178 extern int 2179 emlxs_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt) 2180 { 2181 emlxs_buf_t *sbp = PKT2PRIV(pkt); 2182 2183 if (!sbp) { 2184 return (FC_FAILURE); 2185 } 2186 2187 if (!(sbp->pkt_flags & PACKET_VALID)) { 2188 return (FC_FAILURE); 2189 } 2190 2191 sbp->pkt_flags &= ~PACKET_VALID; 2192 mutex_destroy(&sbp->mtx); 2193 2194 return (FC_SUCCESS); 2195 2196 } /* emlxs_pkt_uninit() */ 2197 2198 2199 static int 2200 emlxs_get_cap(opaque_t fca_port_handle, char *cap, void *ptr) 2201 { 2202 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2203 emlxs_hba_t *hba = HBA; 2204 int32_t rval; 2205 2206 if (!(port->flag & EMLXS_PORT_BOUND)) { 2207 return (FC_CAP_ERROR); 2208 } 2209 2210 if (strcmp(cap, FC_NODE_WWN) == 0) { 2211 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2212 "fca_get_cap: FC_NODE_WWN"); 2213 2214 bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE)); 2215 rval = FC_CAP_FOUND; 2216 2217 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) { 2218 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2219 "fca_get_cap: FC_LOGIN_PARAMS"); 2220 2221 /* 2222 * We need to turn off CLASS2 support. 2223 * Otherwise, FC transport will use CLASS2 as default class 2224 * and never try with CLASS3. 2225 */ 2226 hba->sparam.cls2.classValid = 0; 2227 2228 bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM)); 2229 2230 rval = FC_CAP_FOUND; 2231 2232 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) { 2233 int32_t *num_bufs; 2234 emlxs_config_t *cfg = &CFG; 2235 2236 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2237 "fca_get_cap: FC_CAP_UNSOL_BUF (%d)", 2238 cfg[CFG_UB_BUFS].current); 2239 2240 num_bufs = (int32_t *)ptr; 2241 2242 /* We multiply by MAX_VPORTS because ULP uses a */ 2243 /* formula to calculate ub bufs from this */ 2244 *num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS); 2245 2246 rval = FC_CAP_FOUND; 2247 2248 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) { 2249 int32_t *size; 2250 2251 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2252 "fca_get_cap: FC_CAP_PAYLOAD_SIZE"); 2253 2254 size = (int32_t *)ptr; 2255 *size = -1; 2256 rval = FC_CAP_FOUND; 2257 2258 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) { 2259 fc_reset_action_t *action; 2260 2261 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2262 "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR"); 2263 2264 action = (fc_reset_action_t *)ptr; 2265 *action = FC_RESET_RETURN_ALL; 2266 rval = FC_CAP_FOUND; 2267 2268 } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) { 2269 fc_dma_behavior_t *behavior; 2270 2271 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2272 "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF"); 2273 2274 behavior = (fc_dma_behavior_t *)ptr; 2275 *behavior = FC_ALLOW_STREAMING; 2276 rval = FC_CAP_FOUND; 2277 2278 } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) { 2279 fc_fcp_dma_t *fcp_dma; 2280 2281 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2282 "fca_get_cap: FC_CAP_FCP_DMA"); 2283 2284 fcp_dma = (fc_fcp_dma_t *)ptr; 2285 *fcp_dma = FC_DVMA_SPACE; 2286 rval = FC_CAP_FOUND; 2287 2288 } else { 2289 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2290 "fca_get_cap: Unknown capability. 
[%s]", cap); 2291 2292 rval = FC_CAP_ERROR; 2293 2294 } 2295 2296 return (rval); 2297 2298 } /* emlxs_get_cap() */ 2299 2300 2301 2302 static int 2303 emlxs_set_cap(opaque_t fca_port_handle, char *cap, void *ptr) 2304 { 2305 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2306 2307 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2308 "fca_set_cap: cap=[%s] arg=%p", cap, ptr); 2309 2310 return (FC_CAP_ERROR); 2311 2312 } /* emlxs_set_cap() */ 2313 2314 2315 static opaque_t 2316 emlxs_get_device(opaque_t fca_port_handle, fc_portid_t d_id) 2317 { 2318 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2319 2320 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2321 "fca_get_device: did=%x", d_id); 2322 2323 return (NULL); 2324 2325 } /* emlxs_get_device() */ 2326 2327 2328 static int32_t 2329 emlxs_notify(opaque_t fca_port_handle, uint32_t cmd) 2330 { 2331 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2332 2333 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x", 2334 cmd); 2335 2336 return (FC_SUCCESS); 2337 2338 } /* emlxs_notify */ 2339 2340 2341 2342 static int 2343 emlxs_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf) 2344 { 2345 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2346 emlxs_hba_t *hba = HBA; 2347 uint32_t lilp_length; 2348 2349 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2350 "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf, 2351 port->alpa_map[0], port->alpa_map[1], port->alpa_map[2], 2352 port->alpa_map[3], port->alpa_map[4]); 2353 2354 if (!(port->flag & EMLXS_PORT_BOUND)) { 2355 return (FC_NOMAP); 2356 } 2357 2358 if (hba->topology != TOPOLOGY_LOOP) { 2359 return (FC_NOMAP); 2360 } 2361 2362 /* Check if alpa map is available */ 2363 if (port->alpa_map[0] != 0) { 2364 mapbuf->lilp_magic = MAGIC_LILP; 2365 } else { /* No LILP map available */ 2366 2367 /* Set lilp_magic to MAGIC_LISA and this will */ 2368 /* trigger an ALPA scan in ULP */ 2369 mapbuf->lilp_magic = MAGIC_LISA; 2370 } 2371 2372 mapbuf->lilp_myalpa = port->did; 2373 2374 /* The first byte of the alpa_map is the lilp map length */ 2375 /* Add one to include the lilp length byte itself */ 2376 lilp_length = (uint32_t)port->alpa_map[0] + 1; 2377 2378 /* Make sure the max transfer is 128 bytes */ 2379 if (lilp_length > 128) { 2380 lilp_length = 128; 2381 } 2382 2383 /* We start copying from the lilp_length field */ 2384 /* in order to get a word aligned address */ 2385 bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length, 2386 lilp_length); 2387 2388 return (FC_SUCCESS); 2389 2390 } /* emlxs_get_map() */ 2391 2392 2393 2394 extern int 2395 emlxs_transport(opaque_t fca_port_handle, fc_packet_t *pkt) 2396 { 2397 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2398 emlxs_hba_t *hba = HBA; 2399 emlxs_buf_t *sbp; 2400 uint32_t rval; 2401 uint32_t pkt_flags; 2402 2403 /* Make sure adapter is online */ 2404 if (!(hba->flag & FC_ONLINE_MODE)) { 2405 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 2406 "Adapter offline."); 2407 2408 return (FC_OFFLINE); 2409 } 2410 2411 /* Validate packet */ 2412 sbp = PKT2PRIV(pkt); 2413 2414 /* Make sure ULP was told that the port was online */ 2415 if ((port->ulp_statec == FC_STATE_OFFLINE) && 2416 !(sbp->pkt_flags & PACKET_ALLOCATED)) { 2417 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 2418 "Port offline."); 2419 2420 return (FC_OFFLINE); 2421 } 2422 2423 if (sbp->port != port) { 2424 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2425 "Invalid port handle. 
sbp=%p port=%p flags=%x", sbp, 2426 sbp->port, sbp->pkt_flags); 2427 return (FC_BADPACKET); 2428 } 2429 2430 if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_RETURNED))) { 2431 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2432 "Invalid packet flags. sbp=%p port=%p flags=%x", sbp, 2433 sbp->port, sbp->pkt_flags); 2434 return (FC_BADPACKET); 2435 } 2436 #ifdef SFCT_SUPPORT 2437 if (port->tgt_mode && !sbp->fct_cmd && 2438 !(sbp->pkt_flags & PACKET_ALLOCATED)) { 2439 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2440 "Packet blocked. Target mode."); 2441 return (FC_TRANSPORT_ERROR); 2442 } 2443 #endif /* SFCT_SUPPORT */ 2444 2445 #ifdef IDLE_TIMER 2446 emlxs_pm_busy_component(hba); 2447 #endif /* IDLE_TIMER */ 2448 2449 /* Prepare the packet for transport */ 2450 emlxs_initialize_pkt(port, sbp); 2451 2452 /* Save a copy of the pkt flags. */ 2453 /* We will check the polling flag later */ 2454 pkt_flags = sbp->pkt_flags; 2455 2456 /* Send the packet */ 2457 switch (pkt->pkt_tran_type) { 2458 case FC_PKT_FCP_READ: 2459 case FC_PKT_FCP_WRITE: 2460 rval = emlxs_send_fcp_cmd(port, sbp); 2461 break; 2462 2463 case FC_PKT_IP_WRITE: 2464 case FC_PKT_BROADCAST: 2465 rval = emlxs_send_ip(port, sbp); 2466 break; 2467 2468 case FC_PKT_EXCHANGE: 2469 switch (pkt->pkt_cmd_fhdr.type) { 2470 case FC_TYPE_SCSI_FCP: 2471 rval = emlxs_send_fcp_cmd(port, sbp); 2472 break; 2473 2474 case FC_TYPE_FC_SERVICES: 2475 rval = emlxs_send_ct(port, sbp); 2476 break; 2477 2478 #ifdef MENLO_SUPPORT 2479 case EMLXS_MENLO_TYPE: 2480 rval = emlxs_send_menlo(port, sbp); 2481 break; 2482 #endif /* MENLO_SUPPORT */ 2483 2484 default: 2485 rval = emlxs_send_els(port, sbp); 2486 } 2487 break; 2488 2489 case FC_PKT_OUTBOUND: 2490 switch (pkt->pkt_cmd_fhdr.type) { 2491 #ifdef SFCT_SUPPORT 2492 case FC_TYPE_SCSI_FCP: 2493 rval = emlxs_send_fct_status(port, sbp); 2494 break; 2495 2496 case FC_TYPE_BASIC_LS: 2497 rval = emlxs_send_fct_abort(port, sbp); 2498 break; 2499 #endif /* SFCT_SUPPORT */ 2500 2501 case FC_TYPE_FC_SERVICES: 2502 rval = emlxs_send_ct_rsp(port, sbp); 2503 break; 2504 #ifdef MENLO_SUPPORT 2505 case EMLXS_MENLO_TYPE: 2506 rval = emlxs_send_menlo(port, sbp); 2507 break; 2508 #endif /* MENLO_SUPPORT */ 2509 2510 default: 2511 rval = emlxs_send_els_rsp(port, sbp); 2512 } 2513 break; 2514 2515 default: 2516 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2517 "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type); 2518 rval = FC_TRANSPORT_ERROR; 2519 break; 2520 } 2521 2522 /* Check if send was not successful */ 2523 if (rval != FC_SUCCESS) { 2524 /* Return packet to ULP */ 2525 mutex_enter(&sbp->mtx); 2526 sbp->pkt_flags |= PACKET_RETURNED; 2527 mutex_exit(&sbp->mtx); 2528 2529 return (rval); 2530 } 2531 2532 /* Check if this packet should be polled for completion before */ 2533 /* returning. This check must be done with a saved copy of the */ 2534 /* pkt_flags because the packet itself could already be freed from */ 2535 /* memory if it was not polled. 
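   For a non-polled packet the I/O completes asynchronously and the ULP
   pkt_comp callback may free the packet on another thread, so only this
   saved copy of pkt_flags can safely be tested once the send routine has
   returned.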
*/ 2536 if (pkt_flags & PACKET_POLLED) { 2537 emlxs_poll(port, sbp); 2538 } 2539 2540 return (FC_SUCCESS); 2541 2542 } /* emlxs_transport() */ 2543 2544 2545 2546 static void 2547 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp) 2548 { 2549 emlxs_hba_t *hba = HBA; 2550 fc_packet_t *pkt = PRIV2PKT(sbp); 2551 clock_t timeout; 2552 clock_t time; 2553 uint32_t att_bit; 2554 emlxs_ring_t *rp; 2555 2556 mutex_enter(&EMLXS_PORT_LOCK); 2557 hba->io_poll_count++; 2558 mutex_exit(&EMLXS_PORT_LOCK); 2559 2560 /* Check for panic situation */ 2561 if (ddi_in_panic()) { 2562 /* 2563 * In panic situations there will be one thread with 2564 * no interrrupts (hard or soft) and no timers 2565 */ 2566 2567 /* 2568 * We must manually poll everything in this thread 2569 * to keep the driver going. 2570 */ 2571 rp = (emlxs_ring_t *)sbp->ring; 2572 switch (rp->ringno) { 2573 case FC_FCP_RING: 2574 att_bit = HA_R0ATT; 2575 break; 2576 2577 case FC_IP_RING: 2578 att_bit = HA_R1ATT; 2579 break; 2580 2581 case FC_ELS_RING: 2582 att_bit = HA_R2ATT; 2583 break; 2584 2585 case FC_CT_RING: 2586 att_bit = HA_R3ATT; 2587 break; 2588 } 2589 2590 /* Keep polling the chip until our IO is completed */ 2591 /* Driver's timer will not function during panics. */ 2592 /* Therefore, timer checks must be performed manually. */ 2593 (void) drv_getparm(LBOLT, &time); 2594 timeout = time + drv_usectohz(1000000); 2595 while (!(sbp->pkt_flags & PACKET_COMPLETED)) { 2596 emlxs_sli_poll_intr(hba, att_bit); 2597 (void) drv_getparm(LBOLT, &time); 2598 2599 /* Trigger timer checks periodically */ 2600 if (time >= timeout) { 2601 emlxs_timer_checks(hba); 2602 timeout = time + drv_usectohz(1000000); 2603 } 2604 } 2605 } else { 2606 /* Wait for IO completion */ 2607 /* The driver's timer will detect */ 2608 /* any timeout and abort the I/O. */ 2609 mutex_enter(&EMLXS_PKT_LOCK); 2610 while (!(sbp->pkt_flags & PACKET_COMPLETED)) { 2611 cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK); 2612 } 2613 mutex_exit(&EMLXS_PKT_LOCK); 2614 } 2615 2616 /* Check for fcp reset pkt */ 2617 if (sbp->pkt_flags & PACKET_FCP_RESET) { 2618 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) { 2619 /* Flush the IO's on the chipq */ 2620 (void) emlxs_chipq_node_flush(port, 2621 &hba->ring[FC_FCP_RING], sbp->node, sbp); 2622 } else { 2623 /* Flush the IO's on the chipq for this lun */ 2624 (void) emlxs_chipq_lun_flush(port, 2625 sbp->node, sbp->lun, sbp); 2626 } 2627 2628 if (sbp->flush_count == 0) { 2629 emlxs_node_open(port, sbp->node, FC_FCP_RING); 2630 goto done; 2631 } 2632 2633 /* Set the timeout so the flush has time to complete */ 2634 timeout = emlxs_timeout(hba, 60); 2635 (void) drv_getparm(LBOLT, &time); 2636 while ((time < timeout) && sbp->flush_count > 0) { 2637 delay(drv_usectohz(500000)); 2638 (void) drv_getparm(LBOLT, &time); 2639 } 2640 2641 if (sbp->flush_count == 0) { 2642 emlxs_node_open(port, sbp->node, FC_FCP_RING); 2643 goto done; 2644 } 2645 2646 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2647 "sbp=%p flush_count=%d. 
Waiting...", sbp, 2648 sbp->flush_count); 2649 2650 /* Let's try this one more time */ 2651 2652 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) { 2653 /* Flush the IO's on the chipq */ 2654 (void) emlxs_chipq_node_flush(port, 2655 &hba->ring[FC_FCP_RING], sbp->node, sbp); 2656 } else { 2657 /* Flush the IO's on the chipq for this lun */ 2658 (void) emlxs_chipq_lun_flush(port, 2659 sbp->node, sbp->lun, sbp); 2660 } 2661 2662 /* Reset the timeout so the flush has time to complete */ 2663 timeout = emlxs_timeout(hba, 60); 2664 (void) drv_getparm(LBOLT, &time); 2665 while ((time < timeout) && sbp->flush_count > 0) { 2666 delay(drv_usectohz(500000)); 2667 (void) drv_getparm(LBOLT, &time); 2668 } 2669 2670 if (sbp->flush_count == 0) { 2671 emlxs_node_open(port, sbp->node, FC_FCP_RING); 2672 goto done; 2673 } 2674 2675 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2676 "sbp=%p flush_count=%d. Resetting link.", sbp, 2677 sbp->flush_count); 2678 2679 /* Let's first try to reset the link */ 2680 (void) emlxs_reset(port, FC_FCA_LINK_RESET); 2681 2682 if (sbp->flush_count == 0) { 2683 goto done; 2684 } 2685 2686 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2687 "sbp=%p flush_count=%d. Resetting HBA.", sbp, 2688 sbp->flush_count); 2689 2690 /* If that doesn't work, reset the adapter */ 2691 (void) emlxs_reset(port, FC_FCA_RESET); 2692 2693 if (sbp->flush_count != 0) { 2694 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2695 "sbp=%p flush_count=%d. Giving up.", sbp, 2696 sbp->flush_count); 2697 } 2698 2699 } 2700 /* PACKET_FCP_RESET */ 2701 done: 2702 2703 /* Packet has been declared completed and is now ready to be returned */ 2704 2705 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 2706 emlxs_unswap_pkt(sbp); 2707 #endif /* EMLXS_MODREV2X */ 2708 2709 mutex_enter(&sbp->mtx); 2710 sbp->pkt_flags |= PACKET_RETURNED; 2711 mutex_exit(&sbp->mtx); 2712 2713 mutex_enter(&EMLXS_PORT_LOCK); 2714 hba->io_poll_count--; 2715 mutex_exit(&EMLXS_PORT_LOCK); 2716 2717 /* Make ULP completion callback if required */ 2718 if (pkt->pkt_comp) { 2719 (*pkt->pkt_comp) (pkt); 2720 } 2721 2722 return; 2723 2724 } /* emlxs_poll() */ 2725 2726 2727 static int 2728 emlxs_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size, 2729 uint32_t *count, uint32_t type) 2730 { 2731 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2732 emlxs_hba_t *hba = HBA; 2733 2734 char *err = NULL; 2735 emlxs_unsol_buf_t *pool; 2736 emlxs_unsol_buf_t *new_pool; 2737 int32_t i; 2738 int result; 2739 uint32_t free_resv; 2740 uint32_t free; 2741 emlxs_config_t *cfg = &CFG; 2742 fc_unsol_buf_t *ubp; 2743 emlxs_ub_priv_t *ub_priv; 2744 2745 if (port->tgt_mode) { 2746 if (tokens && count) { 2747 bzero(tokens, (sizeof (uint64_t) * (*count))); 2748 } 2749 return (FC_SUCCESS); 2750 } 2751 2752 if (!(port->flag & EMLXS_PORT_BOUND)) { 2753 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2754 "ub_alloc failed: Port not bound! size=%x count=%d " 2755 "type=%x", size, *count, type); 2756 2757 return (FC_FAILURE); 2758 } 2759 2760 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2761 "ub_alloc: size=%x count=%d type=%x", size, *count, type); 2762 2763 if (count && (*count > EMLXS_MAX_UBUFS)) { 2764 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2765 "ub_alloc failed: Too many unsolicted buffers requested. 
" 2766 "count=%x", *count); 2767 2768 return (FC_FAILURE); 2769 2770 } 2771 2772 if (tokens == NULL) { 2773 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2774 "ub_alloc failed: Token array is NULL."); 2775 2776 return (FC_FAILURE); 2777 } 2778 2779 /* Clear the token array */ 2780 bzero(tokens, (sizeof (uint64_t) * (*count))); 2781 2782 free_resv = 0; 2783 free = *count; 2784 switch (type) { 2785 case FC_TYPE_BASIC_LS: 2786 err = "BASIC_LS"; 2787 break; 2788 case FC_TYPE_EXTENDED_LS: 2789 err = "EXTENDED_LS"; 2790 free = *count / 2; /* Hold 50% for normal use */ 2791 free_resv = *count - free; /* Reserve 50% for RSCN use */ 2792 break; 2793 case FC_TYPE_IS8802: 2794 err = "IS8802"; 2795 break; 2796 case FC_TYPE_IS8802_SNAP: 2797 err = "IS8802_SNAP"; 2798 2799 if (cfg[CFG_NETWORK_ON].current == 0) { 2800 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2801 "ub_alloc failed: IP support is disabled."); 2802 2803 return (FC_FAILURE); 2804 } 2805 break; 2806 case FC_TYPE_SCSI_FCP: 2807 err = "SCSI_FCP"; 2808 break; 2809 case FC_TYPE_SCSI_GPP: 2810 err = "SCSI_GPP"; 2811 break; 2812 case FC_TYPE_HIPP_FP: 2813 err = "HIPP_FP"; 2814 break; 2815 case FC_TYPE_IPI3_MASTER: 2816 err = "IPI3_MASTER"; 2817 break; 2818 case FC_TYPE_IPI3_SLAVE: 2819 err = "IPI3_SLAVE"; 2820 break; 2821 case FC_TYPE_IPI3_PEER: 2822 err = "IPI3_PEER"; 2823 break; 2824 case FC_TYPE_FC_SERVICES: 2825 err = "FC_SERVICES"; 2826 break; 2827 } 2828 2829 mutex_enter(&EMLXS_UB_LOCK); 2830 2831 /* 2832 * Walk through the list of the unsolicited buffers 2833 * for this ddiinst of emlx. 2834 */ 2835 2836 pool = port->ub_pool; 2837 2838 /* 2839 * The emlxs_ub_alloc() can be called more than once with different 2840 * size. We will reject the call if there are 2841 * duplicate size with the same FC-4 type. 
2842 */ 2843 while (pool) { 2844 if ((pool->pool_type == type) && 2845 (pool->pool_buf_size == size)) { 2846 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2847 "ub_alloc failed: Unsolicited buffer pool for %s " 2848 "of size 0x%x bytes already exists.", err, size); 2849 2850 result = FC_FAILURE; 2851 goto fail; 2852 } 2853 2854 pool = pool->pool_next; 2855 } 2856 2857 new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t), 2858 KM_SLEEP); 2859 if (new_pool == NULL) { 2860 result = FC_FAILURE; 2861 goto fail; 2862 } 2863 2864 new_pool->pool_next = NULL; 2865 new_pool->pool_type = type; 2866 new_pool->pool_buf_size = size; 2867 new_pool->pool_nentries = *count; 2868 new_pool->pool_available = new_pool->pool_nentries; 2869 new_pool->pool_free = free; 2870 new_pool->pool_free_resv = free_resv; 2871 new_pool->fc_ubufs = 2872 kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP); 2873 2874 if (new_pool->fc_ubufs == NULL) { 2875 kmem_free(new_pool, sizeof (emlxs_unsol_buf_t)); 2876 result = FC_FAILURE; 2877 goto fail; 2878 } 2879 2880 new_pool->pool_first_token = port->ub_count; 2881 new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries; 2882 2883 for (i = 0; i < new_pool->pool_nentries; i++) { 2884 ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i]; 2885 ubp->ub_port_handle = port->ulp_handle; 2886 ubp->ub_token = (uint64_t)((unsigned long)ubp); 2887 ubp->ub_bufsize = size; 2888 ubp->ub_class = FC_TRAN_CLASS3; 2889 ubp->ub_port_private = NULL; 2890 ubp->ub_fca_private = 2891 (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t), 2892 KM_SLEEP); 2893 2894 if (ubp->ub_fca_private == NULL) { 2895 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2896 "ub_alloc failed: Unable to allocate fca_private " 2897 "object."); 2898 2899 result = FC_FAILURE; 2900 goto fail; 2901 } 2902 2903 /* 2904 * Initialize emlxs_ub_priv_t 2905 */ 2906 ub_priv = ubp->ub_fca_private; 2907 ub_priv->ubp = ubp; 2908 ub_priv->port = port; 2909 ub_priv->flags = EMLXS_UB_FREE; 2910 ub_priv->available = 1; 2911 ub_priv->pool = new_pool; 2912 ub_priv->time = 0; 2913 ub_priv->timeout = 0; 2914 ub_priv->token = port->ub_count; 2915 ub_priv->cmd = 0; 2916 2917 /* Allocate the actual buffer */ 2918 ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP); 2919 2920 /* Check if we were not successful */ 2921 if (ubp->ub_buffer == NULL) { 2922 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2923 "ub_alloc failed: Unable to allocate buffer."); 2924 2925 /* Free the private area of the current object */ 2926 kmem_free(ubp->ub_fca_private, 2927 sizeof (emlxs_ub_priv_t)); 2928 2929 result = FC_FAILURE; 2930 goto fail; 2931 } 2932 2933 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 2934 "ub_alloc: buffer=%p token=%x size=%x type=%x ", ubp, 2935 ub_priv->token, ubp->ub_bufsize, type); 2936 2937 tokens[i] = (uint64_t)((unsigned long)ubp); 2938 port->ub_count++; 2939 } 2940 2941 /* Add the pool to the top of the pool list */ 2942 new_pool->pool_prev = NULL; 2943 new_pool->pool_next = port->ub_pool; 2944 2945 if (port->ub_pool) { 2946 port->ub_pool->pool_prev = new_pool; 2947 } 2948 port->ub_pool = new_pool; 2949 2950 /* Set the post counts */ 2951 if (type == FC_TYPE_IS8802_SNAP) { 2952 MAILBOXQ *mbox; 2953 2954 port->ub_post[FC_IP_RING] += new_pool->pool_nentries; 2955 2956 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba, 2957 MEM_MBOX | MEM_PRI))) { 2958 emlxs_mb_config_farp(hba, (MAILBOX *)mbox); 2959 if (emlxs_sli_issue_mbox_cmd(hba, (MAILBOX *)mbox, 2960 MBX_NOWAIT, 0) != MBX_BUSY) { 2961 (void) emlxs_mem_put(hba, 
MEM_MBOX, 2962 (uint8_t *)mbox); 2963 } 2964 } 2965 port->flag |= EMLXS_PORT_IP_UP; 2966 } else if (type == FC_TYPE_EXTENDED_LS) { 2967 port->ub_post[FC_ELS_RING] += new_pool->pool_nentries; 2968 } else if (type == FC_TYPE_FC_SERVICES) { 2969 port->ub_post[FC_CT_RING] += new_pool->pool_nentries; 2970 } 2971 2972 mutex_exit(&EMLXS_UB_LOCK); 2973 2974 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2975 "%d unsolicited buffers allocated for %s of size 0x%x bytes.", 2976 *count, err, size); 2977 2978 return (FC_SUCCESS); 2979 2980 fail: 2981 2982 /* Clean the pool */ 2983 for (i = 0; tokens[i] != NULL; i++) { 2984 /* Get the buffer object */ 2985 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]); 2986 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 2987 2988 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 2989 "ub_alloc failed: Freed buffer=%p token=%x size=%x " 2990 "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type); 2991 2992 /* Free the actual buffer */ 2993 kmem_free(ubp->ub_buffer, ubp->ub_bufsize); 2994 2995 /* Free the private area of the buffer object */ 2996 kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t)); 2997 2998 tokens[i] = 0; 2999 port->ub_count--; 3000 } 3001 3002 /* Free the array of buffer objects in the pool */ 3003 kmem_free((caddr_t)new_pool->fc_ubufs, 3004 (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries)); 3005 3006 /* Free the pool object */ 3007 kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t)); 3008 3009 mutex_exit(&EMLXS_UB_LOCK); 3010 3011 return (result); 3012 3013 } /* emlxs_ub_alloc() */ 3014 3015 3016 static void 3017 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp) 3018 { 3019 emlxs_hba_t *hba = HBA; 3020 emlxs_ub_priv_t *ub_priv; 3021 fc_packet_t *pkt; 3022 ELS_PKT *els; 3023 uint32_t sid; 3024 3025 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 3026 3027 if (hba->state <= FC_LINK_DOWN) { 3028 return; 3029 } 3030 3031 if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) + 3032 sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) { 3033 return; 3034 } 3035 3036 sid = SWAP_DATA24_LO(ubp->ub_frame.s_id); 3037 3038 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg, 3039 "%s dropped: sid=%x. 
Rejecting.", 3040 emlxs_elscmd_xlate(ub_priv->cmd), sid); 3041 3042 pkt->pkt_tran_type = FC_PKT_OUTBOUND; 3043 pkt->pkt_timeout = (2 * hba->fc_ratov); 3044 3045 if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) { 3046 pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3; 3047 pkt->pkt_tran_flags |= FC_TRAN_CLASS2; 3048 } 3049 3050 /* Build the fc header */ 3051 pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id; 3052 pkt->pkt_cmd_fhdr.r_ctl = 3053 R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL; 3054 pkt->pkt_cmd_fhdr.s_id = SWAP_DATA24_LO(port->did); 3055 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS; 3056 pkt->pkt_cmd_fhdr.f_ctl = 3057 F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ; 3058 pkt->pkt_cmd_fhdr.seq_id = 0; 3059 pkt->pkt_cmd_fhdr.df_ctl = 0; 3060 pkt->pkt_cmd_fhdr.seq_cnt = 0; 3061 pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff; 3062 pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id; 3063 pkt->pkt_cmd_fhdr.ro = 0; 3064 3065 /* Build the command */ 3066 els = (ELS_PKT *) pkt->pkt_cmd; 3067 els->elsCode = 0x01; 3068 els->un.lsRjt.un.b.lsRjtRsvd0 = 0; 3069 els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3070 els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 3071 els->un.lsRjt.un.b.vendorUnique = 0x02; 3072 3073 /* Send the pkt later in another thread */ 3074 (void) emlxs_pkt_send(pkt, 0); 3075 3076 return; 3077 3078 } /* emlxs_ub_els_reject() */ 3079 3080 extern int 3081 emlxs_ub_release(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[]) 3082 { 3083 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3084 emlxs_hba_t *hba = HBA; 3085 fc_unsol_buf_t *ubp; 3086 emlxs_ub_priv_t *ub_priv; 3087 uint32_t i; 3088 uint32_t time; 3089 emlxs_unsol_buf_t *pool; 3090 3091 if (count == 0) { 3092 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3093 "ub_release: Nothing to do. count=%d", count); 3094 3095 return (FC_SUCCESS); 3096 } 3097 3098 if (!(port->flag & EMLXS_PORT_BOUND)) { 3099 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3100 "ub_release failed: Port not bound. count=%d token[0]=%p", 3101 count, tokens[0]); 3102 3103 return (FC_UNBOUND); 3104 } 3105 3106 mutex_enter(&EMLXS_UB_LOCK); 3107 3108 if (!port->ub_pool) { 3109 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3110 "ub_release failed: No pools! count=%d token[0]=%p", 3111 count, tokens[0]); 3112 3113 mutex_exit(&EMLXS_UB_LOCK); 3114 return (FC_UB_BADTOKEN); 3115 } 3116 3117 for (i = 0; i < count; i++) { 3118 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]); 3119 3120 if (!ubp) { 3121 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3122 "ub_release failed: count=%d tokens[%d]=0", count, 3123 i); 3124 3125 mutex_exit(&EMLXS_UB_LOCK); 3126 return (FC_UB_BADTOKEN); 3127 } 3128 3129 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 3130 3131 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) { 3132 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3133 "ub_release failed: Dead buffer found. ubp=%p", 3134 ubp); 3135 3136 mutex_exit(&EMLXS_UB_LOCK); 3137 return (FC_UB_BADTOKEN); 3138 } 3139 3140 if (ub_priv->flags == EMLXS_UB_FREE) { 3141 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3142 "ub_release: Buffer already free! 
ubp=%p token=%x", 3143 ubp, ub_priv->token); 3144 3145 continue; 3146 } 3147 3148 /* Check for dropped els buffer */ 3149 /* ULP will do this sometimes without sending a reply */ 3150 if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) && 3151 !(ub_priv->flags & EMLXS_UB_REPLY)) { 3152 emlxs_ub_els_reject(port, ubp); 3153 } 3154 3155 /* Mark the buffer free */ 3156 ub_priv->flags = EMLXS_UB_FREE; 3157 bzero(ubp->ub_buffer, ubp->ub_bufsize); 3158 3159 time = hba->timer_tics - ub_priv->time; 3160 ub_priv->time = 0; 3161 ub_priv->timeout = 0; 3162 3163 pool = ub_priv->pool; 3164 3165 if (ub_priv->flags & EMLXS_UB_RESV) { 3166 pool->pool_free_resv++; 3167 } else { 3168 pool->pool_free++; 3169 } 3170 3171 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 3172 "ub_release: ubp=%p token=%x time=%d av=%d (%d,%d,%d,%d)", 3173 ubp, ub_priv->token, time, ub_priv->available, 3174 pool->pool_nentries, pool->pool_available, 3175 pool->pool_free, pool->pool_free_resv); 3176 3177 /* Check if pool can be destroyed now */ 3178 if ((pool->pool_available == 0) && 3179 (pool->pool_free + pool->pool_free_resv == 3180 pool->pool_nentries)) { 3181 emlxs_ub_destroy(port, pool); 3182 } 3183 } 3184 3185 mutex_exit(&EMLXS_UB_LOCK); 3186 3187 return (FC_SUCCESS); 3188 3189 } /* emlxs_ub_release() */ 3190 3191 3192 static int 3193 emlxs_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[]) 3194 { 3195 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3196 emlxs_unsol_buf_t *pool; 3197 fc_unsol_buf_t *ubp; 3198 emlxs_ub_priv_t *ub_priv; 3199 uint32_t i; 3200 3201 if (port->tgt_mode) { 3202 return (FC_SUCCESS); 3203 } 3204 3205 if (count == 0) { 3206 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3207 "ub_free: Nothing to do. count=%d token[0]=%p", count, 3208 tokens[0]); 3209 3210 return (FC_SUCCESS); 3211 } 3212 3213 if (!(port->flag & EMLXS_PORT_BOUND)) { 3214 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3215 "ub_free: Port not bound. count=%d token[0]=%p", count, 3216 tokens[0]); 3217 3218 return (FC_SUCCESS); 3219 } 3220 3221 mutex_enter(&EMLXS_UB_LOCK); 3222 3223 if (!port->ub_pool) { 3224 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3225 "ub_free failed: No pools! count=%d token[0]=%p", count, 3226 tokens[0]); 3227 3228 mutex_exit(&EMLXS_UB_LOCK); 3229 return (FC_UB_BADTOKEN); 3230 } 3231 3232 /* Process buffer list */ 3233 for (i = 0; i < count; i++) { 3234 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]); 3235 3236 if (!ubp) { 3237 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3238 "ub_free failed: count=%d tokens[%d]=0", count, 3239 i); 3240 3241 mutex_exit(&EMLXS_UB_LOCK); 3242 return (FC_UB_BADTOKEN); 3243 } 3244 3245 /* Mark buffer unavailable */ 3246 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 3247 3248 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) { 3249 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3250 "ub_free failed: Dead buffer found. 
ubp=%p", ubp); 3251 3252 mutex_exit(&EMLXS_UB_LOCK); 3253 return (FC_UB_BADTOKEN); 3254 } 3255 3256 ub_priv->available = 0; 3257 3258 /* Mark one less buffer available in the parent pool */ 3259 pool = ub_priv->pool; 3260 3261 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 3262 "ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp, 3263 ub_priv->token, pool->pool_nentries, 3264 pool->pool_available - 1, pool->pool_free, 3265 pool->pool_free_resv); 3266 3267 if (pool->pool_available) { 3268 pool->pool_available--; 3269 3270 /* Check if pool can be destroyed */ 3271 if ((pool->pool_available == 0) && 3272 (pool->pool_free + pool->pool_free_resv == 3273 pool->pool_nentries)) { 3274 emlxs_ub_destroy(port, pool); 3275 } 3276 } 3277 } 3278 3279 mutex_exit(&EMLXS_UB_LOCK); 3280 3281 return (FC_SUCCESS); 3282 3283 } /* emlxs_ub_free() */ 3284 3285 3286 /* EMLXS_UB_LOCK must be held when calling this routine */ 3287 extern void 3288 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool) 3289 { 3290 emlxs_unsol_buf_t *next; 3291 emlxs_unsol_buf_t *prev; 3292 fc_unsol_buf_t *ubp; 3293 uint32_t i; 3294 3295 /* Remove the pool object from the pool list */ 3296 next = pool->pool_next; 3297 prev = pool->pool_prev; 3298 3299 if (port->ub_pool == pool) { 3300 port->ub_pool = next; 3301 } 3302 3303 if (prev) { 3304 prev->pool_next = next; 3305 } 3306 3307 if (next) { 3308 next->pool_prev = prev; 3309 } 3310 3311 pool->pool_prev = NULL; 3312 pool->pool_next = NULL; 3313 3314 /* Clear the post counts */ 3315 switch (pool->pool_type) { 3316 case FC_TYPE_IS8802_SNAP: 3317 port->ub_post[FC_IP_RING] -= pool->pool_nentries; 3318 break; 3319 3320 case FC_TYPE_EXTENDED_LS: 3321 port->ub_post[FC_ELS_RING] -= pool->pool_nentries; 3322 break; 3323 3324 case FC_TYPE_FC_SERVICES: 3325 port->ub_post[FC_CT_RING] -= pool->pool_nentries; 3326 break; 3327 } 3328 3329 /* Now free the pool memory */ 3330 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3331 "ub_destroy: pool=%p type=%d size=%d count=%d", pool, 3332 pool->pool_type, pool->pool_buf_size, pool->pool_nentries); 3333 3334 /* Process the array of buffer objects in the pool */ 3335 for (i = 0; i < pool->pool_nentries; i++) { 3336 /* Get the buffer object */ 3337 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i]; 3338 3339 /* Free the memory the buffer object represents */ 3340 kmem_free(ubp->ub_buffer, ubp->ub_bufsize); 3341 3342 /* Free the private area of the buffer object */ 3343 kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t)); 3344 } 3345 3346 /* Free the array of buffer objects in the pool */ 3347 kmem_free((caddr_t)pool->fc_ubufs, 3348 (sizeof (fc_unsol_buf_t)*pool->pool_nentries)); 3349 3350 /* Free the pool object */ 3351 kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t)); 3352 3353 return; 3354 3355 } /* emlxs_ub_destroy() */ 3356 3357 3358 /*ARGSUSED*/ 3359 extern int 3360 emlxs_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep) 3361 { 3362 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3363 emlxs_hba_t *hba = HBA; 3364 3365 emlxs_buf_t *sbp; 3366 NODELIST *nlp; 3367 NODELIST *prev_nlp; 3368 uint8_t ringno; 3369 RING *rp; 3370 clock_t timeout; 3371 clock_t time; 3372 int32_t pkt_ret; 3373 IOCBQ *iocbq; 3374 IOCBQ *next; 3375 IOCBQ *prev; 3376 uint32_t found; 3377 uint32_t att_bit; 3378 uint32_t pass = 0; 3379 3380 sbp = (emlxs_buf_t *)pkt->pkt_fca_private; 3381 iocbq = &sbp->iocbq; 3382 nlp = (NODELIST *)sbp->node; 3383 rp = (RING *)sbp->ring; 3384 ringno = (rp) ? 
rp->ringno : 0; 3385 3386 if (!(port->flag & EMLXS_PORT_BOUND)) { 3387 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3388 "Port not bound."); 3389 return (FC_UNBOUND); 3390 } 3391 3392 if (!(hba->flag & FC_ONLINE_MODE)) { 3393 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3394 "Adapter offline."); 3395 return (FC_OFFLINE); 3396 } 3397 3398 /* ULP requires the aborted pkt to be completed */ 3399 /* back to ULP before returning from this call. */ 3400 /* SUN knows of problems with this call so they suggested that we */ 3401 /* always return a FC_FAILURE for this call, until it is worked out. */ 3402 3403 /* Check if pkt is no good */ 3404 if (!(sbp->pkt_flags & PACKET_VALID) || 3405 (sbp->pkt_flags & PACKET_RETURNED)) { 3406 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3407 "Bad sbp. flags=%x", sbp->pkt_flags); 3408 return (FC_FAILURE); 3409 } 3410 3411 /* Tag this now */ 3412 /* This will prevent any thread except ours from completing it */ 3413 mutex_enter(&sbp->mtx); 3414 3415 /* Check again if we still own this */ 3416 if (!(sbp->pkt_flags & PACKET_VALID) || 3417 (sbp->pkt_flags & PACKET_RETURNED)) { 3418 mutex_exit(&sbp->mtx); 3419 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3420 "Bad sbp. flags=%x", sbp->pkt_flags); 3421 return (FC_FAILURE); 3422 } 3423 3424 /* Check if pkt is a real polled command */ 3425 if (!(sbp->pkt_flags & PACKET_IN_ABORT) && 3426 (sbp->pkt_flags & PACKET_POLLED)) { 3427 mutex_exit(&sbp->mtx); 3428 3429 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3430 "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp, 3431 sbp->pkt_flags); 3432 return (FC_FAILURE); 3433 } 3434 3435 sbp->pkt_flags |= PACKET_POLLED; 3436 sbp->pkt_flags |= PACKET_IN_ABORT; 3437 3438 if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH | 3439 PACKET_IN_TIMEOUT)) { 3440 mutex_exit(&sbp->mtx); 3441 3442 /* Do nothing, pkt already on its way out */ 3443 goto done; 3444 } 3445 3446 mutex_exit(&sbp->mtx); 3447 3448 begin: 3449 pass++; 3450 3451 mutex_enter(&EMLXS_RINGTX_LOCK); 3452 3453 if (sbp->pkt_flags & PACKET_IN_TXQ) { 3454 /* Find it on the queue */ 3455 found = 0; 3456 if (iocbq->flag & IOCB_PRIORITY) { 3457 /* Search the priority queue */ 3458 prev = NULL; 3459 next = (IOCBQ *) nlp->nlp_ptx[ringno].q_first; 3460 3461 while (next) { 3462 if (next == iocbq) { 3463 /* Remove it */ 3464 if (prev) { 3465 prev->next = iocbq->next; 3466 } 3467 3468 if (nlp->nlp_ptx[ringno].q_last == 3469 (void *)iocbq) { 3470 nlp->nlp_ptx[ringno].q_last = 3471 (void *)prev; 3472 } 3473 3474 if (nlp->nlp_ptx[ringno].q_first == 3475 (void *)iocbq) { 3476 nlp->nlp_ptx[ringno].q_first = 3477 (void *)iocbq->next; 3478 } 3479 3480 nlp->nlp_ptx[ringno].q_cnt--; 3481 iocbq->next = NULL; 3482 found = 1; 3483 break; 3484 } 3485 3486 prev = next; 3487 next = next->next; 3488 } 3489 } else { 3490 /* Search the normal queue */ 3491 prev = NULL; 3492 next = (IOCBQ *) nlp->nlp_tx[ringno].q_first; 3493 3494 while (next) { 3495 if (next == iocbq) { 3496 /* Remove it */ 3497 if (prev) { 3498 prev->next = iocbq->next; 3499 } 3500 3501 if (nlp->nlp_tx[ringno].q_last == 3502 (void *)iocbq) { 3503 nlp->nlp_tx[ringno].q_last = 3504 (void *)prev; 3505 } 3506 3507 if (nlp->nlp_tx[ringno].q_first == 3508 (void *)iocbq) { 3509 nlp->nlp_tx[ringno].q_first = 3510 (void *)iocbq->next; 3511 } 3512 3513 nlp->nlp_tx[ringno].q_cnt--; 3514 iocbq->next = NULL; 3515 found = 1; 3516 break; 3517 } 3518 3519 prev = next; 3520 next = (IOCBQ *) next->next; 3521 } 3522 } 3523 3524 if (!found) { 3525 
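			/*
			 * The packet was flagged PACKET_IN_TXQ but was not
			 * found on this node's priority or normal transmit
			 * queue; release the lock, log it, and fall through
			 * to the completion wait below.
			 */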
mutex_exit(&EMLXS_RINGTX_LOCK); 3526 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3527 "I/O not found in driver. sbp=%p flags=%x", sbp, 3528 sbp->pkt_flags); 3529 goto done; 3530 } 3531 3532 /* Check if node still needs servicing */ 3533 if ((nlp->nlp_ptx[ringno].q_first) || 3534 (nlp->nlp_tx[ringno].q_first && 3535 !(nlp->nlp_flag[ringno] & NLP_CLOSED))) { 3536 3537 /* 3538 * If this is the base node, 3539 * then don't shift the pointers 3540 */ 3541 /* We want to drain the base node before moving on */ 3542 if (!nlp->nlp_base) { 3543 /* Just shift ring queue */ 3544 /* pointers to next node */ 3545 rp->nodeq.q_last = (void *) nlp; 3546 rp->nodeq.q_first = nlp->nlp_next[ringno]; 3547 } 3548 } else { 3549 /* Remove node from ring queue */ 3550 3551 /* If this is the only node on list */ 3552 if (rp->nodeq.q_first == (void *)nlp && 3553 rp->nodeq.q_last == (void *)nlp) { 3554 rp->nodeq.q_last = NULL; 3555 rp->nodeq.q_first = NULL; 3556 rp->nodeq.q_cnt = 0; 3557 } else if (rp->nodeq.q_first == (void *)nlp) { 3558 rp->nodeq.q_first = nlp->nlp_next[ringno]; 3559 ((NODELIST *) rp->nodeq.q_last)-> 3560 nlp_next[ringno] = rp->nodeq.q_first; 3561 rp->nodeq.q_cnt--; 3562 } else { 3563 /* 3564 * This is a little more difficult find the 3565 * previous node in the circular ring queue 3566 */ 3567 prev_nlp = nlp; 3568 while (prev_nlp->nlp_next[ringno] != nlp) { 3569 prev_nlp = prev_nlp->nlp_next[ringno]; 3570 } 3571 3572 prev_nlp->nlp_next[ringno] = 3573 nlp->nlp_next[ringno]; 3574 3575 if (rp->nodeq.q_last == (void *)nlp) { 3576 rp->nodeq.q_last = (void *)prev_nlp; 3577 } 3578 rp->nodeq.q_cnt--; 3579 3580 } 3581 3582 /* Clear node */ 3583 nlp->nlp_next[ringno] = NULL; 3584 } 3585 3586 mutex_enter(&sbp->mtx); 3587 3588 if (sbp->pkt_flags & PACKET_IN_TXQ) { 3589 sbp->pkt_flags &= ~PACKET_IN_TXQ; 3590 hba->ring_tx_count[ringno]--; 3591 } 3592 3593 mutex_exit(&sbp->mtx); 3594 3595 /* Free the ulpIoTag and the bmp */ 3596 (void) emlxs_unregister_pkt(rp, sbp->iotag, 0); 3597 3598 mutex_exit(&EMLXS_RINGTX_LOCK); 3599 3600 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 3601 IOERR_ABORT_REQUESTED, 1); 3602 3603 goto done; 3604 } 3605 3606 mutex_exit(&EMLXS_RINGTX_LOCK); 3607 3608 3609 /* Check the chip queue */ 3610 mutex_enter(&EMLXS_FCTAB_LOCK(ringno)); 3611 3612 if ((sbp->pkt_flags & PACKET_IN_CHIPQ) && 3613 !(sbp->pkt_flags & PACKET_XRI_CLOSED) && 3614 (sbp == rp->fc_table[sbp->iotag])) { 3615 3616 /* Create the abort IOCB */ 3617 if (hba->state >= FC_LINK_UP) { 3618 iocbq = 3619 emlxs_create_abort_xri_cn(port, sbp->node, 3620 sbp->iotag, rp, sbp->class, ABORT_TYPE_ABTS); 3621 3622 mutex_enter(&sbp->mtx); 3623 sbp->pkt_flags |= PACKET_XRI_CLOSED; 3624 sbp->ticks = 3625 hba->timer_tics + (4 * hba->fc_ratov) + 10; 3626 sbp->abort_attempts++; 3627 mutex_exit(&sbp->mtx); 3628 } else { 3629 iocbq = 3630 emlxs_create_close_xri_cn(port, sbp->node, 3631 sbp->iotag, rp); 3632 3633 mutex_enter(&sbp->mtx); 3634 sbp->pkt_flags |= PACKET_XRI_CLOSED; 3635 sbp->ticks = hba->timer_tics + 30; 3636 sbp->abort_attempts++; 3637 mutex_exit(&sbp->mtx); 3638 } 3639 3640 mutex_exit(&EMLXS_FCTAB_LOCK(ringno)); 3641 3642 /* Send this iocbq */ 3643 if (iocbq) { 3644 emlxs_sli_issue_iocb_cmd(hba, rp, iocbq); 3645 iocbq = NULL; 3646 } 3647 3648 goto done; 3649 } 3650 3651 mutex_exit(&EMLXS_FCTAB_LOCK(ringno)); 3652 3653 /* Pkt was not on any queues */ 3654 3655 /* Check again if we still own this */ 3656 if (!(sbp->pkt_flags & PACKET_VALID) || 3657 (sbp->pkt_flags & 3658 (PACKET_RETURNED | PACKET_IN_COMPLETION | 3659 
PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) { 3660 goto done; 3661 } 3662 3663 /* Apparently the pkt was not found. Let's delay and try again */ 3664 if (pass < 5) { 3665 delay(drv_usectohz(5000000)); /* 5 seconds */ 3666 3667 /* Check again if we still own this */ 3668 if (!(sbp->pkt_flags & PACKET_VALID) || 3669 (sbp->pkt_flags & 3670 (PACKET_RETURNED | PACKET_IN_COMPLETION | 3671 PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) { 3672 goto done; 3673 } 3674 3675 goto begin; 3676 } 3677 3678 force_it: 3679 3680 /* Force the completion now */ 3681 3682 /* Unregister the pkt */ 3683 (void) emlxs_unregister_pkt(rp, sbp->iotag, 1); 3684 3685 /* Now complete it */ 3686 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED, 3687 1); 3688 3689 done: 3690 3691 /* Now wait for the pkt to complete */ 3692 if (!(sbp->pkt_flags & PACKET_COMPLETED)) { 3693 /* Set thread timeout */ 3694 timeout = emlxs_timeout(hba, 30); 3695 3696 /* Check for panic situation */ 3697 if (ddi_in_panic()) { 3698 3699 /* 3700 * In panic situations there will be one thread with no 3701 * interrrupts (hard or soft) and no timers 3702 */ 3703 3704 /* 3705 * We must manually poll everything in this thread 3706 * to keep the driver going. 3707 */ 3708 3709 rp = (emlxs_ring_t *)sbp->ring; 3710 switch (rp->ringno) { 3711 case FC_FCP_RING: 3712 att_bit = HA_R0ATT; 3713 break; 3714 3715 case FC_IP_RING: 3716 att_bit = HA_R1ATT; 3717 break; 3718 3719 case FC_ELS_RING: 3720 att_bit = HA_R2ATT; 3721 break; 3722 3723 case FC_CT_RING: 3724 att_bit = HA_R3ATT; 3725 break; 3726 } 3727 3728 /* Keep polling the chip until our IO is completed */ 3729 (void) drv_getparm(LBOLT, &time); 3730 while ((time < timeout) && 3731 !(sbp->pkt_flags & PACKET_COMPLETED)) { 3732 emlxs_sli_poll_intr(hba, att_bit); 3733 (void) drv_getparm(LBOLT, &time); 3734 } 3735 } else { 3736 /* Wait for IO completion or timeout */ 3737 mutex_enter(&EMLXS_PKT_LOCK); 3738 pkt_ret = 0; 3739 while ((pkt_ret != -1) && 3740 !(sbp->pkt_flags & PACKET_COMPLETED)) { 3741 pkt_ret = 3742 cv_timedwait(&EMLXS_PKT_CV, 3743 &EMLXS_PKT_LOCK, timeout); 3744 } 3745 mutex_exit(&EMLXS_PKT_LOCK); 3746 } 3747 3748 /* Check if timeout occured. This is not good. */ 3749 /* Something happened to our IO. */ 3750 if (!(sbp->pkt_flags & PACKET_COMPLETED)) { 3751 /* Force the completion now */ 3752 goto force_it; 3753 } 3754 } 3755 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 3756 emlxs_unswap_pkt(sbp); 3757 #endif /* EMLXS_MODREV2X */ 3758 3759 /* Check again if we still own this */ 3760 if ((sbp->pkt_flags & PACKET_VALID) && 3761 !(sbp->pkt_flags & PACKET_RETURNED)) { 3762 mutex_enter(&sbp->mtx); 3763 if ((sbp->pkt_flags & PACKET_VALID) && 3764 !(sbp->pkt_flags & PACKET_RETURNED)) { 3765 sbp->pkt_flags |= PACKET_RETURNED; 3766 } 3767 mutex_exit(&sbp->mtx); 3768 } 3769 #ifdef ULP_PATCH5 3770 return (FC_FAILURE); 3771 3772 #else 3773 return (FC_SUCCESS); 3774 3775 #endif /* ULP_PATCH5 */ 3776 3777 3778 } /* emlxs_pkt_abort() */ 3779 3780 3781 extern int32_t 3782 emlxs_reset(opaque_t fca_port_handle, uint32_t cmd) 3783 { 3784 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3785 emlxs_hba_t *hba = HBA; 3786 int rval; 3787 int ret; 3788 clock_t timeout; 3789 3790 if (!(port->flag & EMLXS_PORT_BOUND)) { 3791 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3792 "fca_reset failed. 
Port not bound."); 3793 3794 return (FC_UNBOUND); 3795 } 3796 3797 switch (cmd) { 3798 case FC_FCA_LINK_RESET: 3799 3800 if (!(hba->flag & FC_ONLINE_MODE) || 3801 (hba->state <= FC_LINK_DOWN)) { 3802 return (FC_SUCCESS); 3803 } 3804 3805 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3806 "fca_reset: Resetting Link."); 3807 3808 mutex_enter(&EMLXS_LINKUP_LOCK); 3809 hba->linkup_wait_flag = TRUE; 3810 mutex_exit(&EMLXS_LINKUP_LOCK); 3811 3812 if (emlxs_reset_link(hba, 1)) { 3813 mutex_enter(&EMLXS_LINKUP_LOCK); 3814 hba->linkup_wait_flag = FALSE; 3815 mutex_exit(&EMLXS_LINKUP_LOCK); 3816 3817 return (FC_FAILURE); 3818 } 3819 3820 mutex_enter(&EMLXS_LINKUP_LOCK); 3821 timeout = emlxs_timeout(hba, 60); 3822 ret = 0; 3823 while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) { 3824 ret = 3825 cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK, 3826 timeout); 3827 } 3828 3829 hba->linkup_wait_flag = FALSE; 3830 mutex_exit(&EMLXS_LINKUP_LOCK); 3831 3832 if (ret == -1) { 3833 return (FC_FAILURE); 3834 } 3835 3836 return (FC_SUCCESS); 3837 3838 case FC_FCA_CORE: 3839 #ifdef DUMP_SUPPORT 3840 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3841 "fca_reset: Core dump."); 3842 3843 /* Schedule a USER dump */ 3844 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0); 3845 3846 /* Wait for dump to complete */ 3847 emlxs_dump_wait(hba); 3848 3849 return (FC_SUCCESS); 3850 #endif /* DUMP_SUPPORT */ 3851 3852 case FC_FCA_RESET: 3853 case FC_FCA_RESET_CORE: 3854 3855 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3856 "fca_reset: Resetting Adapter."); 3857 3858 rval = FC_SUCCESS; 3859 3860 if (emlxs_offline(hba) == 0) { 3861 (void) emlxs_online(hba); 3862 } else { 3863 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3864 "fca_reset: Adapter reset failed. Device busy."); 3865 3866 rval = FC_DEVICE_BUSY; 3867 } 3868 3869 return (rval); 3870 3871 default: 3872 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3873 "fca_reset: Unknown command. 
cmd=%x", cmd); 3874 3875 break; 3876 } 3877 3878 return (FC_FAILURE); 3879 3880 } /* emlxs_reset() */ 3881 3882 3883 extern uint32_t emlxs_core_dump(emlxs_hba_t *hba, char *buffer, 3884 uint32_t size); 3885 extern uint32_t emlxs_core_size(emlxs_hba_t *hba); 3886 3887 extern int 3888 emlxs_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm) 3889 { 3890 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3891 emlxs_hba_t *hba = HBA; 3892 int32_t ret; 3893 emlxs_vpd_t *vpd = &VPD; 3894 3895 3896 ret = FC_SUCCESS; 3897 3898 if (!(port->flag & EMLXS_PORT_BOUND)) { 3899 return (FC_UNBOUND); 3900 } 3901 3902 3903 #ifdef IDLE_TIMER 3904 emlxs_pm_busy_component(hba); 3905 #endif /* IDLE_TIMER */ 3906 3907 switch (pm->pm_cmd_code) { 3908 3909 case FC_PORT_GET_FW_REV: 3910 { 3911 char buffer[128]; 3912 3913 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3914 "fca_port_manage: FC_PORT_GET_FW_REV"); 3915 3916 (void) sprintf(buffer, "%s %s", hba->model_info.model, 3917 vpd->fw_version); 3918 bzero(pm->pm_data_buf, pm->pm_data_len); 3919 3920 if (pm->pm_data_len < strlen(buffer) + 1) { 3921 ret = FC_NOMEM; 3922 3923 break; 3924 } 3925 3926 (void) strcpy(pm->pm_data_buf, buffer); 3927 break; 3928 } 3929 3930 case FC_PORT_GET_FCODE_REV: 3931 { 3932 char buffer[128]; 3933 3934 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3935 "fca_port_manage: FC_PORT_GET_FCODE_REV"); 3936 3937 /* Force update here just to be sure */ 3938 emlxs_get_fcode_version(hba); 3939 3940 (void) sprintf(buffer, "%s %s", hba->model_info.model, 3941 vpd->fcode_version); 3942 bzero(pm->pm_data_buf, pm->pm_data_len); 3943 3944 if (pm->pm_data_len < strlen(buffer) + 1) { 3945 ret = FC_NOMEM; 3946 break; 3947 } 3948 3949 (void) strcpy(pm->pm_data_buf, buffer); 3950 break; 3951 } 3952 3953 case FC_PORT_GET_DUMP_SIZE: 3954 { 3955 #ifdef DUMP_SUPPORT 3956 uint32_t dump_size = 0; 3957 3958 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3959 "fca_port_manage: FC_PORT_GET_DUMP_SIZE"); 3960 3961 if (pm->pm_data_len < sizeof (uint32_t)) { 3962 ret = FC_NOMEM; 3963 break; 3964 } 3965 3966 (void) emlxs_get_dump(hba, NULL, &dump_size); 3967 3968 *((uint32_t *)pm->pm_data_buf) = dump_size; 3969 3970 #else 3971 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3972 "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported."); 3973 3974 #endif /* DUMP_SUPPORT */ 3975 3976 break; 3977 } 3978 3979 case FC_PORT_GET_DUMP: 3980 { 3981 #ifdef DUMP_SUPPORT 3982 uint32_t dump_size = 0; 3983 3984 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3985 "fca_port_manage: FC_PORT_GET_DUMP"); 3986 3987 (void) emlxs_get_dump(hba, NULL, &dump_size); 3988 3989 if (pm->pm_data_len < dump_size) { 3990 ret = FC_NOMEM; 3991 break; 3992 } 3993 3994 (void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf, 3995 (uint32_t *)&dump_size); 3996 #else 3997 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3998 "fca_port_manage: FC_PORT_GET_DUMP unsupported."); 3999 4000 #endif /* DUMP_SUPPORT */ 4001 4002 break; 4003 } 4004 4005 case FC_PORT_FORCE_DUMP: 4006 { 4007 #ifdef DUMP_SUPPORT 4008 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4009 "fca_port_manage: FC_PORT_FORCE_DUMP"); 4010 4011 /* Schedule a USER dump */ 4012 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0); 4013 4014 /* Wait for dump to complete */ 4015 emlxs_dump_wait(hba); 4016 #else 4017 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4018 "fca_port_manage: FC_PORT_FORCE_DUMP unsupported."); 4019 4020 #endif /* DUMP_SUPPORT */ 4021 break; 4022 } 4023 4024 case FC_PORT_LINK_STATE: 4025 { 4026 uint32_t *link_state; 4027 4028 
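		/*
		 * The state word returned below combines a port state with
		 * a speed bit: an online 4Gb/s loop link, for example, is
		 * reported as (FC_STATE_LOOP | FC_STATE_4GBIT_SPEED), while
		 * a down link is reported as FC_STATE_OFFLINE.
		 */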
EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4029 "fca_port_manage: FC_PORT_LINK_STATE"); 4030 4031 if (pm->pm_stat_len != sizeof (*link_state)) { 4032 ret = FC_NOMEM; 4033 break; 4034 } 4035 4036 if (pm->pm_cmd_buf != NULL) { 4037 /* 4038 * Can't look beyond the FCA port. 4039 */ 4040 ret = FC_INVALID_REQUEST; 4041 break; 4042 } 4043 4044 link_state = (uint32_t *)pm->pm_stat_buf; 4045 4046 /* Set the state */ 4047 if (hba->state >= FC_LINK_UP) { 4048 /* Check for loop topology */ 4049 if (hba->topology == TOPOLOGY_LOOP) { 4050 *link_state = FC_STATE_LOOP; 4051 } else { 4052 *link_state = FC_STATE_ONLINE; 4053 } 4054 4055 /* Set the link speed */ 4056 switch (hba->linkspeed) { 4057 case LA_2GHZ_LINK: 4058 *link_state |= FC_STATE_2GBIT_SPEED; 4059 break; 4060 case LA_4GHZ_LINK: 4061 *link_state |= FC_STATE_4GBIT_SPEED; 4062 break; 4063 case LA_8GHZ_LINK: 4064 *link_state |= FC_STATE_8GBIT_SPEED; 4065 break; 4066 case LA_10GHZ_LINK: 4067 *link_state |= FC_STATE_10GBIT_SPEED; 4068 break; 4069 case LA_1GHZ_LINK: 4070 default: 4071 *link_state |= FC_STATE_1GBIT_SPEED; 4072 break; 4073 } 4074 } else { 4075 *link_state = FC_STATE_OFFLINE; 4076 } 4077 4078 break; 4079 } 4080 4081 4082 case FC_PORT_ERR_STATS: 4083 case FC_PORT_RLS: 4084 { 4085 MAILBOX *mb; 4086 fc_rls_acc_t *bp; 4087 4088 if (!(hba->flag & FC_ONLINE_MODE)) { 4089 return (FC_OFFLINE); 4090 } 4091 4092 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4093 "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS"); 4094 4095 if (pm->pm_data_len < sizeof (fc_rls_acc_t)) { 4096 ret = FC_NOMEM; 4097 break; 4098 } 4099 4100 if ((mb = (MAILBOX *)emlxs_mem_get(hba, 4101 MEM_MBOX | MEM_PRI)) == 0) { 4102 ret = FC_NOMEM; 4103 break; 4104 } 4105 4106 emlxs_mb_read_lnk_stat(hba, mb); 4107 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) 4108 != MBX_SUCCESS) { 4109 ret = FC_PBUSY; 4110 } else { 4111 bp = (fc_rls_acc_t *)pm->pm_data_buf; 4112 4113 bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt; 4114 bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt; 4115 bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt; 4116 bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt; 4117 bp->rls_invalid_word = 4118 mb->un.varRdLnk.invalidXmitWord; 4119 bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt; 4120 } 4121 4122 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb); 4123 break; 4124 } 4125 4126 case FC_PORT_DOWNLOAD_FW: 4127 if (!(hba->flag & FC_ONLINE_MODE)) { 4128 return (FC_OFFLINE); 4129 } 4130 4131 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4132 "fca_port_manage: FC_PORT_DOWNLOAD_FW"); 4133 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4134 pm->pm_data_len, 1); 4135 break; 4136 4137 case FC_PORT_DOWNLOAD_FCODE: 4138 if (!(hba->flag & FC_ONLINE_MODE)) { 4139 return (FC_OFFLINE); 4140 } 4141 4142 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4143 "fca_port_manage: FC_PORT_DOWNLOAD_FCODE"); 4144 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4145 pm->pm_data_len, 1); 4146 break; 4147 4148 case FC_PORT_DIAG: 4149 { 4150 uint32_t errno = 0; 4151 uint32_t did = 0; 4152 uint32_t pattern = 0; 4153 4154 switch (pm->pm_cmd_flags) { 4155 case EMLXS_DIAG_BIU: 4156 4157 if (!(hba->flag & FC_ONLINE_MODE)) { 4158 return (FC_OFFLINE); 4159 } 4160 4161 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4162 "fca_port_manage: EMLXS_DIAG_BIU"); 4163 4164 if (pm->pm_data_len) { 4165 pattern = *((uint32_t *)pm->pm_data_buf); 4166 } 4167 4168 errno = emlxs_diag_biu_run(hba, pattern); 4169 4170 if (pm->pm_stat_len == sizeof (errno)) { 4171 *(int *)pm->pm_stat_buf = errno; 4172 } 4173 
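		/*
		 * Sketch of assumed caller usage (not part of this driver):
		 * an application driving this BIU diagnostic through the
		 * fc_fca_pm_t interface would set pm_cmd_code = FC_PORT_DIAG
		 * and pm_cmd_flags = EMLXS_DIAG_BIU, optionally point
		 * pm_data_buf/pm_data_len at a uint32_t test pattern, and
		 * supply pm_stat_buf with pm_stat_len == sizeof (uint32_t)
		 * to receive the result written just above.
		 */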
4174 break; 4175 4176 4177 case EMLXS_DIAG_POST: 4178 4179 if (!(hba->flag & FC_ONLINE_MODE)) { 4180 return (FC_OFFLINE); 4181 } 4182 4183 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4184 "fca_port_manage: EMLXS_DIAG_POST"); 4185 4186 errno = emlxs_diag_post_run(hba); 4187 4188 if (pm->pm_stat_len == sizeof (errno)) { 4189 *(int *)pm->pm_stat_buf = errno; 4190 } 4191 4192 break; 4193 4194 4195 case EMLXS_DIAG_ECHO: 4196 4197 if (!(hba->flag & FC_ONLINE_MODE)) { 4198 return (FC_OFFLINE); 4199 } 4200 4201 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4202 "fca_port_manage: EMLXS_DIAG_ECHO"); 4203 4204 if (pm->pm_cmd_len != sizeof (uint32_t)) { 4205 ret = FC_INVALID_REQUEST; 4206 break; 4207 } 4208 4209 did = *((uint32_t *)pm->pm_cmd_buf); 4210 4211 if (pm->pm_data_len) { 4212 pattern = *((uint32_t *)pm->pm_data_buf); 4213 } 4214 4215 errno = emlxs_diag_echo_run(port, did, pattern); 4216 4217 if (pm->pm_stat_len == sizeof (errno)) { 4218 *(int *)pm->pm_stat_buf = errno; 4219 } 4220 4221 break; 4222 4223 4224 case EMLXS_PARM_GET_NUM: 4225 { 4226 uint32_t *num; 4227 emlxs_config_t *cfg; 4228 uint32_t i; 4229 uint32_t count; 4230 4231 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4232 "fca_port_manage: EMLXS_PARM_GET_NUM"); 4233 4234 if (pm->pm_stat_len < sizeof (uint32_t)) { 4235 ret = FC_NOMEM; 4236 break; 4237 } 4238 4239 num = (uint32_t *)pm->pm_stat_buf; 4240 count = 0; 4241 cfg = &CFG; 4242 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4243 if (!(cfg->flags & PARM_HIDDEN)) { 4244 count++; 4245 } 4246 4247 } 4248 4249 *num = count; 4250 4251 break; 4252 } 4253 4254 case EMLXS_PARM_GET_LIST: 4255 { 4256 emlxs_parm_t *parm; 4257 emlxs_config_t *cfg; 4258 uint32_t i; 4259 uint32_t max_count; 4260 4261 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4262 "fca_port_manage: EMLXS_PARM_GET_LIST"); 4263 4264 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4265 ret = FC_NOMEM; 4266 break; 4267 } 4268 4269 max_count = pm->pm_stat_len / sizeof (emlxs_parm_t); 4270 4271 parm = (emlxs_parm_t *)pm->pm_stat_buf; 4272 cfg = &CFG; 4273 for (i = 0; i < NUM_CFG_PARAM && max_count; i++, 4274 cfg++) { 4275 if (!(cfg->flags & PARM_HIDDEN)) { 4276 (void) strcpy(parm->label, cfg->string); 4277 parm->min = cfg->low; 4278 parm->max = cfg->hi; 4279 parm->def = cfg->def; 4280 parm->current = cfg->current; 4281 parm->flags = cfg->flags; 4282 (void) strcpy(parm->help, cfg->help); 4283 parm++; 4284 max_count--; 4285 } 4286 } 4287 4288 break; 4289 } 4290 4291 case EMLXS_PARM_GET: 4292 { 4293 emlxs_parm_t *parm_in; 4294 emlxs_parm_t *parm_out; 4295 emlxs_config_t *cfg; 4296 uint32_t i; 4297 uint32_t len; 4298 4299 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) { 4300 EMLXS_MSGF(EMLXS_CONTEXT, 4301 &emlxs_sfs_debug_msg, 4302 "fca_port_manage: EMLXS_PARM_GET. " 4303 "inbuf too small."); 4304 4305 ret = FC_BADCMD; 4306 break; 4307 } 4308 4309 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4310 EMLXS_MSGF(EMLXS_CONTEXT, 4311 &emlxs_sfs_debug_msg, 4312 "fca_port_manage: EMLXS_PARM_GET. 
" 4313 "outbuf too small"); 4314 4315 ret = FC_BADCMD; 4316 break; 4317 } 4318 4319 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf; 4320 parm_out = (emlxs_parm_t *)pm->pm_stat_buf; 4321 len = strlen(parm_in->label); 4322 cfg = &CFG; 4323 ret = FC_BADOBJECT; 4324 4325 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4326 "fca_port_manage: EMLXS_PARM_GET: %s", 4327 parm_in->label); 4328 4329 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4330 if (len == strlen(cfg->string) && 4331 (strcmp(parm_in->label, 4332 cfg->string) == 0)) { 4333 (void) strcpy(parm_out->label, 4334 cfg->string); 4335 parm_out->min = cfg->low; 4336 parm_out->max = cfg->hi; 4337 parm_out->def = cfg->def; 4338 parm_out->current = cfg->current; 4339 parm_out->flags = cfg->flags; 4340 (void) strcpy(parm_out->help, 4341 cfg->help); 4342 4343 ret = FC_SUCCESS; 4344 break; 4345 } 4346 } 4347 4348 break; 4349 } 4350 4351 case EMLXS_PARM_SET: 4352 { 4353 emlxs_parm_t *parm_in; 4354 emlxs_parm_t *parm_out; 4355 emlxs_config_t *cfg; 4356 uint32_t i; 4357 uint32_t len; 4358 4359 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) { 4360 EMLXS_MSGF(EMLXS_CONTEXT, 4361 &emlxs_sfs_debug_msg, 4362 "fca_port_manage: EMLXS_PARM_GET. " 4363 "inbuf too small."); 4364 4365 ret = FC_BADCMD; 4366 break; 4367 } 4368 4369 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4370 EMLXS_MSGF(EMLXS_CONTEXT, 4371 &emlxs_sfs_debug_msg, 4372 "fca_port_manage: EMLXS_PARM_GET. " 4373 "outbuf too small"); 4374 ret = FC_BADCMD; 4375 break; 4376 } 4377 4378 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf; 4379 parm_out = (emlxs_parm_t *)pm->pm_stat_buf; 4380 len = strlen(parm_in->label); 4381 cfg = &CFG; 4382 ret = FC_BADOBJECT; 4383 4384 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4385 "fca_port_manage: EMLXS_PARM_SET: %s=0x%x,%d", 4386 parm_in->label, parm_in->current, 4387 parm_in->current); 4388 4389 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4390 /* Find matching parameter string */ 4391 if (len == strlen(cfg->string) && 4392 (strcmp(parm_in->label, 4393 cfg->string) == 0)) { 4394 /* Attempt to update parameter */ 4395 if (emlxs_set_parm(hba, i, 4396 parm_in->current) == FC_SUCCESS) { 4397 (void) strcpy(parm_out->label, 4398 cfg->string); 4399 parm_out->min = cfg->low; 4400 parm_out->max = cfg->hi; 4401 parm_out->def = cfg->def; 4402 parm_out->current = 4403 cfg->current; 4404 parm_out->flags = cfg->flags; 4405 (void) strcpy(parm_out->help, 4406 cfg->help); 4407 4408 ret = FC_SUCCESS; 4409 } 4410 4411 break; 4412 } 4413 } 4414 4415 break; 4416 } 4417 4418 case EMLXS_LOG_GET: 4419 { 4420 emlxs_log_req_t *req; 4421 emlxs_log_resp_t *resp; 4422 uint32_t len; 4423 4424 /* Check command size */ 4425 if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) { 4426 ret = FC_BADCMD; 4427 break; 4428 } 4429 4430 /* Get the request */ 4431 req = (emlxs_log_req_t *)pm->pm_cmd_buf; 4432 4433 /* Calculate the response length from the request */ 4434 len = sizeof (emlxs_log_resp_t) + 4435 (req->count * MAX_LOG_MSG_LENGTH); 4436 4437 /* Check the response buffer length */ 4438 if (pm->pm_stat_len < len) { 4439 ret = FC_BADCMD; 4440 break; 4441 } 4442 4443 /* Get the response pointer */ 4444 resp = (emlxs_log_resp_t *)pm->pm_stat_buf; 4445 4446 /* Get the request log enties */ 4447 (void) emlxs_msg_log_get(hba, req, resp); 4448 4449 ret = FC_SUCCESS; 4450 break; 4451 } 4452 4453 case EMLXS_GET_BOOT_REV: 4454 { 4455 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4456 "fca_port_manage: EMLXS_GET_BOOT_REV"); 4457 4458 if (pm->pm_stat_len < strlen(vpd->boot_version)) { 4459 ret = FC_NOMEM; 4460 break; 
4461 } 4462 4463 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4464 (void) sprintf(pm->pm_stat_buf, "%s %s", 4465 hba->model_info.model, vpd->boot_version); 4466 4467 break; 4468 } 4469 4470 case EMLXS_DOWNLOAD_BOOT: 4471 if (!(hba->flag & FC_ONLINE_MODE)) { 4472 return (FC_OFFLINE); 4473 } 4474 4475 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4476 "fca_port_manage: EMLXS_DOWNLOAD_BOOT"); 4477 4478 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4479 pm->pm_data_len, 1); 4480 break; 4481 4482 case EMLXS_DOWNLOAD_CFL: 4483 { 4484 uint32_t *buffer; 4485 uint32_t region; 4486 uint32_t length; 4487 4488 if (!(hba->flag & FC_ONLINE_MODE)) { 4489 return (FC_OFFLINE); 4490 } 4491 4492 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4493 "fca_port_manage: EMLXS_DOWNLOAD_CFL"); 4494 4495 /* Extract the region number from the first word. */ 4496 buffer = (uint32_t *)pm->pm_data_buf; 4497 region = *buffer++; 4498 4499 /* Adjust the image length for the header word */ 4500 length = pm->pm_data_len - 4; 4501 4502 ret = 4503 emlxs_cfl_download(hba, region, (caddr_t)buffer, 4504 length); 4505 break; 4506 } 4507 4508 case EMLXS_VPD_GET: 4509 { 4510 emlxs_vpd_desc_t *vpd_out; 4511 4512 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4513 "fca_port_manage: EMLXS_VPD_GET"); 4514 4515 if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) { 4516 ret = FC_BADCMD; 4517 break; 4518 } 4519 4520 vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf; 4521 bzero(vpd_out, sizeof (emlxs_vpd_desc_t)); 4522 4523 (void) strncpy(vpd_out->id, vpd->id, 4524 sizeof (vpd_out->id)); 4525 (void) strncpy(vpd_out->part_num, vpd->part_num, 4526 sizeof (vpd_out->part_num)); 4527 (void) strncpy(vpd_out->eng_change, vpd->eng_change, 4528 sizeof (vpd_out->eng_change)); 4529 (void) strncpy(vpd_out->manufacturer, vpd->manufacturer, 4530 sizeof (vpd_out->manufacturer)); 4531 (void) strncpy(vpd_out->serial_num, vpd->serial_num, 4532 sizeof (vpd_out->serial_num)); 4533 (void) strncpy(vpd_out->model, vpd->model, 4534 sizeof (vpd_out->model)); 4535 (void) strncpy(vpd_out->model_desc, vpd->model_desc, 4536 sizeof (vpd_out->model_desc)); 4537 (void) strncpy(vpd_out->port_num, vpd->port_num, 4538 sizeof (vpd_out->port_num)); 4539 (void) strncpy(vpd_out->prog_types, vpd->prog_types, 4540 sizeof (vpd_out->prog_types)); 4541 4542 ret = FC_SUCCESS; 4543 4544 break; 4545 } 4546 4547 case EMLXS_GET_FCIO_REV: 4548 { 4549 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4550 "fca_port_manage: EMLXS_GET_FCIO_REV"); 4551 4552 if (pm->pm_stat_len < sizeof (uint32_t)) { 4553 ret = FC_NOMEM; 4554 break; 4555 } 4556 4557 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4558 *(uint32_t *)pm->pm_stat_buf = FCIO_REV; 4559 4560 break; 4561 } 4562 4563 case EMLXS_GET_DFC_REV: 4564 { 4565 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4566 "fca_port_manage: EMLXS_GET_DFC_REV"); 4567 4568 if (pm->pm_stat_len < sizeof (uint32_t)) { 4569 ret = FC_NOMEM; 4570 break; 4571 } 4572 4573 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4574 *(uint32_t *)pm->pm_stat_buf = DFC_REV; 4575 4576 break; 4577 } 4578 4579 case EMLXS_SET_BOOT_STATE: 4580 case EMLXS_SET_BOOT_STATE_old: 4581 { 4582 uint32_t state; 4583 4584 if (!(hba->flag & FC_ONLINE_MODE)) { 4585 return (FC_OFFLINE); 4586 } 4587 4588 if (pm->pm_cmd_len < sizeof (uint32_t)) { 4589 EMLXS_MSGF(EMLXS_CONTEXT, 4590 &emlxs_sfs_debug_msg, 4591 "fca_port_manage: EMLXS_SET_BOOT_STATE"); 4592 ret = FC_BADCMD; 4593 break; 4594 } 4595 4596 state = *(uint32_t *)pm->pm_cmd_buf; 4597 4598 if (state == 0) { 4599 EMLXS_MSGF(EMLXS_CONTEXT, 4600 &emlxs_sfs_debug_msg, 
4601 "fca_port_manage: EMLXS_SET_BOOT_STATE: " 4602 "Disable"); 4603 ret = emlxs_boot_code_disable(hba); 4604 } else { 4605 EMLXS_MSGF(EMLXS_CONTEXT, 4606 &emlxs_sfs_debug_msg, 4607 "fca_port_manage: EMLXS_SET_BOOT_STATE: " 4608 "Enable"); 4609 ret = emlxs_boot_code_enable(hba); 4610 } 4611 4612 break; 4613 } 4614 4615 case EMLXS_GET_BOOT_STATE: 4616 case EMLXS_GET_BOOT_STATE_old: 4617 { 4618 if (!(hba->flag & FC_ONLINE_MODE)) { 4619 return (FC_OFFLINE); 4620 } 4621 4622 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4623 "fca_port_manage: EMLXS_GET_BOOT_STATE"); 4624 4625 if (pm->pm_stat_len < sizeof (uint32_t)) { 4626 ret = FC_NOMEM; 4627 break; 4628 } 4629 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4630 4631 ret = emlxs_boot_code_state(hba); 4632 4633 if (ret == FC_SUCCESS) { 4634 *(uint32_t *)pm->pm_stat_buf = 1; 4635 ret = FC_SUCCESS; 4636 } else if (ret == FC_FAILURE) { 4637 ret = FC_SUCCESS; 4638 } 4639 4640 break; 4641 } 4642 4643 case EMLXS_HW_ERROR_TEST: 4644 { 4645 if (!(hba->flag & FC_ONLINE_MODE)) { 4646 return (FC_OFFLINE); 4647 } 4648 4649 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4650 "fca_port_manage: EMLXS_HW_ERROR_TEST"); 4651 4652 /* Trigger a mailbox timeout */ 4653 hba->mbox_timer = hba->timer_tics; 4654 4655 break; 4656 } 4657 4658 case EMLXS_TEST_CODE: 4659 { 4660 uint32_t *cmd; 4661 4662 if (!(hba->flag & FC_ONLINE_MODE)) { 4663 return (FC_OFFLINE); 4664 } 4665 4666 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4667 "fca_port_manage: EMLXS_TEST_CODE"); 4668 4669 if (pm->pm_cmd_len < sizeof (uint32_t)) { 4670 EMLXS_MSGF(EMLXS_CONTEXT, 4671 &emlxs_sfs_debug_msg, 4672 "fca_port_manage: EMLXS_TEST_CODE. " 4673 "inbuf to small."); 4674 4675 ret = FC_BADCMD; 4676 break; 4677 } 4678 4679 cmd = (uint32_t *)pm->pm_cmd_buf; 4680 4681 ret = emlxs_test(hba, cmd[0], 4682 (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]); 4683 4684 break; 4685 } 4686 4687 default: 4688 4689 ret = FC_INVALID_REQUEST; 4690 break; 4691 } 4692 4693 break; 4694 4695 } 4696 4697 case FC_PORT_INITIALIZE: 4698 if (!(hba->flag & FC_ONLINE_MODE)) { 4699 return (FC_OFFLINE); 4700 } 4701 4702 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4703 "fca_port_manage: FC_PORT_INITIALIZE"); 4704 break; 4705 4706 case FC_PORT_LOOPBACK: 4707 if (!(hba->flag & FC_ONLINE_MODE)) { 4708 return (FC_OFFLINE); 4709 } 4710 4711 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4712 "fca_port_manage: FC_PORT_LOOPBACK"); 4713 break; 4714 4715 case FC_PORT_BYPASS: 4716 if (!(hba->flag & FC_ONLINE_MODE)) { 4717 return (FC_OFFLINE); 4718 } 4719 4720 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4721 "fca_port_manage: FC_PORT_BYPASS"); 4722 ret = FC_INVALID_REQUEST; 4723 break; 4724 4725 case FC_PORT_UNBYPASS: 4726 if (!(hba->flag & FC_ONLINE_MODE)) { 4727 return (FC_OFFLINE); 4728 } 4729 4730 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4731 "fca_port_manage: FC_PORT_UNBYPASS"); 4732 ret = FC_INVALID_REQUEST; 4733 break; 4734 4735 case FC_PORT_GET_NODE_ID: 4736 { 4737 fc_rnid_t *rnid; 4738 4739 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4740 "fca_port_manage: FC_PORT_GET_NODE_ID"); 4741 4742 bzero(pm->pm_data_buf, pm->pm_data_len); 4743 4744 if (pm->pm_data_len < sizeof (fc_rnid_t)) { 4745 ret = FC_NOMEM; 4746 break; 4747 } 4748 4749 rnid = (fc_rnid_t *)pm->pm_data_buf; 4750 4751 (void) sprintf((char *)rnid->global_id, 4752 "%01x%01x%02x%02x%02x%02x%02x%02x%02x", 4753 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, 4754 hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0], 4755 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3], 
4756 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]); 4757 4758 rnid->unit_type = RNID_HBA; 4759 rnid->port_id = port->did; 4760 rnid->ip_version = RNID_IPV4; 4761 4762 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4763 "GET_NODE_ID: wwpn: %s", rnid->global_id); 4764 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4765 "GET_NODE_ID: unit_type: 0x%x", rnid->unit_type); 4766 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4767 "GET_NODE_ID: port_id: 0x%x", rnid->port_id); 4768 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4769 "GET_NODE_ID: num_attach: %d", rnid->num_attached); 4770 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4771 "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version); 4772 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4773 "GET_NODE_ID: udp_port: 0x%x", rnid->udp_port); 4774 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4775 "GET_NODE_ID: ip_addr: %s", rnid->ip_addr); 4776 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4777 "GET_NODE_ID: resv: 0x%x", rnid->specific_id_resv); 4778 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4779 "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags); 4780 4781 ret = FC_SUCCESS; 4782 break; 4783 } 4784 4785 case FC_PORT_SET_NODE_ID: 4786 { 4787 fc_rnid_t *rnid; 4788 4789 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4790 "fca_port_manage: FC_PORT_SET_NODE_ID"); 4791 4792 if (pm->pm_data_len < sizeof (fc_rnid_t)) { 4793 ret = FC_NOMEM; 4794 break; 4795 } 4796 4797 rnid = (fc_rnid_t *)pm->pm_data_buf; 4798 4799 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4800 "SET_NODE_ID: wwpn: %s", rnid->global_id); 4801 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4802 "SET_NODE_ID: unit_type: 0x%x", rnid->unit_type); 4803 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4804 "SET_NODE_ID: port_id: 0x%x", rnid->port_id); 4805 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4806 "SET_NODE_ID: num_attach: %d", rnid->num_attached); 4807 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4808 "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version); 4809 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4810 "SET_NODE_ID: udp_port: 0x%x", rnid->udp_port); 4811 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4812 "SET_NODE_ID: ip_addr: %s", rnid->ip_addr); 4813 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4814 "SET_NODE_ID: resv: 0x%x", rnid->specific_id_resv); 4815 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4816 "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags); 4817 4818 ret = FC_SUCCESS; 4819 break; 4820 } 4821 4822 default: 4823 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4824 "fca_port_manage: code=%x", pm->pm_cmd_code); 4825 ret = FC_INVALID_REQUEST; 4826 break; 4827 4828 } 4829 4830 return (ret); 4831 4832 } /* emlxs_port_manage() */ 4833 4834 4835 /*ARGSUSED*/ 4836 static uint32_t 4837 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args, 4838 uint32_t *arg) 4839 { 4840 uint32_t rval = 0; 4841 emlxs_port_t *port = &PPORT; 4842 4843 switch (test_code) { 4844 #ifdef TEST_SUPPORT 4845 case 1: /* SCSI underrun */ 4846 { 4847 hba->underrun_counter = (args)? arg[0]:1; 4848 break; 4849 } 4850 #endif /* TEST_SUPPORT */ 4851 4852 default: 4853 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4854 "emlxs_test: Unsupported test code. (0x%x)", test_code); 4855 rval = FC_INVALID_REQUEST; 4856 } 4857 4858 return (rval); 4859 4860 } /* emlxs_test() */ 4861 4862 4863 /* 4864 * Given the device number, return the devinfo pointer or the ddiinst number. 4865 * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even 4866 * before attach. 
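 *
 * DDI_INFO_DEVT2INSTANCE must not depend on the soft state because the
 * framework may ask for the instance number before attach completes; the
 * minor number of the dev_t is therefore returned directly as the instance.
 * Only DDI_INFO_DEVT2DEVINFO requires the soft state to be present.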
4867 * 4868 * Translate "dev_t" to a pointer to the associated "dev_info_t". 4869 */ 4870 /*ARGSUSED*/ 4871 static int 4872 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 4873 { 4874 emlxs_hba_t *hba; 4875 int32_t ddiinst; 4876 4877 ddiinst = getminor((dev_t)arg); 4878 4879 switch (infocmd) { 4880 case DDI_INFO_DEVT2DEVINFO: 4881 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 4882 if (hba) 4883 *result = hba->dip; 4884 else 4885 *result = NULL; 4886 break; 4887 4888 case DDI_INFO_DEVT2INSTANCE: 4889 *result = (void *)((unsigned long)ddiinst); 4890 break; 4891 4892 default: 4893 return (DDI_FAILURE); 4894 } 4895 4896 return (DDI_SUCCESS); 4897 4898 } /* emlxs_info() */ 4899 4900 4901 static int32_t 4902 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level) 4903 { 4904 emlxs_hba_t *hba; 4905 emlxs_port_t *port; 4906 int32_t ddiinst; 4907 int rval = DDI_SUCCESS; 4908 4909 ddiinst = ddi_get_instance(dip); 4910 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 4911 port = &PPORT; 4912 4913 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4914 "fca_power: comp=%x level=%x", comp, level); 4915 4916 if (hba == NULL || comp != EMLXS_PM_ADAPTER) { 4917 return (DDI_FAILURE); 4918 } 4919 4920 mutex_enter(&hba->pm_lock); 4921 4922 /* If we are already at the proper level then return success */ 4923 if (hba->pm_level == level) { 4924 mutex_exit(&hba->pm_lock); 4925 return (DDI_SUCCESS); 4926 } 4927 4928 switch (level) { 4929 case EMLXS_PM_ADAPTER_UP: 4930 4931 /* 4932 * If we are already in emlxs_attach, 4933 * let emlxs_hba_attach take care of things 4934 */ 4935 if (hba->pm_state & EMLXS_PM_IN_ATTACH) { 4936 hba->pm_level = EMLXS_PM_ADAPTER_UP; 4937 break; 4938 } 4939 4940 /* Check if adapter is suspended */ 4941 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 4942 hba->pm_level = EMLXS_PM_ADAPTER_UP; 4943 4944 /* Try to resume the port */ 4945 rval = emlxs_hba_resume(dip); 4946 4947 if (rval != DDI_SUCCESS) { 4948 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 4949 } 4950 break; 4951 } 4952 4953 /* Set adapter up */ 4954 hba->pm_level = EMLXS_PM_ADAPTER_UP; 4955 break; 4956 4957 case EMLXS_PM_ADAPTER_DOWN: 4958 4959 4960 /* 4961 * If we are already in emlxs_detach, 4962 * let emlxs_hba_detach take care of things 4963 */ 4964 if (hba->pm_state & EMLXS_PM_IN_DETACH) { 4965 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 4966 break; 4967 } 4968 4969 /* Check if adapter is not suspended */ 4970 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) { 4971 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 4972 4973 /* Try to suspend the port */ 4974 rval = emlxs_hba_suspend(dip); 4975 4976 if (rval != DDI_SUCCESS) { 4977 hba->pm_level = EMLXS_PM_ADAPTER_UP; 4978 } 4979 4980 break; 4981 } 4982 4983 /* Set adapter down */ 4984 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 4985 break; 4986 4987 default: 4988 rval = DDI_FAILURE; 4989 break; 4990 4991 } 4992 4993 mutex_exit(&hba->pm_lock); 4994 4995 return (rval); 4996 4997 } /* emlxs_power() */ 4998 4999 5000 #ifdef EMLXS_I386 5001 #ifdef S11 5002 /* 5003 * quiesce(9E) entry point. 5004 * 5005 * This function is called when the system is single-thread at hight PIL 5006 * with preemption disabled. Therefore, this function must not be blocked. 5007 * 5008 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 5009 * DDI_FAILURE indicates an eerror condition and should almost never happen. 
5010 */ 5011 static int 5012 emlxs_quiesce(dev_info_t *dip) 5013 { 5014 emlxs_hba_t *hba; 5015 emlxs_port_t *port; 5016 int32_t ddiinst; 5017 int rval = DDI_SUCCESS; 5018 5019 ddiinst = ddi_get_instance(dip); 5020 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5021 port = &PPORT; 5022 5023 if (hba == NULL || port == NULL) { 5024 return (DDI_FAILURE); 5025 } 5026 5027 if (emlxs_sli_hba_reset(hba, 1, 1) == 0) { 5028 return (rval); 5029 } else { 5030 return (DDI_FAILURE); 5031 } 5032 5033 } /* emlxs_quiesce */ 5034 #endif 5035 #endif /* EMLXS_I386 */ 5036 5037 5038 static int 5039 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p) 5040 { 5041 emlxs_hba_t *hba; 5042 emlxs_port_t *port; 5043 int ddiinst; 5044 5045 ddiinst = getminor(*dev_p); 5046 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5047 5048 if (hba == NULL) { 5049 return (ENXIO); 5050 } 5051 5052 port = &PPORT; 5053 5054 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 5055 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, 5056 "open failed: Driver suspended."); 5057 return (ENXIO); 5058 } 5059 5060 if (otype != OTYP_CHR) { 5061 return (EINVAL); 5062 } 5063 5064 if (drv_priv(cred_p)) { 5065 return (EPERM); 5066 } 5067 5068 mutex_enter(&EMLXS_IOCTL_LOCK); 5069 5070 if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) { 5071 mutex_exit(&EMLXS_IOCTL_LOCK); 5072 return (EBUSY); 5073 } 5074 5075 if (flag & FEXCL) { 5076 if (hba->ioctl_flags & EMLXS_OPEN) { 5077 mutex_exit(&EMLXS_IOCTL_LOCK); 5078 return (EBUSY); 5079 } 5080 5081 hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE; 5082 } 5083 5084 hba->ioctl_flags |= EMLXS_OPEN; 5085 5086 mutex_exit(&EMLXS_IOCTL_LOCK); 5087 5088 return (0); 5089 5090 } /* emlxs_open() */ 5091 5092 5093 /*ARGSUSED*/ 5094 static int 5095 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p) 5096 { 5097 emlxs_hba_t *hba; 5098 int ddiinst; 5099 5100 ddiinst = getminor(dev); 5101 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5102 5103 if (hba == NULL) { 5104 return (ENXIO); 5105 } 5106 5107 if (otype != OTYP_CHR) { 5108 return (EINVAL); 5109 } 5110 5111 mutex_enter(&EMLXS_IOCTL_LOCK); 5112 5113 if (!(hba->ioctl_flags & EMLXS_OPEN)) { 5114 mutex_exit(&EMLXS_IOCTL_LOCK); 5115 return (ENODEV); 5116 } 5117 5118 hba->ioctl_flags &= ~EMLXS_OPEN; 5119 hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE; 5120 5121 mutex_exit(&EMLXS_IOCTL_LOCK); 5122 5123 return (0); 5124 5125 } /* emlxs_close() */ 5126 5127 5128 /*ARGSUSED*/ 5129 static int 5130 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode, 5131 cred_t *cred_p, int32_t *rval_p) 5132 { 5133 emlxs_hba_t *hba; 5134 emlxs_port_t *port; 5135 int rval = 0; /* return code */ 5136 int ddiinst; 5137 5138 ddiinst = getminor(dev); 5139 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5140 5141 if (hba == NULL) { 5142 return (ENXIO); 5143 } 5144 5145 port = &PPORT; 5146 5147 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 5148 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, 5149 "ioctl failed: Driver suspended."); 5150 5151 return (ENXIO); 5152 } 5153 5154 mutex_enter(&EMLXS_IOCTL_LOCK); 5155 if (!(hba->ioctl_flags & EMLXS_OPEN)) { 5156 mutex_exit(&EMLXS_IOCTL_LOCK); 5157 return (ENXIO); 5158 } 5159 mutex_exit(&EMLXS_IOCTL_LOCK); 5160 5161 #ifdef IDLE_TIMER 5162 emlxs_pm_busy_component(hba); 5163 #endif /* IDLE_TIMER */ 5164 5165 switch (cmd) { 5166 #ifdef DFC_SUPPORT 5167 case EMLXS_DFC_COMMAND: 5168 rval = emlxs_dfc_manage(hba, (void *)arg, mode); 5169 break; 5170 #endif /* DFC_SUPPORT */ 5171 5172 default: 5173 EMLXS_MSGF(EMLXS_CONTEXT, 
&emlxs_ioctl_detail_msg, 5174 "ioctl: Invalid command received. cmd=%x", cmd); 5175 rval = EINVAL; 5176 } 5177 5178 done: 5179 return (rval); 5180 5181 } /* emlxs_ioctl() */ 5182 5183 5184 5185 /* 5186 * 5187 * Device Driver Common Routines 5188 * 5189 */ 5190 5191 /* emlxs_pm_lock must be held for this call */ 5192 static int 5193 emlxs_hba_resume(dev_info_t *dip) 5194 { 5195 emlxs_hba_t *hba; 5196 emlxs_port_t *port; 5197 int ddiinst; 5198 5199 ddiinst = ddi_get_instance(dip); 5200 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5201 port = &PPORT; 5202 5203 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL); 5204 5205 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) { 5206 return (DDI_SUCCESS); 5207 } 5208 5209 hba->pm_state &= ~EMLXS_PM_SUSPENDED; 5210 5211 /* Take the adapter online */ 5212 if (emlxs_power_up(hba)) { 5213 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg, 5214 "Unable to take adapter online."); 5215 5216 hba->pm_state |= EMLXS_PM_SUSPENDED; 5217 5218 return (DDI_FAILURE); 5219 } 5220 5221 return (DDI_SUCCESS); 5222 5223 } /* emlxs_hba_resume() */ 5224 5225 5226 /* emlxs_pm_lock must be held for this call */ 5227 static int 5228 emlxs_hba_suspend(dev_info_t *dip) 5229 { 5230 emlxs_hba_t *hba; 5231 emlxs_port_t *port; 5232 int ddiinst; 5233 5234 ddiinst = ddi_get_instance(dip); 5235 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5236 port = &PPORT; 5237 5238 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL); 5239 5240 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 5241 return (DDI_SUCCESS); 5242 } 5243 5244 hba->pm_state |= EMLXS_PM_SUSPENDED; 5245 5246 /* Take the adapter offline */ 5247 if (emlxs_power_down(hba)) { 5248 hba->pm_state &= ~EMLXS_PM_SUSPENDED; 5249 5250 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg, 5251 "Unable to take adapter offline."); 5252 5253 return (DDI_FAILURE); 5254 } 5255 5256 return (DDI_SUCCESS); 5257 5258 } /* emlxs_hba_suspend() */ 5259 5260 5261 5262 static void 5263 emlxs_lock_init(emlxs_hba_t *hba) 5264 { 5265 emlxs_port_t *port = &PPORT; 5266 int32_t ddiinst; 5267 char buf[64]; 5268 uint32_t i; 5269 5270 ddiinst = hba->ddiinst; 5271 5272 /* Initialize the power management */ 5273 (void) sprintf(buf, "%s%d_pm_lock mutex", DRIVER_NAME, ddiinst); 5274 mutex_init(&hba->pm_lock, buf, MUTEX_DRIVER, (void *)hba->intr_arg); 5275 5276 (void) sprintf(buf, "%s%d_adap_lock mutex", DRIVER_NAME, ddiinst); 5277 mutex_init(&EMLXS_TIMER_LOCK, buf, MUTEX_DRIVER, 5278 (void *)hba->intr_arg); 5279 5280 (void) sprintf(buf, "%s%d_adap_lock cv", DRIVER_NAME, ddiinst); 5281 cv_init(&hba->timer_lock_cv, buf, CV_DRIVER, NULL); 5282 5283 (void) sprintf(buf, "%s%d_port_lock mutex", DRIVER_NAME, ddiinst); 5284 mutex_init(&EMLXS_PORT_LOCK, buf, MUTEX_DRIVER, 5285 (void *)hba->intr_arg); 5286 5287 (void) sprintf(buf, "%s%d_mbox_lock mutex", DRIVER_NAME, ddiinst); 5288 mutex_init(&EMLXS_MBOX_LOCK, buf, MUTEX_DRIVER, 5289 (void *)hba->intr_arg); 5290 5291 (void) sprintf(buf, "%s%d_mbox_lock cv", DRIVER_NAME, ddiinst); 5292 cv_init(&EMLXS_MBOX_CV, buf, CV_DRIVER, NULL); 5293 5294 (void) sprintf(buf, "%s%d_linkup_lock mutex", DRIVER_NAME, ddiinst); 5295 mutex_init(&EMLXS_LINKUP_LOCK, buf, MUTEX_DRIVER, 5296 (void *)hba->intr_arg); 5297 5298 (void) sprintf(buf, "%s%d_linkup_lock cv", DRIVER_NAME, ddiinst); 5299 cv_init(&EMLXS_LINKUP_CV, buf, CV_DRIVER, NULL); 5300 5301 (void) sprintf(buf, "%s%d_ring_tx_lock mutex", DRIVER_NAME, ddiinst); 5302 mutex_init(&EMLXS_RINGTX_LOCK, buf, MUTEX_DRIVER, 5303 (void *)hba->intr_arg); 5304 5305 for (i = 0; i < MAX_RINGS; i++) 
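	/*
	 * Per-ring locks: one command-ring lock and one FCTAB lock are
	 * created for each ring.  As with the mutexes above, hba->intr_arg
	 * is passed as the interrupt block cookie so the locks can safely
	 * be acquired from interrupt context.
	 */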
{ 5306 (void) sprintf(buf, "%s%d_cmd_ring%d_lock mutex", DRIVER_NAME, 5307 ddiinst, i); 5308 mutex_init(&EMLXS_CMD_RING_LOCK(i), buf, MUTEX_DRIVER, 5309 (void *)hba->intr_arg); 5310 5311 (void) sprintf(buf, "%s%d_fctab%d_lock mutex", DRIVER_NAME, 5312 ddiinst, i); 5313 mutex_init(&EMLXS_FCTAB_LOCK(i), buf, MUTEX_DRIVER, 5314 (void *)hba->intr_arg); 5315 } 5316 5317 (void) sprintf(buf, "%s%d_memget_lock mutex", DRIVER_NAME, ddiinst); 5318 mutex_init(&EMLXS_MEMGET_LOCK, buf, MUTEX_DRIVER, 5319 (void *)hba->intr_arg); 5320 5321 (void) sprintf(buf, "%s%d_memput_lock mutex", DRIVER_NAME, ddiinst); 5322 mutex_init(&EMLXS_MEMPUT_LOCK, buf, MUTEX_DRIVER, 5323 (void *)hba->intr_arg); 5324 5325 (void) sprintf(buf, "%s%d_ioctl_lock mutex", DRIVER_NAME, ddiinst); 5326 mutex_init(&EMLXS_IOCTL_LOCK, buf, MUTEX_DRIVER, 5327 (void *)hba->intr_arg); 5328 5329 #ifdef DUMP_SUPPORT 5330 (void) sprintf(buf, "%s%d_dump mutex", DRIVER_NAME, ddiinst); 5331 mutex_init(&EMLXS_DUMP_LOCK, buf, MUTEX_DRIVER, 5332 (void *)hba->intr_arg); 5333 #endif /* DUMP_SUPPORT */ 5334 5335 /* Create per port locks */ 5336 for (i = 0; i < MAX_VPORTS; i++) { 5337 port = &VPORT(i); 5338 5339 rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL); 5340 5341 if (i == 0) { 5342 (void) sprintf(buf, "%s%d_pkt_lock mutex", DRIVER_NAME, 5343 ddiinst); 5344 mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER, 5345 (void *)hba->intr_arg); 5346 5347 (void) sprintf(buf, "%s%d_pkt_lock cv", DRIVER_NAME, 5348 ddiinst); 5349 cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL); 5350 5351 (void) sprintf(buf, "%s%d_ub_lock mutex", DRIVER_NAME, 5352 ddiinst); 5353 mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER, 5354 (void *)hba->intr_arg); 5355 } else { 5356 (void) sprintf(buf, "%s%d.%d_pkt_lock mutex", 5357 DRIVER_NAME, ddiinst, port->vpi); 5358 mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER, 5359 (void *)hba->intr_arg); 5360 5361 (void) sprintf(buf, "%s%d.%d_pkt_lock cv", DRIVER_NAME, 5362 ddiinst, port->vpi); 5363 cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL); 5364 5365 (void) sprintf(buf, "%s%d.%d_ub_lock mutex", 5366 DRIVER_NAME, ddiinst, port->vpi); 5367 mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER, 5368 (void *)hba->intr_arg); 5369 } 5370 } 5371 5372 return; 5373 5374 } /* emlxs_lock_init() */ 5375 5376 5377 5378 static void 5379 emlxs_lock_destroy(emlxs_hba_t *hba) 5380 { 5381 emlxs_port_t *port = &PPORT; 5382 uint32_t i; 5383 5384 mutex_destroy(&EMLXS_TIMER_LOCK); 5385 cv_destroy(&hba->timer_lock_cv); 5386 5387 mutex_destroy(&EMLXS_PORT_LOCK); 5388 5389 cv_destroy(&EMLXS_MBOX_CV); 5390 cv_destroy(&EMLXS_LINKUP_CV); 5391 5392 mutex_destroy(&EMLXS_LINKUP_LOCK); 5393 mutex_destroy(&EMLXS_MBOX_LOCK); 5394 5395 mutex_destroy(&EMLXS_RINGTX_LOCK); 5396 5397 for (i = 0; i < MAX_RINGS; i++) { 5398 mutex_destroy(&EMLXS_CMD_RING_LOCK(i)); 5399 mutex_destroy(&EMLXS_FCTAB_LOCK(i)); 5400 } 5401 5402 mutex_destroy(&EMLXS_MEMGET_LOCK); 5403 mutex_destroy(&EMLXS_MEMPUT_LOCK); 5404 mutex_destroy(&EMLXS_IOCTL_LOCK); 5405 mutex_destroy(&hba->pm_lock); 5406 5407 #ifdef DUMP_SUPPORT 5408 mutex_destroy(&EMLXS_DUMP_LOCK); 5409 #endif /* DUMP_SUPPORT */ 5410 5411 /* Destroy per port locks */ 5412 for (i = 0; i < MAX_VPORTS; i++) { 5413 port = &VPORT(i); 5414 rw_destroy(&port->node_rwlock); 5415 mutex_destroy(&EMLXS_PKT_LOCK); 5416 cv_destroy(&EMLXS_PKT_CV); 5417 mutex_destroy(&EMLXS_UB_LOCK); 5418 } 5419 5420 return; 5421 5422 } /* emlxs_lock_destroy() */ 5423 5424 5425 /* init_flag values */ 5426 #define ATTACH_SOFT_STATE 0x00000001 5427 #define ATTACH_FCA_TRAN 0x00000002 5428 #define 
ATTACH_HBA 0x00000004 5429 #define ATTACH_LOG 0x00000008 5430 #define ATTACH_MAP_BUS 0x00000010 5431 #define ATTACH_INTR_INIT 0x00000020 5432 #define ATTACH_PROP 0x00000040 5433 #define ATTACH_LOCK 0x00000080 5434 #define ATTACH_THREAD 0x00000100 5435 #define ATTACH_INTR_ADD 0x00000200 5436 #define ATTACH_ONLINE 0x00000400 5437 #define ATTACH_NODE 0x00000800 5438 #define ATTACH_FCT 0x00001000 5439 #define ATTACH_FCA 0x00002000 5440 #define ATTACH_KSTAT 0x00004000 5441 #define ATTACH_DHCHAP 0x00008000 5442 #define ATTACH_FM 0x00010000 5443 #define ATTACH_MAP_SLI 0x00020000 5444 #define ATTACH_SPAWN 0x00040000 5445 5446 static void 5447 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed) 5448 { 5449 emlxs_hba_t *hba = NULL; 5450 int ddiinst; 5451 5452 ddiinst = ddi_get_instance(dip); 5453 5454 if (init_flag & ATTACH_HBA) { 5455 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5456 5457 if (init_flag & ATTACH_SPAWN) { 5458 emlxs_thread_spawn_destroy(hba); 5459 } 5460 5461 if (init_flag & ATTACH_ONLINE) { 5462 (void) emlxs_offline(hba); 5463 } 5464 5465 if (init_flag & ATTACH_INTR_ADD) { 5466 (void) EMLXS_INTR_REMOVE(hba); 5467 } 5468 #ifdef SFCT_SUPPORT 5469 if (init_flag & ATTACH_FCT) { 5470 emlxs_fct_detach(hba); 5471 } 5472 #endif /* SFCT_SUPPORT */ 5473 5474 #ifdef DHCHAP_SUPPORT 5475 if (init_flag & ATTACH_DHCHAP) { 5476 emlxs_dhc_detach(hba); 5477 } 5478 #endif /* DHCHAP_SUPPORT */ 5479 5480 if (init_flag & ATTACH_KSTAT) { 5481 kstat_delete(hba->kstat); 5482 } 5483 5484 if (init_flag & ATTACH_FCA) { 5485 emlxs_fca_detach(hba); 5486 } 5487 5488 if (init_flag & ATTACH_NODE) { 5489 (void) ddi_remove_minor_node(hba->dip, "devctl"); 5490 } 5491 5492 if (init_flag & ATTACH_THREAD) { 5493 emlxs_thread_destroy(&hba->iodone_thread); 5494 } 5495 5496 if (init_flag & ATTACH_PROP) { 5497 (void) ddi_prop_remove_all(hba->dip); 5498 } 5499 5500 if (init_flag & ATTACH_LOCK) { 5501 emlxs_lock_destroy(hba); 5502 } 5503 5504 if (init_flag & ATTACH_INTR_INIT) { 5505 (void) EMLXS_INTR_UNINIT(hba); 5506 } 5507 5508 if (init_flag & ATTACH_MAP_BUS) { 5509 emlxs_unmap_bus(hba); 5510 } 5511 5512 if (init_flag & ATTACH_MAP_SLI) { 5513 emlxs_sli_unmap_hdw(hba); 5514 } 5515 5516 #ifdef FMA_SUPPORT 5517 if (init_flag & ATTACH_FM) { 5518 emlxs_fm_fini(hba); 5519 } 5520 #endif /* FMA_SUPPORT */ 5521 5522 if (init_flag & ATTACH_LOG) { 5523 (void) emlxs_msg_log_destroy(hba); 5524 } 5525 5526 if (init_flag & ATTACH_FCA_TRAN) { 5527 (void) ddi_set_driver_private(hba->dip, NULL); 5528 kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t)); 5529 hba->fca_tran = NULL; 5530 } 5531 5532 if (init_flag & ATTACH_HBA) { 5533 emlxs_device.log[hba->emlxinst] = 0; 5534 emlxs_device.hba[hba->emlxinst] = 5535 (emlxs_hba_t *)((unsigned long)((failed) ? 
-1 : 0)); 5536 5537 #ifdef DUMP_SUPPORT 5538 emlxs_device.dump_txtfile[hba->emlxinst] = 0; 5539 emlxs_device.dump_dmpfile[hba->emlxinst] = 0; 5540 emlxs_device.dump_ceefile[hba->emlxinst] = 0; 5541 #endif /* DUMP_SUPPORT */ 5542 5543 } 5544 } 5545 5546 if (init_flag & ATTACH_SOFT_STATE) { 5547 (void) ddi_soft_state_free(emlxs_soft_state, ddiinst); 5548 } 5549 5550 return; 5551 5552 } /* emlxs_driver_remove() */ 5553 5554 5555 5556 /* This determines which ports will be initiator mode */ 5557 static void 5558 emlxs_fca_init(emlxs_hba_t *hba) 5559 { 5560 emlxs_port_t *port = &PPORT; 5561 emlxs_port_t *vport; 5562 uint32_t i; 5563 5564 if (!hba->ini_mode) { 5565 return; 5566 } 5567 5568 /* Check if SFS present */ 5569 if (((void *)MODSYM(fc_fca_init) == NULL) || 5570 ((void *)MODSYM(fc_fca_attach) == NULL)) { 5571 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 5572 "SFS not present. Initiator mode disabled."); 5573 goto failed; 5574 } 5575 5576 /* Check if our SFS driver interface matches the current SFS stack */ 5577 if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) { 5578 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 5579 "SFS/FCA version mismatch. FCA=0x%x", 5580 hba->fca_tran->fca_version); 5581 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 5582 "SFS present. Initiator mode disabled."); 5583 5584 goto failed; 5585 } 5586 5587 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 5588 "SFS present. Initiator mode enabled."); 5589 5590 return; 5591 5592 failed: 5593 5594 hba->ini_mode = 0; 5595 for (i = 0; i < MAX_VPORTS; i++) { 5596 vport = &VPORT(i); 5597 vport->ini_mode = 0; 5598 } 5599 5600 return; 5601 5602 } /* emlxs_fca_init() */ 5603 5604 5605 /* This determines which ports will be initiator or target mode */ 5606 static void 5607 emlxs_set_mode(emlxs_hba_t *hba) 5608 { 5609 emlxs_port_t *port = &PPORT; 5610 emlxs_port_t *vport; 5611 uint32_t i; 5612 uint32_t tgt_mode = 0; 5613 5614 #ifdef SFCT_SUPPORT 5615 emlxs_config_t *cfg; 5616 5617 cfg = &hba->config[CFG_TARGET_MODE]; 5618 tgt_mode = cfg->current; 5619 5620 port->fct_flags = 0; 5621 #endif /* SFCT_SUPPORT */ 5622 5623 /* Initialize physical port */ 5624 if (tgt_mode) { 5625 hba->tgt_mode = 1; 5626 hba->ini_mode = 0; 5627 5628 port->tgt_mode = 1; 5629 port->ini_mode = 0; 5630 } else { 5631 hba->tgt_mode = 0; 5632 hba->ini_mode = 1; 5633 5634 port->tgt_mode = 0; 5635 port->ini_mode = 1; 5636 } 5637 5638 /* Initialize virtual ports */ 5639 /* Virtual ports take on the mode of the parent physical port */ 5640 for (i = 1; i < MAX_VPORTS; i++) { 5641 vport = &VPORT(i); 5642 5643 #ifdef SFCT_SUPPORT 5644 vport->fct_flags = 0; 5645 #endif /* SFCT_SUPPORT */ 5646 5647 vport->ini_mode = port->ini_mode; 5648 vport->tgt_mode = port->tgt_mode; 5649 } 5650 5651 /* Check if initiator mode is requested */ 5652 if (hba->ini_mode) { 5653 emlxs_fca_init(hba); 5654 } else { 5655 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 5656 "Initiator mode not enabled."); 5657 } 5658 5659 #ifdef SFCT_SUPPORT 5660 /* Check if target mode is requested */ 5661 if (hba->tgt_mode) { 5662 emlxs_fct_init(hba); 5663 } else { 5664 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 5665 "Target mode not enabled."); 5666 } 5667 #endif /* SFCT_SUPPORT */ 5668 5669 return; 5670 5671 } /* emlxs_set_mode() */ 5672 5673 5674 5675 static void 5676 emlxs_fca_attach(emlxs_hba_t *hba) 5677 { 5678 /* Update our transport structure */ 5679 hba->fca_tran->fca_iblock = (ddi_iblock_cookie_t *)&hba->intr_arg; 5680 hba->fca_tran->fca_cmd_max = 
hba->io_throttle; 5681 5682 #if (EMLXS_MODREV >= EMLXS_MODREV5) 5683 bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn, 5684 sizeof (NAME_TYPE)); 5685 #endif /* >= EMLXS_MODREV5 */ 5686 5687 return; 5688 5689 } /* emlxs_fca_attach() */ 5690 5691 5692 static void 5693 emlxs_fca_detach(emlxs_hba_t *hba) 5694 { 5695 uint32_t i; 5696 emlxs_port_t *vport; 5697 5698 if (hba->ini_mode) { 5699 if ((void *)MODSYM(fc_fca_detach) != NULL) { 5700 MODSYM(fc_fca_detach)(hba->dip); 5701 } 5702 5703 hba->ini_mode = 0; 5704 5705 for (i = 0; i < MAX_VPORTS; i++) { 5706 vport = &VPORT(i); 5707 vport->ini_mode = 0; 5708 } 5709 } 5710 5711 return; 5712 5713 } /* emlxs_fca_detach() */ 5714 5715 5716 5717 static void 5718 emlxs_drv_banner(emlxs_hba_t *hba) 5719 { 5720 emlxs_port_t *port = &PPORT; 5721 uint32_t i; 5722 char msi_mode[16]; 5723 char npiv_mode[16]; 5724 emlxs_vpd_t *vpd = &VPD; 5725 emlxs_config_t *cfg = &CFG; 5726 uint8_t *wwpn; 5727 uint8_t *wwnn; 5728 5729 /* Display firmware library one time */ 5730 if (emlxs_instance_count == 1) { 5731 emlxs_fw_show(hba); 5732 } 5733 5734 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label, 5735 emlxs_revision); 5736 5737 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 5738 "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model, 5739 hba->model_info.device_id, hba->model_info.ssdid, 5740 hba->model_info.id); 5741 5742 #ifdef EMLXS_I386 5743 5744 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 5745 "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label, 5746 vpd->boot_version); 5747 5748 #else /* EMLXS_SPARC */ 5749 5750 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 5751 "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version, 5752 vpd->fw_label, vpd->boot_version, vpd->fcode_version); 5753 5754 #endif /* EMLXS_I386 */ 5755 5756 (void) strcpy(msi_mode, " INTX:1"); 5757 5758 #ifdef MSI_SUPPORT 5759 if (hba->intr_flags & EMLXS_MSI_ENABLED) { 5760 switch (hba->intr_type) { 5761 case DDI_INTR_TYPE_FIXED: 5762 (void) strcpy(msi_mode, " MSI:0"); 5763 break; 5764 5765 case DDI_INTR_TYPE_MSI: 5766 (void) sprintf(msi_mode, " MSI:%d", hba->intr_count); 5767 break; 5768 5769 case DDI_INTR_TYPE_MSIX: 5770 (void) sprintf(msi_mode, " MSIX:%d", hba->intr_count); 5771 break; 5772 } 5773 } 5774 #endif 5775 5776 (void) strcpy(npiv_mode, ""); 5777 5778 #ifdef SLI3_SUPPORT 5779 if (hba->flag & FC_NPIV_ENABLED) { 5780 (void) sprintf(npiv_mode, " NPIV:%d", hba->vpi_max); 5781 } else { 5782 (void) strcpy(npiv_mode, " NPIV:0"); 5783 } 5784 #endif /* SLI3_SUPPORT */ 5785 5786 5787 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "SLI:%d%s%s%s%s", 5788 hba->sli_mode, msi_mode, npiv_mode, 5789 ((hba->ini_mode)? " FCA":""), ((hba->tgt_mode)? 
" FCT":"")); 5790 5791 wwpn = (uint8_t *)&hba->wwpn; 5792 wwnn = (uint8_t *)&hba->wwnn; 5793 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 5794 "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X " 5795 "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X", 5796 wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6], 5797 wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5], 5798 wwnn[6], wwnn[7]); 5799 5800 #ifdef SLI3_SUPPORT 5801 for (i = 0; i < MAX_VPORTS; i++) { 5802 port = &VPORT(i); 5803 5804 if (!(port->flag & EMLXS_PORT_CONFIG)) { 5805 continue; 5806 } 5807 5808 wwpn = (uint8_t *)&port->wwpn; 5809 wwnn = (uint8_t *)&port->wwnn; 5810 5811 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 5812 "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X " 5813 "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X", 5814 wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], 5815 wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], 5816 wwnn[4], wwnn[5], wwnn[6], wwnn[7]); 5817 } 5818 port = &PPORT; 5819 5820 #ifdef NPIV_SUPPORT 5821 /* 5822 * No dependency for Restricted login parameter. 5823 */ 5824 if ((cfg[CFG_VPORT_RESTRICTED].current) && (port->ini_mode)) { 5825 port->flag |= EMLXS_PORT_RESTRICTED; 5826 } else { 5827 port->flag &= ~EMLXS_PORT_RESTRICTED; 5828 } 5829 #endif /* NPIV_SUPPORT */ 5830 5831 #endif /* SLI3_SUPPORT */ 5832 5833 /* 5834 * Announce the device: ddi_report_dev() prints a banner at boot time, 5835 * announcing the device pointed to by dip. 5836 */ 5837 (void) ddi_report_dev(hba->dip); 5838 5839 return; 5840 5841 } /* emlxs_drv_banner() */ 5842 5843 5844 extern void 5845 emlxs_get_fcode_version(emlxs_hba_t *hba) 5846 { 5847 emlxs_vpd_t *vpd = &VPD; 5848 char *prop_str; 5849 int status; 5850 5851 /* Setup fcode version property */ 5852 prop_str = NULL; 5853 status = 5854 ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0, 5855 "fcode-version", (char **)&prop_str); 5856 5857 if (status == DDI_PROP_SUCCESS) { 5858 bcopy(prop_str, vpd->fcode_version, strlen(prop_str)); 5859 (void) ddi_prop_free((void *)prop_str); 5860 } else { 5861 (void) strcpy(vpd->fcode_version, "none"); 5862 } 5863 5864 return; 5865 5866 } /* emlxs_get_fcode_version() */ 5867 5868 5869 static int 5870 emlxs_hba_attach(dev_info_t *dip) 5871 { 5872 emlxs_hba_t *hba; 5873 emlxs_port_t *port; 5874 emlxs_config_t *cfg; 5875 char *prop_str; 5876 int ddiinst; 5877 int32_t emlxinst; 5878 int status; 5879 uint32_t rval; 5880 uint32_t init_flag = 0; 5881 char local_pm_components[32]; 5882 #ifdef EMLXS_I386 5883 uint32_t i; 5884 #endif /* EMLXS_I386 */ 5885 5886 ddiinst = ddi_get_instance(dip); 5887 emlxinst = emlxs_add_instance(ddiinst); 5888 5889 if (emlxinst >= MAX_FC_BRDS) { 5890 cmn_err(CE_WARN, 5891 "?%s: fca_hba_attach failed. Too many driver ddiinsts. " 5892 "inst=%x", DRIVER_NAME, ddiinst); 5893 return (DDI_FAILURE); 5894 } 5895 5896 if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) { 5897 return (DDI_FAILURE); 5898 } 5899 5900 if (emlxs_device.hba[emlxinst]) { 5901 return (DDI_SUCCESS); 5902 } 5903 5904 /* An adapter can accidentally be plugged into a slave-only PCI slot */ 5905 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 5906 cmn_err(CE_WARN, 5907 "?%s%d: fca_hba_attach failed. Device in slave-only slot.", 5908 DRIVER_NAME, ddiinst); 5909 return (DDI_FAILURE); 5910 } 5911 5912 /* Allocate emlxs_dev_ctl structure. */ 5913 if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) { 5914 cmn_err(CE_WARN, 5915 "?%s%d: fca_hba_attach failed. 
Unable to allocate soft " 5916 "state.", DRIVER_NAME, ddiinst); 5917 return (DDI_FAILURE); 5918 } 5919 init_flag |= ATTACH_SOFT_STATE; 5920 5921 if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state, 5922 ddiinst)) == NULL) { 5923 cmn_err(CE_WARN, 5924 "?%s%d: fca_hba_attach failed. Unable to get soft state.", 5925 DRIVER_NAME, ddiinst); 5926 goto failed; 5927 } 5928 bzero((char *)hba, sizeof (emlxs_hba_t)); 5929 5930 emlxs_device.hba[emlxinst] = hba; 5931 emlxs_device.log[emlxinst] = &hba->log; 5932 5933 #ifdef DUMP_SUPPORT 5934 emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile; 5935 emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile; 5936 emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile; 5937 #endif /* DUMP_SUPPORT */ 5938 5939 hba->dip = dip; 5940 hba->emlxinst = emlxinst; 5941 hba->ddiinst = ddiinst; 5942 hba->ini_mode = 0; 5943 hba->tgt_mode = 0; 5944 hba->mem_bpl_size = MEM_BPL_SIZE; 5945 5946 init_flag |= ATTACH_HBA; 5947 5948 /* Enable the physical port on this HBA */ 5949 port = &PPORT; 5950 port->hba = hba; 5951 port->vpi = 0; 5952 port->flag |= EMLXS_PORT_ENABLE; 5953 5954 /* Allocate a transport structure */ 5955 hba->fca_tran = 5956 (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP); 5957 if (hba->fca_tran == NULL) { 5958 cmn_err(CE_WARN, 5959 "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran " 5960 "memory.", DRIVER_NAME, ddiinst); 5961 goto failed; 5962 } 5963 bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran, 5964 sizeof (fc_fca_tran_t)); 5965 5966 /* Set the transport structure pointer in our dip */ 5967 /* SFS may panic if we are in target only mode */ 5968 /* We will update the transport structure later */ 5969 (void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran); 5970 init_flag |= ATTACH_FCA_TRAN; 5971 5972 /* Perform driver integrity check */ 5973 rval = emlxs_integrity_check(hba); 5974 if (rval) { 5975 cmn_err(CE_WARN, 5976 "?%s%d: fca_hba_attach failed. Driver integrity check " 5977 "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval); 5978 goto failed; 5979 } 5980 5981 cfg = &CFG; 5982 5983 bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg)); 5984 #ifdef MSI_SUPPORT 5985 if ((void *)&ddi_intr_get_supported_types != NULL) { 5986 hba->intr_flags |= EMLXS_MSI_ENABLED; 5987 } 5988 #endif /* MSI_SUPPORT */ 5989 5990 5991 /* Create the msg log file */ 5992 if (emlxs_msg_log_create(hba) == 0) { 5993 cmn_err(CE_WARN, 5994 "?%s%d: fca_hba_attach failed. Unable to create message " 5995 "log", DRIVER_NAME, ddiinst); 5996 goto failed; 5997 5998 } 5999 init_flag |= ATTACH_LOG; 6000 6001 /* We can begin to use EMLXS_MSGF from this point on */ 6002 6003 /* 6004 * Find the I/O bus type If it is not a SBUS card, 6005 * then it is a PCI card. Default is PCI_FC (0). 
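	 * The "name" property is used for this check: nodes whose name
	 * begins with "lpfs" are the SBUS boards and set bus_type to
	 * SBUS_FC; anything else is left at the PCI default.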
6006 */ 6007 prop_str = NULL; 6008 status = ddi_prop_lookup_string(DDI_DEV_T_ANY, 6009 (dev_info_t *)dip, 0, "name", (char **)&prop_str); 6010 6011 if (status == DDI_PROP_SUCCESS) { 6012 if (strncmp(prop_str, "lpfs", 4) == 0) { 6013 hba->bus_type = SBUS_FC; 6014 } 6015 6016 (void) ddi_prop_free((void *)prop_str); 6017 } 6018 #ifdef EMLXS_I386 6019 /* Update BPL size based on max_xfer_size */ 6020 i = cfg[CFG_MAX_XFER_SIZE].current; 6021 if (i > 688128) { 6022 /* 688128 = (((2048 / 12) - 2) * 4096) */ 6023 hba->mem_bpl_size = 4096; 6024 } else if (i > 339968) { 6025 /* 339968 = (((1024 / 12) - 2) * 4096) */ 6026 hba->mem_bpl_size = 2048; 6027 } else { 6028 hba->mem_bpl_size = 1024; 6029 } 6030 6031 /* Update dma_attr_sgllen based on BPL size */ 6032 i = BPL_TO_SGLLEN(hba->mem_bpl_size); 6033 emlxs_dma_attr.dma_attr_sgllen = i; 6034 emlxs_dma_attr_ro.dma_attr_sgllen = i; 6035 emlxs_dma_attr_fcip_rsp.dma_attr_sgllen = i; 6036 #endif /* EMLXS_I386 */ 6037 6038 /* 6039 * Copy DDS from the config method and update configuration parameters 6040 */ 6041 (void) emlxs_get_props(hba); 6042 6043 #ifdef FMA_SUPPORT 6044 hba->fm_caps = cfg[CFG_FM_CAPS].current; 6045 6046 emlxs_fm_init(hba); 6047 6048 init_flag |= ATTACH_FM; 6049 #endif /* FMA_SUPPORT */ 6050 6051 if (emlxs_map_bus(hba)) { 6052 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6053 "Unable to map memory"); 6054 goto failed; 6055 6056 } 6057 init_flag |= ATTACH_MAP_BUS; 6058 6059 /* Attempt to identify the adapter */ 6060 rval = emlxs_init_adapter_info(hba); 6061 6062 if (rval == 0) { 6063 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6064 "Unable to get adapter info. Id:%d Device id:0x%x " 6065 "Model:%s", hba->model_info.id, 6066 hba->model_info.device_id, hba->model_info.model); 6067 goto failed; 6068 } 6069 6070 /* Check if adapter is not supported */ 6071 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) { 6072 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6073 "Unsupported adapter found. Id:%d Device id:0x%x " 6074 "SSDID:0x%x Model:%s", hba->model_info.id, 6075 hba->model_info.device_id, 6076 hba->model_info.ssdid, hba->model_info.model); 6077 goto failed; 6078 } 6079 6080 if (emlxs_sli_map_hdw(hba)) { 6081 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6082 "Unable to map memory"); 6083 goto failed; 6084 6085 } 6086 init_flag |= ATTACH_MAP_SLI; 6087 6088 /* Initialize the interrupts. 
But don't add them yet */ 6089 status = EMLXS_INTR_INIT(hba, 0); 6090 if (status != DDI_SUCCESS) { 6091 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6092 "Unable to initalize interrupt(s)."); 6093 goto failed; 6094 6095 } 6096 init_flag |= ATTACH_INTR_INIT; 6097 6098 /* Initialize LOCKs */ 6099 emlxs_lock_init(hba); 6100 init_flag |= ATTACH_LOCK; 6101 6102 /* Initialize the power management */ 6103 mutex_enter(&hba->pm_lock); 6104 hba->pm_state = EMLXS_PM_IN_ATTACH; 6105 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 6106 hba->pm_busy = 0; 6107 #ifdef IDLE_TIMER 6108 hba->pm_active = 1; 6109 hba->pm_idle_timer = 0; 6110 #endif /* IDLE_TIMER */ 6111 mutex_exit(&hba->pm_lock); 6112 6113 /* Set the pm component name */ 6114 (void) sprintf(local_pm_components, "NAME=%s%d", DRIVER_NAME, 6115 ddiinst); 6116 emlxs_pm_components[0] = local_pm_components; 6117 6118 /* Check if power management support is enabled */ 6119 if (cfg[CFG_PM_SUPPORT].current) { 6120 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip, 6121 "pm-components", emlxs_pm_components, 6122 sizeof (emlxs_pm_components) / 6123 sizeof (emlxs_pm_components[0])) != 6124 DDI_PROP_SUCCESS) { 6125 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6126 "Unable to create pm components."); 6127 goto failed; 6128 } 6129 } 6130 6131 /* Needed for suspend and resume support */ 6132 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state", 6133 "needs-suspend-resume"); 6134 init_flag |= ATTACH_PROP; 6135 6136 emlxs_thread_spawn_create(hba); 6137 init_flag |= ATTACH_SPAWN; 6138 6139 emlxs_thread_create(hba, &hba->iodone_thread); 6140 init_flag |= ATTACH_THREAD; 6141 6142 /* Setup initiator / target ports */ 6143 emlxs_set_mode(hba); 6144 6145 /* If driver did not attach to either stack, */ 6146 /* then driver attach failed */ 6147 if (!hba->tgt_mode && !hba->ini_mode) { 6148 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6149 "Driver interfaces not enabled."); 6150 goto failed; 6151 } 6152 6153 /* 6154 * Initialize HBA 6155 */ 6156 6157 /* Set initial state */ 6158 mutex_enter(&EMLXS_PORT_LOCK); 6159 emlxs_diag_state = DDI_OFFDI; 6160 hba->flag |= FC_OFFLINE_MODE; 6161 hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE); 6162 mutex_exit(&EMLXS_PORT_LOCK); 6163 6164 if (status = emlxs_online(hba)) { 6165 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6166 "Unable to initialize adapter."); 6167 goto failed; 6168 } 6169 init_flag |= ATTACH_ONLINE; 6170 6171 /* This is to ensure that the model property is properly set */ 6172 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model", 6173 hba->model_info.model); 6174 6175 /* Create the device node. 
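	 * The "devctl" minor node created here (minor number == ddi instance)
	 * is the character device served by the emlxs_open(), emlxs_close()
	 * and emlxs_ioctl() routines above.  A management application would
	 * open it and issue EMLXS_DFC_COMMAND ioctls, roughly as sketched
	 * below (the device path is illustrative only; the actual path and
	 * dfc argument are system dependent):
	 *
	 *	fd = open("/devices/.../emlxs@0:devctl", O_RDWR);
	 *	(void) ioctl(fd, EMLXS_DFC_COMMAND, (intptr_t)arg);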
*/ 6176 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) == 6177 DDI_FAILURE) { 6178 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6179 "Unable to create device node."); 6180 goto failed; 6181 } 6182 init_flag |= ATTACH_NODE; 6183 6184 /* Attach initiator now */ 6185 /* This must come after emlxs_online() */ 6186 emlxs_fca_attach(hba); 6187 init_flag |= ATTACH_FCA; 6188 6189 /* Initialize kstat information */ 6190 hba->kstat = kstat_create(DRIVER_NAME, 6191 ddiinst, "statistics", "controller", 6192 KSTAT_TYPE_RAW, sizeof (emlxs_stats_t), 6193 KSTAT_FLAG_VIRTUAL); 6194 6195 if (hba->kstat == NULL) { 6196 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 6197 "kstat_create failed."); 6198 } else { 6199 hba->kstat->ks_data = (void *)&hba->stats; 6200 kstat_install(hba->kstat); 6201 init_flag |= ATTACH_KSTAT; 6202 } 6203 6204 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4) 6205 /* Setup virtual port properties */ 6206 emlxs_read_vport_prop(hba); 6207 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */ 6208 6209 6210 #ifdef DHCHAP_SUPPORT 6211 emlxs_dhc_attach(hba); 6212 init_flag |= ATTACH_DHCHAP; 6213 #endif /* DHCHAP_SUPPORT */ 6214 6215 /* Display the driver banner now */ 6216 emlxs_drv_banner(hba); 6217 6218 /* Raise the power level */ 6219 6220 /* 6221 * This will not execute emlxs_hba_resume because 6222 * EMLXS_PM_IN_ATTACH is set 6223 */ 6224 if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) { 6225 /* Set power up anyway. This should not happen! */ 6226 mutex_enter(&hba->pm_lock); 6227 hba->pm_level = EMLXS_PM_ADAPTER_UP; 6228 hba->pm_state &= ~EMLXS_PM_IN_ATTACH; 6229 mutex_exit(&hba->pm_lock); 6230 } else { 6231 mutex_enter(&hba->pm_lock); 6232 hba->pm_state &= ~EMLXS_PM_IN_ATTACH; 6233 mutex_exit(&hba->pm_lock); 6234 } 6235 6236 #ifdef SFCT_SUPPORT 6237 /* Do this last */ 6238 emlxs_fct_attach(hba); 6239 init_flag |= ATTACH_FCT; 6240 #endif /* SFCT_SUPPORT */ 6241 6242 return (DDI_SUCCESS); 6243 6244 failed: 6245 6246 emlxs_driver_remove(dip, init_flag, 1); 6247 6248 return (DDI_FAILURE); 6249 6250 } /* emlxs_hba_attach() */ 6251 6252 6253 static int 6254 emlxs_hba_detach(dev_info_t *dip) 6255 { 6256 emlxs_hba_t *hba; 6257 emlxs_port_t *port; 6258 int ddiinst; 6259 uint32_t init_flag = (uint32_t)-1; 6260 6261 ddiinst = ddi_get_instance(dip); 6262 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 6263 port = &PPORT; 6264 6265 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL); 6266 6267 mutex_enter(&hba->pm_lock); 6268 hba->pm_state |= EMLXS_PM_IN_DETACH; 6269 mutex_exit(&hba->pm_lock); 6270 6271 /* Lower the power level */ 6272 /* 6273 * This will not suspend the driver since the 6274 * EMLXS_PM_IN_DETACH has been set 6275 */ 6276 if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) { 6277 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 6278 "Unable to lower power."); 6279 6280 mutex_enter(&hba->pm_lock); 6281 hba->pm_state &= ~EMLXS_PM_IN_DETACH; 6282 mutex_exit(&hba->pm_lock); 6283 6284 return (DDI_FAILURE); 6285 } 6286 6287 /* Take the adapter offline first, if not already */ 6288 if (emlxs_offline(hba) != 0) { 6289 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 6290 "Unable to take adapter offline."); 6291 6292 mutex_enter(&hba->pm_lock); 6293 hba->pm_state &= ~EMLXS_PM_IN_DETACH; 6294 mutex_exit(&hba->pm_lock); 6295 6296 (void) emlxs_pm_raise_power(dip); 6297 6298 return (DDI_FAILURE); 6299 } 6300 init_flag &= ~ATTACH_ONLINE; 6301 6302 /* Remove the driver instance */ 6303 emlxs_driver_remove(dip, init_flag, 0); 6304 6305 return 
(DDI_SUCCESS); 6306 6307 } /* emlxs_hba_detach() */ 6308 6309 6310 extern int 6311 emlxs_map_bus(emlxs_hba_t *hba) 6312 { 6313 emlxs_port_t *port = &PPORT; 6314 dev_info_t *dip; 6315 ddi_device_acc_attr_t dev_attr; 6316 int status; 6317 6318 dip = (dev_info_t *)hba->dip; 6319 dev_attr = emlxs_dev_acc_attr; 6320 6321 if (hba->bus_type == SBUS_FC) { 6322 if (hba->pci_acc_handle == 0) { 6323 status = ddi_regs_map_setup(dip, 6324 SBUS_DFLY_PCI_CFG_RINDEX, 6325 (caddr_t *)&hba->pci_addr, 6326 0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle); 6327 if (status != DDI_SUCCESS) { 6328 EMLXS_MSGF(EMLXS_CONTEXT, 6329 &emlxs_attach_failed_msg, 6330 "(SBUS) ddi_regs_map_setup PCI failed. " 6331 "status=%x", status); 6332 goto failed; 6333 } 6334 } 6335 6336 if (hba->sbus_pci_handle == 0) { 6337 status = ddi_regs_map_setup(dip, 6338 SBUS_TITAN_PCI_CFG_RINDEX, 6339 (caddr_t *)&hba->sbus_pci_addr, 6340 0, 0, &dev_attr, &hba->sbus_pci_handle); 6341 if (status != DDI_SUCCESS) { 6342 EMLXS_MSGF(EMLXS_CONTEXT, 6343 &emlxs_attach_failed_msg, 6344 "(SBUS) ddi_regs_map_setup TITAN PCI " 6345 "failed. status=%x", status); 6346 goto failed; 6347 } 6348 } 6349 6350 } else { /* ****** PCI ****** */ 6351 6352 if (hba->pci_acc_handle == 0) { 6353 status = ddi_regs_map_setup(dip, 6354 PCI_CFG_RINDEX, 6355 (caddr_t *)&hba->pci_addr, 6356 0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle); 6357 if (status != DDI_SUCCESS) { 6358 EMLXS_MSGF(EMLXS_CONTEXT, 6359 &emlxs_attach_failed_msg, 6360 "(PCI) ddi_regs_map_setup PCI failed. " 6361 "status=%x", status); 6362 goto failed; 6363 } 6364 } 6365 #ifdef EMLXS_I386 6366 /* Setting up PCI configure space */ 6367 (void) ddi_put16(hba->pci_acc_handle, 6368 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER), 6369 CMD_CFG_VALUE | CMD_IO_ENBL); 6370 6371 #ifdef FMA_SUPPORT 6372 if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle) 6373 != DDI_FM_OK) { 6374 EMLXS_MSGF(EMLXS_CONTEXT, 6375 &emlxs_invalid_access_handle_msg, NULL); 6376 goto failed; 6377 } 6378 #endif /* FMA_SUPPORT */ 6379 6380 #endif /* EMLXS_I386 */ 6381 6382 } 6383 return (0); 6384 6385 failed: 6386 6387 emlxs_unmap_bus(hba); 6388 return (ENOMEM); 6389 6390 } /* emlxs_map_bus() */ 6391 6392 6393 extern void 6394 emlxs_unmap_bus(emlxs_hba_t *hba) 6395 { 6396 if (hba->pci_acc_handle) { 6397 (void) ddi_regs_map_free(&hba->pci_acc_handle); 6398 hba->pci_acc_handle = 0; 6399 } 6400 6401 if (hba->sbus_pci_handle) { 6402 (void) ddi_regs_map_free(&hba->sbus_pci_handle); 6403 hba->sbus_pci_handle = 0; 6404 } 6405 6406 return; 6407 6408 } /* emlxs_unmap_bus() */ 6409 6410 6411 static int 6412 emlxs_get_props(emlxs_hba_t *hba) 6413 { 6414 emlxs_config_t *cfg; 6415 uint32_t i; 6416 char string[256]; 6417 uint32_t new_value; 6418 6419 /* Initialize each parameter */ 6420 for (i = 0; i < NUM_CFG_PARAM; i++) { 6421 cfg = &hba->config[i]; 6422 6423 /* Ensure strings are terminated */ 6424 cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0; 6425 cfg->help[(EMLXS_CFG_HELP_SIZE-1)] = 0; 6426 6427 /* Set the current value to the default value */ 6428 new_value = cfg->def; 6429 6430 /* First check for the global setting */ 6431 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, 6432 (void *)hba->dip, DDI_PROP_DONTPASS, 6433 cfg->string, new_value); 6434 6435 /* Now check for the per adapter ddiinst setting */ 6436 (void) sprintf(string, "%s%d-%s", DRIVER_NAME, hba->ddiinst, 6437 cfg->string); 6438 6439 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, 6440 (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value); 6441 6442 /* Now check the 
parameter */ 6443 cfg->current = emlxs_check_parm(hba, i, new_value); 6444 } 6445 6446 return (0); 6447 6448 } /* emlxs_get_props() */ 6449 6450 6451 extern uint32_t 6452 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value) 6453 { 6454 emlxs_port_t *port = &PPORT; 6455 uint32_t i; 6456 emlxs_config_t *cfg; 6457 emlxs_vpd_t *vpd = &VPD; 6458 6459 if (index > NUM_CFG_PARAM) { 6460 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6461 "emlxs_check_parm failed. Invalid index = %d", index); 6462 6463 return (new_value); 6464 } 6465 6466 cfg = &hba->config[index]; 6467 6468 if (new_value > cfg->hi) { 6469 new_value = cfg->def; 6470 } else if (new_value < cfg->low) { 6471 new_value = cfg->def; 6472 } 6473 6474 /* Perform additional checks */ 6475 switch (index) { 6476 #ifdef NPIV_SUPPORT 6477 case CFG_NPIV_ENABLE: 6478 if (hba->tgt_mode) { 6479 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 6480 "enable-npiv: Not supported in target mode. " 6481 "Disabling."); 6482 6483 new_value = 0; 6484 } 6485 break; 6486 #endif /* NPIV_SUPPORT */ 6487 6488 #ifdef DHCHAP_SUPPORT 6489 case CFG_AUTH_ENABLE: 6490 if (hba->tgt_mode) { 6491 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 6492 "enable-auth: Not supported in target mode. " 6493 "Disabling."); 6494 6495 new_value = 0; 6496 } 6497 break; 6498 #endif /* DHCHAP_SUPPORT */ 6499 6500 case CFG_NUM_NODES: 6501 switch (new_value) { 6502 case 1: 6503 case 2: 6504 /* Must have at least 3 if not 0 */ 6505 return (3); 6506 6507 default: 6508 break; 6509 } 6510 break; 6511 6512 case CFG_LINK_SPEED: 6513 if (vpd->link_speed) { 6514 switch (new_value) { 6515 case 0: 6516 break; 6517 6518 case 1: 6519 if (!(vpd->link_speed & LMT_1GB_CAPABLE)) { 6520 new_value = 0; 6521 6522 EMLXS_MSGF(EMLXS_CONTEXT, 6523 &emlxs_init_msg, 6524 "link-speed: 1Gb not supported " 6525 "by adapter. Switching to auto " 6526 "detect."); 6527 } 6528 break; 6529 6530 case 2: 6531 if (!(vpd->link_speed & LMT_2GB_CAPABLE)) { 6532 new_value = 0; 6533 6534 EMLXS_MSGF(EMLXS_CONTEXT, 6535 &emlxs_init_msg, 6536 "link-speed: 2Gb not supported " 6537 "by adapter. Switching to auto " 6538 "detect."); 6539 } 6540 break; 6541 case 4: 6542 if (!(vpd->link_speed & LMT_4GB_CAPABLE)) { 6543 new_value = 0; 6544 6545 EMLXS_MSGF(EMLXS_CONTEXT, 6546 &emlxs_init_msg, 6547 "link-speed: 4Gb not supported " 6548 "by adapter. Switching to auto " 6549 "detect."); 6550 } 6551 break; 6552 6553 case 8: 6554 if (!(vpd->link_speed & LMT_8GB_CAPABLE)) { 6555 new_value = 0; 6556 6557 EMLXS_MSGF(EMLXS_CONTEXT, 6558 &emlxs_init_msg, 6559 "link-speed: 8Gb not supported " 6560 "by adapter. Switching to auto " 6561 "detect."); 6562 } 6563 break; 6564 6565 case 10: 6566 if (!(vpd->link_speed & LMT_10GB_CAPABLE)) { 6567 new_value = 0; 6568 6569 EMLXS_MSGF(EMLXS_CONTEXT, 6570 &emlxs_init_msg, 6571 "link-speed: 10Gb not supported " 6572 "by adapter. Switching to auto " 6573 "detect."); 6574 } 6575 break; 6576 6577 default: 6578 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 6579 "link-speed: Invalid value=%d provided. 
" 6580 "Switching to auto detect.", 6581 new_value); 6582 6583 new_value = 0; 6584 } 6585 } else { /* Perform basic validity check */ 6586 6587 /* Perform additional check on link speed */ 6588 switch (new_value) { 6589 case 0: 6590 case 1: 6591 case 2: 6592 case 4: 6593 case 8: 6594 case 10: 6595 /* link-speed is a valid choice */ 6596 break; 6597 6598 default: 6599 new_value = cfg->def; 6600 } 6601 } 6602 break; 6603 6604 case CFG_TOPOLOGY: 6605 /* Perform additional check on topology */ 6606 switch (new_value) { 6607 case 0: 6608 case 2: 6609 case 4: 6610 case 6: 6611 /* topology is a valid choice */ 6612 break; 6613 6614 default: 6615 return (cfg->def); 6616 } 6617 break; 6618 6619 #ifdef DHCHAP_SUPPORT 6620 case CFG_AUTH_TYPE: 6621 { 6622 uint32_t shift; 6623 uint32_t mask; 6624 6625 /* Perform additional check on auth type */ 6626 shift = 12; 6627 mask = 0xF000; 6628 for (i = 0; i < 4; i++) { 6629 if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) { 6630 return (cfg->def); 6631 } 6632 6633 shift -= 4; 6634 mask >>= 4; 6635 } 6636 break; 6637 } 6638 6639 case CFG_AUTH_HASH: 6640 { 6641 uint32_t shift; 6642 uint32_t mask; 6643 6644 /* Perform additional check on auth hash */ 6645 shift = 12; 6646 mask = 0xF000; 6647 for (i = 0; i < 4; i++) { 6648 if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) { 6649 return (cfg->def); 6650 } 6651 6652 shift -= 4; 6653 mask >>= 4; 6654 } 6655 break; 6656 } 6657 6658 case CFG_AUTH_GROUP: 6659 { 6660 uint32_t shift; 6661 uint32_t mask; 6662 6663 /* Perform additional check on auth group */ 6664 shift = 28; 6665 mask = 0xF0000000; 6666 for (i = 0; i < 8; i++) { 6667 if (((new_value & mask) >> shift) > 6668 DFC_AUTH_GROUP_MAX) { 6669 return (cfg->def); 6670 } 6671 6672 shift -= 4; 6673 mask >>= 4; 6674 } 6675 break; 6676 } 6677 6678 case CFG_AUTH_INTERVAL: 6679 if (new_value < 10) { 6680 return (10); 6681 } 6682 break; 6683 6684 6685 #endif /* DHCHAP_SUPPORT */ 6686 6687 } /* switch */ 6688 6689 return (new_value); 6690 6691 } /* emlxs_check_parm() */ 6692 6693 6694 extern uint32_t 6695 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value) 6696 { 6697 emlxs_port_t *port = &PPORT; 6698 emlxs_port_t *vport; 6699 uint32_t vpi; 6700 emlxs_config_t *cfg; 6701 uint32_t old_value; 6702 6703 if (index > NUM_CFG_PARAM) { 6704 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6705 "emlxs_set_parm failed. Invalid index = %d", index); 6706 6707 return ((uint32_t)FC_FAILURE); 6708 } 6709 6710 cfg = &hba->config[index]; 6711 6712 if (!(cfg->flags & PARM_DYNAMIC)) { 6713 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6714 "emlxs_set_parm failed. %s is not dynamic.", cfg->string); 6715 6716 return ((uint32_t)FC_FAILURE); 6717 } 6718 6719 /* Check new value */ 6720 old_value = new_value; 6721 new_value = emlxs_check_parm(hba, index, new_value); 6722 6723 if (old_value != new_value) { 6724 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6725 "emlxs_set_parm: %s invalid. 0x%x --> 0x%x", 6726 cfg->string, old_value, new_value); 6727 } 6728 6729 /* Return now if no actual change */ 6730 if (new_value == cfg->current) { 6731 return (FC_SUCCESS); 6732 } 6733 6734 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6735 "emlxs_set_parm: %s changing. 
0x%x --> 0x%x", 6736 cfg->string, cfg->current, new_value); 6737 6738 old_value = cfg->current; 6739 cfg->current = new_value; 6740 6741 /* React to change if needed */ 6742 switch (index) { 6743 case CFG_PCI_MAX_READ: 6744 /* Update MXR */ 6745 emlxs_pcix_mxr_update(hba, 1); 6746 break; 6747 6748 #ifdef SLI3_SUPPORT 6749 case CFG_SLI_MODE: 6750 /* Check SLI mode */ 6751 if ((hba->sli_mode == 3) && (new_value == 2)) { 6752 /* All vports must be disabled first */ 6753 for (vpi = 1; vpi < MAX_VPORTS; vpi++) { 6754 vport = &VPORT(vpi); 6755 6756 if (vport->flag & EMLXS_PORT_ENABLE) { 6757 /* Reset current value */ 6758 cfg->current = old_value; 6759 6760 EMLXS_MSGF(EMLXS_CONTEXT, 6761 &emlxs_sfs_debug_msg, 6762 "emlxs_set_parm failed. %s: vpi=%d " 6763 "still enabled. Value restored to " 6764 "0x%x.", cfg->string, vpi, 6765 old_value); 6766 6767 return (2); 6768 } 6769 } 6770 } 6771 break; 6772 6773 #ifdef NPIV_SUPPORT 6774 case CFG_NPIV_ENABLE: 6775 /* Check if NPIV is being disabled */ 6776 if ((old_value == 1) && (new_value == 0)) { 6777 /* All vports must be disabled first */ 6778 for (vpi = 1; vpi < MAX_VPORTS; vpi++) { 6779 vport = &VPORT(vpi); 6780 6781 if (vport->flag & EMLXS_PORT_ENABLE) { 6782 /* Reset current value */ 6783 cfg->current = old_value; 6784 6785 EMLXS_MSGF(EMLXS_CONTEXT, 6786 &emlxs_sfs_debug_msg, 6787 "emlxs_set_parm failed. %s: vpi=%d " 6788 "still enabled. Value restored to " 6789 "0x%x.", cfg->string, vpi, 6790 old_value); 6791 6792 return (2); 6793 } 6794 } 6795 } 6796 6797 /* Trigger adapter reset */ 6798 /* (void) emlxs_reset(port, FC_FCA_RESET); */ 6799 6800 break; 6801 6802 6803 case CFG_VPORT_RESTRICTED: 6804 for (vpi = 0; vpi < MAX_VPORTS; vpi++) { 6805 vport = &VPORT(vpi); 6806 6807 if (!(vport->flag & EMLXS_PORT_CONFIG)) { 6808 continue; 6809 } 6810 6811 if (vport->options & EMLXS_OPT_RESTRICT_MASK) { 6812 continue; 6813 } 6814 6815 if (new_value) { 6816 vport->flag |= EMLXS_PORT_RESTRICTED; 6817 } else { 6818 vport->flag &= ~EMLXS_PORT_RESTRICTED; 6819 } 6820 } 6821 6822 break; 6823 #endif /* NPIV_SUPPORT */ 6824 #endif /* SLI3_SUPPORT */ 6825 6826 #ifdef DHCHAP_SUPPORT 6827 case CFG_AUTH_ENABLE: 6828 (void) emlxs_reset(port, FC_FCA_LINK_RESET); 6829 break; 6830 6831 case CFG_AUTH_TMO: 6832 hba->auth_cfg.authentication_timeout = cfg->current; 6833 break; 6834 6835 case CFG_AUTH_MODE: 6836 hba->auth_cfg.authentication_mode = cfg->current; 6837 break; 6838 6839 case CFG_AUTH_BIDIR: 6840 hba->auth_cfg.bidirectional = cfg->current; 6841 break; 6842 6843 case CFG_AUTH_TYPE: 6844 hba->auth_cfg.authentication_type_priority[0] = 6845 (cfg->current & 0xF000) >> 12; 6846 hba->auth_cfg.authentication_type_priority[1] = 6847 (cfg->current & 0x0F00) >> 8; 6848 hba->auth_cfg.authentication_type_priority[2] = 6849 (cfg->current & 0x00F0) >> 4; 6850 hba->auth_cfg.authentication_type_priority[3] = 6851 (cfg->current & 0x000F); 6852 break; 6853 6854 case CFG_AUTH_HASH: 6855 hba->auth_cfg.hash_priority[0] = 6856 (cfg->current & 0xF000) >> 12; 6857 hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8; 6858 hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4; 6859 hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F); 6860 break; 6861 6862 case CFG_AUTH_GROUP: 6863 hba->auth_cfg.dh_group_priority[0] = 6864 (cfg->current & 0xF0000000) >> 28; 6865 hba->auth_cfg.dh_group_priority[1] = 6866 (cfg->current & 0x0F000000) >> 24; 6867 hba->auth_cfg.dh_group_priority[2] = 6868 (cfg->current & 0x00F00000) >> 20; 6869 hba->auth_cfg.dh_group_priority[3] = 6870 
(cfg->current & 0x000F0000) >> 16; 6871 hba->auth_cfg.dh_group_priority[4] = 6872 (cfg->current & 0x0000F000) >> 12; 6873 hba->auth_cfg.dh_group_priority[5] = 6874 (cfg->current & 0x00000F00) >> 8; 6875 hba->auth_cfg.dh_group_priority[6] = 6876 (cfg->current & 0x000000F0) >> 4; 6877 hba->auth_cfg.dh_group_priority[7] = 6878 (cfg->current & 0x0000000F); 6879 break; 6880 6881 case CFG_AUTH_INTERVAL: 6882 hba->auth_cfg.reauthenticate_time_interval = cfg->current; 6883 break; 6884 #endif /* DHCAHP_SUPPORT */ 6885 6886 } 6887 6888 return (FC_SUCCESS); 6889 6890 } /* emlxs_set_parm() */ 6891 6892 6893 /* 6894 * emlxs_mem_alloc OS specific routine for memory allocation / mapping 6895 * 6896 * The buf_info->flags field describes the memory operation requested. 6897 * 6898 * FC_MBUF_PHYSONLY set requests a supplied virtual address be mapped for DMA 6899 * Virtual address is supplied in buf_info->virt 6900 * DMA mapping flag is in buf_info->align 6901 * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE) 6902 * The mapped physical address is returned buf_info->phys 6903 * 6904 * FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and 6905 * if FC_MBUF_DMA is set the memory is also mapped for DMA 6906 * The byte alignment of the memory request is supplied in buf_info->align 6907 * The byte size of the memory request is supplied in buf_info->size 6908 * The virtual address is returned buf_info->virt 6909 * The mapped physical address is returned buf_info->phys (for FC_MBUF_DMA) 6910 */ 6911 extern uint8_t * 6912 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info) 6913 { 6914 emlxs_port_t *port = &PPORT; 6915 ddi_dma_attr_t dma_attr; 6916 ddi_device_acc_attr_t dev_attr; 6917 uint_t cookie_count; 6918 size_t dma_reallen; 6919 ddi_dma_cookie_t dma_cookie; 6920 uint_t dma_flag; 6921 int status; 6922 6923 dma_attr = emlxs_dma_attr_1sg; 6924 dev_attr = emlxs_data_acc_attr; 6925 6926 if (buf_info->flags & FC_MBUF_SNGLSG) { 6927 buf_info->flags &= ~FC_MBUF_SNGLSG; 6928 dma_attr.dma_attr_sgllen = 1; 6929 } 6930 6931 if (buf_info->flags & FC_MBUF_DMA32) { 6932 buf_info->flags &= ~FC_MBUF_DMA32; 6933 dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff; 6934 } 6935 6936 buf_info->flags &= ~(FC_MBUF_UNLOCK | FC_MBUF_IOCTL); 6937 6938 switch (buf_info->flags) { 6939 case 0: /* allocate host memory */ 6940 6941 buf_info->virt = 6942 (uint32_t *)kmem_zalloc((size_t)buf_info->size, 6943 KM_SLEEP); 6944 buf_info->phys = 0; 6945 buf_info->data_handle = 0; 6946 buf_info->dma_handle = 0; 6947 6948 if (buf_info->virt == (uint32_t *)0) { 6949 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 6950 "size=%x align=%x flags=%x", buf_info->size, 6951 buf_info->align, buf_info->flags); 6952 } 6953 break; 6954 6955 case FC_MBUF_PHYSONLY: 6956 case FC_MBUF_DMA | FC_MBUF_PHYSONLY: /* fill in physical address */ 6957 6958 if (buf_info->virt == 0) 6959 break; 6960 6961 /* 6962 * Allocate the DMA handle for this DMA object 6963 */ 6964 status = ddi_dma_alloc_handle((void *)hba->dip, 6965 &dma_attr, DDI_DMA_SLEEP, 6966 NULL, (ddi_dma_handle_t *)&buf_info->dma_handle); 6967 if (status != DDI_SUCCESS) { 6968 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 6969 "ddi_dma_alloc_handle failed: size=%x align=%x " 6970 "flags=%x", buf_info->size, buf_info->align, 6971 buf_info->flags); 6972 6973 buf_info->phys = 0; 6974 buf_info->dma_handle = 0; 6975 break; 6976 } 6977 6978 switch (buf_info->align) { 6979 case DMA_READ_WRITE: 6980 dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT); 6981 break; 6982 case DMA_READ_ONLY: 6983 
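			/*
			 * Descriptive note (not in the original source): for
			 * FC_MBUF_PHYSONLY requests buf_info->align carries
			 * the DMA direction code (DMA_READ_ONLY,
			 * DMA_WRITE_ONLY, DMA_READ_WRITE); each case selects
			 * the matching DDI flag plus DDI_DMA_CONSISTENT.
			 */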
dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT); 6984 break; 6985 case DMA_WRITE_ONLY: 6986 dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT); 6987 break; 6988 } 6989 6990 /* Map this page of memory */ 6991 status = ddi_dma_addr_bind_handle( 6992 (ddi_dma_handle_t)buf_info->dma_handle, NULL, 6993 (caddr_t)buf_info->virt, (size_t)buf_info->size, 6994 dma_flag, DDI_DMA_SLEEP, NULL, &dma_cookie, 6995 &cookie_count); 6996 6997 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) { 6998 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 6999 "ddi_dma_addr_bind_handle failed: status=%x " 7000 "count=%x flags=%x", status, cookie_count, 7001 buf_info->flags); 7002 7003 (void) ddi_dma_free_handle( 7004 (ddi_dma_handle_t *)&buf_info->dma_handle); 7005 buf_info->phys = 0; 7006 buf_info->dma_handle = 0; 7007 break; 7008 } 7009 7010 if (hba->bus_type == SBUS_FC) { 7011 7012 int32_t burstsizes_limit = 0xff; 7013 int32_t ret_burst; 7014 7015 ret_burst = ddi_dma_burstsizes( 7016 buf_info->dma_handle) & burstsizes_limit; 7017 if (ddi_dma_set_sbus64(buf_info->dma_handle, 7018 ret_burst) == DDI_FAILURE) { 7019 EMLXS_MSGF(EMLXS_CONTEXT, 7020 &emlxs_mem_alloc_failed_msg, 7021 "ddi_dma_set_sbus64 failed."); 7022 } 7023 } 7024 7025 /* Save Physical address */ 7026 buf_info->phys = dma_cookie.dmac_laddress; 7027 7028 /* 7029 * Just to be sure, let's add this 7030 */ 7031 emlxs_mpdata_sync((ddi_dma_handle_t)buf_info->dma_handle, 7032 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV); 7033 7034 break; 7035 7036 case FC_MBUF_DMA: /* allocate and map DMA mem */ 7037 7038 dma_attr.dma_attr_align = buf_info->align; 7039 7040 /* 7041 * Allocate the DMA handle for this DMA object 7042 */ 7043 status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr, 7044 DDI_DMA_SLEEP, NULL, 7045 (ddi_dma_handle_t *)&buf_info->dma_handle); 7046 if (status != DDI_SUCCESS) { 7047 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7048 "ddi_dma_alloc_handle failed: size=%x align=%x " 7049 "flags=%x", buf_info->size, buf_info->align, 7050 buf_info->flags); 7051 7052 buf_info->virt = 0; 7053 buf_info->phys = 0; 7054 buf_info->data_handle = 0; 7055 buf_info->dma_handle = 0; 7056 break; 7057 } 7058 7059 status = ddi_dma_mem_alloc( 7060 (ddi_dma_handle_t)buf_info->dma_handle, 7061 (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT, 7062 DDI_DMA_SLEEP, NULL, (caddr_t *)&buf_info->virt, 7063 &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle); 7064 7065 if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) { 7066 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7067 "ddi_dma_mem_alloc failed: size=%x align=%x " 7068 "flags=%x", buf_info->size, buf_info->align, 7069 buf_info->flags); 7070 7071 (void) ddi_dma_free_handle( 7072 (ddi_dma_handle_t *)&buf_info->dma_handle); 7073 7074 buf_info->virt = 0; 7075 buf_info->phys = 0; 7076 buf_info->data_handle = 0; 7077 buf_info->dma_handle = 0; 7078 break; 7079 } 7080 7081 /* Map this page of memory */ 7082 status = ddi_dma_addr_bind_handle( 7083 (ddi_dma_handle_t)buf_info->dma_handle, NULL, 7084 (caddr_t)buf_info->virt, (size_t)buf_info->size, 7085 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 7086 &dma_cookie, &cookie_count); 7087 7088 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) { 7089 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7090 "ddi_dma_addr_bind_handle failed: status=%x " 7091 "count=%d size=%x align=%x flags=%x", status, 7092 cookie_count, buf_info->size, buf_info->align, 7093 buf_info->flags); 7094 7095 (void) ddi_dma_mem_free( 7096 
(ddi_acc_handle_t *)&buf_info->data_handle); 7097 (void) ddi_dma_free_handle( 7098 (ddi_dma_handle_t *)&buf_info->dma_handle); 7099 7100 buf_info->virt = 0; 7101 buf_info->phys = 0; 7102 buf_info->dma_handle = 0; 7103 buf_info->data_handle = 0; 7104 break; 7105 } 7106 7107 if (hba->bus_type == SBUS_FC) { 7108 int32_t burstsizes_limit = 0xff; 7109 int32_t ret_burst; 7110 7111 ret_burst = 7112 ddi_dma_burstsizes(buf_info-> 7113 dma_handle) & burstsizes_limit; 7114 if (ddi_dma_set_sbus64(buf_info->dma_handle, 7115 ret_burst) == DDI_FAILURE) { 7116 EMLXS_MSGF(EMLXS_CONTEXT, 7117 &emlxs_mem_alloc_failed_msg, 7118 "ddi_dma_set_sbus64 failed."); 7119 } 7120 } 7121 7122 /* Save Physical address */ 7123 buf_info->phys = dma_cookie.dmac_laddress; 7124 7125 /* Just to be sure, let's add this */ 7126 emlxs_mpdata_sync((ddi_dma_handle_t)buf_info->dma_handle, 7127 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV); 7128 7129 break; 7130 } /* End of switch */ 7131 7132 return ((uint8_t *)buf_info->virt); 7133 7134 } /* emlxs_mem_alloc() */ 7135 7136 7137 7138 /* 7139 * emlxs_mem_free: 7140 * 7141 * OS specific routine for memory de-allocation / unmapping 7142 * 7143 * The buf_info->flags field describes the memory operation requested. 7144 * 7145 * FC_MBUF_PHYSONLY set requests a supplied virtual address be unmapped 7146 * for DMA, but not freed. The mapped physical address to be unmapped is in 7147 * buf_info->phys 7148 * 7149 * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only 7150 * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in 7151 * buf_info->phys. The virtual address to be freed is in buf_info->virt 7152 */ 7153 /*ARGSUSED*/ 7154 extern void 7155 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info) 7156 { 7157 buf_info->flags &= ~(FC_MBUF_UNLOCK | FC_MBUF_IOCTL); 7158 7159 switch (buf_info->flags) { 7160 case 0: /* free host memory */ 7161 7162 if (buf_info->virt) { 7163 kmem_free(buf_info->virt, (size_t)buf_info->size); 7164 buf_info->virt = NULL; 7165 } 7166 7167 break; 7168 7169 case FC_MBUF_PHYSONLY: 7170 case FC_MBUF_DMA | FC_MBUF_PHYSONLY: /* nothing to do */ 7171 7172 if (buf_info->dma_handle) { 7173 (void) ddi_dma_unbind_handle(buf_info->dma_handle); 7174 (void) ddi_dma_free_handle( 7175 (ddi_dma_handle_t *)&buf_info->dma_handle); 7176 buf_info->dma_handle = NULL; 7177 } 7178 7179 break; 7180 7181 case FC_MBUF_DMA: /* unmap free DMA-able memory */ 7182 7183 7184 if (buf_info->dma_handle) { 7185 (void) ddi_dma_unbind_handle(buf_info->dma_handle); 7186 (void) ddi_dma_mem_free( 7187 (ddi_acc_handle_t *)&buf_info->data_handle); 7188 (void) ddi_dma_free_handle( 7189 (ddi_dma_handle_t *)&buf_info->dma_handle); 7190 buf_info->dma_handle = NULL; 7191 buf_info->data_handle = NULL; 7192 } 7193 7194 break; 7195 } 7196 7197 } /* emlxs_mem_free() */ 7198 7199 7200 static int32_t 7201 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp) 7202 { 7203 emlxs_hba_t *hba = HBA; 7204 fc_packet_t *pkt; 7205 IOCBQ *iocbq; 7206 IOCB *iocb; 7207 RING *rp; 7208 NODELIST *ndlp; 7209 char *cmd; 7210 uint16_t lun; 7211 FCP_CMND *fcp_cmd; 7212 uint32_t did; 7213 7214 pkt = PRIV2PKT(sbp); 7215 fcp_cmd = (FCP_CMND *)pkt->pkt_cmd; 7216 rp = &hba->ring[FC_FCP_RING]; 7217 did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id); 7218 7219 /* Find target node object */ 7220 ndlp = emlxs_node_find_did(port, did); 7221 7222 if (!ndlp || !ndlp->nlp_active) { 7223 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 7224 "Node not found. 
did=%x", did); 7225 7226 return (FC_BADPACKET); 7227 } 7228 7229 /* If gate is closed */ 7230 if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) { 7231 return (FC_TRAN_BUSY); 7232 } 7233 7234 #ifdef SAN_DIAG_SUPPORT 7235 sbp->sd_start_time = gethrtime(); 7236 #endif /* SAN_DIAG_SUPPORT */ 7237 7238 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 7239 emlxs_swap_fcp_pkt(sbp); 7240 #endif /* EMLXS_MODREV2X */ 7241 7242 if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) { 7243 fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE; 7244 } 7245 7246 iocbq = &sbp->iocbq; 7247 iocb = &iocbq->iocb; 7248 7249 iocbq->node = (void *)ndlp; 7250 if (emlxs_sli_prep_fcp_iocb(port, sbp) != FC_SUCCESS) { 7251 return (FC_TRAN_BUSY); 7252 } 7253 7254 /* Snoop for target or lun resets */ 7255 cmd = (char *)pkt->pkt_cmd; 7256 lun = *((uint16_t *)cmd); 7257 lun = SWAP_DATA16(lun); 7258 7259 /* Check for target reset */ 7260 if (cmd[10] & 0x20) { 7261 mutex_enter(&sbp->mtx); 7262 sbp->pkt_flags |= PACKET_FCP_TGT_RESET; 7263 sbp->pkt_flags |= PACKET_POLLED; 7264 mutex_exit(&sbp->mtx); 7265 7266 #ifdef SAN_DIAG_SUPPORT 7267 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET, 7268 (HBA_WWN *)&ndlp->nlp_portname, -1); 7269 #endif 7270 7271 iocbq->flag |= IOCB_PRIORITY; 7272 7273 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7274 "Target Reset: did=%x", did); 7275 7276 /* Close the node for any further normal IO */ 7277 emlxs_node_close(port, ndlp, FC_FCP_RING, pkt->pkt_timeout); 7278 7279 /* Flush the IO's on the tx queues */ 7280 (void) emlxs_tx_node_flush(port, ndlp, rp, 0, sbp); 7281 } 7282 7283 /* Check for lun reset */ 7284 else if (cmd[10] & 0x10) { 7285 mutex_enter(&sbp->mtx); 7286 sbp->pkt_flags |= PACKET_FCP_LUN_RESET; 7287 sbp->pkt_flags |= PACKET_POLLED; 7288 mutex_exit(&sbp->mtx); 7289 7290 #ifdef SAN_DIAG_SUPPORT 7291 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET, 7292 (HBA_WWN *)&ndlp->nlp_portname, lun); 7293 #endif 7294 7295 iocbq->flag |= IOCB_PRIORITY; 7296 7297 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7298 "LUN Reset: did=%x LUN=%02x02x", did, cmd[0], cmd[1]); 7299 7300 /* Flush the IO's on the tx queues for this lun */ 7301 (void) emlxs_tx_lun_flush(port, ndlp, lun, sbp); 7302 } 7303 7304 /* Initalize sbp */ 7305 mutex_enter(&sbp->mtx); 7306 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 7307 ((pkt->pkt_timeout > 0xff) ? 
0 : 10); 7308 sbp->node = (void *)ndlp; 7309 sbp->lun = lun; 7310 sbp->class = iocb->ulpClass; 7311 sbp->did = ndlp->nlp_DID; 7312 mutex_exit(&sbp->mtx); 7313 7314 if (pkt->pkt_cmdlen) { 7315 emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 7316 DDI_DMA_SYNC_FORDEV); 7317 } 7318 7319 if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) { 7320 emlxs_mpdata_sync(pkt->pkt_data_dma, 0, pkt->pkt_datalen, 7321 DDI_DMA_SYNC_FORDEV); 7322 } 7323 7324 HBASTATS.FcpIssued++; 7325 7326 emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_FCP_RING], iocbq); 7327 7328 return (FC_SUCCESS); 7329 7330 } /* emlxs_send_fcp_cmd() */ 7331 7332 7333 #ifdef SFCT_SUPPORT 7334 static int32_t 7335 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp) 7336 { 7337 emlxs_hba_t *hba = HBA; 7338 fc_packet_t *pkt; 7339 IOCBQ *iocbq; 7340 IOCB *iocb; 7341 NODELIST *ndlp; 7342 uint16_t iotag; 7343 uint32_t did; 7344 ddi_dma_cookie_t *cp_cmd; 7345 7346 pkt = PRIV2PKT(sbp); 7347 7348 did = sbp->did; 7349 ndlp = sbp->node; 7350 7351 iocbq = &sbp->iocbq; 7352 iocb = &iocbq->iocb; 7353 7354 /* Make sure node is still active */ 7355 if (!ndlp->nlp_active) { 7356 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 7357 "*Node not found. did=%x", did); 7358 7359 return (FC_BADPACKET); 7360 } 7361 7362 /* If gate is closed */ 7363 if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) { 7364 return (FC_TRAN_BUSY); 7365 } 7366 7367 /* Get the iotag by registering the packet */ 7368 iotag = emlxs_register_pkt(sbp->ring, sbp); 7369 7370 if (!iotag) { 7371 /* No more command slots available, retry later */ 7372 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7373 "*Adapter Busy. Unable to allocate iotag: did=0x%x", did); 7374 7375 return (FC_TRAN_BUSY); 7376 } 7377 7378 /* Point of no return */ 7379 7380 #if (EMLXS_MODREV >= EMLXS_MODREV3) 7381 cp_cmd = pkt->pkt_cmd_cookie; 7382 #else 7383 cp_cmd = &pkt->pkt_cmd_cookie; 7384 #endif /* >= EMLXS_MODREV3 */ 7385 7386 iocb->un.fcpt64.bdl.addrHigh = putPaddrHigh(cp_cmd->dmac_laddress); 7387 iocb->un.fcpt64.bdl.addrLow = putPaddrLow(cp_cmd->dmac_laddress); 7388 iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen; 7389 iocb->un.fcpt64.bdl.bdeFlags = 0; 7390 7391 if (hba->sli_mode < 3) { 7392 iocb->ulpBdeCount = 1; 7393 iocb->ulpLe = 1; 7394 } else { /* SLI3 */ 7395 7396 iocb->ulpBdeCount = 0; 7397 iocb->ulpLe = 0; 7398 iocb->unsli3.ext_iocb.ebde_count = 0; 7399 } 7400 7401 /* Initalize iocbq */ 7402 iocbq->port = (void *)port; 7403 iocbq->node = (void *)ndlp; 7404 iocbq->ring = (void *)sbp->ring; 7405 7406 /* Initalize iocb */ 7407 iocb->ulpContext = (uint16_t)pkt->pkt_cmd_fhdr.rx_id; 7408 iocb->ulpIoTag = iotag; 7409 iocb->ulpRsvdByte = 7410 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout); 7411 iocb->ulpOwner = OWN_CHIP; 7412 iocb->ulpClass = sbp->class; 7413 iocb->ulpCommand = CMD_FCP_TRSP64_CX; 7414 7415 /* Set the pkt timer */ 7416 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 7417 ((pkt->pkt_timeout > 0xff) ? 
0 : 10); 7418 7419 if (pkt->pkt_cmdlen) { 7420 emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 7421 DDI_DMA_SYNC_FORDEV); 7422 } 7423 7424 HBASTATS.FcpIssued++; 7425 7426 emlxs_sli_issue_iocb_cmd(hba, sbp->ring, iocbq); 7427 7428 return (FC_SUCCESS); 7429 7430 } /* emlxs_send_fct_status() */ 7431 7432 7433 static int32_t 7434 emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp) 7435 { 7436 emlxs_hba_t *hba = HBA; 7437 fc_packet_t *pkt; 7438 IOCBQ *iocbq; 7439 IOCB *iocb; 7440 NODELIST *ndlp; 7441 uint16_t iotag; 7442 uint32_t did; 7443 7444 pkt = PRIV2PKT(sbp); 7445 7446 did = sbp->did; 7447 ndlp = sbp->node; 7448 7449 7450 iocbq = &sbp->iocbq; 7451 iocb = &iocbq->iocb; 7452 7453 /* Make sure node is still active */ 7454 if ((ndlp == NULL) || (!ndlp->nlp_active)) { 7455 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 7456 "*Node not found. did=%x", did); 7457 7458 return (FC_BADPACKET); 7459 } 7460 7461 /* If gate is closed */ 7462 if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) { 7463 return (FC_TRAN_BUSY); 7464 } 7465 7466 /* Get the iotag by registering the packet */ 7467 iotag = emlxs_register_pkt(sbp->ring, sbp); 7468 7469 if (!iotag) { 7470 /* No more command slots available, retry later */ 7471 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7472 "*Adapter Busy. Unable to allocate iotag: did=0x%x", did); 7473 7474 return (FC_TRAN_BUSY); 7475 } 7476 7477 /* Point of no return */ 7478 iocbq->port = (void *)port; 7479 iocbq->node = (void *)ndlp; 7480 iocbq->ring = (void *)sbp->ring; 7481 /* 7482 * Don't give the abort priority, we want the IOCB 7483 * we are aborting to be processed first. 7484 */ 7485 iocbq->flag |= IOCB_SPECIAL; 7486 7487 iocb->ulpContext = pkt->pkt_cmd_fhdr.rx_id; 7488 iocb->ulpIoTag = iotag; 7489 iocb->ulpLe = 1; 7490 iocb->ulpClass = sbp->class; 7491 iocb->ulpOwner = OWN_CHIP; 7492 7493 if (hba->state >= FC_LINK_UP) { 7494 /* Create the abort IOCB */ 7495 iocb->un.acxri.abortType = ABORT_TYPE_ABTS; 7496 iocb->ulpCommand = CMD_ABORT_XRI_CX; 7497 7498 } else { 7499 /* Create the close IOCB */ 7500 iocb->ulpCommand = CMD_CLOSE_XRI_CX; 7501 7502 } 7503 7504 iocb->ulpRsvdByte = 7505 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout); 7506 /* Set the pkt timer */ 7507 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 7508 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 7509 7510 emlxs_sli_issue_iocb_cmd(hba, sbp->ring, iocbq); 7511 7512 return (FC_SUCCESS); 7513 7514 } /* emlxs_send_fct_abort() */ 7515 7516 #endif /* SFCT_SUPPORT */ 7517 7518 7519 static int32_t 7520 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp) 7521 { 7522 emlxs_hba_t *hba = HBA; 7523 fc_packet_t *pkt; 7524 IOCBQ *iocbq; 7525 IOCB *iocb; 7526 RING *rp; 7527 uint32_t i; 7528 NODELIST *ndlp; 7529 uint32_t did; 7530 7531 pkt = PRIV2PKT(sbp); 7532 rp = &hba->ring[FC_IP_RING]; 7533 did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id); 7534 7535 /* Check if node exists */ 7536 /* Broadcast did is always a success */ 7537 ndlp = emlxs_node_find_did(port, did); 7538 7539 if (!ndlp || !ndlp->nlp_active) { 7540 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 7541 "Node not found. did=0x%x", did); 7542 7543 return (FC_BADPACKET); 7544 } 7545 7546 /* Check if gate is temporarily closed */ 7547 if (ndlp->nlp_flag[FC_IP_RING] & NLP_CLOSED) { 7548 return (FC_TRAN_BUSY); 7549 } 7550 7551 /* Check if an exchange has been created */ 7552 if ((ndlp->nlp_Xri == 0) && (did != Bcast_DID)) { 7553 /* No exchange. 
Try creating one */ 7554 (void) emlxs_create_xri(port, rp, ndlp); 7555 7556 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7557 "Adapter Busy. Exchange not found. did=0x%x", did); 7558 7559 return (FC_TRAN_BUSY); 7560 } 7561 7562 /* ULP PATCH: pkt_cmdlen was found to be set to zero */ 7563 /* on BROADCAST commands */ 7564 if (pkt->pkt_cmdlen == 0) { 7565 /* Set the pkt_cmdlen to the cookie size */ 7566 #if (EMLXS_MODREV >= EMLXS_MODREV3) 7567 for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) { 7568 pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size; 7569 } 7570 #else 7571 pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size; 7572 #endif /* >= EMLXS_MODREV3 */ 7573 7574 } 7575 7576 iocbq = &sbp->iocbq; 7577 iocb = &iocbq->iocb; 7578 7579 iocbq->node = (void *)ndlp; 7580 if (emlxs_sli_prep_ip_iocb(port, sbp) != FC_SUCCESS) { 7581 return (FC_TRAN_BUSY); 7582 } 7583 7584 /* Initalize sbp */ 7585 mutex_enter(&sbp->mtx); 7586 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 7587 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 7588 sbp->node = (void *)ndlp; 7589 sbp->lun = 0; 7590 sbp->class = iocb->ulpClass; 7591 sbp->did = did; 7592 mutex_exit(&sbp->mtx); 7593 7594 if (pkt->pkt_cmdlen) { 7595 emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 7596 DDI_DMA_SYNC_FORDEV); 7597 } 7598 7599 emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_IP_RING], iocbq); 7600 7601 return (FC_SUCCESS); 7602 7603 } /* emlxs_send_ip() */ 7604 7605 7606 static int32_t 7607 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp) 7608 { 7609 emlxs_hba_t *hba = HBA; 7610 emlxs_port_t *vport; 7611 fc_packet_t *pkt; 7612 IOCBQ *iocbq; 7613 IOCB *iocb; 7614 uint32_t cmd; 7615 int i; 7616 ELS_PKT *els_pkt; 7617 NODELIST *ndlp; 7618 uint32_t did; 7619 char fcsp_msg[32]; 7620 7621 fcsp_msg[0] = 0; 7622 pkt = PRIV2PKT(sbp); 7623 els_pkt = (ELS_PKT *)pkt->pkt_cmd; 7624 did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id); 7625 7626 iocbq = &sbp->iocbq; 7627 iocb = &iocbq->iocb; 7628 7629 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 7630 emlxs_swap_els_pkt(sbp); 7631 #endif /* EMLXS_MODREV2X */ 7632 7633 cmd = *((uint32_t *)pkt->pkt_cmd); 7634 cmd &= ELS_CMD_MASK; 7635 7636 /* Point of no return, except for ADISC & PLOGI */ 7637 7638 /* Check node */ 7639 switch (cmd) { 7640 case ELS_CMD_FLOGI: 7641 if (port->vpi > 0) { 7642 cmd = ELS_CMD_FDISC; 7643 *((uint32_t *)pkt->pkt_cmd) = cmd; 7644 } 7645 ndlp = NULL; 7646 7647 if (hba->flag & FC_NPIV_DELAY_REQUIRED) { 7648 sbp->pkt_flags |= PACKET_DELAY_REQUIRED; 7649 } 7650 7651 /* We will process these cmds at the bottom of this routine */ 7652 break; 7653 7654 case ELS_CMD_PLOGI: 7655 /* Make sure we don't log into ourself */ 7656 for (i = 0; i < MAX_VPORTS; i++) { 7657 vport = &VPORT(i); 7658 7659 if (!(vport->flag & EMLXS_PORT_BOUND)) { 7660 continue; 7661 } 7662 7663 if (did == vport->did) { 7664 pkt->pkt_state = FC_PKT_NPORT_RJT; 7665 7666 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 7667 emlxs_unswap_pkt(sbp); 7668 #endif /* EMLXS_MODREV2X */ 7669 7670 return (FC_FAILURE); 7671 } 7672 } 7673 7674 ndlp = NULL; 7675 7676 /* Check if this is the first PLOGI */ 7677 /* after a PT_TO_PT connection */ 7678 if ((hba->flag & FC_PT_TO_PT) && (port->did == 0)) { 7679 MAILBOXQ *mbox; 7680 7681 /* ULP bug fix */ 7682 if (pkt->pkt_cmd_fhdr.s_id == 0) { 7683 pkt->pkt_cmd_fhdr.s_id = 7684 pkt->pkt_cmd_fhdr.d_id - FP_DEFAULT_DID + 7685 FP_DEFAULT_SID; 7686 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, 7687 "PLOGI: P2P Fix. 
sid=0-->%x did=%x", 7688 pkt->pkt_cmd_fhdr.s_id, 7689 pkt->pkt_cmd_fhdr.d_id); 7690 } 7691 7692 mutex_enter(&EMLXS_PORT_LOCK); 7693 port->did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.s_id); 7694 mutex_exit(&EMLXS_PORT_LOCK); 7695 7696 /* Update our service parms */ 7697 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba, 7698 MEM_MBOX | MEM_PRI))) { 7699 emlxs_mb_config_link(hba, (MAILBOX *) mbox); 7700 7701 if (emlxs_sli_issue_mbox_cmd(hba, 7702 (MAILBOX *)mbox, MBX_NOWAIT, 0) 7703 != MBX_BUSY) { 7704 (void) emlxs_mem_put(hba, MEM_MBOX, 7705 (uint8_t *)mbox); 7706 } 7707 7708 } 7709 } 7710 7711 /* We will process these cmds at the bottom of this routine */ 7712 break; 7713 7714 default: 7715 ndlp = emlxs_node_find_did(port, did); 7716 7717 /* If an ADISC is being sent and we have no node, */ 7718 /* then we must fail the ADISC now */ 7719 if (!ndlp && (cmd == ELS_CMD_ADISC) && !port->tgt_mode) { 7720 7721 /* Build the LS_RJT response */ 7722 els_pkt = (ELS_PKT *)pkt->pkt_resp; 7723 els_pkt->elsCode = 0x01; 7724 els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0; 7725 els_pkt->un.lsRjt.un.b.lsRjtRsnCode = 7726 LSRJT_LOGICAL_ERR; 7727 els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp = 7728 LSEXP_NOTHING_MORE; 7729 els_pkt->un.lsRjt.un.b.vendorUnique = 0x03; 7730 7731 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7732 "ADISC Rejected. Node not found. did=0x%x", did); 7733 7734 /* Return this as rejected by the target */ 7735 emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1); 7736 7737 return (FC_SUCCESS); 7738 } 7739 } 7740 7741 /* DID == Bcast_DID is special case to indicate that */ 7742 /* RPI is being passed in seq_id field */ 7743 /* This is used by emlxs_send_logo() for target mode */ 7744 7745 /* Initalize iocbq */ 7746 iocbq->node = (void *)ndlp; 7747 if (emlxs_sli_prep_els_iocb(port, sbp) != FC_SUCCESS) { 7748 return (FC_TRAN_BUSY); 7749 } 7750 7751 /* Check cmd */ 7752 switch (cmd) { 7753 case ELS_CMD_PRLI: 7754 { 7755 /* 7756 * if our firmware version is 3.20 or later, 7757 * set the following bits for FC-TAPE support. 7758 */ 7759 7760 if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) { 7761 els_pkt->un.prli.ConfmComplAllowed = 1; 7762 els_pkt->un.prli.Retry = 1; 7763 els_pkt->un.prli.TaskRetryIdReq = 1; 7764 } else { 7765 els_pkt->un.prli.ConfmComplAllowed = 0; 7766 els_pkt->un.prli.Retry = 0; 7767 els_pkt->un.prli.TaskRetryIdReq = 0; 7768 } 7769 7770 break; 7771 } 7772 7773 /* This is a patch for the ULP stack. */ 7774 7775 /* 7776 * ULP only reads our service paramters once during bind_port, 7777 * but the service parameters change due to topology. 7778 */ 7779 case ELS_CMD_FLOGI: 7780 case ELS_CMD_FDISC: 7781 case ELS_CMD_PLOGI: 7782 case ELS_CMD_PDISC: 7783 { 7784 /* Copy latest service parameters to payload */ 7785 bcopy((void *) &port->sparam, 7786 (void *)&els_pkt->un.logi, sizeof (SERV_PARM)); 7787 7788 #ifdef NPIV_SUPPORT 7789 if ((hba->flag & FC_NPIV_ENABLED) && 7790 (hba->flag & FC_NPIV_SUPPORTED) && 7791 (cmd == ELS_CMD_PLOGI)) { 7792 SERV_PARM *sp; 7793 emlxs_vvl_fmt_t *vvl; 7794 7795 sp = (SERV_PARM *)&els_pkt->un.logi; 7796 sp->valid_vendor_version = 1; 7797 vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0]; 7798 vvl->un0.w0.oui = 0x0000C9; 7799 vvl->un0.word0 = SWAP_DATA32(vvl->un0.word0); 7800 vvl->un1.w1.vport = (port->vpi > 0) ? 
1 : 0; 7801 vvl->un1.word1 = SWAP_DATA32(vvl->un1.word1); 7802 } 7803 #endif /* NPIV_SUPPORT */ 7804 7805 #ifdef DHCHAP_SUPPORT 7806 emlxs_dhc_init_sp(port, did, 7807 (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg); 7808 #endif /* DHCHAP_SUPPORT */ 7809 7810 break; 7811 } 7812 7813 } 7814 7815 /* Initialize the sbp */ 7816 mutex_enter(&sbp->mtx); 7817 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 7818 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 7819 sbp->node = (void *)ndlp; 7820 sbp->lun = 0; 7821 sbp->class = iocb->ulpClass; 7822 sbp->did = did; 7823 mutex_exit(&sbp->mtx); 7824 7825 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s", 7826 emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg); 7827 7828 if (pkt->pkt_cmdlen) { 7829 emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 7830 DDI_DMA_SYNC_FORDEV); 7831 } 7832 7833 /* Check node */ 7834 switch (cmd) { 7835 case ELS_CMD_FLOGI: 7836 if (port->ini_mode) { 7837 /* Make sure fabric node is destroyed */ 7838 /* It should already have been destroyed at link down */ 7839 /* Unregister the fabric did and attempt a deferred */ 7840 /* iocb send */ 7841 if (emlxs_mb_unreg_did(port, Fabric_DID, NULL, NULL, 7842 iocbq) == 0) { 7843 /* Deferring iocb tx until */ 7844 /* completion of unreg */ 7845 return (FC_SUCCESS); 7846 } 7847 } 7848 break; 7849 7850 case ELS_CMD_PLOGI: 7851 7852 ndlp = emlxs_node_find_did(port, did); 7853 7854 if (ndlp && ndlp->nlp_active) { 7855 /* Close the node for any further normal IO */ 7856 emlxs_node_close(port, ndlp, FC_FCP_RING, 7857 pkt->pkt_timeout + 10); 7858 emlxs_node_close(port, ndlp, FC_IP_RING, 7859 pkt->pkt_timeout + 10); 7860 7861 /* Flush tx queues */ 7862 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0); 7863 7864 /* Flush chip queues */ 7865 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0); 7866 } 7867 7868 break; 7869 7870 case ELS_CMD_PRLI: 7871 7872 ndlp = emlxs_node_find_did(port, did); 7873 7874 if (ndlp && ndlp->nlp_active) { 7875 /* Close the node for any further FCP IO */ 7876 emlxs_node_close(port, ndlp, FC_FCP_RING, 7877 pkt->pkt_timeout + 10); 7878 7879 /* Flush tx queues */ 7880 (void) emlxs_tx_node_flush(port, ndlp, 7881 &hba->ring[FC_FCP_RING], 0, 0); 7882 7883 /* Flush chip queues */ 7884 (void) emlxs_chipq_node_flush(port, 7885 &hba->ring[FC_FCP_RING], ndlp, 0); 7886 } 7887 7888 break; 7889 7890 } 7891 7892 HBASTATS.ElsCmdIssued++; 7893 7894 emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_ELS_RING], iocbq); 7895 7896 return (FC_SUCCESS); 7897 7898 } /* emlxs_send_els() */ 7899 7900 7901 7902 7903 static int32_t 7904 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp) 7905 { 7906 emlxs_hba_t *hba = HBA; 7907 fc_packet_t *pkt; 7908 IOCBQ *iocbq; 7909 IOCB *iocb; 7910 NODELIST *ndlp; 7911 int i; 7912 uint32_t cmd; 7913 uint32_t ucmd; 7914 ELS_PKT *els_pkt; 7915 fc_unsol_buf_t *ubp; 7916 emlxs_ub_priv_t *ub_priv; 7917 uint32_t did; 7918 char fcsp_msg[32]; 7919 uint8_t *ub_buffer; 7920 7921 fcsp_msg[0] = 0; 7922 pkt = PRIV2PKT(sbp); 7923 els_pkt = (ELS_PKT *)pkt->pkt_cmd; 7924 did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id); 7925 7926 iocbq = &sbp->iocbq; 7927 iocb = &iocbq->iocb; 7928 7929 /* Acquire the unsolicited command this pkt is replying to */ 7930 if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) { 7931 /* This is for auto replies when no ub's are used */ 7932 ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT; 7933 ubp = NULL; 7934 ub_priv = NULL; 7935 ub_buffer = NULL; 7936 7937 #ifdef SFCT_SUPPORT 7938 if (sbp->fct_cmd) { 7939 fct_els_t *els = 7940 (fct_els_t 
*)sbp->fct_cmd->cmd_specific; 7941 ub_buffer = (uint8_t *)els->els_req_payload; 7942 } 7943 #endif /* SFCT_SUPPORT */ 7944 7945 } else { 7946 /* Find the ub buffer that goes with this reply */ 7947 if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) { 7948 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 7949 "ELS reply: Invalid oxid=%x", 7950 pkt->pkt_cmd_fhdr.ox_id); 7951 return (FC_BADPACKET); 7952 } 7953 7954 ub_buffer = (uint8_t *)ubp->ub_buffer; 7955 ub_priv = ubp->ub_fca_private; 7956 ucmd = ub_priv->cmd; 7957 7958 ub_priv->flags |= EMLXS_UB_REPLY; 7959 7960 /* Reset oxid to ELS command */ 7961 /* We do this because the ub is only valid */ 7962 /* until we return from this thread */ 7963 pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff; 7964 } 7965 7966 /* Save the result */ 7967 sbp->ucmd = ucmd; 7968 7969 /* Check for interceptions */ 7970 switch (ucmd) { 7971 7972 #ifdef ULP_PATCH2 7973 case ELS_CMD_LOGO: 7974 { 7975 /* Check if this was generated by ULP and not us */ 7976 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) { 7977 7978 /* 7979 * Since we replied to this already, 7980 * we won't need to send this now 7981 */ 7982 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 7983 7984 return (FC_SUCCESS); 7985 } 7986 7987 break; 7988 } 7989 #endif 7990 7991 #ifdef ULP_PATCH3 7992 case ELS_CMD_PRLI: 7993 { 7994 /* Check if this was generated by ULP and not us */ 7995 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) { 7996 7997 /* 7998 * Since we replied to this already, 7999 * we won't need to send this now 8000 */ 8001 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 8002 8003 return (FC_SUCCESS); 8004 } 8005 8006 break; 8007 } 8008 #endif 8009 8010 8011 #ifdef ULP_PATCH4 8012 case ELS_CMD_PRLO: 8013 { 8014 /* Check if this was generated by ULP and not us */ 8015 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) { 8016 /* 8017 * Since we replied to this already, 8018 * we won't need to send this now 8019 */ 8020 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 8021 8022 return (FC_SUCCESS); 8023 } 8024 8025 break; 8026 } 8027 #endif 8028 8029 #ifdef ULP_PATCH6 8030 case ELS_CMD_RSCN: 8031 { 8032 /* Check if this RSCN was generated by us */ 8033 if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) { 8034 cmd = *((uint32_t *)pkt->pkt_cmd); 8035 cmd = SWAP_DATA32(cmd); 8036 cmd &= ELS_CMD_MASK; 8037 8038 /* 8039 * If ULP is accepting this, 8040 * then close affected node 8041 */ 8042 if (port->ini_mode && ub_buffer && cmd 8043 == ELS_CMD_ACC) { 8044 fc_rscn_t *rscn; 8045 uint32_t count; 8046 uint32_t *lp; 8047 8048 /* 8049 * Only the Leadville code path will 8050 * come thru here. The RSCN data is NOT 8051 * swapped properly for the Comstar code 8052 * path. 8053 */ 8054 lp = (uint32_t *)ub_buffer; 8055 rscn = (fc_rscn_t *)lp++; 8056 count = 8057 ((rscn->rscn_payload_len - 4) / 4); 8058 8059 /* Close affected ports */ 8060 for (i = 0; i < count; i++, lp++) { 8061 (void) emlxs_port_offline(port, 8062 *lp); 8063 } 8064 } 8065 8066 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg, 8067 "RSCN %s: did=%x oxid=%x rxid=%x. 
" 8068 "Intercepted.", emlxs_elscmd_xlate(cmd), 8069 did, pkt->pkt_cmd_fhdr.ox_id, 8070 pkt->pkt_cmd_fhdr.rx_id); 8071 8072 /* 8073 * Since we generated this RSCN, 8074 * we won't need to send this reply 8075 */ 8076 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 8077 8078 return (FC_SUCCESS); 8079 } 8080 8081 break; 8082 } 8083 #endif 8084 8085 case ELS_CMD_PLOGI: 8086 { 8087 /* Check if this PLOGI was generated by us */ 8088 if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) { 8089 cmd = *((uint32_t *)pkt->pkt_cmd); 8090 cmd = SWAP_DATA32(cmd); 8091 cmd &= ELS_CMD_MASK; 8092 8093 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg, 8094 "PLOGI %s: did=%x oxid=%x rxid=%x. " 8095 "Intercepted.", emlxs_elscmd_xlate(cmd), 8096 did, pkt->pkt_cmd_fhdr.ox_id, 8097 pkt->pkt_cmd_fhdr.rx_id); 8098 8099 /* 8100 * Since we generated this PLOGI, 8101 * we won't need to send this reply 8102 */ 8103 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 8104 8105 return (FC_SUCCESS); 8106 } 8107 8108 break; 8109 } 8110 8111 } 8112 8113 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 8114 emlxs_swap_els_pkt(sbp); 8115 #endif /* EMLXS_MODREV2X */ 8116 8117 8118 cmd = *((uint32_t *)pkt->pkt_cmd); 8119 cmd &= ELS_CMD_MASK; 8120 8121 /* Check if modifications are needed */ 8122 switch (ucmd) { 8123 case (ELS_CMD_PRLI): 8124 8125 if (cmd == ELS_CMD_ACC) { 8126 /* This is a patch for the ULP stack. */ 8127 /* ULP does not keep track of FCP2 support */ 8128 8129 if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) { 8130 els_pkt->un.prli.ConfmComplAllowed = 1; 8131 els_pkt->un.prli.Retry = 1; 8132 els_pkt->un.prli.TaskRetryIdReq = 1; 8133 } else { 8134 els_pkt->un.prli.ConfmComplAllowed = 0; 8135 els_pkt->un.prli.Retry = 0; 8136 els_pkt->un.prli.TaskRetryIdReq = 0; 8137 } 8138 } 8139 8140 break; 8141 8142 case ELS_CMD_FLOGI: 8143 case ELS_CMD_PLOGI: 8144 case ELS_CMD_FDISC: 8145 case ELS_CMD_PDISC: 8146 8147 if (cmd == ELS_CMD_ACC) { 8148 /* This is a patch for the ULP stack. */ 8149 8150 /* 8151 * ULP only reads our service parameters 8152 * once during bind_port, but the service 8153 * parameters change due to topology. 8154 */ 8155 8156 /* Copy latest service parameters to payload */ 8157 bcopy((void *)&port->sparam, 8158 (void *)&els_pkt->un.logi, sizeof (SERV_PARM)); 8159 8160 #ifdef DHCHAP_SUPPORT 8161 emlxs_dhc_init_sp(port, did, 8162 (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg); 8163 #endif /* DHCHAP_SUPPORT */ 8164 8165 } 8166 8167 break; 8168 8169 } 8170 8171 /* Initalize iocbq */ 8172 iocbq->node = (void *)NULL; 8173 if (emlxs_sli_prep_els_iocb(port, sbp) != FC_SUCCESS) { 8174 return (FC_TRAN_BUSY); 8175 } 8176 8177 /* Initalize sbp */ 8178 mutex_enter(&sbp->mtx); 8179 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 8180 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 8181 sbp->node = (void *) NULL; 8182 sbp->lun = 0; 8183 sbp->class = iocb->ulpClass; 8184 sbp->did = did; 8185 mutex_exit(&sbp->mtx); 8186 8187 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg, 8188 "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd), 8189 emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id, 8190 pkt->pkt_cmd_fhdr.rx_id, fcsp_msg); 8191 8192 /* Process nodes */ 8193 switch (ucmd) { 8194 case ELS_CMD_RSCN: 8195 { 8196 if (port->ini_mode && ub_buffer && cmd == ELS_CMD_ACC) { 8197 fc_rscn_t *rscn; 8198 uint32_t count; 8199 uint32_t *lp = NULL; 8200 8201 /* 8202 * Only the Leadville code path will come thru 8203 * here. The RSCN data is NOT swapped properly 8204 * for the Comstar code path. 
8205 */ 8206 lp = (uint32_t *)ub_buffer; 8207 rscn = (fc_rscn_t *)lp++; 8208 count = ((rscn->rscn_payload_len - 4) / 4); 8209 8210 /* Close affected ports */ 8211 for (i = 0; i < count; i++, lp++) { 8212 (void) emlxs_port_offline(port, *lp); 8213 } 8214 } 8215 break; 8216 } 8217 case ELS_CMD_PLOGI: 8218 8219 if (cmd == ELS_CMD_ACC) { 8220 ndlp = emlxs_node_find_did(port, did); 8221 8222 if (ndlp && ndlp->nlp_active) { 8223 /* Close the node for any further normal IO */ 8224 emlxs_node_close(port, ndlp, FC_FCP_RING, 8225 pkt->pkt_timeout + 10); 8226 emlxs_node_close(port, ndlp, FC_IP_RING, 8227 pkt->pkt_timeout + 10); 8228 8229 /* Flush tx queue */ 8230 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0); 8231 8232 /* Flush chip queue */ 8233 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0); 8234 } 8235 } 8236 8237 break; 8238 8239 case ELS_CMD_PRLI: 8240 8241 if (cmd == ELS_CMD_ACC) { 8242 ndlp = emlxs_node_find_did(port, did); 8243 8244 if (ndlp && ndlp->nlp_active) { 8245 /* Close the node for any further normal IO */ 8246 emlxs_node_close(port, ndlp, FC_FCP_RING, 8247 pkt->pkt_timeout + 10); 8248 8249 /* Flush tx queues */ 8250 (void) emlxs_tx_node_flush(port, ndlp, 8251 &hba->ring[FC_FCP_RING], 0, 0); 8252 8253 /* Flush chip queues */ 8254 (void) emlxs_chipq_node_flush(port, 8255 &hba->ring[FC_FCP_RING], ndlp, 0); 8256 } 8257 } 8258 8259 break; 8260 8261 case ELS_CMD_PRLO: 8262 8263 if (cmd == ELS_CMD_ACC) { 8264 ndlp = emlxs_node_find_did(port, did); 8265 8266 if (ndlp && ndlp->nlp_active) { 8267 /* Close the node for any further normal IO */ 8268 emlxs_node_close(port, ndlp, FC_FCP_RING, 60); 8269 8270 /* Flush tx queues */ 8271 (void) emlxs_tx_node_flush(port, ndlp, 8272 &hba->ring[FC_FCP_RING], 0, 0); 8273 8274 /* Flush chip queues */ 8275 (void) emlxs_chipq_node_flush(port, 8276 &hba->ring[FC_FCP_RING], ndlp, 0); 8277 } 8278 } 8279 8280 break; 8281 8282 case ELS_CMD_LOGO: 8283 8284 if (cmd == ELS_CMD_ACC) { 8285 ndlp = emlxs_node_find_did(port, did); 8286 8287 if (ndlp && ndlp->nlp_active) { 8288 /* Close the node for any further normal IO */ 8289 emlxs_node_close(port, ndlp, FC_FCP_RING, 60); 8290 emlxs_node_close(port, ndlp, FC_IP_RING, 60); 8291 8292 /* Flush tx queues */ 8293 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0); 8294 8295 /* Flush chip queues */ 8296 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0); 8297 } 8298 } 8299 8300 break; 8301 } 8302 8303 if (pkt->pkt_cmdlen) { 8304 emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 8305 DDI_DMA_SYNC_FORDEV); 8306 } 8307 8308 HBASTATS.ElsRspIssued++; 8309 8310 emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_ELS_RING], iocbq); 8311 8312 return (FC_SUCCESS); 8313 8314 } /* emlxs_send_els_rsp() */ 8315 8316 8317 #ifdef MENLO_SUPPORT 8318 static int32_t 8319 emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp) 8320 { 8321 emlxs_hba_t *hba = HBA; 8322 fc_packet_t *pkt; 8323 IOCBQ *iocbq; 8324 IOCB *iocb; 8325 NODELIST *ndlp; 8326 uint32_t did; 8327 uint32_t *lp; 8328 8329 pkt = PRIV2PKT(sbp); 8330 did = EMLXS_MENLO_DID; 8331 lp = (uint32_t *)pkt->pkt_cmd; 8332 8333 iocbq = &sbp->iocbq; 8334 iocb = &iocbq->iocb; 8335 8336 ndlp = emlxs_node_find_did(port, did); 8337 8338 if (!ndlp || !ndlp->nlp_active) { 8339 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 8340 "Node not found. 
did=0x%x", did); 8341 8342 return (FC_BADPACKET); 8343 } 8344 8345 iocbq->node = (void *) ndlp; 8346 if (emlxs_sli_prep_ct_iocb(port, sbp) != FC_SUCCESS) { 8347 return (FC_TRAN_BUSY); 8348 } 8349 8350 if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) { 8351 /* Cmd phase */ 8352 8353 /* Initalize iocb */ 8354 iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id; 8355 iocb->ulpContext = 0; 8356 iocb->ulpPU = 3; 8357 8358 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 8359 "%s: [%08x,%08x,%08x,%08x]", 8360 emlxs_menlo_cmd_xlate(SWAP_LONG(lp[0])), SWAP_LONG(lp[1]), 8361 SWAP_LONG(lp[2]), SWAP_LONG(lp[3]), SWAP_LONG(lp[4])); 8362 8363 } else { /* FC_PKT_OUTBOUND */ 8364 8365 /* MENLO_CMD_FW_DOWNLOAD Data Phase */ 8366 iocb->ulpCommand = CMD_GEN_REQUEST64_CX; 8367 8368 /* Initalize iocb */ 8369 iocb->un.genreq64.param = 0; 8370 iocb->ulpContext = pkt->pkt_cmd_fhdr.rx_id; 8371 iocb->ulpPU = 1; 8372 8373 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 8374 "%s: Data: rxid=0x%x size=%d", 8375 emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD), 8376 pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen); 8377 } 8378 8379 /* Initalize sbp */ 8380 mutex_enter(&sbp->mtx); 8381 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 8382 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 8383 sbp->node = (void *) ndlp; 8384 sbp->lun = 0; 8385 sbp->class = iocb->ulpClass; 8386 sbp->did = did; 8387 mutex_exit(&sbp->mtx); 8388 8389 emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 8390 DDI_DMA_SYNC_FORDEV); 8391 8392 HBASTATS.CtCmdIssued++; 8393 8394 emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq); 8395 8396 return (FC_SUCCESS); 8397 8398 } /* emlxs_send_menlo() */ 8399 #endif /* MENLO_SUPPORT */ 8400 8401 8402 static int32_t 8403 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp) 8404 { 8405 emlxs_hba_t *hba = HBA; 8406 fc_packet_t *pkt; 8407 IOCBQ *iocbq; 8408 IOCB *iocb; 8409 NODELIST *ndlp; 8410 uint32_t did; 8411 8412 pkt = PRIV2PKT(sbp); 8413 did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id); 8414 8415 iocbq = &sbp->iocbq; 8416 iocb = &iocbq->iocb; 8417 8418 ndlp = emlxs_node_find_did(port, did); 8419 8420 if (!ndlp || !ndlp->nlp_active) { 8421 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 8422 "Node not found. did=0x%x", did); 8423 8424 return (FC_BADPACKET); 8425 } 8426 8427 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 8428 emlxs_swap_ct_pkt(sbp); 8429 #endif /* EMLXS_MODREV2X */ 8430 8431 iocbq->node = (void *)ndlp; 8432 if (emlxs_sli_prep_ct_iocb(port, sbp) != FC_SUCCESS) { 8433 return (FC_TRAN_BUSY); 8434 } 8435 8436 /* Initalize sbp */ 8437 mutex_enter(&sbp->mtx); 8438 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 8439 ((pkt->pkt_timeout > 0xff) ? 
0 : 10); 8440 sbp->node = (void *)ndlp; 8441 sbp->lun = 0; 8442 sbp->class = iocb->ulpClass; 8443 sbp->did = did; 8444 mutex_exit(&sbp->mtx); 8445 8446 if (did == NameServer_DID) { 8447 SLI_CT_REQUEST *CtCmd; 8448 uint32_t *lp0; 8449 8450 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd; 8451 lp0 = (uint32_t *)pkt->pkt_cmd; 8452 8453 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 8454 "%s: did=%x [%08x,%08x]", 8455 emlxs_ctcmd_xlate( 8456 SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)), 8457 did, SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5])); 8458 8459 if (hba->flag & FC_NPIV_DELAY_REQUIRED) { 8460 sbp->pkt_flags |= PACKET_DELAY_REQUIRED; 8461 } 8462 8463 } else if (did == FDMI_DID) { 8464 SLI_CT_REQUEST *CtCmd; 8465 uint32_t *lp0; 8466 8467 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd; 8468 lp0 = (uint32_t *)pkt->pkt_cmd; 8469 8470 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 8471 "%s: did=%x [%08x,%08x]", 8472 emlxs_mscmd_xlate( 8473 SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)), 8474 did, SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5])); 8475 } else { 8476 SLI_CT_REQUEST *CtCmd; 8477 uint32_t *lp0; 8478 8479 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd; 8480 lp0 = (uint32_t *)pkt->pkt_cmd; 8481 8482 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 8483 "%s: did=%x [%08x,%08x]", 8484 emlxs_rmcmd_xlate( 8485 SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)), 8486 did, SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5])); 8487 } 8488 8489 if (pkt->pkt_cmdlen) { 8490 emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 8491 DDI_DMA_SYNC_FORDEV); 8492 } 8493 8494 HBASTATS.CtCmdIssued++; 8495 8496 emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq); 8497 8498 return (FC_SUCCESS); 8499 8500 } /* emlxs_send_ct() */ 8501 8502 8503 static int32_t 8504 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp) 8505 { 8506 emlxs_hba_t *hba = HBA; 8507 fc_packet_t *pkt; 8508 IOCBQ *iocbq; 8509 IOCB *iocb; 8510 uint32_t did; 8511 uint32_t *cmd; 8512 SLI_CT_REQUEST *CtCmd; 8513 8514 pkt = PRIV2PKT(sbp); 8515 did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id); 8516 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd; 8517 cmd = (uint32_t *)pkt->pkt_cmd; 8518 8519 iocbq = &sbp->iocbq; 8520 iocb = &iocbq->iocb; 8521 8522 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 8523 emlxs_swap_ct_pkt(sbp); 8524 #endif /* EMLXS_MODREV2X */ 8525 8526 iocbq->node = (void *)NULL; 8527 if (emlxs_sli_prep_ct_iocb(port, sbp) != FC_SUCCESS) { 8528 return (FC_TRAN_BUSY); 8529 } 8530 8531 /* Initalize sbp */ 8532 mutex_enter(&sbp->mtx); 8533 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 8534 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 8535 sbp->node = NULL; 8536 sbp->lun = 0; 8537 sbp->class = iocb->ulpClass; 8538 sbp->did = did; 8539 mutex_exit(&sbp->mtx); 8540 8541 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg, 8542 "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ", 8543 emlxs_rmcmd_xlate(SWAP_DATA16( 8544 CtCmd->CommandResponse.bits.CmdRsp)), 8545 CtCmd->ReasonCode, CtCmd->Explanation, 8546 SWAP_DATA32(cmd[4]), SWAP_DATA32(cmd[5]), 8547 pkt->pkt_cmd_fhdr.rx_id); 8548 8549 if (pkt->pkt_cmdlen) { 8550 emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 8551 DDI_DMA_SYNC_FORDEV); 8552 } 8553 8554 HBASTATS.CtRspIssued++; 8555 8556 emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq); 8557 8558 return (FC_SUCCESS); 8559 8560 } /* emlxs_send_ct_rsp() */ 8561 8562 8563 /* 8564 * emlxs_get_instance() 8565 * Given a ddi ddiinst, return a Fibre Channel (emlx) ddiinst. 
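 *
 * For illustration (values are hypothetical): if adapters attached with
 * ddi instances 0, 4 and 7, then emlxs_instance[] holds {0, 4, 7} and
 * emlxs_get_instance(7) returns 2; a ddi instance that never attached
 * returns MAX_FC_BRDS.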
8566 */ 8567 extern uint32_t 8568 emlxs_get_instance(int32_t ddiinst) 8569 { 8570 uint32_t i; 8571 uint32_t inst; 8572 8573 mutex_enter(&emlxs_device.lock); 8574 8575 inst = MAX_FC_BRDS; 8576 for (i = 0; i < emlxs_instance_count; i++) { 8577 if (emlxs_instance[i] == ddiinst) { 8578 inst = i; 8579 break; 8580 } 8581 } 8582 8583 mutex_exit(&emlxs_device.lock); 8584 8585 return (inst); 8586 8587 } /* emlxs_get_instance() */ 8588 8589 8590 /* 8591 * emlxs_add_instance() 8592 * Given a ddi ddiinst, create a Fibre Channel (emlx) ddiinst. 8593 * emlx ddiinsts are the order that emlxs_attach gets called, starting at 0. 8594 */ 8595 static uint32_t 8596 emlxs_add_instance(int32_t ddiinst) 8597 { 8598 uint32_t i; 8599 8600 mutex_enter(&emlxs_device.lock); 8601 8602 /* First see if the ddiinst already exists */ 8603 for (i = 0; i < emlxs_instance_count; i++) { 8604 if (emlxs_instance[i] == ddiinst) { 8605 break; 8606 } 8607 } 8608 8609 /* If it doesn't already exist, add it */ 8610 if (i >= emlxs_instance_count) { 8611 if ((i = emlxs_instance_count) < MAX_FC_BRDS) { 8612 emlxs_instance[i] = ddiinst; 8613 emlxs_instance_count++; 8614 emlxs_device.hba_count = emlxs_instance_count; 8615 } 8616 } 8617 8618 mutex_exit(&emlxs_device.lock); 8619 8620 return (i); 8621 8622 } /* emlxs_add_instance() */ 8623 8624 8625 /*ARGSUSED*/ 8626 extern void 8627 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat, 8628 uint32_t doneq) 8629 { 8630 emlxs_hba_t *hba; 8631 emlxs_port_t *port; 8632 emlxs_buf_t *fpkt; 8633 8634 port = sbp->port; 8635 8636 if (!port) { 8637 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg, 8638 "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags); 8639 8640 return; 8641 } 8642 8643 hba = HBA; 8644 8645 mutex_enter(&sbp->mtx); 8646 8647 /* Check for error conditions */ 8648 if (sbp->pkt_flags & (PACKET_RETURNED | PACKET_COMPLETED | 8649 PACKET_IN_DONEQ | PACKET_IN_COMPLETION | 8650 PACKET_IN_TXQ | PACKET_IN_CHIPQ)) { 8651 if (sbp->pkt_flags & PACKET_RETURNED) { 8652 EMLXS_MSGF(EMLXS_CONTEXT, 8653 &emlxs_pkt_completion_error_msg, 8654 "Packet already returned. sbp=%p flags=%x", sbp, 8655 sbp->pkt_flags); 8656 } 8657 8658 else if (sbp->pkt_flags & PACKET_COMPLETED) { 8659 EMLXS_MSGF(EMLXS_CONTEXT, 8660 &emlxs_pkt_completion_error_msg, 8661 "Packet already completed. sbp=%p flags=%x", sbp, 8662 sbp->pkt_flags); 8663 } 8664 8665 else if (sbp->pkt_flags & PACKET_IN_DONEQ) { 8666 EMLXS_MSGF(EMLXS_CONTEXT, 8667 &emlxs_pkt_completion_error_msg, 8668 "Pkt already on done queue. sbp=%p flags=%x", sbp, 8669 sbp->pkt_flags); 8670 } 8671 8672 else if (sbp->pkt_flags & PACKET_IN_COMPLETION) { 8673 EMLXS_MSGF(EMLXS_CONTEXT, 8674 &emlxs_pkt_completion_error_msg, 8675 "Packet already in completion. sbp=%p flags=%x", 8676 sbp, sbp->pkt_flags); 8677 } 8678 8679 else if (sbp->pkt_flags & PACKET_IN_CHIPQ) { 8680 EMLXS_MSGF(EMLXS_CONTEXT, 8681 &emlxs_pkt_completion_error_msg, 8682 "Packet still on chip queue. sbp=%p flags=%x", 8683 sbp, sbp->pkt_flags); 8684 } 8685 8686 else if (sbp->pkt_flags & PACKET_IN_TXQ) { 8687 EMLXS_MSGF(EMLXS_CONTEXT, 8688 &emlxs_pkt_completion_error_msg, 8689 "Packet still on tx queue. 
sbp=%p flags=%x", sbp, 8690 sbp->pkt_flags); 8691 } 8692 8693 mutex_exit(&sbp->mtx); 8694 return; 8695 } 8696 8697 /* Packet is now in completion */ 8698 sbp->pkt_flags |= PACKET_IN_COMPLETION; 8699 8700 /* Set the state if not already set */ 8701 if (!(sbp->pkt_flags & PACKET_STATE_VALID)) { 8702 emlxs_set_pkt_state(sbp, iostat, localstat, 0); 8703 } 8704 8705 /* Check for parent flush packet */ 8706 /* If pkt has a parent flush packet then adjust its count now */ 8707 fpkt = sbp->fpkt; 8708 if (fpkt) { 8709 /* 8710 * We will try to NULL sbp->fpkt inside the 8711 * fpkt's mutex if possible 8712 */ 8713 8714 if (!(fpkt->pkt_flags & PACKET_RETURNED)) { 8715 mutex_enter(&fpkt->mtx); 8716 if (fpkt->flush_count) { 8717 fpkt->flush_count--; 8718 } 8719 sbp->fpkt = NULL; 8720 mutex_exit(&fpkt->mtx); 8721 } else { /* fpkt has been returned already */ 8722 8723 sbp->fpkt = NULL; 8724 } 8725 } 8726 8727 /* If pkt is polled, then wake up sleeping thread */ 8728 if (sbp->pkt_flags & PACKET_POLLED) { 8729 /* Don't set the PACKET_RETURNED flag here */ 8730 /* because the polling thread will do it */ 8731 sbp->pkt_flags |= PACKET_COMPLETED; 8732 mutex_exit(&sbp->mtx); 8733 8734 /* Wake up sleeping thread */ 8735 mutex_enter(&EMLXS_PKT_LOCK); 8736 cv_broadcast(&EMLXS_PKT_CV); 8737 mutex_exit(&EMLXS_PKT_LOCK); 8738 } 8739 8740 /* If packet was generated by our driver, */ 8741 /* then complete it immediately */ 8742 else if (sbp->pkt_flags & PACKET_ALLOCATED) { 8743 mutex_exit(&sbp->mtx); 8744 8745 emlxs_iodone(sbp); 8746 } 8747 8748 /* Put the pkt on the done queue for callback */ 8749 /* completion in another thread */ 8750 else { 8751 sbp->pkt_flags |= PACKET_IN_DONEQ; 8752 sbp->next = NULL; 8753 mutex_exit(&sbp->mtx); 8754 8755 /* Put pkt on doneq, so I/O's will be completed in order */ 8756 mutex_enter(&EMLXS_PORT_LOCK); 8757 if (hba->iodone_tail == NULL) { 8758 hba->iodone_list = sbp; 8759 hba->iodone_count = 1; 8760 } else { 8761 hba->iodone_tail->next = sbp; 8762 hba->iodone_count++; 8763 } 8764 hba->iodone_tail = sbp; 8765 mutex_exit(&EMLXS_PORT_LOCK); 8766 8767 /* Trigger a thread to service the doneq */ 8768 emlxs_thread_trigger1(&hba->iodone_thread, 8769 emlxs_iodone_server); 8770 } 8771 8772 return; 8773 8774 } /* emlxs_pkt_complete() */ 8775 8776 8777 #ifdef SAN_DIAG_SUPPORT 8778 /* 8779 * This routine is called with EMLXS_PORT_LOCK held so we can just increment 8780 * normally. Don't have to use atomic operations. 8781 */ 8782 extern void 8783 emlxs_update_sd_bucket(emlxs_buf_t *sbp) 8784 { 8785 emlxs_port_t *vport; 8786 fc_packet_t *pkt; 8787 uint32_t did; 8788 hrtime_t t; 8789 hrtime_t delta_time; 8790 int i; 8791 NODELIST *ndlp; 8792 8793 vport = sbp->port; 8794 8795 if ((sd_bucket.search_type == 0) || 8796 (vport->sd_io_latency_state != SD_COLLECTING)) 8797 return; 8798 8799 /* Compute the iolatency time in microseconds */ 8800 t = gethrtime(); 8801 delta_time = t - sbp->sd_start_time; 8802 pkt = PRIV2PKT(sbp); 8803 did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id); 8804 ndlp = emlxs_node_find_did(vport, did); 8805 8806 if (ndlp) { 8807 if (delta_time >= 8808 sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1]) 8809 ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1]. 
8810 count++; 8811 else if (delta_time <= sd_bucket.values[0]) 8812 ndlp->sd_dev_bucket[0].count++; 8813 else { 8814 for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) { 8815 if ((delta_time > sd_bucket.values[i-1]) && 8816 (delta_time <= sd_bucket.values[i])) { 8817 ndlp->sd_dev_bucket[i].count++; 8818 break; 8819 } 8820 } 8821 } 8822 } 8823 } 8824 #endif /* SAN_DIAG_SUPPORT */ 8825 8826 /*ARGSUSED*/ 8827 static void 8828 emlxs_iodone_server(void *arg1, void *arg2, void *arg3) 8829 { 8830 emlxs_hba_t *hba = (emlxs_hba_t *)arg1; 8831 emlxs_buf_t *sbp; 8832 8833 mutex_enter(&EMLXS_PORT_LOCK); 8834 8835 /* Remove one pkt from the doneq head and complete it */ 8836 while ((sbp = hba->iodone_list) != NULL) { 8837 if ((hba->iodone_list = sbp->next) == NULL) { 8838 hba->iodone_tail = NULL; 8839 hba->iodone_count = 0; 8840 } else { 8841 hba->iodone_count--; 8842 } 8843 8844 mutex_exit(&EMLXS_PORT_LOCK); 8845 8846 /* Prepare the pkt for completion */ 8847 mutex_enter(&sbp->mtx); 8848 sbp->next = NULL; 8849 sbp->pkt_flags &= ~PACKET_IN_DONEQ; 8850 mutex_exit(&sbp->mtx); 8851 8852 /* Complete the IO now */ 8853 emlxs_iodone(sbp); 8854 8855 /* Reacquire lock and check if more work is to be done */ 8856 mutex_enter(&EMLXS_PORT_LOCK); 8857 } 8858 8859 mutex_exit(&EMLXS_PORT_LOCK); 8860 8861 return; 8862 8863 } /* End emlxs_iodone_server */ 8864 8865 8866 static void 8867 emlxs_iodone(emlxs_buf_t *sbp) 8868 { 8869 fc_packet_t *pkt; 8870 8871 pkt = PRIV2PKT(sbp); 8872 8873 /* Check one more time that the pkt has not already been returned */ 8874 if (sbp->pkt_flags & PACKET_RETURNED) { 8875 return; 8876 } 8877 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 8878 emlxs_unswap_pkt(sbp); 8879 #endif /* EMLXS_MODREV2X */ 8880 8881 mutex_enter(&sbp->mtx); 8882 sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_RETURNED); 8883 mutex_exit(&sbp->mtx); 8884 8885 if (pkt->pkt_comp) { 8886 (*pkt->pkt_comp) (pkt); 8887 } 8888 8889 return; 8890 8891 } /* emlxs_iodone() */ 8892 8893 8894 8895 extern fc_unsol_buf_t * 8896 emlxs_ub_find(emlxs_port_t *port, uint32_t token) 8897 { 8898 emlxs_unsol_buf_t *pool; 8899 fc_unsol_buf_t *ubp; 8900 emlxs_ub_priv_t *ub_priv; 8901 8902 /* Check if this is a valid ub token */ 8903 if (token < EMLXS_UB_TOKEN_OFFSET) { 8904 return (NULL); 8905 } 8906 8907 mutex_enter(&EMLXS_UB_LOCK); 8908 8909 pool = port->ub_pool; 8910 while (pool) { 8911 /* Find a pool with the proper token range */ 8912 if (token >= pool->pool_first_token && 8913 token <= pool->pool_last_token) { 8914 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token - 8915 pool->pool_first_token)]; 8916 ub_priv = ubp->ub_fca_private; 8917 8918 if (ub_priv->token != token) { 8919 EMLXS_MSGF(EMLXS_CONTEXT, 8920 &emlxs_sfs_debug_msg, 8921 "ub_find: Invalid token=%x", ubp, token, 8922 ub_priv->token); 8923 8924 ubp = NULL; 8925 } 8926 8927 else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) { 8928 EMLXS_MSGF(EMLXS_CONTEXT, 8929 &emlxs_sfs_debug_msg, 8930 "ub_find: Buffer not in use. 
buffer=%p " 8931 "token=%x", ubp, token); 8932 8933 ubp = NULL; 8934 } 8935 8936 mutex_exit(&EMLXS_UB_LOCK); 8937 8938 return (ubp); 8939 } 8940 8941 pool = pool->pool_next; 8942 } 8943 8944 mutex_exit(&EMLXS_UB_LOCK); 8945 8946 return (NULL); 8947 8948 } /* emlxs_ub_find() */ 8949 8950 8951 8952 extern fc_unsol_buf_t * 8953 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type, 8954 uint32_t reserve) 8955 { 8956 emlxs_hba_t *hba = HBA; 8957 emlxs_unsol_buf_t *pool; 8958 fc_unsol_buf_t *ubp; 8959 emlxs_ub_priv_t *ub_priv; 8960 uint32_t i; 8961 uint32_t resv_flag; 8962 uint32_t pool_free; 8963 uint32_t pool_free_resv; 8964 8965 mutex_enter(&EMLXS_UB_LOCK); 8966 8967 pool = port->ub_pool; 8968 while (pool) { 8969 /* Find a pool of the appropriate type and size */ 8970 if ((pool->pool_available == 0) || 8971 (pool->pool_type != type) || 8972 (pool->pool_buf_size < size)) { 8973 goto next_pool; 8974 } 8975 8976 8977 /* Adjust free counts based on availablity */ 8978 /* The free reserve count gets first priority */ 8979 pool_free_resv = 8980 min(pool->pool_free_resv, pool->pool_available); 8981 pool_free = 8982 min(pool->pool_free, 8983 (pool->pool_available - pool_free_resv)); 8984 8985 /* Initialize reserve flag */ 8986 resv_flag = reserve; 8987 8988 if (resv_flag) { 8989 if (pool_free_resv == 0) { 8990 if (pool_free == 0) { 8991 goto next_pool; 8992 } 8993 resv_flag = 0; 8994 } 8995 } else if (pool_free == 0) { 8996 goto next_pool; 8997 } 8998 8999 /* Find next available free buffer in this pool */ 9000 for (i = 0; i < pool->pool_nentries; i++) { 9001 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i]; 9002 ub_priv = ubp->ub_fca_private; 9003 9004 if (!ub_priv->available || 9005 ub_priv->flags != EMLXS_UB_FREE) { 9006 continue; 9007 } 9008 9009 ub_priv->time = hba->timer_tics; 9010 9011 /* Timeout in 5 minutes */ 9012 ub_priv->timeout = (5 * 60); 9013 9014 ub_priv->flags = EMLXS_UB_IN_USE; 9015 9016 /* Alloc the buffer from the pool */ 9017 if (resv_flag) { 9018 ub_priv->flags |= EMLXS_UB_RESV; 9019 pool->pool_free_resv--; 9020 } else { 9021 pool->pool_free--; 9022 } 9023 9024 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 9025 "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp, 9026 ub_priv->token, pool->pool_nentries, 9027 pool->pool_available, pool->pool_free, 9028 pool->pool_free_resv); 9029 9030 mutex_exit(&EMLXS_UB_LOCK); 9031 9032 return (ubp); 9033 } 9034 next_pool: 9035 9036 pool = pool->pool_next; 9037 } 9038 9039 mutex_exit(&EMLXS_UB_LOCK); 9040 9041 return (NULL); 9042 9043 } /* emlxs_ub_get() */ 9044 9045 9046 9047 extern void 9048 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat, 9049 uint32_t lock) 9050 { 9051 fc_packet_t *pkt; 9052 fcp_rsp_t *fcp_rsp; 9053 uint32_t i; 9054 emlxs_xlat_err_t *tptr; 9055 emlxs_xlat_err_t *entry; 9056 9057 9058 pkt = PRIV2PKT(sbp); 9059 9060 if (lock) { 9061 mutex_enter(&sbp->mtx); 9062 } 9063 9064 if (!(sbp->pkt_flags & PACKET_STATE_VALID)) { 9065 sbp->pkt_flags |= PACKET_STATE_VALID; 9066 9067 /* Perform table lookup */ 9068 entry = NULL; 9069 if (iostat != IOSTAT_LOCAL_REJECT) { 9070 tptr = emlxs_iostat_tbl; 9071 for (i = 0; i < IOSTAT_MAX; i++, tptr++) { 9072 if (iostat == tptr->emlxs_status) { 9073 entry = tptr; 9074 break; 9075 } 9076 } 9077 } else { /* iostate == IOSTAT_LOCAL_REJECT */ 9078 9079 tptr = emlxs_ioerr_tbl; 9080 for (i = 0; i < IOERR_MAX; i++, tptr++) { 9081 if (localstat == tptr->emlxs_status) { 9082 entry = tptr; 9083 break; 9084 } 9085 } 9086 } 9087 9088 if (entry) { 9089 pkt->pkt_state = 
entry->pkt_state; 9090 pkt->pkt_reason = entry->pkt_reason; 9091 pkt->pkt_expln = entry->pkt_expln; 9092 pkt->pkt_action = entry->pkt_action; 9093 } else { 9094 /* Set defaults */ 9095 pkt->pkt_state = FC_PKT_TRAN_ERROR; 9096 pkt->pkt_reason = FC_REASON_ABORTED; 9097 pkt->pkt_expln = FC_EXPLN_NONE; 9098 pkt->pkt_action = FC_ACTION_RETRYABLE; 9099 } 9100 9101 9102 /* Set the residual counts and response frame */ 9103 /* Check if response frame was received from the chip */ 9104 /* If so, then the residual counts will already be set */ 9105 if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID | 9106 PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) { 9107 /* We have to create the response frame */ 9108 if (iostat == IOSTAT_SUCCESS) { 9109 pkt->pkt_resp_resid = 0; 9110 pkt->pkt_data_resid = 0; 9111 9112 if ((pkt->pkt_cmd_fhdr.type == 9113 FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen && 9114 pkt->pkt_resp) { 9115 fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp; 9116 9117 fcp_rsp->fcp_u.fcp_status. 9118 rsp_len_set = 1; 9119 fcp_rsp->fcp_response_len = 8; 9120 } 9121 } else { 9122 /* Otherwise assume no data */ 9123 /* and no response received */ 9124 pkt->pkt_data_resid = pkt->pkt_datalen; 9125 pkt->pkt_resp_resid = pkt->pkt_rsplen; 9126 } 9127 } 9128 } 9129 9130 if (lock) { 9131 mutex_exit(&sbp->mtx); 9132 } 9133 9134 return; 9135 9136 } /* emlxs_set_pkt_state() */ 9137 9138 9139 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 9140 9141 extern void 9142 emlxs_swap_service_params(SERV_PARM *sp) 9143 { 9144 uint16_t *p; 9145 int size; 9146 int i; 9147 9148 size = (sizeof (CSP) - 4) / 2; 9149 p = (uint16_t *)&sp->cmn; 9150 for (i = 0; i < size; i++) { 9151 p[i] = SWAP_DATA16(p[i]); 9152 } 9153 sp->cmn.e_d_tov = SWAP_DATA32(sp->cmn.e_d_tov); 9154 9155 size = sizeof (CLASS_PARMS) / 2; 9156 p = (uint16_t *)&sp->cls1; 9157 for (i = 0; i < size; i++, p++) { 9158 *p = SWAP_DATA16(*p); 9159 } 9160 9161 size = sizeof (CLASS_PARMS) / 2; 9162 p = (uint16_t *)&sp->cls2; 9163 for (i = 0; i < size; i++, p++) { 9164 *p = SWAP_DATA16(*p); 9165 } 9166 9167 size = sizeof (CLASS_PARMS) / 2; 9168 p = (uint16_t *)&sp->cls3; 9169 for (i = 0; i < size; i++, p++) { 9170 *p = SWAP_DATA16(*p); 9171 } 9172 9173 size = sizeof (CLASS_PARMS) / 2; 9174 p = (uint16_t *)&sp->cls4; 9175 for (i = 0; i < size; i++, p++) { 9176 *p = SWAP_DATA16(*p); 9177 } 9178 9179 return; 9180 9181 } /* emlxs_swap_service_params() */ 9182 9183 extern void 9184 emlxs_unswap_pkt(emlxs_buf_t *sbp) 9185 { 9186 if (sbp->pkt_flags & PACKET_FCP_SWAPPED) { 9187 emlxs_swap_fcp_pkt(sbp); 9188 } 9189 9190 else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) { 9191 emlxs_swap_els_pkt(sbp); 9192 } 9193 9194 else if (sbp->pkt_flags & PACKET_CT_SWAPPED) { 9195 emlxs_swap_ct_pkt(sbp); 9196 } 9197 9198 } /* emlxs_unswap_pkt() */ 9199 9200 9201 extern void 9202 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp) 9203 { 9204 fc_packet_t *pkt; 9205 FCP_CMND *cmd; 9206 fcp_rsp_t *rsp; 9207 uint16_t *lunp; 9208 uint32_t i; 9209 9210 mutex_enter(&sbp->mtx); 9211 9212 if (sbp->pkt_flags & PACKET_ALLOCATED) { 9213 mutex_exit(&sbp->mtx); 9214 return; 9215 } 9216 9217 if (sbp->pkt_flags & PACKET_FCP_SWAPPED) { 9218 sbp->pkt_flags &= ~PACKET_FCP_SWAPPED; 9219 } else { 9220 sbp->pkt_flags |= PACKET_FCP_SWAPPED; 9221 } 9222 9223 mutex_exit(&sbp->mtx); 9224 9225 pkt = PRIV2PKT(sbp); 9226 9227 cmd = (FCP_CMND *)pkt->pkt_cmd; 9228 rsp = (pkt->pkt_rsplen && 9229 (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ? 9230 (fcp_rsp_t *)pkt->pkt_resp : NULL; 9231 9232 /* The size of data buffer needs to be swapped. 
*/ 9233 cmd->fcpDl = SWAP_DATA32(cmd->fcpDl); 9234 9235 /* 9236 * Swap first 2 words of FCP CMND payload. 9237 */ 9238 lunp = (uint16_t *)&cmd->fcpLunMsl; 9239 for (i = 0; i < 4; i++) { 9240 lunp[i] = SWAP_DATA16(lunp[i]); 9241 } 9242 9243 if (rsp) { 9244 rsp->fcp_resid = SWAP_DATA32(rsp->fcp_resid); 9245 rsp->fcp_sense_len = SWAP_DATA32(rsp->fcp_sense_len); 9246 rsp->fcp_response_len = SWAP_DATA32(rsp->fcp_response_len); 9247 } 9248 9249 return; 9250 9251 } /* emlxs_swap_fcp_pkt() */ 9252 9253 9254 extern void 9255 emlxs_swap_els_pkt(emlxs_buf_t *sbp) 9256 { 9257 fc_packet_t *pkt; 9258 uint32_t *cmd; 9259 uint32_t *rsp; 9260 uint32_t command; 9261 uint16_t *c; 9262 uint32_t i; 9263 uint32_t swapped; 9264 9265 mutex_enter(&sbp->mtx); 9266 9267 if (sbp->pkt_flags & PACKET_ALLOCATED) { 9268 mutex_exit(&sbp->mtx); 9269 return; 9270 } 9271 9272 if (sbp->pkt_flags & PACKET_ELS_SWAPPED) { 9273 sbp->pkt_flags &= ~PACKET_ELS_SWAPPED; 9274 swapped = 1; 9275 } else { 9276 sbp->pkt_flags |= PACKET_ELS_SWAPPED; 9277 swapped = 0; 9278 } 9279 9280 mutex_exit(&sbp->mtx); 9281 9282 pkt = PRIV2PKT(sbp); 9283 9284 cmd = (uint32_t *)pkt->pkt_cmd; 9285 rsp = (pkt->pkt_rsplen && 9286 (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ? 9287 (uint32_t *)pkt->pkt_resp : NULL; 9288 9289 if (!swapped) { 9290 cmd[0] = SWAP_DATA32(cmd[0]); 9291 command = cmd[0] & ELS_CMD_MASK; 9292 } else { 9293 command = cmd[0] & ELS_CMD_MASK; 9294 cmd[0] = SWAP_DATA32(cmd[0]); 9295 } 9296 9297 if (rsp) { 9298 rsp[0] = SWAP_DATA32(rsp[0]); 9299 } 9300 9301 switch (command) { 9302 case ELS_CMD_ACC: 9303 if (sbp->ucmd == ELS_CMD_ADISC) { 9304 /* Hard address of originator */ 9305 cmd[1] = SWAP_DATA32(cmd[1]); 9306 9307 /* N_Port ID of originator */ 9308 cmd[6] = SWAP_DATA32(cmd[6]); 9309 } 9310 break; 9311 9312 case ELS_CMD_PLOGI: 9313 case ELS_CMD_FLOGI: 9314 case ELS_CMD_FDISC: 9315 if (rsp) { 9316 emlxs_swap_service_params((SERV_PARM *) & rsp[1]); 9317 } 9318 break; 9319 9320 case ELS_CMD_RLS: 9321 cmd[1] = SWAP_DATA32(cmd[1]); 9322 9323 if (rsp) { 9324 for (i = 0; i < 6; i++) { 9325 rsp[1 + i] = SWAP_DATA32(rsp[1 + i]); 9326 } 9327 } 9328 break; 9329 9330 case ELS_CMD_ADISC: 9331 cmd[1] = SWAP_DATA32(cmd[1]); /* Hard address of originator */ 9332 cmd[6] = SWAP_DATA32(cmd[6]); /* N_Port ID of originator */ 9333 break; 9334 9335 case ELS_CMD_PRLI: 9336 c = (uint16_t *)&cmd[1]; 9337 c[1] = SWAP_DATA16(c[1]); 9338 9339 cmd[4] = SWAP_DATA32(cmd[4]); 9340 9341 if (rsp) { 9342 rsp[4] = SWAP_DATA32(rsp[4]); 9343 } 9344 break; 9345 9346 case ELS_CMD_SCR: 9347 cmd[1] = SWAP_DATA32(cmd[1]); 9348 break; 9349 9350 case ELS_CMD_LINIT: 9351 if (rsp) { 9352 rsp[1] = SWAP_DATA32(rsp[1]); 9353 } 9354 break; 9355 9356 default: 9357 break; 9358 } 9359 9360 return; 9361 9362 } /* emlxs_swap_els_pkt() */ 9363 9364 9365 extern void 9366 emlxs_swap_ct_pkt(emlxs_buf_t *sbp) 9367 { 9368 fc_packet_t *pkt; 9369 uint32_t *cmd; 9370 uint32_t *rsp; 9371 uint32_t command; 9372 uint32_t i; 9373 uint32_t swapped; 9374 9375 mutex_enter(&sbp->mtx); 9376 9377 if (sbp->pkt_flags & PACKET_ALLOCATED) { 9378 mutex_exit(&sbp->mtx); 9379 return; 9380 } 9381 9382 if (sbp->pkt_flags & PACKET_CT_SWAPPED) { 9383 sbp->pkt_flags &= ~PACKET_CT_SWAPPED; 9384 swapped = 1; 9385 } else { 9386 sbp->pkt_flags |= PACKET_CT_SWAPPED; 9387 swapped = 0; 9388 } 9389 9390 mutex_exit(&sbp->mtx); 9391 9392 pkt = PRIV2PKT(sbp); 9393 9394 cmd = (uint32_t *)pkt->pkt_cmd; 9395 rsp = (pkt->pkt_rsplen && 9396 (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ? 
9397 (uint32_t *)pkt->pkt_resp : NULL; 9398 9399 if (!swapped) { 9400 cmd[0] = 0x01000000; 9401 command = cmd[2]; 9402 } 9403 9404 cmd[0] = SWAP_DATA32(cmd[0]); 9405 cmd[1] = SWAP_DATA32(cmd[1]); 9406 cmd[2] = SWAP_DATA32(cmd[2]); 9407 cmd[3] = SWAP_DATA32(cmd[3]); 9408 9409 if (swapped) { 9410 command = cmd[2]; 9411 } 9412 9413 switch ((command >> 16)) { 9414 case SLI_CTNS_GA_NXT: 9415 cmd[4] = SWAP_DATA32(cmd[4]); 9416 break; 9417 9418 case SLI_CTNS_GPN_ID: 9419 case SLI_CTNS_GNN_ID: 9420 case SLI_CTNS_RPN_ID: 9421 case SLI_CTNS_RNN_ID: 9422 cmd[4] = SWAP_DATA32(cmd[4]); 9423 break; 9424 9425 case SLI_CTNS_RCS_ID: 9426 case SLI_CTNS_RPT_ID: 9427 cmd[4] = SWAP_DATA32(cmd[4]); 9428 cmd[5] = SWAP_DATA32(cmd[5]); 9429 break; 9430 9431 case SLI_CTNS_RFT_ID: 9432 cmd[4] = SWAP_DATA32(cmd[4]); 9433 9434 /* Swap FC4 types */ 9435 for (i = 0; i < 8; i++) { 9436 cmd[5 + i] = SWAP_DATA32(cmd[5 + i]); 9437 } 9438 break; 9439 9440 case SLI_CTNS_GFT_ID: 9441 if (rsp) { 9442 /* Swap FC4 types */ 9443 for (i = 0; i < 8; i++) { 9444 rsp[4 + i] = SWAP_DATA32(rsp[4 + i]); 9445 } 9446 } 9447 break; 9448 9449 case SLI_CTNS_GCS_ID: 9450 case SLI_CTNS_GSPN_ID: 9451 case SLI_CTNS_GSNN_NN: 9452 case SLI_CTNS_GIP_NN: 9453 case SLI_CTNS_GIPA_NN: 9454 9455 case SLI_CTNS_GPT_ID: 9456 case SLI_CTNS_GID_NN: 9457 case SLI_CTNS_GNN_IP: 9458 case SLI_CTNS_GIPA_IP: 9459 case SLI_CTNS_GID_FT: 9460 case SLI_CTNS_GID_PT: 9461 case SLI_CTNS_GID_PN: 9462 case SLI_CTNS_RSPN_ID: 9463 case SLI_CTNS_RIP_NN: 9464 case SLI_CTNS_RIPA_NN: 9465 case SLI_CTNS_RSNN_NN: 9466 case SLI_CTNS_DA_ID: 9467 case SLI_CT_RESPONSE_FS_RJT: 9468 case SLI_CT_RESPONSE_FS_ACC: 9469 9470 default: 9471 break; 9472 } 9473 return; 9474 9475 } /* emlxs_swap_ct_pkt() */ 9476 9477 9478 extern void 9479 emlxs_swap_els_ub(fc_unsol_buf_t *ubp) 9480 { 9481 emlxs_ub_priv_t *ub_priv; 9482 fc_rscn_t *rscn; 9483 uint32_t count; 9484 uint32_t i; 9485 uint32_t *lp; 9486 la_els_logi_t *logi; 9487 9488 ub_priv = ubp->ub_fca_private; 9489 9490 switch (ub_priv->cmd) { 9491 case ELS_CMD_RSCN: 9492 rscn = (fc_rscn_t *)ubp->ub_buffer; 9493 9494 rscn->rscn_payload_len = SWAP_DATA16(rscn->rscn_payload_len); 9495 9496 count = ((rscn->rscn_payload_len - 4) / 4); 9497 lp = (uint32_t *)ubp->ub_buffer + 1; 9498 for (i = 0; i < count; i++, lp++) { 9499 *lp = SWAP_DATA32(*lp); 9500 } 9501 9502 break; 9503 9504 case ELS_CMD_FLOGI: 9505 case ELS_CMD_PLOGI: 9506 case ELS_CMD_FDISC: 9507 case ELS_CMD_PDISC: 9508 logi = (la_els_logi_t *)ubp->ub_buffer; 9509 emlxs_swap_service_params( 9510 (SERV_PARM *)&logi->common_service); 9511 break; 9512 9513 /* ULP handles this */ 9514 case ELS_CMD_LOGO: 9515 case ELS_CMD_PRLI: 9516 case ELS_CMD_PRLO: 9517 case ELS_CMD_ADISC: 9518 default: 9519 break; 9520 } 9521 9522 return; 9523 9524 } /* emlxs_swap_els_ub() */ 9525 9526 9527 #endif /* EMLXS_MODREV2X */ 9528 9529 9530 extern char * 9531 emlxs_elscmd_xlate(uint32_t elscmd) 9532 { 9533 static char buffer[32]; 9534 uint32_t i; 9535 uint32_t count; 9536 9537 count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t); 9538 for (i = 0; i < count; i++) { 9539 if (elscmd == emlxs_elscmd_table[i].code) { 9540 return (emlxs_elscmd_table[i].string); 9541 } 9542 } 9543 9544 (void) sprintf(buffer, "ELS=0x%x", elscmd); 9545 return (buffer); 9546 9547 } /* emlxs_elscmd_xlate() */ 9548 9549 9550 extern char * 9551 emlxs_ctcmd_xlate(uint32_t ctcmd) 9552 { 9553 static char buffer[32]; 9554 uint32_t i; 9555 uint32_t count; 9556 9557 count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t); 9558 for (i = 0; i < 
count; i++) { 9559 if (ctcmd == emlxs_ctcmd_table[i].code) { 9560 return (emlxs_ctcmd_table[i].string); 9561 } 9562 } 9563 9564 (void) sprintf(buffer, "cmd=0x%x", ctcmd); 9565 return (buffer); 9566 9567 } /* emlxs_ctcmd_xlate() */ 9568 9569 9570 #ifdef MENLO_SUPPORT 9571 extern char * 9572 emlxs_menlo_cmd_xlate(uint32_t cmd) 9573 { 9574 static char buffer[32]; 9575 uint32_t i; 9576 uint32_t count; 9577 9578 count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t); 9579 for (i = 0; i < count; i++) { 9580 if (cmd == emlxs_menlo_cmd_table[i].code) { 9581 return (emlxs_menlo_cmd_table[i].string); 9582 } 9583 } 9584 9585 (void) sprintf(buffer, "Cmd=0x%x", cmd); 9586 return (buffer); 9587 9588 } /* emlxs_menlo_cmd_xlate() */ 9589 9590 extern char * 9591 emlxs_menlo_rsp_xlate(uint32_t rsp) 9592 { 9593 static char buffer[32]; 9594 uint32_t i; 9595 uint32_t count; 9596 9597 count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t); 9598 for (i = 0; i < count; i++) { 9599 if (rsp == emlxs_menlo_rsp_table[i].code) { 9600 return (emlxs_menlo_rsp_table[i].string); 9601 } 9602 } 9603 9604 (void) sprintf(buffer, "Rsp=0x%x", rsp); 9605 return (buffer); 9606 9607 } /* emlxs_menlo_rsp_xlate() */ 9608 9609 #endif /* MENLO_SUPPORT */ 9610 9611 9612 extern char * 9613 emlxs_rmcmd_xlate(uint32_t rmcmd) 9614 { 9615 static char buffer[32]; 9616 uint32_t i; 9617 uint32_t count; 9618 9619 count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t); 9620 for (i = 0; i < count; i++) { 9621 if (rmcmd == emlxs_rmcmd_table[i].code) { 9622 return (emlxs_rmcmd_table[i].string); 9623 } 9624 } 9625 9626 (void) sprintf(buffer, "RM=0x%x", rmcmd); 9627 return (buffer); 9628 9629 } /* emlxs_rmcmd_xlate() */ 9630 9631 9632 9633 extern char * 9634 emlxs_mscmd_xlate(uint16_t mscmd) 9635 { 9636 static char buffer[32]; 9637 uint32_t i; 9638 uint32_t count; 9639 9640 count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t); 9641 for (i = 0; i < count; i++) { 9642 if (mscmd == emlxs_mscmd_table[i].code) { 9643 return (emlxs_mscmd_table[i].string); 9644 } 9645 } 9646 9647 (void) sprintf(buffer, "Cmd=0x%x", mscmd); 9648 return (buffer); 9649 9650 } /* emlxs_mscmd_xlate() */ 9651 9652 9653 extern char * 9654 emlxs_state_xlate(uint8_t state) 9655 { 9656 static char buffer[32]; 9657 uint32_t i; 9658 uint32_t count; 9659 9660 count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t); 9661 for (i = 0; i < count; i++) { 9662 if (state == emlxs_state_table[i].code) { 9663 return (emlxs_state_table[i].string); 9664 } 9665 } 9666 9667 (void) sprintf(buffer, "State=0x%x", state); 9668 return (buffer); 9669 9670 } /* emlxs_state_xlate() */ 9671 9672 9673 extern char * 9674 emlxs_error_xlate(uint8_t errno) 9675 { 9676 static char buffer[32]; 9677 uint32_t i; 9678 uint32_t count; 9679 9680 count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t); 9681 for (i = 0; i < count; i++) { 9682 if (errno == emlxs_error_table[i].code) { 9683 return (emlxs_error_table[i].string); 9684 } 9685 } 9686 9687 (void) sprintf(buffer, "Errno=0x%x", errno); 9688 return (buffer); 9689 9690 } /* emlxs_error_xlate() */ 9691 9692 9693 static int 9694 emlxs_pm_lower_power(dev_info_t *dip) 9695 { 9696 int ddiinst; 9697 int emlxinst; 9698 emlxs_config_t *cfg; 9699 int32_t rval; 9700 emlxs_hba_t *hba; 9701 9702 ddiinst = ddi_get_instance(dip); 9703 emlxinst = emlxs_get_instance(ddiinst); 9704 hba = emlxs_device.hba[emlxinst]; 9705 cfg = &CFG; 9706 9707 rval = DDI_SUCCESS; 9708 9709 /* Lower the power level */ 9710 if (cfg[CFG_PM_SUPPORT].current) { 9711 
rval = 9712 pm_lower_power(dip, EMLXS_PM_ADAPTER, 9713 EMLXS_PM_ADAPTER_DOWN); 9714 } else { 9715 /* We do not have kernel support of power management enabled */ 9716 /* therefore, call our power management routine directly */ 9717 rval = 9718 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN); 9719 } 9720 9721 return (rval); 9722 9723 } /* emlxs_pm_lower_power() */ 9724 9725 9726 static int 9727 emlxs_pm_raise_power(dev_info_t *dip) 9728 { 9729 int ddiinst; 9730 int emlxinst; 9731 emlxs_config_t *cfg; 9732 int32_t rval; 9733 emlxs_hba_t *hba; 9734 9735 ddiinst = ddi_get_instance(dip); 9736 emlxinst = emlxs_get_instance(ddiinst); 9737 hba = emlxs_device.hba[emlxinst]; 9738 cfg = &CFG; 9739 9740 /* Raise the power level */ 9741 if (cfg[CFG_PM_SUPPORT].current) { 9742 rval = 9743 pm_raise_power(dip, EMLXS_PM_ADAPTER, 9744 EMLXS_PM_ADAPTER_UP); 9745 } else { 9746 /* We do not have kernel support of power management enabled */ 9747 /* therefore, call our power management routine directly */ 9748 rval = 9749 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP); 9750 } 9751 9752 return (rval); 9753 9754 } /* emlxs_pm_raise_power() */ 9755 9756 9757 #ifdef IDLE_TIMER 9758 9759 extern int 9760 emlxs_pm_busy_component(emlxs_hba_t *hba) 9761 { 9762 emlxs_config_t *cfg = &CFG; 9763 int rval; 9764 9765 hba->pm_active = 1; 9766 9767 if (hba->pm_busy) { 9768 return (DDI_SUCCESS); 9769 } 9770 9771 mutex_enter(&hba->pm_lock); 9772 9773 if (hba->pm_busy) { 9774 mutex_exit(&hba->pm_lock); 9775 return (DDI_SUCCESS); 9776 } 9777 hba->pm_busy = 1; 9778 9779 mutex_exit(&hba->pm_lock); 9780 9781 /* Attempt to notify system that we are busy */ 9782 if (cfg[CFG_PM_SUPPORT].current) { 9783 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 9784 "pm_busy_component."); 9785 9786 rval = pm_busy_component(dip, EMLXS_PM_ADAPTER); 9787 9788 if (rval != DDI_SUCCESS) { 9789 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 9790 "pm_busy_component failed. ret=%d", rval); 9791 9792 /* If this attempt failed then clear our flags */ 9793 mutex_enter(&hba->pm_lock); 9794 hba->pm_busy = 0; 9795 mutex_exit(&hba->pm_lock); 9796 9797 return (rval); 9798 } 9799 } 9800 9801 return (DDI_SUCCESS); 9802 9803 } /* emlxs_pm_busy_component() */ 9804 9805 9806 extern int 9807 emlxs_pm_idle_component(emlxs_hba_t *hba) 9808 { 9809 emlxs_config_t *cfg = &CFG; 9810 int rval; 9811 9812 if (!hba->pm_busy) { 9813 return (DDI_SUCCESS); 9814 } 9815 9816 mutex_enter(&hba->pm_lock); 9817 9818 if (!hba->pm_busy) { 9819 mutex_exit(&hba->pm_lock); 9820 return (DDI_SUCCESS); 9821 } 9822 hba->pm_busy = 0; 9823 9824 mutex_exit(&hba->pm_lock); 9825 9826 if (cfg[CFG_PM_SUPPORT].current) { 9827 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 9828 "pm_idle_component."); 9829 9830 rval = pm_idle_component(dip, EMLXS_PM_ADAPTER); 9831 9832 if (rval != DDI_SUCCESS) { 9833 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 9834 "pm_idle_component failed. 
ret=%d", rval); 9835 9836 /* If this attempt failed then */ 9837 /* reset our flags for another attempt */ 9838 mutex_enter(&hba->pm_lock); 9839 hba->pm_busy = 1; 9840 mutex_exit(&hba->pm_lock); 9841 9842 return (rval); 9843 } 9844 } 9845 9846 return (DDI_SUCCESS); 9847 9848 } /* emlxs_pm_idle_component() */ 9849 9850 9851 extern void 9852 emlxs_pm_idle_timer(emlxs_hba_t *hba) 9853 { 9854 emlxs_config_t *cfg = &CFG; 9855 9856 if (hba->pm_active) { 9857 /* Clear active flag and reset idle timer */ 9858 mutex_enter(&hba->pm_lock); 9859 hba->pm_active = 0; 9860 hba->pm_idle_timer = 9861 hba->timer_tics + cfg[CFG_PM_IDLE].current; 9862 mutex_exit(&hba->pm_lock); 9863 } 9864 9865 /* Check for idle timeout */ 9866 else if (hba->timer_tics >= hba->pm_idle_timer) { 9867 if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) { 9868 mutex_enter(&hba->pm_lock); 9869 hba->pm_idle_timer = 9870 hba->timer_tics + cfg[CFG_PM_IDLE].current; 9871 mutex_exit(&hba->pm_lock); 9872 } 9873 } 9874 9875 return; 9876 9877 } /* emlxs_pm_idle_timer() */ 9878 9879 #endif /* IDLE_TIMER */ 9880 9881 9882 #ifdef SLI3_SUPPORT 9883 static void 9884 emlxs_read_vport_prop(emlxs_hba_t *hba) 9885 { 9886 emlxs_port_t *port = &PPORT; 9887 emlxs_config_t *cfg = &CFG; 9888 char **arrayp; 9889 uint8_t *s; 9890 uint8_t *np; 9891 NAME_TYPE pwwpn; 9892 NAME_TYPE wwnn; 9893 NAME_TYPE wwpn; 9894 uint32_t vpi; 9895 uint32_t cnt; 9896 uint32_t rval; 9897 uint32_t i; 9898 uint32_t j; 9899 uint32_t c1; 9900 uint32_t sum; 9901 uint32_t errors; 9902 char buffer[64]; 9903 9904 /* Check for the per adapter vport setting */ 9905 (void) sprintf(buffer, "%s%d-vport", DRIVER_NAME, hba->ddiinst); 9906 cnt = 0; 9907 arrayp = NULL; 9908 rval = 9909 ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip, 9910 (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt); 9911 9912 if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) { 9913 /* Check for the global vport setting */ 9914 cnt = 0; 9915 arrayp = NULL; 9916 rval = 9917 ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip, 9918 (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt); 9919 } 9920 9921 if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) { 9922 return; 9923 } 9924 9925 for (i = 0; i < cnt; i++) { 9926 errors = 0; 9927 s = (uint8_t *)arrayp[i]; 9928 9929 if (!s) { 9930 break; 9931 } 9932 9933 np = (uint8_t *)&pwwpn; 9934 for (j = 0; j < sizeof (NAME_TYPE); j++) { 9935 c1 = *s++; 9936 if ((c1 >= '0') && (c1 <= '9')) { 9937 sum = ((c1 - '0') << 4); 9938 } else if ((c1 >= 'a') && (c1 <= 'f')) { 9939 sum = ((c1 - 'a' + 10) << 4); 9940 } else if ((c1 >= 'A') && (c1 <= 'F')) { 9941 sum = ((c1 - 'A' + 10) << 4); 9942 } else { 9943 EMLXS_MSGF(EMLXS_CONTEXT, 9944 &emlxs_attach_debug_msg, 9945 "Config error: Invalid PWWPN found. " 9946 "entry=%d byte=%d hi_nibble=%c", 9947 i, j, c1); 9948 errors++; 9949 } 9950 9951 c1 = *s++; 9952 if ((c1 >= '0') && (c1 <= '9')) { 9953 sum |= (c1 - '0'); 9954 } else if ((c1 >= 'a') && (c1 <= 'f')) { 9955 sum |= (c1 - 'a' + 10); 9956 } else if ((c1 >= 'A') && (c1 <= 'F')) { 9957 sum |= (c1 - 'A' + 10); 9958 } else { 9959 EMLXS_MSGF(EMLXS_CONTEXT, 9960 &emlxs_attach_debug_msg, 9961 "Config error: Invalid PWWPN found. " 9962 "entry=%d byte=%d lo_nibble=%c", 9963 i, j, c1); 9964 errors++; 9965 } 9966 9967 *np++ = sum; 9968 } 9969 9970 if (*s++ != ':') { 9971 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 9972 "Config error: Invalid delimiter after PWWPN. 
" 9973 "entry=%d", i); 9974 goto out; 9975 } 9976 9977 np = (uint8_t *)&wwnn; 9978 for (j = 0; j < sizeof (NAME_TYPE); j++) { 9979 c1 = *s++; 9980 if ((c1 >= '0') && (c1 <= '9')) { 9981 sum = ((c1 - '0') << 4); 9982 } else if ((c1 >= 'a') && (c1 <= 'f')) { 9983 sum = ((c1 - 'a' + 10) << 4); 9984 } else if ((c1 >= 'A') && (c1 <= 'F')) { 9985 sum = ((c1 - 'A' + 10) << 4); 9986 } else { 9987 EMLXS_MSGF(EMLXS_CONTEXT, 9988 &emlxs_attach_debug_msg, 9989 "Config error: Invalid WWNN found. " 9990 "entry=%d byte=%d hi_nibble=%c", 9991 i, j, c1); 9992 errors++; 9993 } 9994 9995 c1 = *s++; 9996 if ((c1 >= '0') && (c1 <= '9')) { 9997 sum |= (c1 - '0'); 9998 } else if ((c1 >= 'a') && (c1 <= 'f')) { 9999 sum |= (c1 - 'a' + 10); 10000 } else if ((c1 >= 'A') && (c1 <= 'F')) { 10001 sum |= (c1 - 'A' + 10); 10002 } else { 10003 EMLXS_MSGF(EMLXS_CONTEXT, 10004 &emlxs_attach_debug_msg, 10005 "Config error: Invalid WWNN found. " 10006 "entry=%d byte=%d lo_nibble=%c", 10007 i, j, c1); 10008 errors++; 10009 } 10010 10011 *np++ = sum; 10012 } 10013 10014 if (*s++ != ':') { 10015 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 10016 "Config error: Invalid delimiter after WWNN. " 10017 "entry=%d", i); 10018 goto out; 10019 } 10020 10021 np = (uint8_t *)&wwpn; 10022 for (j = 0; j < sizeof (NAME_TYPE); j++) { 10023 c1 = *s++; 10024 if ((c1 >= '0') && (c1 <= '9')) { 10025 sum = ((c1 - '0') << 4); 10026 } else if ((c1 >= 'a') && (c1 <= 'f')) { 10027 sum = ((c1 - 'a' + 10) << 4); 10028 } else if ((c1 >= 'A') && (c1 <= 'F')) { 10029 sum = ((c1 - 'A' + 10) << 4); 10030 } else { 10031 EMLXS_MSGF(EMLXS_CONTEXT, 10032 &emlxs_attach_debug_msg, 10033 "Config error: Invalid WWPN found. " 10034 "entry=%d byte=%d hi_nibble=%c", 10035 i, j, c1); 10036 10037 errors++; 10038 } 10039 10040 c1 = *s++; 10041 if ((c1 >= '0') && (c1 <= '9')) { 10042 sum |= (c1 - '0'); 10043 } else if ((c1 >= 'a') && (c1 <= 'f')) { 10044 sum |= (c1 - 'a' + 10); 10045 } else if ((c1 >= 'A') && (c1 <= 'F')) { 10046 sum |= (c1 - 'A' + 10); 10047 } else { 10048 EMLXS_MSGF(EMLXS_CONTEXT, 10049 &emlxs_attach_debug_msg, 10050 "Config error: Invalid WWPN found. " 10051 "entry=%d byte=%d lo_nibble=%c", 10052 i, j, c1); 10053 10054 errors++; 10055 } 10056 10057 *np++ = sum; 10058 } 10059 10060 if (*s++ != ':') { 10061 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 10062 "Config error: Invalid delimiter after WWPN. " 10063 "entry=%d", i); 10064 10065 goto out; 10066 } 10067 10068 sum = 0; 10069 do { 10070 c1 = *s++; 10071 if ((c1 < '0') || (c1 > '9')) { 10072 EMLXS_MSGF(EMLXS_CONTEXT, 10073 &emlxs_attach_debug_msg, 10074 "Config error: Invalid VPI found. 
" 10075 "entry=%d c=%c vpi=%d", i, c1, sum); 10076 10077 goto out; 10078 } 10079 10080 sum = (sum * 10) + (c1 - '0'); 10081 10082 } while (*s != 0); 10083 10084 vpi = sum; 10085 10086 if (errors) { 10087 continue; 10088 } 10089 10090 /* Entry has been read */ 10091 10092 /* Check if the physical port wwpn */ 10093 /* matches our physical port wwpn */ 10094 if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) { 10095 continue; 10096 } 10097 10098 /* Check vpi range */ 10099 if ((vpi == 0) || (vpi >= MAX_VPORTS)) { 10100 continue; 10101 } 10102 10103 /* Check if port has already been configured */ 10104 if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) { 10105 continue; 10106 } 10107 10108 /* Set the highest configured vpi */ 10109 if (vpi >= hba->vpi_high) { 10110 hba->vpi_high = vpi; 10111 } 10112 10113 bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn, 10114 sizeof (NAME_TYPE)); 10115 bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn, 10116 sizeof (NAME_TYPE)); 10117 10118 if (hba->port[vpi].snn[0] == 0) { 10119 (void) strncpy((caddr_t)hba->port[vpi].snn, 10120 (caddr_t)hba->snn, 256); 10121 } 10122 10123 if (hba->port[vpi].spn[0] == 0) { 10124 (void) sprintf((caddr_t)hba->port[vpi].spn, 10125 "%s VPort-%d", 10126 (caddr_t)hba->spn, vpi); 10127 } 10128 10129 hba->port[vpi].flag |= 10130 (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE); 10131 10132 #ifdef NPIV_SUPPORT 10133 if (cfg[CFG_VPORT_RESTRICTED].current) { 10134 hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED; 10135 } 10136 #endif /* NPIV_SUPPORT */ 10137 } 10138 10139 out: 10140 10141 (void) ddi_prop_free((void *) arrayp); 10142 return; 10143 10144 } /* emlxs_read_vport_prop() */ 10145 10146 #endif /* SLI3_SUPPORT */ 10147 10148 10149 10150 extern char * 10151 emlxs_wwn_xlate(char *buffer, uint8_t *wwn) 10152 { 10153 (void) sprintf(buffer, "%02x%02x%02x%02x%02x%02x%02x%02x", 10154 wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff, 10155 wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff); 10156 10157 return (buffer); 10158 10159 } /* emlxs_wwn_xlate() */ 10160 10161 10162 /* This is called at port online and offline */ 10163 extern void 10164 emlxs_ub_flush(emlxs_port_t *port) 10165 { 10166 emlxs_hba_t *hba = HBA; 10167 fc_unsol_buf_t *ubp; 10168 emlxs_ub_priv_t *ub_priv; 10169 emlxs_ub_priv_t *next; 10170 10171 /* Return if nothing to do */ 10172 if (!port->ub_wait_head) { 10173 return; 10174 } 10175 10176 mutex_enter(&EMLXS_PORT_LOCK); 10177 ub_priv = port->ub_wait_head; 10178 port->ub_wait_head = NULL; 10179 port->ub_wait_tail = NULL; 10180 mutex_exit(&EMLXS_PORT_LOCK); 10181 10182 while (ub_priv) { 10183 next = ub_priv->next; 10184 ubp = ub_priv->ubp; 10185 10186 /* Check if ULP is online and we have a callback function */ 10187 if ((port->ulp_statec != FC_STATE_OFFLINE) && 10188 port->ulp_unsol_cb) { 10189 /* Send ULP the ub buffer */ 10190 port->ulp_unsol_cb(port->ulp_handle, ubp, 10191 ubp->ub_frame.type); 10192 } else { /* Drop the buffer */ 10193 10194 (void) emlxs_ub_release(port, 1, &ubp->ub_token); 10195 } 10196 10197 ub_priv = next; 10198 10199 } /* while() */ 10200 10201 return; 10202 10203 } /* emlxs_ub_flush() */ 10204 10205 10206 extern void 10207 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp) 10208 { 10209 emlxs_hba_t *hba = HBA; 10210 emlxs_ub_priv_t *ub_priv; 10211 10212 ub_priv = ubp->ub_fca_private; 10213 10214 /* Check if ULP is online */ 10215 if (port->ulp_statec != FC_STATE_OFFLINE) { 10216 if (port->ulp_unsol_cb) { 10217 port->ulp_unsol_cb(port->ulp_handle, ubp, 10218 ubp->ub_frame.type); 
10219 } else { 10220 (void) emlxs_ub_release(port, 1, &ubp->ub_token); 10221 } 10222 10223 return; 10224 } else { /* ULP offline */ 10225 10226 if (hba->state >= FC_LINK_UP) { 10227 /* Add buffer to queue tail */ 10228 mutex_enter(&EMLXS_PORT_LOCK); 10229 10230 if (port->ub_wait_tail) { 10231 port->ub_wait_tail->next = ub_priv; 10232 } 10233 port->ub_wait_tail = ub_priv; 10234 10235 if (!port->ub_wait_head) { 10236 port->ub_wait_head = ub_priv; 10237 } 10238 10239 mutex_exit(&EMLXS_PORT_LOCK); 10240 } else { 10241 (void) emlxs_ub_release(port, 1, &ubp->ub_token); 10242 } 10243 } 10244 10245 return; 10246 10247 } /* emlxs_ub_callback() */ 10248 10249 10250 static uint32_t 10251 emlxs_integrity_check(emlxs_hba_t *hba) 10252 { 10253 uint32_t size; 10254 uint32_t errors = 0; 10255 int ddiinst = hba->ddiinst; 10256 10257 size = 16; 10258 if (sizeof (ULP_BDL) != size) { 10259 cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect. %d != 16", 10260 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL)); 10261 10262 errors++; 10263 } 10264 size = 8; 10265 if (sizeof (ULP_BDE) != size) { 10266 cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect. %d != 8", 10267 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE)); 10268 10269 errors++; 10270 } 10271 size = 12; 10272 if (sizeof (ULP_BDE64) != size) { 10273 cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect. %d != 12", 10274 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64)); 10275 10276 errors++; 10277 } 10278 size = 16; 10279 if (sizeof (HBQE_t) != size) { 10280 cmn_err(CE_WARN, "?%s%d: HBQE size incorrect. %d != 16", 10281 DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t)); 10282 10283 errors++; 10284 } 10285 size = 8; 10286 if (sizeof (HGP) != size) { 10287 cmn_err(CE_WARN, "?%s%d: HGP size incorrect. %d != 8", 10288 DRIVER_NAME, ddiinst, (int)sizeof (HGP)); 10289 10290 errors++; 10291 } 10292 if (sizeof (PGP) != size) { 10293 cmn_err(CE_WARN, "?%s%d: PGP size incorrect. %d != 8", 10294 DRIVER_NAME, ddiinst, (int)sizeof (PGP)); 10295 10296 errors++; 10297 } 10298 size = 4; 10299 if (sizeof (WORD5) != size) { 10300 cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect. %d != 4", 10301 DRIVER_NAME, ddiinst, (int)sizeof (WORD5)); 10302 10303 errors++; 10304 } 10305 size = 124; 10306 if (sizeof (MAILVARIANTS) != size) { 10307 cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect. " 10308 "%d != 124", DRIVER_NAME, ddiinst, 10309 (int)sizeof (MAILVARIANTS)); 10310 10311 errors++; 10312 } 10313 size = 128; 10314 if (sizeof (SLI1_DESC) != size) { 10315 cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect. %d != 128", 10316 DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC)); 10317 10318 errors++; 10319 } 10320 if (sizeof (SLI2_DESC) != size) { 10321 cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect. %d != 128", 10322 DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC)); 10323 10324 errors++; 10325 } 10326 size = MBOX_SIZE; 10327 if (sizeof (MAILBOX) != size) { 10328 cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect. %d != %d", 10329 DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE); 10330 10331 errors++; 10332 } 10333 size = PCB_SIZE; 10334 if (sizeof (PCB) != size) { 10335 cmn_err(CE_WARN, "?%s%d: PCB size incorrect. %d != %d", 10336 DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE); 10337 10338 errors++; 10339 } 10340 size = 260; 10341 if (sizeof (ATTRIBUTE_ENTRY) != size) { 10342 cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect. 
" 10343 "%d != 260", DRIVER_NAME, ddiinst, 10344 (int)sizeof (ATTRIBUTE_ENTRY)); 10345 10346 errors++; 10347 } 10348 size = SLI_SLIM1_SIZE; 10349 if (sizeof (SLIM1) != size) { 10350 cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect. %d != %d", 10351 DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE); 10352 10353 errors++; 10354 } 10355 #ifdef SLI3_SUPPORT 10356 size = SLI3_IOCB_CMD_SIZE; 10357 if (sizeof (IOCB) != size) { 10358 cmn_err(CE_WARN, "?%s%d: IOCB size incorrect. %d != %d", 10359 DRIVER_NAME, ddiinst, (int)sizeof (IOCB), 10360 SLI3_IOCB_CMD_SIZE); 10361 10362 errors++; 10363 } 10364 #else 10365 size = SLI2_IOCB_CMD_SIZE; 10366 if (sizeof (IOCB) != size) { 10367 cmn_err(CE_WARN, "?%s%d: IOCB size incorrect. %d != %d", 10368 DRIVER_NAME, ddiinst, (int)sizeof (IOCB), 10369 SLI2_IOCB_CMD_SIZE); 10370 10371 errors++; 10372 } 10373 #endif /* SLI3_SUPPORT */ 10374 10375 size = SLI_SLIM2_SIZE; 10376 if (sizeof (SLIM2) != size) { 10377 cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect. %d != %d", 10378 DRIVER_NAME, ddiinst, (int)sizeof (SLIM2), 10379 SLI_SLIM2_SIZE); 10380 10381 errors++; 10382 } 10383 return (errors); 10384 10385 } /* emlxs_integrity_check() */ 10386 10387 10388 #ifdef FMA_SUPPORT 10389 /* 10390 * FMA support 10391 */ 10392 10393 extern void 10394 emlxs_fm_init(emlxs_hba_t *hba) 10395 { 10396 ddi_iblock_cookie_t iblk; 10397 10398 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) { 10399 return; 10400 } 10401 10402 if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) { 10403 emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 10404 emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 10405 } else { 10406 emlxs_dev_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 10407 emlxs_data_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 10408 } 10409 10410 if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) { 10411 emlxs_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 10412 emlxs_dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR; 10413 emlxs_dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR; 10414 emlxs_dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR; 10415 } else { 10416 emlxs_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 10417 emlxs_dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR; 10418 emlxs_dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR; 10419 emlxs_dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR; 10420 } 10421 10422 ddi_fm_init(hba->dip, &hba->fm_caps, &iblk); 10423 10424 if (DDI_FM_EREPORT_CAP(hba->fm_caps) || 10425 DDI_FM_ERRCB_CAP(hba->fm_caps)) { 10426 pci_ereport_setup(hba->dip); 10427 } 10428 10429 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) { 10430 ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb, 10431 (void *)hba); 10432 } 10433 10434 } /* emlxs_fm_init() */ 10435 10436 10437 extern void 10438 emlxs_fm_fini(emlxs_hba_t *hba) 10439 { 10440 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) { 10441 return; 10442 } 10443 10444 if (DDI_FM_EREPORT_CAP(hba->fm_caps) || 10445 DDI_FM_ERRCB_CAP(hba->fm_caps)) { 10446 pci_ereport_teardown(hba->dip); 10447 } 10448 10449 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) { 10450 ddi_fm_handler_unregister(hba->dip); 10451 } 10452 10453 (void) ddi_fm_fini(hba->dip); 10454 10455 } /* emlxs_fm_fini() */ 10456 10457 10458 extern int 10459 emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle) 10460 { 10461 ddi_fm_error_t err; 10462 10463 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) { 10464 return (DDI_FM_OK); 10465 } 10466 10467 /* Some S10 versions do not define the ahi_err structure */ 10468 if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) { 10469 return (DDI_FM_OK); 10470 } 10471 10472 
err.fme_status = DDI_FM_OK; 10473 (void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION); 10474 10475 /* Some S10 versions do not define the ddi_fm_acc_err_clear function */ 10476 if ((void *)&ddi_fm_acc_err_clear != NULL) { 10477 (void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 10478 } 10479 10480 return (err.fme_status); 10481 10482 } /* emlxs_fm_check_acc_handle() */ 10483 10484 10485 extern int 10486 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle) 10487 { 10488 ddi_fm_error_t err; 10489 10490 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) { 10491 return (DDI_FM_OK); 10492 } 10493 10494 err.fme_status = DDI_FM_OK; 10495 (void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION); 10496 10497 return (err.fme_status); 10498 10499 } /* emlxs_fm_check_dma_handle() */ 10500 10501 10502 extern void 10503 emlxs_fm_ereport(emlxs_hba_t *hba, char *detail) 10504 { 10505 uint64_t ena; 10506 char buf[FM_MAX_CLASS]; 10507 10508 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) { 10509 return; 10510 } 10511 10512 if (detail == NULL) { 10513 return; 10514 } 10515 10516 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 10517 ena = fm_ena_generate(0, FM_ENA_FMT1); 10518 10519 ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP, 10520 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 10521 10522 } /* emlxs_fm_ereport() */ 10523 10524 10525 extern void 10526 emlxs_fm_service_impact(emlxs_hba_t *hba, int impact) 10527 { 10528 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) { 10529 return; 10530 } 10531 10532 if (impact == NULL) { 10533 return; 10534 } 10535 10536 ddi_fm_service_impact(hba->dip, impact); 10537 10538 } 10539 10540 10541 /* 10542 * The I/O fault service error handling callback function 10543 */ 10544 /*ARGSUSED*/ 10545 extern int 10546 emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, 10547 const void *impl_data) 10548 { 10549 /* 10550 * as the driver can always deal with an error 10551 * in any dma or access handle, we can just return 10552 * the fme_status value. 10553 */ 10554 pci_ereport_post(dip, err, NULL); 10555 return (err->fme_status); 10556 10557 } /* emlxs_fm_error_cb() */ 10558 #endif /* FMA_SUPPORT */ 10559
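
/*
 * Illustrative sketch (not part of the driver): emlxs_pkt_complete() and
 * emlxs_iodone_server() above share a simple singly-linked FIFO with
 * separate head and tail pointers so that I/O completions are delivered
 * in order.  The structure and function names below are hypothetical and
 * locking is omitted; in the driver the list is protected by
 * EMLXS_PORT_LOCK.
 */
typedef struct example_doneq {
	emlxs_buf_t	*head;
	emlxs_buf_t	*tail;
	uint32_t	count;
} example_doneq_t;

static void
example_doneq_append(example_doneq_t *q, emlxs_buf_t *sbp)
{
	sbp->next = NULL;

	if (q->tail == NULL) {
		q->head = sbp;
		q->count = 1;
	} else {
		q->tail->next = sbp;
		q->count++;
	}
	q->tail = sbp;
}

static emlxs_buf_t *
example_doneq_take(example_doneq_t *q)
{
	emlxs_buf_t *sbp;

	if ((sbp = q->head) != NULL) {
		if ((q->head = sbp->next) == NULL) {
			q->tail = NULL;
			q->count = 0;
		} else {
			q->count--;
		}
		sbp->next = NULL;
	}

	return (sbp);
}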
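
#ifdef SAN_DIAG_SUPPORT
/*
 * Illustrative sketch (not part of the driver): emlxs_update_sd_bucket()
 * above maps an I/O latency into a histogram bucket by comparing it
 * against a sorted array of bucket boundaries.  The helper below shows
 * the same selection as a standalone function; the function name, the
 * boundary array type, and the parameters are assumptions made for this
 * example only.
 */
static int
example_latency_bucket(hrtime_t delta, hrtime_t *bounds, int nbuckets)
{
	int i;

	if (delta <= bounds[0]) {
		return (0);
	}
	if (delta >= bounds[nbuckets - 1]) {
		return (nbuckets - 1);
	}

	for (i = 1; i < nbuckets; i++) {
		if ((delta > bounds[i - 1]) && (delta <= bounds[i])) {
			return (i);
		}
	}

	return (nbuckets - 1);	/* Not reached for a sorted boundary array */
}
#endif /* SAN_DIAG_SUPPORT */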
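
/*
 * Illustrative sketch (not part of the driver): the emlxs_*_xlate()
 * routines above all perform the same linear scan of an emlxs_table_t
 * array and fall back to formatting the raw code into a static buffer.
 * A shared helper along these lines could factor that pattern out; the
 * helper name, its parameters, and the fallback prefix are assumptions
 * for this example.
 */
static char *
example_table_xlate(emlxs_table_t *table, uint32_t entries, uint32_t code,
    const char *prefix, char *buffer)
{
	uint32_t i;

	for (i = 0; i < entries; i++) {
		if (code == table[i].code) {
			return (table[i].string);
		}
	}

	/* No match found; format the raw code instead */
	(void) sprintf(buffer, "%s=0x%x", prefix, code);

	return (buffer);
}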
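
#ifdef IDLE_TIMER
/*
 * Illustrative sketch (not part of the driver): emlxs_pm_busy_component()
 * and emlxs_pm_idle_component() above use a check/lock/re-check sequence
 * so the common case (the flag is already in the desired state) avoids
 * taking pm_lock.  The structure and function below are hypothetical and
 * exist only to show that pattern in isolation.
 */
typedef struct example_pm_state {
	kmutex_t	lock;
	uint32_t	busy;
} example_pm_state_t;

static int
example_pm_mark_busy(example_pm_state_t *pm)
{
	/* Unlocked fast path: already busy, nothing to do */
	if (pm->busy) {
		return (1);
	}

	mutex_enter(&pm->lock);

	/* Re-check under the lock before changing state */
	if (pm->busy) {
		mutex_exit(&pm->lock);
		return (1);
	}
	pm->busy = 1;

	mutex_exit(&pm->lock);

	return (0);	/* Caller performs the busy notification */
}
#endif /* IDLE_TIMER */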
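
#ifdef SLI3_SUPPORT
/*
 * Illustrative sketch (not part of the driver): emlxs_read_vport_prop()
 * above converts each 16-character hex field of a "pwwpn:wwnn:wwpn:vpi"
 * property string into an 8-byte name, one nibble pair per byte.  A
 * minimal standalone helper capturing that conversion might look like
 * the following; the function name and its return convention are
 * assumptions made for this example only.
 */
static int
example_parse_wwn_hex(const char *s, uint8_t *wwn)
{
	uint32_t i;
	uint32_t j;
	uint32_t c;
	uint32_t nibble;
	uint32_t sum;

	for (i = 0; i < 8; i++) {
		sum = 0;

		for (j = 0; j < 2; j++) {
			c = (uint32_t)*s++;

			if ((c >= '0') && (c <= '9')) {
				nibble = (c - '0');
			} else if ((c >= 'a') && (c <= 'f')) {
				nibble = (c - 'a' + 10);
			} else if ((c >= 'A') && (c <= 'F')) {
				nibble = (c - 'A' + 10);
			} else {
				return (-1);	/* Not a hex digit */
			}

			sum = (sum << 4) | nibble;
		}

		wwn[i] = (uint8_t)sum;
	}

	return (0);
}
#endif /* SLI3_SUPPORT */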
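
/*
 * Usage sketch (not part of the driver): emlxs_wwn_xlate() formats a WWN
 * into a caller-supplied buffer and returns that same buffer, so it can
 * be used directly as a message argument.  The 8-byte WWN expands to 16
 * hex characters plus a terminating NUL, so any buffer of at least 17
 * bytes is sufficient; the function and buffer names below are for
 * illustration only.
 */
static void
example_log_wwn(uint8_t *wwn)
{
	char buf[32];

	(void) emlxs_wwn_xlate(buf, wwn);

	cmn_err(CE_CONT, "?wwn=%s\n", buf);
}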