/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * ibdm.c
 *
 * This file contains the InfiniBand Device Manager (IBDM) support functions.
 * The IB nexus driver will only be the client for the IBDM module.
 *
 * IBDM registers with IBTF for HCA arrival/removal notification.
 * IBDM registers with SA access to send DM MADs to discover the IOC's behind
 * the IOU's.
 *
 * The IB nexus driver registers with IBDM to find the information about the
 * HCA's and IOC's (behind the IOU) present on the IB fabric.
37 */ 38 39 #include <sys/systm.h> 40 #include <sys/taskq.h> 41 #include <sys/ib/mgt/ibdm/ibdm_impl.h> 42 #include <sys/ib/mgt/ibmf/ibmf_impl.h> 43 #include <sys/ib/ibtl/impl/ibtl_ibnex.h> 44 #include <sys/modctl.h> 45 46 /* Function Prototype declarations */ 47 static int ibdm_free_iou_info(ibdm_dp_gidinfo_t *, ibdm_iou_info_t **); 48 static int ibdm_fini(void); 49 static int ibdm_init(void); 50 static int ibdm_get_reachable_ports(ibdm_port_attr_t *, 51 ibdm_hca_list_t *); 52 static ibdm_dp_gidinfo_t *ibdm_check_dgid(ib_guid_t, ib_sn_prefix_t); 53 static ibdm_dp_gidinfo_t *ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *); 54 static boolean_t ibdm_is_cisco(ib_guid_t); 55 static boolean_t ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *); 56 static void ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *); 57 static int ibdm_set_classportinfo(ibdm_dp_gidinfo_t *); 58 static int ibdm_send_classportinfo(ibdm_dp_gidinfo_t *); 59 static int ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *); 60 static int ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *); 61 static int ibdm_get_node_port_guids(ibmf_saa_handle_t, ib_lid_t, 62 ib_guid_t *, ib_guid_t *); 63 static int ibdm_retry_command(ibdm_timeout_cb_args_t *); 64 static int ibdm_get_diagcode(ibdm_dp_gidinfo_t *, int); 65 static int ibdm_verify_mad_status(ib_mad_hdr_t *); 66 static int ibdm_handle_redirection(ibmf_msg_t *, 67 ibdm_dp_gidinfo_t *, int *); 68 static void ibdm_wait_probe_completion(void); 69 static void ibdm_sweep_fabric(int); 70 static void ibdm_probe_gid_thread(void *); 71 static void ibdm_wakeup_probe_gid_cv(void); 72 static void ibdm_port_attr_ibmf_init(ibdm_port_attr_t *, ib_pkey_t, int); 73 static int ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *, int); 74 static void ibdm_update_port_attr(ibdm_port_attr_t *); 75 static void ibdm_handle_hca_attach(ib_guid_t); 76 static void ibdm_handle_srventry_mad(ibmf_msg_t *, 77 ibdm_dp_gidinfo_t *, int *); 78 static void ibdm_ibmf_recv_cb(ibmf_handle_t, ibmf_msg_t *, void *); 79 static 
void ibdm_recv_incoming_mad(void *); 80 static void ibdm_process_incoming_mad(ibmf_handle_t, ibmf_msg_t *, void *); 81 static void ibdm_ibmf_send_cb(ibmf_handle_t, ibmf_msg_t *, void *); 82 static void ibdm_pkt_timeout_hdlr(void *arg); 83 static void ibdm_initialize_port(ibdm_port_attr_t *); 84 static void ibdm_update_port_pkeys(ibdm_port_attr_t *port); 85 static void ibdm_handle_diagcode(ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 86 static void ibdm_probe_gid(ibdm_dp_gidinfo_t *); 87 static void ibdm_alloc_send_buffers(ibmf_msg_t *); 88 static void ibdm_free_send_buffers(ibmf_msg_t *); 89 static void ibdm_handle_hca_detach(ib_guid_t); 90 static void ibdm_handle_port_change_event(ibt_async_event_t *); 91 static int ibdm_fini_port(ibdm_port_attr_t *); 92 static int ibdm_uninit_hca(ibdm_hca_list_t *); 93 static void ibdm_handle_setclassportinfo(ibmf_handle_t, ibmf_msg_t *, 94 ibdm_dp_gidinfo_t *, int *); 95 static void ibdm_handle_iounitinfo(ibmf_handle_t, 96 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 97 static void ibdm_handle_ioc_profile(ibmf_handle_t, 98 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 99 static void ibdm_event_hdlr(void *, ibt_hca_hdl_t, 100 ibt_async_code_t, ibt_async_event_t *); 101 static void ibdm_handle_classportinfo(ibmf_handle_t, 102 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 103 static void ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *, 104 ibdm_dp_gidinfo_t *); 105 106 static ibdm_hca_list_t *ibdm_dup_hca_attr(ibdm_hca_list_t *); 107 static ibdm_ioc_info_t *ibdm_dup_ioc_info(ibdm_ioc_info_t *, 108 ibdm_dp_gidinfo_t *gid_list); 109 static void ibdm_probe_ioc(ib_guid_t, ib_guid_t, int); 110 static ibdm_ioc_info_t *ibdm_is_ioc_present(ib_guid_t, 111 ibdm_dp_gidinfo_t *, int *); 112 static ibdm_port_attr_t *ibdm_get_port_attr(ibt_async_event_t *, 113 ibdm_hca_list_t **); 114 static sa_node_record_t *ibdm_get_node_records(ibmf_saa_handle_t, 115 size_t *, ib_guid_t); 116 static int ibdm_get_node_record_by_port(ibmf_saa_handle_t, 117 ib_guid_t, 
sa_node_record_t **, size_t *); 118 static sa_portinfo_record_t *ibdm_get_portinfo(ibmf_saa_handle_t, size_t *, 119 ib_lid_t); 120 static ibdm_dp_gidinfo_t *ibdm_create_gid_info(ibdm_port_attr_t *, 121 ib_gid_t, ib_gid_t); 122 static ibdm_dp_gidinfo_t *ibdm_find_gid(ib_guid_t, ib_guid_t); 123 static int ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *, uint8_t); 124 static ibdm_ioc_info_t *ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *, int); 125 static void ibdm_saa_event_cb(ibmf_saa_handle_t, ibmf_saa_subnet_event_t, 126 ibmf_saa_event_details_t *, void *); 127 static void ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *, 128 ibdm_dp_gidinfo_t *); 129 static ibdm_dp_gidinfo_t *ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *); 130 static void ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *, 131 ibdm_dp_gidinfo_t *); 132 static void ibdm_addto_gidlist(ibdm_gid_t **, ibdm_gid_t *); 133 static void ibdm_free_gid_list(ibdm_gid_t *); 134 static void ibdm_rescan_gidlist(ib_guid_t *ioc_guid); 135 static void ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *); 136 static void ibdm_saa_event_taskq(void *); 137 static void ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *); 138 static void ibdm_get_next_port(ibdm_hca_list_t **, 139 ibdm_port_attr_t **, int); 140 static void ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *, 141 ibdm_dp_gidinfo_t *); 142 static void ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *, 143 ibdm_hca_list_t *); 144 static void ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *); 145 static void ibdm_saa_handle_new_gid(void *); 146 static void ibdm_reset_all_dgids(ibmf_saa_handle_t); 147 static void ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *); 148 static void ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *); 149 static void ibdm_fill_srv_attr_mod(ib_mad_hdr_t *, ibdm_timeout_cb_args_t *); 150 static void ibdm_bump_transactionID(ibdm_dp_gidinfo_t *); 151 static ibdm_ioc_info_t *ibdm_handle_prev_iou(); 152 static int ibdm_serv_cmp(ibdm_srvents_info_t *, ibdm_srvents_info_t *, 153 int); 154 static ibdm_ioc_info_t 
*ibdm_get_ioc_info_with_gid(ib_guid_t,
			ibdm_dp_gidinfo_t **);

/* Tunables: DM MAD response timeout and retry count (see ibdm_impl.h). */
int	ibdm_dft_timeout	= IBDM_DFT_TIMEOUT;
int	ibdm_dft_retry_cnt	= IBDM_DFT_NRETRIES;
#ifdef DEBUG
/* Debug-only: when non-zero, SA subnet events are ignored. */
int	ibdm_ignore_saa_event = 0;
#endif

/* Modload support */
static struct modlmisc ibdm_modlmisc	= {
	&mod_miscops,
	"InfiniBand Device Manager"
};

struct modlinkage ibdm_modlinkage = {
	MODREV_1,
	(void *)&ibdm_modlmisc,
	NULL
};

/* IBTF client registration info: async events delivered to ibdm_event_hdlr */
static ibt_clnt_modinfo_t ibdm_ibt_modinfo	= {
	IBTI_V_CURR,
	IBT_DM,
	ibdm_event_hdlr,
	NULL,
	"ibdm"
};

/* Global variables */
ibdm_t	ibdm;				/* single global IBDM soft state */
int	ibdm_taskq_enable = IBDM_ENABLE_TASKQ_HANDLING;
char	*ibdm_string = "ibdm";

_NOTE(SCHEME_PROTECTS_DATA("Serialized access by cv",
    ibdm.ibdm_dp_gidlist_head))

/*
 * _init
 *	Loadable module init, called before any other module.
 *	Initialize mutex
 *	Register with IBTF
 *
 *	On ibdm_init() failure the partial state is torn down via
 *	ibdm_fini() before returning DDI_FAILURE.
 */
int
_init(void)
{
	int		err;

	IBTF_DPRINTF_L4("ibdm", "\t_init: addr of ibdm %p", &ibdm);

	if ((err = ibdm_init()) != IBDM_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "_init: ibdm_init failed 0x%x", err);
		(void) ibdm_fini();
		return (DDI_FAILURE);
	}

	if ((err = mod_install(&ibdm_modlinkage)) != 0) {
		IBTF_DPRINTF_L2("ibdm", "_init: mod_install failed 0x%x", err);
		(void) ibdm_fini();
	}
	return (err);
}


/*
 * _fini
 *	Loadable module cleanup.  If either ibdm_fini() or mod_remove()
 *	fails, ibdm_init() is re-run to restore the state torn down so
 *	far so the module remains usable.
 */
int
_fini(void)
{
	int err;

	if ((err = ibdm_fini()) != IBDM_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "_fini: ibdm_fini failed 0x%x", err);
		(void) ibdm_init();
		return (EBUSY);
	}

	if ((err = mod_remove(&ibdm_modlinkage)) != 0) {
		IBTF_DPRINTF_L2("ibdm", "_fini: mod_remove failed 0x%x", err);
		(void) ibdm_init();
	}
	return (err);
}


/*
 * _info
 *	Loadable module information.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&ibdm_modlinkage, modinfop));
}


/*
 * ibdm_init():
 *	Register with IBTF
 *	Allocate memory for the HCAs
 *	Allocate minor-nodes for the HCAs
 *
 *	Idempotent: each stage is guarded by an ibdm_state flag so a
 *	partially-completed init (e.g. after a failed _fini) only redoes
 *	the missing stages.
 */
static int
ibdm_init(void)
{
	int			i, hca_count;
	ib_guid_t		*hca_guids;
	ibt_status_t		status;

	IBTF_DPRINTF_L4("ibdm", "\tibdm_init:");
	if (!(ibdm.ibdm_state & IBDM_LOCKS_ALLOCED)) {
		mutex_init(&ibdm.ibdm_mutex, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ibdm.ibdm_hl_mutex, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ibdm.ibdm_ibnex_mutex, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&ibdm.ibdm_port_settle_cv, NULL, CV_DRIVER, NULL);
		/* ibdm_mutex is held here and released in the next stage */
		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_state |= IBDM_LOCKS_ALLOCED;
	}

	/*
	 * NOTE(review): if IBDM_LOCKS_ALLOCED is already set but
	 * IBDM_IBT_ATTACHED is not (possible after a partially failed
	 * ibdm_fini()), this block calls mutex_exit() without a matching
	 * mutex_enter() above — looks unbalanced; confirm against the
	 * reachable state transitions.
	 */
	if (!(ibdm.ibdm_state & IBDM_IBT_ATTACHED)) {
		if ((status = ibt_attach(&ibdm_ibt_modinfo, NULL, NULL,
		    (void *)&ibdm.ibdm_ibt_clnt_hdl)) != IBT_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "ibdm_init: ibt_attach "
			    "failed %x", status);
			mutex_exit(&ibdm.ibdm_mutex);
			return (IBDM_FAILURE);
		}

		ibdm.ibdm_state |= IBDM_IBT_ATTACHED;
		mutex_exit(&ibdm.ibdm_mutex);
	}


	if (!(ibdm.ibdm_state & IBDM_HCA_ATTACHED)) {
		hca_count = ibt_get_hca_list(&hca_guids);
		IBTF_DPRINTF_L4("ibdm", "ibdm_init: num_hcas = %d", hca_count);
		for (i = 0; i < hca_count; i++)
			(void) ibdm_handle_hca_attach(hca_guids[i]);
		if (hca_count)
			ibt_free_hca_list(hca_guids, hca_count);

		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_state |= IBDM_HCA_ATTACHED;
		mutex_exit(&ibdm.ibdm_mutex);
	}

	if (!(ibdm.ibdm_state & IBDM_CVS_ALLOCED)) {
		cv_init(&ibdm.ibdm_probe_cv, NULL, CV_DRIVER, NULL);
		cv_init(&ibdm.ibdm_busy_cv, NULL, CV_DRIVER, NULL);
		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_state |= IBDM_CVS_ALLOCED;
		mutex_exit(&ibdm.ibdm_mutex);
	}
	return (IBDM_SUCCESS);
}


/*
 * ibdm_free_iou_info():
 *	Cancel any outstanding IOC/service-entry timeouts and free the
 *	IO Unit info (and per-IOC GID and service-entry lists) hanging
 *	off *ioup, finally clearing *ioup.
 *
 *	Caller must hold gid_info->gl_mutex (ASSERTed below).  The mutex
 *	is dropped around each untimeout() call — presumably because the
 *	timeout handler itself takes gl_mutex, so holding it across
 *	untimeout() could deadlock — and reacquired afterwards.
 *
 *	Returns 0 on success, -1 if an untimeout() fails (in which case
 *	the IOU info is NOT freed).
 */
static int
ibdm_free_iou_info(ibdm_dp_gidinfo_t *gid_info, ibdm_iou_info_t **ioup)
{
	int			ii, k, niocs;
	size_t			size;
	ibdm_gid_t		*delete, *head;
	timeout_id_t		timeout_id;
	ibdm_ioc_info_t		*ioc;
	ibdm_iou_info_t		*gl_iou = *ioup;

	ASSERT(mutex_owned(&gid_info->gl_mutex));
	if (gl_iou == NULL) {
		IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: No IOU");
		return (0);
	}

	niocs = gl_iou->iou_info.iou_num_ctrl_slots;
	IBTF_DPRINTF_L4("ibdm", "\tfree_iou_info: gid_info = %p, niocs %d",
	    gid_info, niocs);

	for (ii = 0; ii < niocs; ii++) {
		ioc = (ibdm_ioc_info_t *)&gl_iou->iou_ioc_info[ii];

		/* handle the case where an ioc_timeout_id is scheduled */
		if (ioc->ioc_timeout_id) {
			/* clear the id first so the handler sees it gone */
			timeout_id = ioc->ioc_timeout_id;
			ioc->ioc_timeout_id = 0;
			mutex_exit(&gid_info->gl_mutex);
			IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
			    "ioc_timeout_id = 0x%x", timeout_id);
			if (untimeout(timeout_id) == -1) {
				IBTF_DPRINTF_L2("ibdm", "free_iou_info: "
				    "untimeout ioc_timeout_id failed");
				mutex_enter(&gid_info->gl_mutex);
				return (-1);
			}
			mutex_enter(&gid_info->gl_mutex);
		}

		/* handle the case where an ioc_dc_timeout_id is scheduled */
		if (ioc->ioc_dc_timeout_id) {
			timeout_id = ioc->ioc_dc_timeout_id;
			ioc->ioc_dc_timeout_id = 0;
			mutex_exit(&gid_info->gl_mutex);
			IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
			    "ioc_dc_timeout_id = 0x%x", timeout_id);
			if (untimeout(timeout_id) == -1) {
				IBTF_DPRINTF_L2("ibdm", "free_iou_info: "
				    "untimeout ioc_dc_timeout_id failed");
				mutex_enter(&gid_info->gl_mutex);
				return (-1);
			}
			mutex_enter(&gid_info->gl_mutex);
		}

		/* handle the case where serv[k].se_timeout_id is scheduled */
		for (k = 0; k < ioc->ioc_profile.ioc_service_entries; k++) {
			if (ioc->ioc_serv[k].se_timeout_id) {
				timeout_id = ioc->ioc_serv[k].se_timeout_id;
				ioc->ioc_serv[k].se_timeout_id = 0;
				mutex_exit(&gid_info->gl_mutex);
				IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
				    "ioc->ioc_serv[%d].se_timeout_id = 0x%x",
				    k, timeout_id);
				if (untimeout(timeout_id) == -1) {
					IBTF_DPRINTF_L2("ibdm", "free_iou_info:"
					    " untimeout se_timeout_id failed");
					mutex_enter(&gid_info->gl_mutex);
					return (-1);
				}
				mutex_enter(&gid_info->gl_mutex);
			}
		}

		/* delete GID list in IOC */
		head = ioc->ioc_gid_list;
		while (head) {
			IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: "
			    "Deleting gid_list struct %p", head);
			delete = head;
			head = head->gid_next;
			kmem_free(delete, sizeof (ibdm_gid_t));
		}
		ioc->ioc_gid_list = NULL;

		/* delete ioc_serv */
		size = ioc->ioc_profile.ioc_service_entries *
		    sizeof (ibdm_srvents_info_t);
		if (ioc->ioc_serv && size) {
			kmem_free(ioc->ioc_serv, size);
			ioc->ioc_serv = NULL;
		}
	}
	/*
	 * Clear the IBDM_CISCO_PROBE_DONE flag to get the IO Unit information
	 * via the switch during the probe process.
	 */
	gid_info->gl_flag &= ~IBDM_CISCO_PROBE_DONE;

	IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: deleting IOU & IOC");
	size = sizeof (ibdm_iou_info_t) + niocs * sizeof (ibdm_ioc_info_t);
	kmem_free(gl_iou, size);
	*ioup = NULL;
	return (0);
}


/*
 * ibdm_fini():
 *	Un-register with IBTF
 *	De allocate memory for the GID info
 *
 *	Mirrors ibdm_init(): tears down each stage and clears its
 *	ibdm_state flag.  Early failure returns (ibt_detach or
 *	ibdm_uninit_hca) leave the remaining state intact so that a
 *	subsequent ibdm_init() can re-establish it.
 */
static int
ibdm_fini()
{
	int			ii;
	ibdm_hca_list_t		*hca_list, *temp;
	ibdm_dp_gidinfo_t	*gid_info, *tmp;
	ibdm_gid_t		*head, *delete;

	IBTF_DPRINTF_L4("ibdm", "\tibdm_fini");

	mutex_enter(&ibdm.ibdm_hl_mutex);
	if (ibdm.ibdm_state & IBDM_IBT_ATTACHED) {
		if (ibt_detach(ibdm.ibdm_ibt_clnt_hdl) != IBT_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\t_fini: ibt_detach failed");
			mutex_exit(&ibdm.ibdm_hl_mutex);
			return (IBDM_FAILURE);
		}
		ibdm.ibdm_state &= ~IBDM_IBT_ATTACHED;
		ibdm.ibdm_ibt_clnt_hdl = NULL;
	}

	hca_list = ibdm.ibdm_hca_list_head;
	IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: nhcas %d", ibdm.ibdm_hca_count);
	for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
		temp = hca_list;
		hca_list = hca_list->hl_next;
		IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: hca %p", temp);
		if (ibdm_uninit_hca(temp) != IBDM_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\tibdm_fini: "
			    "uninit_hca %p failed", temp);
			mutex_exit(&ibdm.ibdm_hl_mutex);
			return (IBDM_FAILURE);
		}
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);

	mutex_enter(&ibdm.ibdm_mutex);
	if (ibdm.ibdm_state & IBDM_HCA_ATTACHED)
		ibdm.ibdm_state &= ~IBDM_HCA_ATTACHED;

	/* Free every gidinfo node: its IOU info, HCA list and GID list */
	gid_info = ibdm.ibdm_dp_gidlist_head;
	while (gid_info) {
		mutex_enter(&gid_info->gl_mutex);
		(void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou);
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);

		tmp = gid_info;
		gid_info = gid_info->gl_next;
		mutex_destroy(&tmp->gl_mutex);
		head = tmp->gl_gid;
		while (head) {
			IBTF_DPRINTF_L4("ibdm",
			    "\tibdm_fini: Deleting gid structs");
			delete = head;
			head = head->gid_next;
			kmem_free(delete, sizeof (ibdm_gid_t));
		}
		kmem_free(tmp, sizeof (ibdm_dp_gidinfo_t));
	}
	mutex_exit(&ibdm.ibdm_mutex);

	if (ibdm.ibdm_state & IBDM_LOCKS_ALLOCED) {
		ibdm.ibdm_state &= ~IBDM_LOCKS_ALLOCED;
		mutex_destroy(&ibdm.ibdm_mutex);
		mutex_destroy(&ibdm.ibdm_hl_mutex);
		mutex_destroy(&ibdm.ibdm_ibnex_mutex);
		cv_destroy(&ibdm.ibdm_port_settle_cv);
	}
	if (ibdm.ibdm_state & IBDM_CVS_ALLOCED) {
		ibdm.ibdm_state &= ~IBDM_CVS_ALLOCED;
		cv_destroy(&ibdm.ibdm_probe_cv);
		cv_destroy(&ibdm.ibdm_busy_cv);
	}
	return (IBDM_SUCCESS);
}


/*
 * ibdm_event_hdlr()
 *
 *	IBDM registers this asynchronous event handler at the time of
 *	ibt_attach. IBDM supports the following async events. For other
 *	events it simply returns.
 *	IBT_HCA_ATTACH_EVENT:
 *		Retrieves the information about all the ports that are
 *		present on this HCA, allocates the port attributes
 *		structure and calls IB nexus callback routine with
 *		the port attributes structure as an input argument.
 *	IBT_HCA_DETACH_EVENT:
 *		Retrieves the information about all the ports that are
 *		present on this HCA and calls IB nexus callback with
 *		port guid as an argument
 *	IBT_EVENT_PORT_UP:
 *		Register with IBMF and SA access
 *		Setup IBMF receive callback routine
 *	IBT_ERROR_PORT_DOWN:
 *		Un-Register with IBMF and SA access
 *		Teardown IBMF receive callback routine
 */
/*ARGSUSED*/
static void
ibdm_event_hdlr(void *clnt_hdl,
    ibt_hca_hdl_t hca_hdl, ibt_async_code_t code, ibt_async_event_t *event)
{
	ibdm_hca_list_t		*hca_list;
	ibdm_port_attr_t	*port;
	ibmf_saa_handle_t	port_sa_hdl;

	IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: async code 0x%x", code);

	switch (code) {
	case IBT_HCA_ATTACH_EVENT:	/* New HCA registered with IBTF */
		ibdm_handle_hca_attach(event->ev_hca_guid);
		break;

	case IBT_HCA_DETACH_EVENT:	/* HCA unregistered with IBTF */
		ibdm_handle_hca_detach(event->ev_hca_guid);
		/* Inform IB nexus driver of the removal */
		mutex_enter(&ibdm.ibdm_ibnex_mutex);
		if (ibdm.ibdm_ibnex_callback != NULL) {
			(*ibdm.ibdm_ibnex_callback)((void *)
			    &event->ev_hca_guid, IBDM_EVENT_HCA_REMOVED);
		}
		mutex_exit(&ibdm.ibdm_ibnex_mutex);
		break;

	case IBT_EVENT_PORT_UP:
		IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_UP");
		mutex_enter(&ibdm.ibdm_hl_mutex);
		port = ibdm_get_port_attr(event, &hca_list);
		if (port == NULL) {
			IBTF_DPRINTF_L2("ibdm",
			    "\tevent_hdlr: HCA not present");
			mutex_exit(&ibdm.ibdm_hl_mutex);
			break;
		}
		ibdm_initialize_port(port);
		hca_list->hl_nports_active++;
		/* wake threads waiting in ibdm for ports to settle */
		cv_broadcast(&ibdm.ibdm_port_settle_cv);
		mutex_exit(&ibdm.ibdm_hl_mutex);

		/* Inform IB nexus driver */
		mutex_enter(&ibdm.ibdm_ibnex_mutex);
		if (ibdm.ibdm_ibnex_callback != NULL) {
			(*ibdm.ibdm_ibnex_callback)((void *)
			    &event->ev_hca_guid, IBDM_EVENT_PORT_UP);
		}
		mutex_exit(&ibdm.ibdm_ibnex_mutex);
		break;

	case IBT_ERROR_PORT_DOWN:
		IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_DOWN");
		mutex_enter(&ibdm.ibdm_hl_mutex);
		port = ibdm_get_port_attr(event, &hca_list);
		if (port == NULL) {
			IBTF_DPRINTF_L2("ibdm",
			    "\tevent_hdlr: HCA not present");
			mutex_exit(&ibdm.ibdm_hl_mutex);
			break;
		}
		hca_list->hl_nports_active--;
		/* save SA handle before ibdm_fini_port() tears the port down */
		port_sa_hdl = port->pa_sa_hdl;
		(void) ibdm_fini_port(port);
		port->pa_state = IBT_PORT_DOWN;
		cv_broadcast(&ibdm.ibdm_port_settle_cv);
		mutex_exit(&ibdm.ibdm_hl_mutex);
		ibdm_reset_all_dgids(port_sa_hdl);
		break;

	case IBT_PORT_CHANGE_EVENT:
		IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_CHANGE");
		/* only pkey-table changes are of interest to IBDM */
		if (event->ev_port_flags & IBT_PORT_CHANGE_PKEY)
			ibdm_handle_port_change_event(event);
		break;

	default:		/* Ignore all other events/errors */
		break;
	}
}

/*
 * ibdm_handle_port_change_event()
 *	Refresh the pkey table of the affected port and notify the IB
 *	nexus driver of the pkey change.
 */
static void
ibdm_handle_port_change_event(ibt_async_event_t *event)
{
	ibdm_port_attr_t	*port;
	ibdm_hca_list_t		*hca_list;

	IBTF_DPRINTF_L2("ibdm", "\tibdm_handle_port_change_event:"
	    " HCA guid  %llx", event->ev_hca_guid);
	mutex_enter(&ibdm.ibdm_hl_mutex);
	port = ibdm_get_port_attr(event, &hca_list);
	if (port == NULL) {
		IBTF_DPRINTF_L2("ibdm", "\tevent_hdlr: HCA not present");
		mutex_exit(&ibdm.ibdm_hl_mutex);
		return;
	}
	ibdm_update_port_pkeys(port);
	cv_broadcast(&ibdm.ibdm_port_settle_cv);
	mutex_exit(&ibdm.ibdm_hl_mutex);

	/* Inform IB nexus driver */
	mutex_enter(&ibdm.ibdm_ibnex_mutex);
	if (ibdm.ibdm_ibnex_callback != NULL) {
		(*ibdm.ibdm_ibnex_callback)((void *)
		    &event->ev_hca_guid, IBDM_EVENT_PORT_PKEY_CHANGE);
	}
	mutex_exit(&ibdm.ibdm_ibnex_mutex);
}

/*
 *
ibdm_update_port_pkeys()
 *	Update the pkey table
 *	Update the port attributes
 *
 *	Builds a new pkey table from fresh portinfo.  QP handles for
 *	pkeys that survive the change are migrated from the old table
 *	(and NULLed there); new valid pkeys get a fresh IBMF QP; QP
 *	handles left in the old table belong to vanished pkeys and are
 *	freed via ibdm_port_attr_ibmf_fini() before the old table is
 *	released.  Caller holds ibdm_hl_mutex (ASSERTed below).
 */
static void
ibdm_update_port_pkeys(ibdm_port_attr_t *port)
{
	uint_t			nports, size;
	uint_t			pkey_idx, opkey_idx;
	uint16_t		npkeys;
	ibt_hca_portinfo_t	*pinfop;
	ib_pkey_t		pkey;
	ibdm_pkey_tbl_t		*pkey_tbl;
	ibdm_port_attr_t	newport;

	IBTF_DPRINTF_L4("ibdm", "\tupdate_port_pkeys:");
	ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));

	/* Check whether the port is active */
	if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL,
	    NULL) != IBT_SUCCESS)
		return;

	if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num,
	    &pinfop, &nports, &size) != IBT_SUCCESS) {
		/* This should not occur */
		port->pa_npkeys = 0;
		port->pa_pkey_tbl = NULL;
		return;
	}

	npkeys = pinfop->p_pkey_tbl_sz;
	pkey_tbl = kmem_zalloc(npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);
	/* temporary port_attr so ibmf_init can fill the NEW table's QPs */
	newport.pa_pkey_tbl = pkey_tbl;
	newport.pa_ibmf_hdl = port->pa_ibmf_hdl;

	for (pkey_idx = 0; pkey_idx < npkeys; pkey_idx++) {
		pkey = pkey_tbl[pkey_idx].pt_pkey =
		    pinfop->p_pkey_tbl[pkey_idx];
		/*
		 * Is this pkey present in the current table ?
		 */
		for (opkey_idx = 0; opkey_idx < port->pa_npkeys; opkey_idx++) {
			if (pkey == port->pa_pkey_tbl[opkey_idx].pt_pkey) {
				/* migrate QP handle; clear old slot */
				pkey_tbl[pkey_idx].pt_qp_hdl =
				    port->pa_pkey_tbl[opkey_idx].pt_qp_hdl;
				port->pa_pkey_tbl[opkey_idx].pt_qp_hdl = NULL;
				break;
			}
		}

		if (opkey_idx == port->pa_npkeys) {
			/* pkey is new to this port */
			pkey = pkey_tbl[pkey_idx].pt_pkey;
			if (IBDM_INVALID_PKEY(pkey)) {
				pkey_tbl[pkey_idx].pt_qp_hdl = NULL;
				continue;
			}
			ibdm_port_attr_ibmf_init(&newport, pkey, pkey_idx);
		}
	}

	/* any QP handle still in the old table belongs to a removed pkey */
	for (opkey_idx = 0; opkey_idx < port->pa_npkeys; opkey_idx++) {
		if (port->pa_pkey_tbl[opkey_idx].pt_qp_hdl != NULL) {
			if (ibdm_port_attr_ibmf_fini(port, opkey_idx) !=
			    IBDM_SUCCESS) {
				IBTF_DPRINTF_L2("ibdm", "\tupdate_port_pkeys: "
				    "ibdm_port_attr_ibmf_fini failed for "
				    "port pkey 0x%x",
				    port->pa_pkey_tbl[opkey_idx].pt_pkey);
			}
		}
	}

	if (port->pa_pkey_tbl != NULL) {
		kmem_free(port->pa_pkey_tbl,
		    port->pa_npkeys * sizeof (ibdm_pkey_tbl_t));
	}

	port->pa_npkeys = npkeys;
	port->pa_pkey_tbl = pkey_tbl;
	port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix;
	port->pa_state = pinfop->p_linkstate;
	ibt_free_portinfo(pinfop, size);
}

/*
 * ibdm_initialize_port()
 *	Register with IBMF
 *	Register with SA access
 *	Register a receive callback routine with IBMF. IBMF invokes
 *	this routine whenever a MAD arrives at this port.
 *	Update the port attributes
 */
static void
ibdm_initialize_port(ibdm_port_attr_t *port)
{
	int				ii;
	uint_t				nports, size;
	uint_t				pkey_idx;
	ib_pkey_t			pkey;
	ibt_hca_portinfo_t		*pinfop;
	ibmf_register_info_t		ibmf_reg;
	ibmf_saa_subnet_event_args_t	event_args;

	IBTF_DPRINTF_L4("ibdm", "\tinitialize_port:");
	ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));

	/* Check whether the port is active */
	if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL,
	    NULL) != IBT_SUCCESS)
		return;

	/* an existing SA session means this port is already initialized */
	if (port->pa_sa_hdl != NULL)
		return;

	if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num,
	    &pinfop, &nports, &size) != IBT_SUCCESS) {
		/* This should not occur */
		port->pa_npkeys = 0;
		port->pa_pkey_tbl = NULL;
		return;
	}
	port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix;

	port->pa_state = pinfop->p_linkstate;
	port->pa_npkeys = pinfop->p_pkey_tbl_sz;
	port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc(
	    port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);

	for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++)
		port->pa_pkey_tbl[pkey_idx].pt_pkey =
		    pinfop->p_pkey_tbl[pkey_idx];

	ibt_free_portinfo(pinfop, size);

	/* open an SA session; subnet events arrive via ibdm_saa_event_cb */
	event_args.is_event_callback = ibdm_saa_event_cb;
	event_args.is_event_callback_arg = port;
	if (ibmf_sa_session_open(port->pa_port_guid, 0, &event_args,
	    IBMF_VERSION, 0, &port->pa_sa_hdl) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
		    "sa access registration failed");
		return;
	}

	/* register as the Device Management class manager for this port */
	ibmf_reg.ir_ci_guid		= port->pa_hca_guid;
	ibmf_reg.ir_port_num		= port->pa_port_num;
	ibmf_reg.ir_client_class	= DEV_MGT_MANAGER;

	if (ibmf_register(&ibmf_reg, IBMF_VERSION, 0, NULL, NULL,
	    &port->pa_ibmf_hdl, &port->pa_ibmf_caps) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
		    "IBMF registration failed");
		/* roll back whatever was set up so far */
		(void) ibdm_fini_port(port);
		return;
	}
	if (ibmf_setup_async_cb(port->pa_ibmf_hdl, IBMF_QP_HANDLE_DEFAULT,
	    ibdm_ibmf_recv_cb, 0, 0) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
		    "IBMF setup recv cb failed");
		(void) ibdm_fini_port(port);
		return;
	}

	/* one alternate QP (and recv callback) per valid pkey */
	for (ii = 0; ii < port->pa_npkeys; ii++) {
		pkey = port->pa_pkey_tbl[ii].pt_pkey;
		if (IBDM_INVALID_PKEY(pkey)) {
			port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
			continue;
		}
		ibdm_port_attr_ibmf_init(port, pkey, ii);
	}
}


/*
 * ibdm_port_attr_ibmf_init:
 *	With IBMF - Alloc QP Handle and Setup Async callback
 *
 *	On any failure the pkey slot's pt_qp_hdl is left NULL so later
 *	passes over the table can tell the QP was never set up.
 */
static void
ibdm_port_attr_ibmf_init(ibdm_port_attr_t *port, ib_pkey_t pkey, int ii)
{
	int ret;

	if ((ret = ibmf_alloc_qp(port->pa_ibmf_hdl, pkey, IB_GSI_QKEY,
	    IBMF_ALT_QP_MAD_NO_RMPP, &port->pa_pkey_tbl[ii].pt_qp_hdl)) !=
	    IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: "
		    "IBMF failed to alloc qp %d", ret);
		port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
		return;
	}

	IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_init: QP handle is %p",
	    port->pa_ibmf_hdl);

	if ((ret = ibmf_setup_async_cb(port->pa_ibmf_hdl,
	    port->pa_pkey_tbl[ii].pt_qp_hdl, ibdm_ibmf_recv_cb, 0, 0)) !=
	    IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: "
		    "IBMF setup recv cb failed %d", ret);
		/* callback setup failed: release the QP just allocated */
		(void) ibmf_free_qp(port->pa_ibmf_hdl,
		    &port->pa_pkey_tbl[ii].pt_qp_hdl, 0);
		port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
	}
}


/*
 * ibdm_get_port_attr()
 *	Get port attributes from HCA guid and port number
 *	Return pointer to ibdm_port_attr_t on Success
 *	and NULL on failure
 */
static ibdm_port_attr_t *
ibdm_get_port_attr(ibt_async_event_t *event, ibdm_hca_list_t **retval)
{
	ibdm_hca_list_t		*hca_list;
	ibdm_port_attr_t	*port_attr;
	int			ii;

	IBTF_DPRINTF_L4("ibdm", "\tget_port_attr: port# %d", event->ev_port);
	ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
	/* linear scan of the HCA list for (hca_guid, port_num) match */
	hca_list = ibdm.ibdm_hca_list_head;
	while (hca_list) {
		if (hca_list->hl_hca_guid == event->ev_hca_guid) {
			for (ii = 0; ii < hca_list->hl_nports; ii++) {
				port_attr = &hca_list->hl_port_attr[ii];
				if (port_attr->pa_port_num == event->ev_port) {
					/* also hand back the owning HCA */
					*retval = hca_list;
					return (port_attr);
				}
			}
		}
		hca_list = hca_list->hl_next;
	}
	return (NULL);
}


/*
 * ibdm_update_port_attr()
 *	Update the port attributes
 *
 *	Re-reads portinfo for the port and refreshes sn_prefix, link
 *	state and the pkey table.  The pkey table is only rebuilt when
 *	the port is ACTIVE; otherwise it is cleared.
 */
static void
ibdm_update_port_attr(ibdm_port_attr_t *port)
{
	uint_t			nports, size;
	uint_t			pkey_idx;
	ibt_hca_portinfo_t	*portinfop;

	IBTF_DPRINTF_L4("ibdm", "\tupdate_port_attr: Begin");
	if (ibt_query_hca_ports(port->pa_hca_hdl,
	    port->pa_port_num, &portinfop, &nports, &size) != IBT_SUCCESS) {
		/* This should not occur */
		port->pa_npkeys = 0;
		port->pa_pkey_tbl = NULL;
		return;
	}
	port->pa_sn_prefix = portinfop->p_sgid_tbl[0].gid_prefix;

	port->pa_state = portinfop->p_linkstate;

	/*
	 * PKey information in portinfo valid only if port is
	 * ACTIVE. Bail out if not.
	 */
	if (port->pa_state != IBT_PORT_ACTIVE) {
		port->pa_npkeys = 0;
		port->pa_pkey_tbl = NULL;
		ibt_free_portinfo(portinfop, size);
		return;
	}

	port->pa_npkeys = portinfop->p_pkey_tbl_sz;
	port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc(
	    port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);

	for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) {
		port->pa_pkey_tbl[pkey_idx].pt_pkey =
		    portinfop->p_pkey_tbl[pkey_idx];
	}
	ibt_free_portinfo(portinfop, size);
}


/*
 * ibdm_handle_hca_attach()
 *	Open the new HCA, query its attributes and ports, build an
 *	ibdm_hca_list_t (including a dummy per-HCA-node port_attr),
 *	initialize any ACTIVE ports, link the HCA into the global list
 *	and notify the IB nexus driver.
 */
static void
ibdm_handle_hca_attach(ib_guid_t hca_guid)
{
	uint_t			size;
	uint_t			ii, nports;
	ibt_status_t		status;
	ibt_hca_hdl_t		hca_hdl;
	ibt_hca_attr_t		*hca_attr;
	ibdm_hca_list_t		*hca_list, *temp;
	ibdm_port_attr_t	*port_attr;
	ibt_hca_portinfo_t	*portinfop;

	IBTF_DPRINTF_L4("ibdm",
	    "\thandle_hca_attach: hca_guid = 0x%llX", hca_guid);

	/* open the HCA first */
	if ((status = ibt_open_hca(ibdm.ibdm_ibt_clnt_hdl, hca_guid,
	    &hca_hdl)) != IBT_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: "
		    "open_hca failed, status 0x%x", status);
		return;
	}

	hca_attr = (ibt_hca_attr_t *)
	    kmem_alloc(sizeof (ibt_hca_attr_t), KM_SLEEP);
	/* ibt_query_hca always returns IBT_SUCCESS */
	(void) ibt_query_hca(hca_hdl, hca_attr);

	IBTF_DPRINTF_L4("ibdm", "\tvid: 0x%x, pid: 0x%x, ver: 0x%x,"
	    " #ports: %d", hca_attr->hca_vendor_id, hca_attr->hca_device_id,
	    hca_attr->hca_version_id, hca_attr->hca_nports);

	/* port 0 means: query all ports of this HCA at once */
	if ((status = ibt_query_hca_ports(hca_hdl, 0, &portinfop, &nports,
	    &size)) != IBT_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: "
		    "ibt_query_hca_ports failed, status 0x%x", status);
		kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
		(void) ibt_close_hca(hca_hdl);
		return;
	}
	hca_list = (ibdm_hca_list_t *)
	    kmem_zalloc((sizeof (ibdm_hca_list_t)), KM_SLEEP);
	hca_list->hl_port_attr = (ibdm_port_attr_t *)kmem_zalloc(
	    (sizeof (ibdm_port_attr_t) * hca_attr->hca_nports), KM_SLEEP);
	hca_list->hl_hca_guid = hca_attr->hca_node_guid;
	hca_list->hl_nports = hca_attr->hca_nports;
	hca_list->hl_attach_time = ddi_get_time();
	hca_list->hl_hca_hdl = hca_hdl;

	/*
	 * Init a dummy port attribute for the HCA node
	 * This is for Per-HCA Node. Initialize port_attr :
	 *	hca_guid & port_guid -> hca_guid
	 *	npkeys, pkey_tbl is NULL
	 *	port_num, sn_prefix is 0
	 *	vendorid, product_id, dev_version from HCA
	 *	pa_state is IBT_PORT_ACTIVE
	 */
	hca_list->hl_hca_port_attr = (ibdm_port_attr_t *)kmem_zalloc(
	    sizeof (ibdm_port_attr_t), KM_SLEEP);
	port_attr = hca_list->hl_hca_port_attr;
	port_attr->pa_vendorid = hca_attr->hca_vendor_id;
	port_attr->pa_productid = hca_attr->hca_device_id;
	port_attr->pa_dev_version = hca_attr->hca_version_id;
	port_attr->pa_hca_guid = hca_attr->hca_node_guid;
	port_attr->pa_hca_hdl = hca_list->hl_hca_hdl;
	port_attr->pa_port_guid = hca_attr->hca_node_guid;
	port_attr->pa_state = IBT_PORT_ACTIVE;


	/* fill in one real port_attr per physical port */
	for (ii = 0; ii < nports; ii++) {
		port_attr = &hca_list->hl_port_attr[ii];
		port_attr->pa_vendorid = hca_attr->hca_vendor_id;
		port_attr->pa_productid = hca_attr->hca_device_id;
		port_attr->pa_dev_version = hca_attr->hca_version_id;
		port_attr->pa_hca_guid = hca_attr->hca_node_guid;
		port_attr->pa_hca_hdl = hca_list->hl_hca_hdl;
		port_attr->pa_port_guid = portinfop[ii].p_sgid_tbl->gid_guid;
		port_attr->pa_sn_prefix = portinfop[ii].p_sgid_tbl->gid_prefix;
		port_attr->pa_port_num = portinfop[ii].p_port_num;
		port_attr->pa_state = portinfop[ii].p_linkstate;

		/*
		 * Register with IBMF, SA access when the port is in
		 * ACTIVE state. Also register a callback routine
		 * with IBMF to receive incoming DM MAD's.
		 * The IBDM event handler takes care of registration of
		 * ports which are not active (they register on PORT_UP).
		 */
		IBTF_DPRINTF_L4("ibdm",
		    "\thandle_hca_attach: port guid %llx Port state 0x%x",
		    port_attr->pa_port_guid, portinfop[ii].p_linkstate);

		if (portinfop[ii].p_linkstate == IBT_PORT_ACTIVE) {
			mutex_enter(&ibdm.ibdm_hl_mutex);
			hca_list->hl_nports_active++;
			ibdm_initialize_port(port_attr);
			cv_broadcast(&ibdm.ibdm_port_settle_cv);
			mutex_exit(&ibdm.ibdm_hl_mutex);
		}
	}
	/* duplicate-arrival check is done after port init, under hl_mutex */
	mutex_enter(&ibdm.ibdm_hl_mutex);
	for (temp = ibdm.ibdm_hca_list_head; temp; temp = temp->hl_next) {
		if (temp->hl_hca_guid == hca_guid) {
			IBTF_DPRINTF_L2("ibdm", "hca_attach: HCA %llX "
			    "already seen by IBDM", hca_guid);
			mutex_exit(&ibdm.ibdm_hl_mutex);
			/* tear down the duplicate entry just built */
			(void) ibdm_uninit_hca(hca_list);
			return;
		}
	}
	ibdm.ibdm_hca_count++;
	if (ibdm.ibdm_hca_list_head == NULL) {
		ibdm.ibdm_hca_list_head = hca_list;
		ibdm.ibdm_hca_list_tail = hca_list;
	} else {
		ibdm.ibdm_hca_list_tail->hl_next = hca_list;
		ibdm.ibdm_hca_list_tail = hca_list;
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);
	/* Inform IB nexus driver of the new HCA */
	mutex_enter(&ibdm.ibdm_ibnex_mutex);
	if (ibdm.ibdm_ibnex_callback != NULL) {
		(*ibdm.ibdm_ibnex_callback)((void *)
		    &hca_guid, IBDM_EVENT_HCA_ADDED);
	}
	mutex_exit(&ibdm.ibdm_ibnex_mutex);

	kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
	ibt_free_portinfo(portinfop, size);
}


/*
 * ibdm_handle_hca_detach()
 *	Unlink the HCA from the global list, uninitialize it, and scrub
 *	any references to it from the discovered-GID list.  Serializes
 *	against in-progress fabric probes via the IBDM_BUSY flag.
 */
static void
ibdm_handle_hca_detach(ib_guid_t hca_guid)
{
	ibdm_hca_list_t		*head, *prev = NULL;
	size_t			len;
	ibdm_dp_gidinfo_t	*gidinfo;
	ibdm_port_attr_t	*port_attr;
	int			i;

	IBTF_DPRINTF_L4("ibdm",
	    "\thandle_hca_detach: hca_guid = 0x%llx", hca_guid);

	/* Make sure no probes are running */
	mutex_enter(&ibdm.ibdm_mutex);
	while (ibdm.ibdm_busy & IBDM_BUSY)
cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 1064 ibdm.ibdm_busy |= IBDM_BUSY; 1065 mutex_exit(&ibdm.ibdm_mutex); 1066 1067 mutex_enter(&ibdm.ibdm_hl_mutex); 1068 head = ibdm.ibdm_hca_list_head; 1069 while (head) { 1070 if (head->hl_hca_guid == hca_guid) { 1071 if (prev == NULL) 1072 ibdm.ibdm_hca_list_head = head->hl_next; 1073 else 1074 prev->hl_next = head->hl_next; 1075 if (ibdm.ibdm_hca_list_tail == head) 1076 ibdm.ibdm_hca_list_tail = prev; 1077 ibdm.ibdm_hca_count--; 1078 break; 1079 } 1080 prev = head; 1081 head = head->hl_next; 1082 } 1083 mutex_exit(&ibdm.ibdm_hl_mutex); 1084 if (ibdm_uninit_hca(head) != IBDM_SUCCESS) 1085 (void) ibdm_handle_hca_attach(hca_guid); 1086 1087 /* 1088 * Now clean up the HCA lists in the gidlist. 1089 */ 1090 for (gidinfo = ibdm.ibdm_dp_gidlist_head; gidinfo; gidinfo = 1091 gidinfo->gl_next) { 1092 prev = NULL; 1093 head = gidinfo->gl_hca_list; 1094 while (head) { 1095 if (head->hl_hca_guid == hca_guid) { 1096 if (prev == NULL) 1097 gidinfo->gl_hca_list = 1098 head->hl_next; 1099 else 1100 prev->hl_next = head->hl_next; 1101 for (i = 0; i < head->hl_nports; i++) { 1102 port_attr = &head->hl_port_attr[i]; 1103 if (port_attr->pa_pkey_tbl != NULL) 1104 kmem_free( 1105 port_attr->pa_pkey_tbl, 1106 port_attr->pa_npkeys * 1107 sizeof (ibdm_pkey_tbl_t)); 1108 } 1109 len = sizeof (ibdm_hca_list_t) + 1110 (head->hl_nports * 1111 sizeof (ibdm_port_attr_t)); 1112 kmem_free(head, len); 1113 1114 break; 1115 } 1116 prev = head; 1117 head = head->hl_next; 1118 } 1119 } 1120 1121 mutex_enter(&ibdm.ibdm_mutex); 1122 ibdm.ibdm_busy &= ~IBDM_BUSY; 1123 cv_broadcast(&ibdm.ibdm_busy_cv); 1124 mutex_exit(&ibdm.ibdm_mutex); 1125 } 1126 1127 1128 static int 1129 ibdm_uninit_hca(ibdm_hca_list_t *head) 1130 { 1131 int ii; 1132 ibdm_port_attr_t *port_attr; 1133 1134 for (ii = 0; ii < head->hl_nports; ii++) { 1135 port_attr = &head->hl_port_attr[ii]; 1136 if (ibdm_fini_port(port_attr) != IBDM_SUCCESS) { 1137 IBTF_DPRINTF_L2("ibdm", "uninit_hca: HCA %p 
port 0x%x " 1138 "ibdm_fini_port() failed", head, ii); 1139 return (IBDM_FAILURE); 1140 } 1141 } 1142 if (head->hl_hca_hdl) 1143 if (ibt_close_hca(head->hl_hca_hdl) != IBT_SUCCESS) { 1144 IBTF_DPRINTF_L2("ibdm", "uninit_hca: " 1145 "ibt_close_hca() failed"); 1146 return (IBDM_FAILURE); 1147 } 1148 kmem_free(head->hl_port_attr, 1149 head->hl_nports * sizeof (ibdm_port_attr_t)); 1150 kmem_free(head->hl_hca_port_attr, sizeof (ibdm_port_attr_t)); 1151 kmem_free(head, sizeof (ibdm_hca_list_t)); 1152 return (IBDM_SUCCESS); 1153 } 1154 1155 1156 /* 1157 * For each port on the HCA, 1158 * 1) Teardown IBMF receive callback function 1159 * 2) Unregister with IBMF 1160 * 3) Unregister with SA access 1161 */ 1162 static int 1163 ibdm_fini_port(ibdm_port_attr_t *port_attr) 1164 { 1165 int ii, ibmf_status; 1166 1167 for (ii = 0; ii < port_attr->pa_npkeys; ii++) { 1168 if (port_attr->pa_pkey_tbl == NULL) 1169 break; 1170 if (!port_attr->pa_pkey_tbl[ii].pt_qp_hdl) 1171 continue; 1172 if (ibdm_port_attr_ibmf_fini(port_attr, ii) != IBDM_SUCCESS) { 1173 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1174 "ibdm_port_attr_ibmf_fini failed for " 1175 "port pkey 0x%x", ii); 1176 return (IBDM_FAILURE); 1177 } 1178 } 1179 1180 if (port_attr->pa_ibmf_hdl) { 1181 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl, 1182 IBMF_QP_HANDLE_DEFAULT, 0); 1183 if (ibmf_status != IBMF_SUCCESS) { 1184 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1185 "ibmf_tear_down_async_cb failed %d", ibmf_status); 1186 return (IBDM_FAILURE); 1187 } 1188 1189 ibmf_status = ibmf_unregister(&port_attr->pa_ibmf_hdl, 0); 1190 if (ibmf_status != IBMF_SUCCESS) { 1191 IBTF_DPRINTF_L2("ibdm", "\tfini_port: " 1192 "ibmf_unregister failed %d", ibmf_status); 1193 return (IBDM_FAILURE); 1194 } 1195 1196 port_attr->pa_ibmf_hdl = NULL; 1197 } 1198 1199 if (port_attr->pa_sa_hdl) { 1200 ibmf_status = ibmf_sa_session_close(&port_attr->pa_sa_hdl, 0); 1201 if (ibmf_status != IBMF_SUCCESS) { 1202 IBTF_DPRINTF_L2("ibdm", "\tfini_port: " 
1203 "ibmf_sa_session_close failed %d", ibmf_status); 1204 return (IBDM_FAILURE); 1205 } 1206 port_attr->pa_sa_hdl = NULL; 1207 } 1208 1209 if (port_attr->pa_pkey_tbl != NULL) { 1210 kmem_free(port_attr->pa_pkey_tbl, 1211 port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 1212 port_attr->pa_pkey_tbl = NULL; 1213 port_attr->pa_npkeys = 0; 1214 } 1215 1216 return (IBDM_SUCCESS); 1217 } 1218 1219 1220 /* 1221 * ibdm_port_attr_ibmf_fini: 1222 * With IBMF - Tear down Async callback and free QP Handle 1223 */ 1224 static int 1225 ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *port_attr, int ii) 1226 { 1227 int ibmf_status; 1228 1229 IBTF_DPRINTF_L5("ibdm", "\tport_attr_ibmf_fini:"); 1230 1231 if (port_attr->pa_pkey_tbl[ii].pt_qp_hdl) { 1232 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl, 1233 port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0); 1234 if (ibmf_status != IBMF_SUCCESS) { 1235 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: " 1236 "ibmf_tear_down_async_cb failed %d", ibmf_status); 1237 return (IBDM_FAILURE); 1238 } 1239 ibmf_status = ibmf_free_qp(port_attr->pa_ibmf_hdl, 1240 &port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0); 1241 if (ibmf_status != IBMF_SUCCESS) { 1242 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: " 1243 "ibmf_free_qp failed %d", ibmf_status); 1244 return (IBDM_FAILURE); 1245 } 1246 port_attr->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 1247 } 1248 return (IBDM_SUCCESS); 1249 } 1250 1251 1252 /* 1253 * ibdm_gid_decr_pending: 1254 * decrement gl_pending_cmds. If zero wakeup sleeping threads 1255 */ 1256 static void 1257 ibdm_gid_decr_pending(ibdm_dp_gidinfo_t *gidinfo) 1258 { 1259 mutex_enter(&ibdm.ibdm_mutex); 1260 mutex_enter(&gidinfo->gl_mutex); 1261 if (--gidinfo->gl_pending_cmds == 0) { 1262 /* 1263 * Handle DGID getting removed. 
1264 */ 1265 if (gidinfo->gl_disconnected) { 1266 mutex_exit(&gidinfo->gl_mutex); 1267 mutex_exit(&ibdm.ibdm_mutex); 1268 1269 IBTF_DPRINTF_L3(ibdm_string, "\tgid_decr_pending: " 1270 "gidinfo %p hot removal", gidinfo); 1271 ibdm_delete_gidinfo(gidinfo); 1272 1273 mutex_enter(&ibdm.ibdm_mutex); 1274 ibdm.ibdm_ngid_probes_in_progress--; 1275 ibdm_wait_probe_completion(); 1276 mutex_exit(&ibdm.ibdm_mutex); 1277 return; 1278 } 1279 mutex_exit(&gidinfo->gl_mutex); 1280 mutex_exit(&ibdm.ibdm_mutex); 1281 ibdm_notify_newgid_iocs(gidinfo); 1282 mutex_enter(&ibdm.ibdm_mutex); 1283 mutex_enter(&gidinfo->gl_mutex); 1284 1285 ibdm.ibdm_ngid_probes_in_progress--; 1286 ibdm_wait_probe_completion(); 1287 } 1288 mutex_exit(&gidinfo->gl_mutex); 1289 mutex_exit(&ibdm.ibdm_mutex); 1290 } 1291 1292 1293 /* 1294 * ibdm_wait_probe_completion: 1295 * wait for probing to complete 1296 */ 1297 static void 1298 ibdm_wait_probe_completion(void) 1299 { 1300 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 1301 if (ibdm.ibdm_ngid_probes_in_progress) { 1302 IBTF_DPRINTF_L4("ibdm", "\twait for probe complete"); 1303 ibdm.ibdm_busy |= IBDM_PROBE_IN_PROGRESS; 1304 while (ibdm.ibdm_busy & IBDM_PROBE_IN_PROGRESS) 1305 cv_wait(&ibdm.ibdm_probe_cv, &ibdm.ibdm_mutex); 1306 } 1307 } 1308 1309 1310 /* 1311 * ibdm_wait_cisco_probe_completion: 1312 * wait for the reply from the Cisco FC GW switch after a setclassportinfo 1313 * request is sent. This wait can be achieved on each gid. 
1314 */ 1315 static void 1316 ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *gidinfo) 1317 { 1318 ASSERT(MUTEX_HELD(&gidinfo->gl_mutex)); 1319 IBTF_DPRINTF_L4("ibdm", "\twait for cisco probe complete"); 1320 gidinfo->gl_flag |= IBDM_CISCO_PROBE; 1321 while (gidinfo->gl_flag & IBDM_CISCO_PROBE) 1322 cv_wait(&gidinfo->gl_probe_cv, &gidinfo->gl_mutex); 1323 } 1324 1325 1326 /* 1327 * ibdm_wakeup_probe_gid_cv: 1328 * wakeup waiting threads (based on ibdm_ngid_probes_in_progress) 1329 */ 1330 static void 1331 ibdm_wakeup_probe_gid_cv(void) 1332 { 1333 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 1334 if (!ibdm.ibdm_ngid_probes_in_progress) { 1335 IBTF_DPRINTF_L4("ibdm", "wakeup_probe_gid_thread: Wakeup"); 1336 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 1337 cv_broadcast(&ibdm.ibdm_probe_cv); 1338 } 1339 1340 } 1341 1342 1343 /* 1344 * ibdm_sweep_fabric(reprobe_flag) 1345 * Find all possible Managed IOU's and their IOC's that are visible 1346 * to the host. The algorithm used is as follows 1347 * 1348 * Send a "bus walk" request for each port on the host HCA to SA access 1349 * SA returns complete set of GID's that are reachable from 1350 * source port. This is done in parallel. 1351 * 1352 * Initialize GID state to IBDM_GID_PROBE_NOT_DONE 1353 * 1354 * Sort the GID list and eliminate duplicate GID's 1355 * 1) Use DGID for sorting 1356 * 2) use PortGuid for sorting 1357 * Send SA query to retrieve NodeRecord and 1358 * extract PortGuid from that. 1359 * 1360 * Set GID state to IBDM_GID_PROBE_FAILED to all the ports that dont 1361 * support DM MAD's 1362 * Send a "Portinfo" query to get the port capabilities and 1363 * then check for DM MAD's support 1364 * 1365 * Send "ClassPortInfo" request for all the GID's in parallel, 1366 * set the GID state to IBDM_GET_CLASSPORTINFO and wait on the 1367 * cv_signal to complete. 1368 * 1369 * When DM agent on the remote GID sends back the response, IBMF 1370 * invokes DM callback routine. 
1371 * 1372 * If the response is proper, send "IOUnitInfo" request and set 1373 * GID state to IBDM_GET_IOUNITINFO. 1374 * 1375 * If the response is proper, send "IocProfileInfo" request to 1376 * all the IOC simultaneously and set GID state to IBDM_GET_IOC_DETAILS. 1377 * 1378 * Send request to get Service entries simultaneously 1379 * 1380 * Signal the waiting thread when received response for all the commands. 1381 * 1382 * Set the GID state to IBDM_GID_PROBE_FAILED when received a error 1383 * response during the probing period. 1384 * 1385 * Note: 1386 * ibdm.ibdm_ngid_probes_in_progress and ibdm_gid_list_t:gl_pending_cmds 1387 * keep track of number commands in progress at any point of time. 1388 * MAD transaction ID is used to identify a particular GID 1389 * TBD: Consider registering the IBMF receive callback on demand 1390 * 1391 * Note: This routine must be called with ibdm.ibdm_mutex held 1392 * TBD: Re probe the failure GID (for certain failures) when requested 1393 * for fabric sweep next time 1394 * 1395 * Parameters : If reprobe_flag is set, All IOCs will be reprobed. 1396 */ 1397 static void 1398 ibdm_sweep_fabric(int reprobe_flag) 1399 { 1400 int ii; 1401 int new_paths = 0; 1402 uint8_t niocs; 1403 taskqid_t tid; 1404 ibdm_ioc_info_t *ioc; 1405 ibdm_hca_list_t *hca_list = NULL; 1406 ibdm_port_attr_t *port = NULL; 1407 ibdm_dp_gidinfo_t *gid_info; 1408 1409 IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: Enter"); 1410 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 1411 1412 /* 1413 * Check whether a sweep already in progress. 
If so, just 1414 * wait for the fabric sweep to complete 1415 */ 1416 while (ibdm.ibdm_busy & IBDM_BUSY) 1417 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 1418 ibdm.ibdm_busy |= IBDM_BUSY; 1419 mutex_exit(&ibdm.ibdm_mutex); 1420 1421 ibdm_dump_sweep_fabric_timestamp(0); 1422 1423 /* Rescan the GID list for any removed GIDs for reprobe */ 1424 if (reprobe_flag) 1425 ibdm_rescan_gidlist(NULL); 1426 1427 /* 1428 * Get list of all the ports reachable from the local known HCA 1429 * ports which are active 1430 */ 1431 mutex_enter(&ibdm.ibdm_hl_mutex); 1432 for (ibdm_get_next_port(&hca_list, &port, 1); port; 1433 ibdm_get_next_port(&hca_list, &port, 1)) { 1434 /* 1435 * Get PATHS to all the reachable ports from 1436 * SGID and update the global ibdm structure. 1437 */ 1438 new_paths = ibdm_get_reachable_ports(port, hca_list); 1439 ibdm.ibdm_ngids += new_paths; 1440 } 1441 mutex_exit(&ibdm.ibdm_hl_mutex); 1442 1443 mutex_enter(&ibdm.ibdm_mutex); 1444 ibdm.ibdm_ngid_probes_in_progress += ibdm.ibdm_ngids; 1445 mutex_exit(&ibdm.ibdm_mutex); 1446 1447 /* Send a request to probe GIDs asynchronously. */ 1448 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 1449 gid_info = gid_info->gl_next) { 1450 mutex_enter(&gid_info->gl_mutex); 1451 gid_info->gl_reprobe_flag = reprobe_flag; 1452 mutex_exit(&gid_info->gl_mutex); 1453 1454 /* process newly encountered GIDs */ 1455 tid = taskq_dispatch(system_taskq, ibdm_probe_gid_thread, 1456 (void *)gid_info, TQ_NOSLEEP); 1457 IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: gid_info = %p" 1458 " taskq_id = %x", gid_info, tid); 1459 /* taskq failed to dispatch call it directly */ 1460 if (tid == NULL) 1461 ibdm_probe_gid_thread((void *)gid_info); 1462 } 1463 1464 mutex_enter(&ibdm.ibdm_mutex); 1465 ibdm_wait_probe_completion(); 1466 1467 /* 1468 * Update the properties, if reprobe_flag is set 1469 * Skip if gl_reprobe_flag is set, this will be 1470 * a re-inserted / new GID, for which notifications 1471 * have already been send. 
1472 */ 1473 if (reprobe_flag) { 1474 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 1475 gid_info = gid_info->gl_next) { 1476 if (gid_info->gl_iou == NULL) 1477 continue; 1478 if (gid_info->gl_reprobe_flag) { 1479 gid_info->gl_reprobe_flag = 0; 1480 continue; 1481 } 1482 1483 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 1484 for (ii = 0; ii < niocs; ii++) { 1485 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 1486 if (ioc) 1487 ibdm_reprobe_update_port_srv(ioc, 1488 gid_info); 1489 } 1490 } 1491 } else if (ibdm.ibdm_prev_iou) { 1492 ibdm_ioc_info_t *ioc_list; 1493 1494 /* 1495 * Get the list of IOCs which have changed. 1496 * If any IOCs have changed, Notify IBNexus 1497 */ 1498 ibdm.ibdm_prev_iou = 0; 1499 ioc_list = ibdm_handle_prev_iou(); 1500 if (ioc_list) { 1501 if (ibdm.ibdm_ibnex_callback != NULL) { 1502 (*ibdm.ibdm_ibnex_callback)( 1503 (void *)ioc_list, 1504 IBDM_EVENT_IOC_PROP_UPDATE); 1505 } 1506 } 1507 } 1508 1509 ibdm_dump_sweep_fabric_timestamp(1); 1510 1511 ibdm.ibdm_busy &= ~IBDM_BUSY; 1512 cv_broadcast(&ibdm.ibdm_busy_cv); 1513 IBTF_DPRINTF_L5("ibdm", "\tsweep_fabric: EXIT"); 1514 } 1515 1516 1517 /* 1518 * ibdm_is_cisco: 1519 * Check if this is a Cisco device or not. 1520 */ 1521 static boolean_t 1522 ibdm_is_cisco(ib_guid_t guid) 1523 { 1524 if ((guid >> IBDM_OUI_GUID_SHIFT) == IBDM_CISCO_COMPANY_ID) 1525 return (B_TRUE); 1526 return (B_FALSE); 1527 } 1528 1529 1530 /* 1531 * ibdm_is_cisco_switch: 1532 * Check if this switch is a CISCO switch or not. 1533 * Note that if this switch is already activated, ibdm_is_cisco_switch() 1534 * returns B_FALSE not to re-activate it again. 1535 */ 1536 static boolean_t 1537 ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *gid_info) 1538 { 1539 int company_id, device_id; 1540 ASSERT(gid_info != 0); 1541 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 1542 1543 /* 1544 * If this switch is already activated, don't re-activate it. 
1545 */ 1546 if (gid_info->gl_flag & IBDM_CISCO_PROBE_DONE) 1547 return (B_FALSE); 1548 1549 /* 1550 * Check if this switch is a Cisco FC GW or not. 1551 * Use the node guid (the OUI part) instead of the vendor id 1552 * since the vendor id is zero in practice. 1553 */ 1554 company_id = gid_info->gl_nodeguid >> IBDM_OUI_GUID_SHIFT; 1555 device_id = gid_info->gl_devid; 1556 1557 if (company_id == IBDM_CISCO_COMPANY_ID && 1558 device_id == IBDM_CISCO_DEVICE_ID) 1559 return (B_TRUE); 1560 return (B_FALSE); 1561 } 1562 1563 1564 /* 1565 * ibdm_probe_gid_thread: 1566 * thread that does the actual work for sweeping the fabric 1567 * for a given GID 1568 */ 1569 static void 1570 ibdm_probe_gid_thread(void *args) 1571 { 1572 int reprobe_flag; 1573 ib_guid_t node_guid; 1574 ib_guid_t port_guid; 1575 ibdm_dp_gidinfo_t *gid_info; 1576 1577 gid_info = (ibdm_dp_gidinfo_t *)args; 1578 reprobe_flag = gid_info->gl_reprobe_flag; 1579 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: gid_info = %p, flag = %d", 1580 gid_info, reprobe_flag); 1581 ASSERT(gid_info != NULL); 1582 ASSERT(gid_info->gl_pending_cmds == 0); 1583 1584 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE && 1585 reprobe_flag == 0) { 1586 /* 1587 * This GID may have been already probed. Send 1588 * in a CLP to check if IOUnitInfo changed? 
1589 * Explicitly set gl_reprobe_flag to 0 so that 1590 * IBnex is not notified on completion 1591 */ 1592 if (gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) { 1593 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: " 1594 "get new IOCs information"); 1595 mutex_enter(&gid_info->gl_mutex); 1596 gid_info->gl_pending_cmds++; 1597 gid_info->gl_state = IBDM_GET_IOUNITINFO; 1598 gid_info->gl_reprobe_flag = 0; 1599 mutex_exit(&gid_info->gl_mutex); 1600 if (ibdm_send_iounitinfo(gid_info) != IBDM_SUCCESS) { 1601 mutex_enter(&gid_info->gl_mutex); 1602 --gid_info->gl_pending_cmds; 1603 mutex_exit(&gid_info->gl_mutex); 1604 mutex_enter(&ibdm.ibdm_mutex); 1605 --ibdm.ibdm_ngid_probes_in_progress; 1606 ibdm_wakeup_probe_gid_cv(); 1607 mutex_exit(&ibdm.ibdm_mutex); 1608 } 1609 } else { 1610 mutex_enter(&ibdm.ibdm_mutex); 1611 --ibdm.ibdm_ngid_probes_in_progress; 1612 ibdm_wakeup_probe_gid_cv(); 1613 mutex_exit(&ibdm.ibdm_mutex); 1614 } 1615 return; 1616 } else if (reprobe_flag && gid_info->gl_state == 1617 IBDM_GID_PROBING_COMPLETE) { 1618 /* 1619 * Reprobe all IOCs for the GID which has completed 1620 * probe. Skip other port GIDs to same IOU. 1621 * Explicitly set gl_reprobe_flag to 0 so that 1622 * IBnex is not notified on completion 1623 */ 1624 ibdm_ioc_info_t *ioc_info; 1625 uint8_t niocs, ii; 1626 1627 ASSERT(gid_info->gl_iou); 1628 mutex_enter(&gid_info->gl_mutex); 1629 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 1630 gid_info->gl_state = IBDM_GET_IOC_DETAILS; 1631 gid_info->gl_pending_cmds += niocs; 1632 gid_info->gl_reprobe_flag = 0; 1633 mutex_exit(&gid_info->gl_mutex); 1634 for (ii = 0; ii < niocs; ii++) { 1635 uchar_t slot_info; 1636 ib_dm_io_unitinfo_t *giou_info; 1637 1638 /* 1639 * Check whether IOC is present in the slot 1640 * Series of nibbles (in the field 1641 * iou_ctrl_list) represents a slot in the 1642 * IOU. 
1643 * Byte format: 76543210 1644 * Bits 0-3 of first byte represent Slot 2 1645 * bits 4-7 of first byte represent slot 1, 1646 * bits 0-3 of second byte represent slot 4 1647 * and so on 1648 * Each 4-bit nibble has the following meaning 1649 * 0x0 : IOC not installed 1650 * 0x1 : IOC is present 1651 * 0xf : Slot does not exist 1652 * and all other values are reserved. 1653 */ 1654 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii); 1655 giou_info = &gid_info->gl_iou->iou_info; 1656 slot_info = giou_info->iou_ctrl_list[(ii/2)]; 1657 if ((ii % 2) == 0) 1658 slot_info = (slot_info >> 4); 1659 1660 if ((slot_info & 0xf) != 1) { 1661 ioc_info->ioc_state = 1662 IBDM_IOC_STATE_PROBE_FAILED; 1663 ibdm_gid_decr_pending(gid_info); 1664 continue; 1665 } 1666 1667 if (ibdm_send_ioc_profile(gid_info, ii) != 1668 IBDM_SUCCESS) { 1669 ibdm_gid_decr_pending(gid_info); 1670 } 1671 } 1672 1673 return; 1674 } else if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) { 1675 mutex_enter(&ibdm.ibdm_mutex); 1676 --ibdm.ibdm_ngid_probes_in_progress; 1677 ibdm_wakeup_probe_gid_cv(); 1678 mutex_exit(&ibdm.ibdm_mutex); 1679 return; 1680 } 1681 1682 /* 1683 * Check whether the destination GID supports DM agents. If 1684 * not, stop probing the GID and continue with the next GID 1685 * in the list. 
1686 */ 1687 if (ibdm_is_dev_mgt_supported(gid_info) != IBDM_SUCCESS) { 1688 mutex_enter(&gid_info->gl_mutex); 1689 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1690 gid_info->gl_is_dm_capable = B_FALSE; 1691 mutex_exit(&gid_info->gl_mutex); 1692 ibdm_delete_glhca_list(gid_info); 1693 mutex_enter(&ibdm.ibdm_mutex); 1694 --ibdm.ibdm_ngid_probes_in_progress; 1695 ibdm_wakeup_probe_gid_cv(); 1696 mutex_exit(&ibdm.ibdm_mutex); 1697 return; 1698 } 1699 1700 /* 1701 * This GID is Device management capable 1702 */ 1703 mutex_enter(&gid_info->gl_mutex); 1704 gid_info->gl_is_dm_capable = B_TRUE; 1705 mutex_exit(&gid_info->gl_mutex); 1706 1707 /* Get the nodeguid and portguid of the port */ 1708 if (ibdm_get_node_port_guids(gid_info->gl_sa_hdl, gid_info->gl_dlid, 1709 &node_guid, &port_guid) != IBDM_SUCCESS) { 1710 mutex_enter(&gid_info->gl_mutex); 1711 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1712 mutex_exit(&gid_info->gl_mutex); 1713 ibdm_delete_glhca_list(gid_info); 1714 mutex_enter(&ibdm.ibdm_mutex); 1715 --ibdm.ibdm_ngid_probes_in_progress; 1716 ibdm_wakeup_probe_gid_cv(); 1717 mutex_exit(&ibdm.ibdm_mutex); 1718 return; 1719 } 1720 1721 /* 1722 * Check whether we already knew about this NodeGuid 1723 * If so, do not probe the GID and continue with the 1724 * next GID in the gid list. Set the GID state to 1725 * probing done. 
1726 */ 1727 mutex_enter(&ibdm.ibdm_mutex); 1728 gid_info->gl_nodeguid = node_guid; 1729 gid_info->gl_portguid = port_guid; 1730 if (ibdm_check_dest_nodeguid(gid_info) != NULL) { 1731 mutex_exit(&ibdm.ibdm_mutex); 1732 mutex_enter(&gid_info->gl_mutex); 1733 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED; 1734 mutex_exit(&gid_info->gl_mutex); 1735 ibdm_delete_glhca_list(gid_info); 1736 mutex_enter(&ibdm.ibdm_mutex); 1737 --ibdm.ibdm_ngid_probes_in_progress; 1738 ibdm_wakeup_probe_gid_cv(); 1739 mutex_exit(&ibdm.ibdm_mutex); 1740 return; 1741 } 1742 ibdm_add_to_gl_gid(gid_info, gid_info); 1743 mutex_exit(&ibdm.ibdm_mutex); 1744 1745 /* 1746 * New or reinserted GID : Enable notification to IBnex 1747 */ 1748 mutex_enter(&gid_info->gl_mutex); 1749 gid_info->gl_reprobe_flag = 1; 1750 1751 /* 1752 * A Cisco FC GW needs the special handling to get IOUnitInfo. 1753 */ 1754 if (ibdm_is_cisco_switch(gid_info)) { 1755 gid_info->gl_pending_cmds++; 1756 gid_info->gl_state = IBDM_SET_CLASSPORTINFO; 1757 mutex_exit(&gid_info->gl_mutex); 1758 1759 if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) { 1760 mutex_enter(&gid_info->gl_mutex); 1761 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1762 --gid_info->gl_pending_cmds; 1763 mutex_exit(&gid_info->gl_mutex); 1764 1765 /* free the hca_list on this gid_info */ 1766 ibdm_delete_glhca_list(gid_info); 1767 1768 mutex_enter(&ibdm.ibdm_mutex); 1769 --ibdm.ibdm_ngid_probes_in_progress; 1770 ibdm_wakeup_probe_gid_cv(); 1771 mutex_exit(&ibdm.ibdm_mutex); 1772 1773 return; 1774 } 1775 1776 mutex_enter(&gid_info->gl_mutex); 1777 ibdm_wait_cisco_probe_completion(gid_info); 1778 1779 IBTF_DPRINTF_L4("ibdm", "\tibdm_probe_gid_thread: " 1780 "CISCO Wakeup signal received"); 1781 } 1782 1783 /* move on to the 'GET_CLASSPORTINFO' stage */ 1784 gid_info->gl_pending_cmds++; 1785 gid_info->gl_state = IBDM_GET_CLASSPORTINFO; 1786 mutex_exit(&gid_info->gl_mutex); 1787 1788 IBTF_DPRINTF_L3(ibdm_string, "\tibdm_probe_gid_thread: " 1789 "%d: gid_info %p 
gl_state %d pending_cmds %d", 1790 __LINE__, gid_info, gid_info->gl_state, 1791 gid_info->gl_pending_cmds); 1792 1793 /* 1794 * Send ClassPortInfo request to the GID asynchronously. 1795 */ 1796 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) { 1797 1798 mutex_enter(&gid_info->gl_mutex); 1799 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1800 --gid_info->gl_pending_cmds; 1801 mutex_exit(&gid_info->gl_mutex); 1802 1803 /* free the hca_list on this gid_info */ 1804 ibdm_delete_glhca_list(gid_info); 1805 1806 mutex_enter(&ibdm.ibdm_mutex); 1807 --ibdm.ibdm_ngid_probes_in_progress; 1808 ibdm_wakeup_probe_gid_cv(); 1809 mutex_exit(&ibdm.ibdm_mutex); 1810 1811 return; 1812 } 1813 } 1814 1815 1816 /* 1817 * ibdm_check_dest_nodeguid 1818 * Searches for the NodeGuid in the GID list 1819 * Returns matching gid_info if found and otherwise NULL 1820 * 1821 * This function is called to handle new GIDs discovered 1822 * during device sweep / probe or for GID_AVAILABLE event. 1823 * 1824 * Parameter : 1825 * gid_info GID to check 1826 */ 1827 static ibdm_dp_gidinfo_t * 1828 ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *gid_info) 1829 { 1830 ibdm_dp_gidinfo_t *gid_list; 1831 ibdm_gid_t *tmp; 1832 1833 IBTF_DPRINTF_L4("ibdm", "\tcheck_dest_nodeguid"); 1834 1835 gid_list = ibdm.ibdm_dp_gidlist_head; 1836 while (gid_list) { 1837 if ((gid_list != gid_info) && 1838 (gid_info->gl_nodeguid == gid_list->gl_nodeguid)) { 1839 IBTF_DPRINTF_L4("ibdm", 1840 "\tcheck_dest_nodeguid: NodeGuid is present"); 1841 1842 /* Add to gid_list */ 1843 tmp = kmem_zalloc(sizeof (ibdm_gid_t), 1844 KM_SLEEP); 1845 tmp->gid_dgid_hi = gid_info->gl_dgid_hi; 1846 tmp->gid_dgid_lo = gid_info->gl_dgid_lo; 1847 tmp->gid_next = gid_list->gl_gid; 1848 gid_list->gl_gid = tmp; 1849 gid_list->gl_ngids++; 1850 return (gid_list); 1851 } 1852 1853 gid_list = gid_list->gl_next; 1854 } 1855 1856 return (NULL); 1857 } 1858 1859 1860 /* 1861 * ibdm_is_dev_mgt_supported 1862 * Get the PortInfo attribute (SA Query) 1863 * 
Check "CompatabilityMask" field in the Portinfo. 1864 * Return IBDM_SUCCESS if DM MAD's supported (if bit 19 set) 1865 * by the port, otherwise IBDM_FAILURE 1866 */ 1867 static int 1868 ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *gid_info) 1869 { 1870 int ret; 1871 size_t length = 0; 1872 sa_portinfo_record_t req, *resp = NULL; 1873 ibmf_saa_access_args_t qargs; 1874 1875 bzero(&req, sizeof (sa_portinfo_record_t)); 1876 req.EndportLID = gid_info->gl_dlid; 1877 1878 qargs.sq_attr_id = SA_PORTINFORECORD_ATTRID; 1879 qargs.sq_access_type = IBMF_SAA_RETRIEVE; 1880 qargs.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID; 1881 qargs.sq_template = &req; 1882 qargs.sq_callback = NULL; 1883 qargs.sq_callback_arg = NULL; 1884 1885 ret = ibmf_sa_access(gid_info->gl_sa_hdl, 1886 &qargs, 0, &length, (void **)&resp); 1887 1888 if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) { 1889 IBTF_DPRINTF_L2("ibdm", "\tis_dev_mgt_supported:" 1890 "failed to get PORTINFO attribute %d", ret); 1891 return (IBDM_FAILURE); 1892 } 1893 1894 if (resp->PortInfo.CapabilityMask & SM_CAP_MASK_IS_DM_SUPPD) { 1895 IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: SUPPD !!"); 1896 ret = IBDM_SUCCESS; 1897 } else { 1898 IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: " 1899 "Not SUPPD !!, cap 0x%x", resp->PortInfo.CapabilityMask); 1900 ret = IBDM_FAILURE; 1901 } 1902 kmem_free(resp, length); 1903 return (ret); 1904 } 1905 1906 1907 /* 1908 * ibdm_get_node_port_guids() 1909 * Get the NodeInfoRecord of the port 1910 * Save NodeGuid and PortGUID values in the GID list structure. 
1911 * Return IBDM_SUCCESS/IBDM_FAILURE 1912 */ 1913 static int 1914 ibdm_get_node_port_guids(ibmf_saa_handle_t sa_hdl, ib_lid_t dlid, 1915 ib_guid_t *node_guid, ib_guid_t *port_guid) 1916 { 1917 int ret; 1918 size_t length = 0; 1919 sa_node_record_t req, *resp = NULL; 1920 ibmf_saa_access_args_t qargs; 1921 1922 IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids"); 1923 1924 bzero(&req, sizeof (sa_node_record_t)); 1925 req.LID = dlid; 1926 1927 qargs.sq_attr_id = SA_NODERECORD_ATTRID; 1928 qargs.sq_access_type = IBMF_SAA_RETRIEVE; 1929 qargs.sq_component_mask = SA_NODEINFO_COMPMASK_NODELID; 1930 qargs.sq_template = &req; 1931 qargs.sq_callback = NULL; 1932 qargs.sq_callback_arg = NULL; 1933 1934 ret = ibmf_sa_access(sa_hdl, &qargs, 0, &length, (void **)&resp); 1935 if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) { 1936 IBTF_DPRINTF_L2("ibdm", "\tget_node_port_guids:" 1937 " SA Retrieve Failed: %d", ret); 1938 return (IBDM_FAILURE); 1939 } 1940 IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids: NodeGuid %llx Port" 1941 "GUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.NodeGUID); 1942 1943 *node_guid = resp->NodeInfo.NodeGUID; 1944 *port_guid = resp->NodeInfo.PortGUID; 1945 kmem_free(resp, length); 1946 return (IBDM_SUCCESS); 1947 } 1948 1949 1950 /* 1951 * ibdm_get_reachable_ports() 1952 * Get list of the destination GID (and its path records) by 1953 * querying the SA access. 
1954 * 1955 * Returns Number paths 1956 */ 1957 static int 1958 ibdm_get_reachable_ports(ibdm_port_attr_t *portinfo, ibdm_hca_list_t *hca) 1959 { 1960 uint_t ii, jj, nrecs; 1961 uint_t npaths = 0; 1962 size_t length; 1963 ib_gid_t sgid; 1964 ibdm_pkey_tbl_t *pkey_tbl; 1965 sa_path_record_t *result; 1966 sa_path_record_t *precp; 1967 ibdm_dp_gidinfo_t *gid_info; 1968 1969 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 1970 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: portinfo %p", portinfo); 1971 1972 sgid.gid_prefix = portinfo->pa_sn_prefix; 1973 sgid.gid_guid = portinfo->pa_port_guid; 1974 1975 /* get reversible paths */ 1976 if (portinfo->pa_sa_hdl && ibmf_saa_paths_from_gid(portinfo->pa_sa_hdl, 1977 sgid, IBMF_SAA_PKEY_WC, B_TRUE, 0, &nrecs, &length, &result) 1978 != IBMF_SUCCESS) { 1979 IBTF_DPRINTF_L2("ibdm", 1980 "\tget_reachable_ports: Getting path records failed"); 1981 return (0); 1982 } 1983 1984 for (ii = 0; ii < nrecs; ii++) { 1985 sa_node_record_t *nrec; 1986 size_t length; 1987 1988 precp = &result[ii]; 1989 if ((gid_info = ibdm_check_dgid(precp->DGID.gid_guid, 1990 precp->DGID.gid_prefix)) != NULL) { 1991 IBTF_DPRINTF_L5("ibdm", "\tget_reachable_ports: " 1992 "Already exists nrecs %d, ii %d", nrecs, ii); 1993 ibdm_addto_glhcalist(gid_info, hca); 1994 continue; 1995 } 1996 /* 1997 * This is a new GID. 
Allocate a GID structure and 1998 * initialize the structure 1999 * gl_state is initialized to IBDM_GID_PROBE_NOT_DONE (0) 2000 * by kmem_zalloc call 2001 */ 2002 gid_info = kmem_zalloc(sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 2003 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 2004 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, NULL); 2005 gid_info->gl_dgid_hi = precp->DGID.gid_prefix; 2006 gid_info->gl_dgid_lo = precp->DGID.gid_guid; 2007 gid_info->gl_sgid_hi = precp->SGID.gid_prefix; 2008 gid_info->gl_sgid_lo = precp->SGID.gid_guid; 2009 gid_info->gl_p_key = precp->P_Key; 2010 gid_info->gl_sa_hdl = portinfo->pa_sa_hdl; 2011 gid_info->gl_ibmf_hdl = portinfo->pa_ibmf_hdl; 2012 gid_info->gl_slid = precp->SLID; 2013 gid_info->gl_dlid = precp->DLID; 2014 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID) 2015 << IBDM_GID_TRANSACTIONID_SHIFT; 2016 gid_info->gl_min_transactionID = gid_info->gl_transactionID; 2017 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1) 2018 << IBDM_GID_TRANSACTIONID_SHIFT; 2019 gid_info->gl_SL = precp->SL; 2020 2021 /* 2022 * get the node record with this guid if the destination 2023 * device is a Cisco one. 
2024 */ 2025 if (ibdm_is_cisco(precp->DGID.gid_guid) && 2026 (gid_info->gl_nodeguid == 0 || gid_info->gl_devid == 0) && 2027 ibdm_get_node_record_by_port(portinfo->pa_sa_hdl, 2028 precp->DGID.gid_guid, &nrec, &length) == IBDM_SUCCESS) { 2029 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID; 2030 gid_info->gl_devid = nrec->NodeInfo.DeviceID; 2031 kmem_free(nrec, length); 2032 } 2033 2034 ibdm_addto_glhcalist(gid_info, hca); 2035 2036 ibdm_dump_path_info(precp); 2037 2038 gid_info->gl_qp_hdl = NULL; 2039 ASSERT(portinfo->pa_pkey_tbl != NULL && 2040 portinfo->pa_npkeys != 0); 2041 2042 for (jj = 0; jj < portinfo->pa_npkeys; jj++) { 2043 pkey_tbl = &portinfo->pa_pkey_tbl[jj]; 2044 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 2045 (pkey_tbl->pt_qp_hdl != NULL)) { 2046 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 2047 break; 2048 } 2049 } 2050 2051 /* 2052 * QP handle for GID not initialized. No matching Pkey 2053 * was found!! ibdm should *not* hit this case. Flag an 2054 * error and drop the GID if ibdm does encounter this. 
2055 */ 2056 if (gid_info->gl_qp_hdl == NULL) { 2057 IBTF_DPRINTF_L2(ibdm_string, 2058 "\tget_reachable_ports: No matching Pkey"); 2059 ibdm_delete_gidinfo(gid_info); 2060 continue; 2061 } 2062 if (ibdm.ibdm_dp_gidlist_head == NULL) { 2063 ibdm.ibdm_dp_gidlist_head = gid_info; 2064 ibdm.ibdm_dp_gidlist_tail = gid_info; 2065 } else { 2066 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 2067 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 2068 ibdm.ibdm_dp_gidlist_tail = gid_info; 2069 } 2070 npaths++; 2071 } 2072 kmem_free(result, length); 2073 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: npaths = %d", npaths); 2074 return (npaths); 2075 } 2076 2077 2078 /* 2079 * ibdm_check_dgid() 2080 * Look in the global list to check whether we know this DGID already 2081 * Return IBDM_GID_PRESENT/IBDM_GID_NOT_PRESENT 2082 */ 2083 static ibdm_dp_gidinfo_t * 2084 ibdm_check_dgid(ib_guid_t guid, ib_sn_prefix_t prefix) 2085 { 2086 ibdm_dp_gidinfo_t *gid_list; 2087 2088 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 2089 gid_list = gid_list->gl_next) { 2090 if ((guid == gid_list->gl_dgid_lo) && 2091 (prefix == gid_list->gl_dgid_hi)) { 2092 break; 2093 } 2094 } 2095 return (gid_list); 2096 } 2097 2098 2099 /* 2100 * ibdm_find_gid() 2101 * Look in the global list to find a GID entry with matching 2102 * port & node GUID. 
2103 * Return pointer to gidinfo if found, else return NULL 2104 */ 2105 static ibdm_dp_gidinfo_t * 2106 ibdm_find_gid(ib_guid_t nodeguid, ib_guid_t portguid) 2107 { 2108 ibdm_dp_gidinfo_t *gid_list; 2109 2110 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid(%llx, %llx)\n", 2111 nodeguid, portguid); 2112 2113 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 2114 gid_list = gid_list->gl_next) { 2115 if ((portguid == gid_list->gl_portguid) && 2116 (nodeguid == gid_list->gl_nodeguid)) { 2117 break; 2118 } 2119 } 2120 2121 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid : returned %p\n", 2122 gid_list); 2123 return (gid_list); 2124 } 2125 2126 2127 /* 2128 * ibdm_set_classportinfo() 2129 * ibdm_set_classportinfo() is a function to activate a Cisco FC GW 2130 * by sending the setClassPortInfo request with the trapLID, trapGID 2131 * and etc. to the gateway since the gateway doesn't provide the IO 2132 * Unit Information othewise. This behavior is the Cisco specific one, 2133 * and this function is called to a Cisco FC GW only. 2134 * Returns IBDM_SUCCESS/IBDM_FAILURE 2135 */ 2136 static int 2137 ibdm_set_classportinfo(ibdm_dp_gidinfo_t *gid_info) 2138 { 2139 ibmf_msg_t *msg; 2140 ib_mad_hdr_t *hdr; 2141 ibdm_timeout_cb_args_t *cb_args; 2142 void *data; 2143 ib_mad_classportinfo_t *cpi; 2144 2145 IBTF_DPRINTF_L4("ibdm", 2146 "\tset_classportinfo: gid info 0x%p", gid_info); 2147 2148 /* 2149 * Send command to set classportinfo attribute. Allocate a IBMF 2150 * packet and initialize the packet. 
/*
 * ibdm_set_classportinfo()
 *	ibdm_set_classportinfo() is a function to activate a Cisco FC GW
 *	by sending the setClassPortInfo request with the trapLID, trapGID
 *	and etc. to the gateway since the gateway doesn't provide the IO
 *	Unit Information othewise. This behavior is the Cisco specific one,
 *	and this function is called to a Cisco FC GW only.
 *	Returns IBDM_SUCCESS/IBDM_FAILURE
 */
static int
ibdm_set_classportinfo(ibdm_dp_gidinfo_t *gid_info)
{
	ibmf_msg_t		*msg;
	ib_mad_hdr_t		*hdr;
	ibdm_timeout_cb_args_t	*cb_args;
	void			*data;
	ib_mad_classportinfo_t	*cpi;

	IBTF_DPRINTF_L4("ibdm",
	    "\tset_classportinfo: gid info 0x%p", gid_info);

	/*
	 * Send command to set classportinfo attribute. Allocate a IBMF
	 * packet and initialize the packet.
	 */
	if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP,
	    &msg) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L4("ibdm", "\tset_classportinfo: pkt alloc fail");
		return (IBDM_FAILURE);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
	ibdm_alloc_send_buffers(msg);
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))

	/* Address the request to the destination recorded in gid_info */
	msg->im_local_addr.ia_local_lid	= gid_info->gl_slid;
	msg->im_local_addr.ia_remote_lid	= gid_info->gl_dlid;
	msg->im_local_addr.ia_remote_qno	= 1;
	msg->im_local_addr.ia_p_key		= gid_info->gl_p_key;
	msg->im_local_addr.ia_q_key		= IB_GSI_QKEY;
	msg->im_local_addr.ia_service_level	= gid_info->gl_SL;

	/* Build a DevMgt SET(ClassPortInfo) MAD header */
	hdr			= IBDM_OUT_IBMFMSG_MADHDR(msg);
	hdr->BaseVersion	= MAD_CLASS_BASE_VERS_1;
	hdr->MgmtClass		= MAD_MGMT_CLASS_DEV_MGT;
	hdr->ClassVersion	= IB_DM_CLASS_VERSION_1;
	hdr->R_Method		= IB_DM_DEVMGT_METHOD_SET;
	hdr->Status		= 0;
	hdr->TransactionID	= h2b64(gid_info->gl_transactionID);
	hdr->AttributeID	= h2b16(IB_DM_ATTR_CLASSPORTINFO);
	hdr->AttributeModifier	= 0;

	data = msg->im_msgbufs_send.im_bufs_cl_data;
	cpi = (ib_mad_classportinfo_t *)data;

	/*
	 * Set the classportinfo values to activate this Cisco FC GW.
	 * Trap destination is our own source GID/LID; trap QP/QKey come
	 * from the alternate QP handle cached in gid_info.
	 */
	cpi->TrapGID_hi = h2b64(gid_info->gl_sgid_hi);
	cpi->TrapGID_lo = h2b64(gid_info->gl_sgid_lo);
	cpi->TrapLID = h2b16(gid_info->gl_slid);
	cpi->TrapSL = gid_info->gl_SL;
	cpi->TrapP_Key = h2b16(gid_info->gl_p_key);
	cpi->TrapQP = h2b32((((ibmf_alt_qp_t *)gid_info->gl_qp_hdl)->isq_qpn));
	cpi->TrapQ_Key = h2b32((((ibmf_alt_qp_t *)
	    gid_info->gl_qp_hdl)->isq_qkey));

	cb_args			= &gid_info->gl_cpi_cb_args;
	cb_args->cb_gid_info	= gid_info;
	cb_args->cb_retry_count	= ibdm_dft_retry_cnt;
	cb_args->cb_req_type	= IBDM_REQ_TYPE_CLASSPORTINFO;

	/* Arm the retry/timeout handler before sending */
	mutex_enter(&gid_info->gl_mutex);
	gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
	    cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
	mutex_exit(&gid_info->gl_mutex);

	IBTF_DPRINTF_L5("ibdm", "\tset_classportinfo: "
	    "timeout id %x", gid_info->gl_timeout_id);

	/* On transport failure, invoke the send callback to clean up */
	if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl,
	    msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm",
		    "\tset_classportinfo: ibmf send failed");
		ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
	}

	return (IBDM_SUCCESS);
}
/*
 * ibdm_send_classportinfo()
 *	Send classportinfo request. When the request is completed
 *	IBMF calls ibdm_classportinfo_cb routine to inform about
 *	the completion.
 *	Returns IBDM_SUCCESS/IBDM_FAILURE
 */
static int
ibdm_send_classportinfo(ibdm_dp_gidinfo_t *gid_info)
{
	ibmf_msg_t		*msg;
	ib_mad_hdr_t		*hdr;
	ibdm_timeout_cb_args_t	*cb_args;

	IBTF_DPRINTF_L4("ibdm",
	    "\tsend_classportinfo: gid info 0x%p", gid_info);

	/*
	 * Send command to get classportinfo attribute. Allocate a IBMF
	 * packet and initialize the packet.
	 */
	if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP,
	    &msg) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L4("ibdm", "\tsend_classportinfo: pkt alloc fail");
		return (IBDM_FAILURE);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
	ibdm_alloc_send_buffers(msg);
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))

	/* Address the request to the destination recorded in gid_info */
	msg->im_local_addr.ia_local_lid	= gid_info->gl_slid;
	msg->im_local_addr.ia_remote_lid	= gid_info->gl_dlid;
	msg->im_local_addr.ia_remote_qno	= 1;
	msg->im_local_addr.ia_p_key		= gid_info->gl_p_key;
	msg->im_local_addr.ia_q_key		= IB_GSI_QKEY;
	msg->im_local_addr.ia_service_level	= gid_info->gl_SL;

	/* Build a DevMgt GET(ClassPortInfo) MAD header */
	hdr			= IBDM_OUT_IBMFMSG_MADHDR(msg);
	hdr->BaseVersion	= MAD_CLASS_BASE_VERS_1;
	hdr->MgmtClass		= MAD_MGMT_CLASS_DEV_MGT;
	hdr->ClassVersion	= IB_DM_CLASS_VERSION_1;
	hdr->R_Method		= IB_DM_DEVMGT_METHOD_GET;
	hdr->Status		= 0;
	hdr->TransactionID	= h2b64(gid_info->gl_transactionID);
	hdr->AttributeID	= h2b16(IB_DM_ATTR_CLASSPORTINFO);
	hdr->AttributeModifier	= 0;

	cb_args			= &gid_info->gl_cpi_cb_args;
	cb_args->cb_gid_info	= gid_info;
	cb_args->cb_retry_count	= ibdm_dft_retry_cnt;
	cb_args->cb_req_type	= IBDM_REQ_TYPE_CLASSPORTINFO;

	/* Arm the retry/timeout handler before sending */
	mutex_enter(&gid_info->gl_mutex);
	gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
	    cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
	mutex_exit(&gid_info->gl_mutex);

	IBTF_DPRINTF_L5("ibdm", "\tsend_classportinfo: "
	    "timeout id %x", gid_info->gl_timeout_id);

	/* On transport failure, invoke the send callback to clean up */
	if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl,
	    msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm",
		    "\tsend_classportinfo: ibmf send failed");
		ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
	}

	return (IBDM_SUCCESS);
}
/*
 * ibdm_handle_setclassportinfo()
 *	Invoked by the IBMF when the setClassPortInfo request
 *	(sent by ibdm_set_classportinfo()) is completed.
 *	Validates the response, cancels the pending retry timeout
 *	and dumps the returned ClassPortInfo for debugging.
 */
static void
ibdm_handle_setclassportinfo(ibmf_handle_t ibmf_hdl,
    ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
{
	void			*data;
	timeout_id_t		timeout_id;
	ib_mad_classportinfo_t	*cpi;

	IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo:ibmf hdl "
	    "%p msg %p gid info %p", ibmf_hdl, msg, gid_info);

	/* Ignore anything other than a ClassPortInfo response */
	if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) {
		IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo: "
		    "Not a ClassPortInfo resp");
		*flag |= IBDM_IBMF_PKT_UNEXP_RESP;
		return;
	}

	/*
	 * Verify whether timeout handler is created/active.
	 * If created/ active, cancel the timeout handler
	 */
	mutex_enter(&gid_info->gl_mutex);
	/* A state other than SET_CLASSPORTINFO means a duplicate response */
	if (gid_info->gl_state != IBDM_SET_CLASSPORTINFO) {
		IBTF_DPRINTF_L2("ibdm", "\thandle_setclassportinfo:DUP resp");
		*flag |= IBDM_IBMF_PKT_DUP_RESP;
		mutex_exit(&gid_info->gl_mutex);
		return;
	}
	ibdm_bump_transactionID(gid_info);

	gid_info->gl_iou_cb_args.cb_req_type = 0;
	if (gid_info->gl_timeout_id) {
		/* Drop the mutex across untimeout() to avoid deadlock */
		timeout_id = gid_info->gl_timeout_id;
		mutex_exit(&gid_info->gl_mutex);
		IBTF_DPRINTF_L5("ibdm", "handle_setlassportinfo: "
		    "gl_timeout_id = 0x%x", timeout_id);
		if (untimeout(timeout_id) == -1) {
			IBTF_DPRINTF_L2("ibdm", "handle_setclassportinfo: "
			    "untimeout gl_timeout_id failed");
		}
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_timeout_id = 0;
	}
	mutex_exit(&gid_info->gl_mutex);

	data = msg->im_msgbufs_recv.im_bufs_cl_data;
	cpi = (ib_mad_classportinfo_t *)data;

	ibdm_dump_classportinfo(cpi);
}
/*
 * ibdm_handle_classportinfo()
 *	Invoked by the IBMF when the classportinfo request is completed.
 *	Caches the response timeout and redirection information in
 *	gid_info, then reuses the same IBMF packet to send the follow-up
 *	IOUnitInfo request (honoring redirection if the agent asked
 *	for it).
 */
static void
ibdm_handle_classportinfo(ibmf_handle_t ibmf_hdl,
    ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
{
	void			*data;
	timeout_id_t		timeout_id;
	ib_mad_hdr_t		*hdr;
	ib_mad_classportinfo_t	*cpi;

	IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo:ibmf hdl "
	    "%p msg %p gid info %p", ibmf_hdl, msg, gid_info);

	/* Ignore anything other than a ClassPortInfo response */
	if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) {
		IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo: "
		    "Not a ClassPortInfo resp");
		*flag |= IBDM_IBMF_PKT_UNEXP_RESP;
		return;
	}

	/*
	 * Verify whether timeout handler is created/active.
	 * If created/ active, cancel the timeout handler
	 */
	mutex_enter(&gid_info->gl_mutex);
	ibdm_bump_transactionID(gid_info);
	if (gid_info->gl_state != IBDM_GET_CLASSPORTINFO) {
		IBTF_DPRINTF_L2("ibdm", "\thandle_classportinfo:DUP resp");
		*flag |= IBDM_IBMF_PKT_DUP_RESP;
		mutex_exit(&gid_info->gl_mutex);
		return;
	}
	gid_info->gl_iou_cb_args.cb_req_type = 0;
	if (gid_info->gl_timeout_id) {
		/* Drop the mutex across untimeout() to avoid deadlock */
		timeout_id = gid_info->gl_timeout_id;
		mutex_exit(&gid_info->gl_mutex);
		IBTF_DPRINTF_L5("ibdm", "handle_ioclassportinfo: "
		    "gl_timeout_id = 0x%x", timeout_id);
		if (untimeout(timeout_id) == -1) {
			IBTF_DPRINTF_L2("ibdm", "handle_classportinfo: "
			    "untimeout gl_timeout_id failed");
		}
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_timeout_id = 0;
	}
	/* Advance the probe state machine to the IOUnitInfo phase */
	gid_info->gl_state = IBDM_GET_IOUNITINFO;
	gid_info->gl_pending_cmds++;
	mutex_exit(&gid_info->gl_mutex);

	data = msg->im_msgbufs_recv.im_bufs_cl_data;
	cpi = (ib_mad_classportinfo_t *)data;

	/*
	 * Cache the "RespTimeValue" and redirection information in the
	 * global gid list data structure. This cached information will
	 * be used to send any further requests to the GID.
	 */
	gid_info->gl_resp_timeout	=
	    (b2h32(cpi->RespTimeValue) & 0x1F);

	gid_info->gl_redirected		= ((IBDM_IN_IBMFMSG_STATUS(msg) &
	    MAD_STATUS_REDIRECT_REQUIRED) ? B_TRUE: B_FALSE);
	gid_info->gl_redirect_dlid	= b2h16(cpi->RedirectLID);
	gid_info->gl_redirect_QP	= (b2h32(cpi->RedirectQP) & 0xffffff);
	gid_info->gl_redirect_pkey	= b2h16(cpi->RedirectP_Key);
	gid_info->gl_redirect_qkey	= b2h32(cpi->RedirectQ_Key);
	gid_info->gl_redirectGID_hi	= b2h64(cpi->RedirectGID_hi);
	gid_info->gl_redirectGID_lo	= b2h64(cpi->RedirectGID_lo);
	gid_info->gl_redirectSL		= cpi->RedirectSL;

	ibdm_dump_classportinfo(cpi);

	/*
	 * Send IOUnitInfo request
	 * Reuse previously allocated IBMF packet for sending ClassPortInfo
	 * Check whether DM agent on the remote node requested redirection
	 * If so, send the request to the redirect DGID/DLID/PKEY/QP.
	 */
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
	ibdm_alloc_send_buffers(msg);
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
	msg->im_local_addr.ia_local_lid	= gid_info->gl_slid;
	msg->im_local_addr.ia_remote_lid	= gid_info->gl_dlid;

	if (gid_info->gl_redirected == B_TRUE) {
		if (gid_info->gl_redirect_dlid != 0) {
			msg->im_local_addr.ia_remote_lid =
			    gid_info->gl_redirect_dlid;
		}
		msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
		msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
		msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
		msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;
	} else {
		msg->im_local_addr.ia_remote_qno = 1;
		msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
		msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
		msg->im_local_addr.ia_service_level = gid_info->gl_SL;
	}

	/* Build a DevMgt GET(IOUnitInfo) MAD header */
	hdr			= IBDM_OUT_IBMFMSG_MADHDR(msg);
	hdr->BaseVersion	= MAD_CLASS_BASE_VERS_1;
	hdr->MgmtClass		= MAD_MGMT_CLASS_DEV_MGT;
	hdr->ClassVersion	= IB_DM_CLASS_VERSION_1;
	hdr->R_Method		= IB_DM_DEVMGT_METHOD_GET;
	hdr->Status		= 0;
	hdr->TransactionID	= h2b64(gid_info->gl_transactionID);
	hdr->AttributeID	= h2b16(IB_DM_ATTR_IO_UNITINFO);
	hdr->AttributeModifier	= 0;

	gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO;
	gid_info->gl_iou_cb_args.cb_gid_info = gid_info;
	gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt;

	/* Arm the retry/timeout handler before sending */
	mutex_enter(&gid_info->gl_mutex);
	gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
	    &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
	mutex_exit(&gid_info->gl_mutex);

	IBTF_DPRINTF_L5("ibdm", "handle_classportinfo:"
	    "timeout %x", gid_info->gl_timeout_id);

	if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, NULL,
	    ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm",
		    "\thandle_classportinfo: msg transport failed");
		ibdm_ibmf_send_cb(ibmf_hdl, msg, &gid_info->gl_iou_cb_args);
	}
	/* Tell the receive path not to free the reused packet */
	(*flag) |= IBDM_IBMF_PKT_REUSED;
}
/*
 * ibdm_send_iounitinfo:
 *	Sends a DM request to get IOU unitinfo.
 *	Allocates a fresh IBMF packet, builds a DevMgt GET(IOUnitInfo)
 *	MAD and sends it with a retry timeout armed.
 *	Returns IBDM_SUCCESS/IBDM_FAILURE.
 */
static int
ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *gid_info)
{
	ibmf_msg_t	*msg;
	ib_mad_hdr_t	*hdr;

	IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: gid info 0x%p", gid_info);

	/*
	 * Send command to get iounitinfo attribute. Allocate a IBMF
	 * packet and initialize the packet.
	 */
	if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, &msg) !=
	    IBMF_SUCCESS) {
		IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: pkt alloc fail");
		return (IBDM_FAILURE);
	}

	mutex_enter(&gid_info->gl_mutex);
	ibdm_bump_transactionID(gid_info);
	mutex_exit(&gid_info->gl_mutex);


	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
	ibdm_alloc_send_buffers(msg);
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
	/* Address the request to the destination recorded in gid_info */
	msg->im_local_addr.ia_local_lid	= gid_info->gl_slid;
	msg->im_local_addr.ia_remote_lid	= gid_info->gl_dlid;
	msg->im_local_addr.ia_remote_qno	= 1;
	msg->im_local_addr.ia_p_key		= gid_info->gl_p_key;
	msg->im_local_addr.ia_q_key		= IB_GSI_QKEY;
	msg->im_local_addr.ia_service_level	= gid_info->gl_SL;

	/* Build a DevMgt GET(IOUnitInfo) MAD header */
	hdr			= IBDM_OUT_IBMFMSG_MADHDR(msg);
	hdr->BaseVersion	= MAD_CLASS_BASE_VERS_1;
	hdr->MgmtClass		= MAD_MGMT_CLASS_DEV_MGT;
	hdr->ClassVersion	= IB_DM_CLASS_VERSION_1;
	hdr->R_Method		= IB_DM_DEVMGT_METHOD_GET;
	hdr->Status		= 0;
	hdr->TransactionID	= h2b64(gid_info->gl_transactionID);
	hdr->AttributeID	= h2b16(IB_DM_ATTR_IO_UNITINFO);
	hdr->AttributeModifier	= 0;

	gid_info->gl_iou_cb_args.cb_gid_info = gid_info;
	gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt;
	gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO;

	/* Arm the retry/timeout handler before sending */
	mutex_enter(&gid_info->gl_mutex);
	gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
	    &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
	mutex_exit(&gid_info->gl_mutex);

	IBTF_DPRINTF_L5("ibdm", "send_iouunitinfo:"
	    "timeout %x", gid_info->gl_timeout_id);

	/* On transport failure, invoke the send callback to clean up */
	if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg,
	    NULL, ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) !=
	    IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tsend_iounitinfo: ibmf send failed");
		ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl,
		    msg, &gid_info->gl_iou_cb_args);
	}
	return (IBDM_SUCCESS);
}
/*
 * ibdm_handle_iounitinfo()
 *	Invoked by the IBMF when IO Unitinfo request is completed.
 *	Validates the response, cancels the pending retry timeout,
 *	(re)allocates the per-GID IOU/IOC bookkeeping and then sends
 *	one IOCControllerProfile request per populated IOC slot in
 *	parallel, reusing the incoming packet for the first request.
 */
static void
ibdm_handle_iounitinfo(ibmf_handle_t ibmf_hdl,
    ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
{
	int			ii, first = B_TRUE;
	int			num_iocs;
	size_t			size;
	uchar_t			slot_info;
	timeout_id_t		timeout_id;
	ib_mad_hdr_t		*hdr;
	ibdm_ioc_info_t		*ioc_info;
	ib_dm_io_unitinfo_t	*iou_info;
	ib_dm_io_unitinfo_t	*giou_info;
	ibdm_timeout_cb_args_t	*cb_args;

	IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo:"
	    " ibmf hdl %p pkt %p gid info %p", ibmf_hdl, msg, gid_info);

	/* Ignore anything other than an IOUnitInfo response */
	if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_IO_UNITINFO) {
		IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: "
		    "Unexpected response");
		(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
		return;
	}

	mutex_enter(&gid_info->gl_mutex);
	/* A state other than GET_IOUNITINFO means a duplicate response */
	if (gid_info->gl_state != IBDM_GET_IOUNITINFO) {
		IBTF_DPRINTF_L4("ibdm",
		    "\thandle_iounitinfo: DUP resp");
		mutex_exit(&gid_info->gl_mutex);
		(*flag) = IBDM_IBMF_PKT_DUP_RESP;
		return;
	}
	gid_info->gl_iou_cb_args.cb_req_type = 0;
	if (gid_info->gl_timeout_id) {
		/* Drop the mutex across untimeout() to avoid deadlock */
		timeout_id = gid_info->gl_timeout_id;
		mutex_exit(&gid_info->gl_mutex);
		IBTF_DPRINTF_L5("ibdm", "handle_iounitinfo: "
		    "gl_timeout_id = 0x%x", timeout_id);
		if (untimeout(timeout_id) == -1) {
			IBTF_DPRINTF_L2("ibdm", "handle_iounitinfo: "
			    "untimeout gl_timeout_id failed");
		}
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_timeout_id = 0;
	}
	gid_info->gl_state = IBDM_GET_IOC_DETAILS;

	iou_info = IBDM_IN_IBMFMSG2IOU(msg);
	ibdm_dump_iounitinfo(iou_info);
	num_iocs = iou_info->iou_num_ctrl_slots;
	/*
	 * check if number of IOCs reported is zero? if yes, return.
	 * when num_iocs are reported zero internal IOC database needs
	 * to be updated. To ensure that save the number of IOCs in
	 * the new field "gl_num_iocs". Use a new field instead of
	 * "giou_info->iou_num_ctrl_slots" as that would prevent
	 * an unnecessary kmem_alloc/kmem_free when num_iocs is 0.
	 */
	if (num_iocs == 0 && gid_info->gl_num_iocs == 0) {
		IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: no IOC's");
		mutex_exit(&gid_info->gl_mutex);
		return;
	}
	IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: num_iocs = %d", num_iocs);

	/*
	 * if there is an existing gl_iou (IOU has been probed before)
	 * check if the "iou_changeid" is same as saved entry in
	 * "giou_info->iou_changeid".
	 * (note: this logic can prevent IOC enumeration if a given
	 * vendor doesn't support setting iou_changeid field for its IOU)
	 *
	 * if there is an existing gl_iou and iou_changeid has changed :
	 * free up existing gl_iou info and its related structures.
	 * reallocate gl_iou info all over again.
	 * if we donot free this up; then this leads to memory leaks
	 */
	if (gid_info->gl_iou) {
		giou_info = &gid_info->gl_iou->iou_info;
		if (b2h16(iou_info->iou_changeid) ==
		    giou_info->iou_changeid) {
			IBTF_DPRINTF_L3("ibdm",
			    "\thandle_iounitinfo: no IOCs changed");
			gid_info->gl_state = IBDM_GID_PROBING_COMPLETE;
			mutex_exit(&gid_info->gl_mutex);
			return;
		}

		/*
		 * Store the iou info as prev_iou to be used after
		 * sweep is done.
		 */
		ASSERT(gid_info->gl_prev_iou == NULL);
		IBTF_DPRINTF_L4(ibdm_string,
		    "\thandle_iounitinfo: setting gl_prev_iou %p",
		    gid_info->gl_prev_iou);
		gid_info->gl_prev_iou = gid_info->gl_iou;
		ibdm.ibdm_prev_iou = 1;
		gid_info->gl_iou = NULL;
	}

	/* IOU header and per-IOC array live in one zalloc'ed chunk */
	size = sizeof (ibdm_iou_info_t) + num_iocs * sizeof (ibdm_ioc_info_t);
	gid_info->gl_iou = (ibdm_iou_info_t *)kmem_zalloc(size, KM_SLEEP);
	giou_info = &gid_info->gl_iou->iou_info;
	gid_info->gl_iou->iou_ioc_info = (ibdm_ioc_info_t *)
	    ((char *)gid_info->gl_iou + sizeof (ibdm_iou_info_t));

	giou_info->iou_num_ctrl_slots	= gid_info->gl_num_iocs	= num_iocs;
	giou_info->iou_flag		= iou_info->iou_flag;
	bcopy(iou_info->iou_ctrl_list, giou_info->iou_ctrl_list, 128);
	giou_info->iou_changeid	= b2h16(iou_info->iou_changeid);
	gid_info->gl_pending_cmds++;	/* for diag code */
	mutex_exit(&gid_info->gl_mutex);

	if (ibdm_get_diagcode(gid_info, 0) != IBDM_SUCCESS) {
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_pending_cmds--;
		mutex_exit(&gid_info->gl_mutex);
	}
	/*
	 * Parallelize getting IOC controller profiles from here.
	 * Allocate IBMF packets and send commands to get IOC profile for
	 * each IOC present on the IOU.
	 */
	for (ii = 0; ii < num_iocs; ii++) {
		/*
		 * Check whether IOC is present in the slot
		 * Series of nibbles (in the field iou_ctrl_list) represents
		 * a slot in the IOU.
		 * Byte format: 76543210
		 * Bits 0-3 of first byte represent Slot 2
		 * bits 4-7 of first byte represent slot 1,
		 * bits 0-3 of second byte represent slot 4 and so on
		 * Each 4-bit nibble has the following meaning
		 * 0x0 : IOC not installed
		 * 0x1 : IOC is present
		 * 0xf : Slot does not exist
		 * and all other values are reserved.
		 */
		ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii);
		slot_info = giou_info->iou_ctrl_list[(ii/2)];
		if ((ii % 2) == 0)
			slot_info = (slot_info >> 4);

		if ((slot_info & 0xf) != 1) {
			IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: "
			    "No IOC is present in the slot = %d", ii);
			ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_FAILED;
			continue;
		}

		mutex_enter(&gid_info->gl_mutex);
		ibdm_bump_transactionID(gid_info);
		mutex_exit(&gid_info->gl_mutex);

		/*
		 * Re use the already allocated packet (for IOUnitinfo) to
		 * send the first IOC controller attribute. Allocate new
		 * IBMF packets for the rest of the IOC's
		 */
		if (first != B_TRUE) {
			msg = NULL;
			if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP,
			    &msg) != IBMF_SUCCESS) {
				IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: "
				    "IBMF packet allocation failed");
				continue;
			}

		}

		/* allocate send buffers for all messages */
		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
		ibdm_alloc_send_buffers(msg);
		_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))

		/* Honor redirection cached from the ClassPortInfo phase */
		msg->im_local_addr.ia_local_lid	= gid_info->gl_slid;
		msg->im_local_addr.ia_remote_lid	= gid_info->gl_dlid;
		if (gid_info->gl_redirected == B_TRUE) {
			if (gid_info->gl_redirect_dlid != 0) {
				msg->im_local_addr.ia_remote_lid =
				    gid_info->gl_redirect_dlid;
			}
			msg->im_local_addr.ia_remote_qno =
			    gid_info->gl_redirect_QP;
			msg->im_local_addr.ia_p_key =
			    gid_info->gl_redirect_pkey;
			msg->im_local_addr.ia_q_key =
			    gid_info->gl_redirect_qkey;
			msg->im_local_addr.ia_service_level =
			    gid_info->gl_redirectSL;
		} else {
			msg->im_local_addr.ia_remote_qno = 1;
			msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
			msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
			msg->im_local_addr.ia_service_level = gid_info->gl_SL;
		}

		/* AttributeModifier selects the 1-based IOC slot number */
		hdr			= IBDM_OUT_IBMFMSG_MADHDR(msg);
		hdr->BaseVersion	= MAD_CLASS_BASE_VERS_1;
		hdr->MgmtClass		= MAD_MGMT_CLASS_DEV_MGT;
		hdr->ClassVersion	= IB_DM_CLASS_VERSION_1;
		hdr->R_Method		= IB_DM_DEVMGT_METHOD_GET;
		hdr->Status		= 0;
		hdr->TransactionID	= h2b64(gid_info->gl_transactionID);
		hdr->AttributeID	= h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE);
		hdr->AttributeModifier	= h2b32(ii + 1);

		ioc_info->ioc_state	= IBDM_IOC_STATE_PROBE_INVALID;
		cb_args			= &ioc_info->ioc_cb_args;
		cb_args->cb_gid_info	= gid_info;
		cb_args->cb_retry_count	= ibdm_dft_retry_cnt;
		cb_args->cb_req_type	= IBDM_REQ_TYPE_IOCINFO;
		cb_args->cb_ioc_num	= ii;

		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_pending_cmds++; /* for diag code */

		ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
		    cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
		mutex_exit(&gid_info->gl_mutex);

		IBTF_DPRINTF_L5("ibdm", "\thandle_iounitinfo:"
		    "timeout 0x%x, ioc_num %d", ioc_info->ioc_timeout_id, ii);

		if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg,
		    NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm",
			    "\thandle_iounitinfo: msg transport failed");
			ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args);
		}
		(*flag) |= IBDM_IBMF_PKT_REUSED;
		first = B_FALSE;
		gid_info->gl_iou->iou_niocs_probe_in_progress++;
	}
}
"\thandle_ioc_profile:" 2813 " ibmf hdl %p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2814 2815 ioc = IBDM_IN_IBMFMSG2IOC(msg); 2816 /* 2817 * Check whether we know this IOC already 2818 * This will return NULL if reprobe is in progress 2819 * IBDM_IOC_STATE_REPROBE_PROGRESS will be set. 2820 * Do not hold mutexes here. 2821 */ 2822 if (ibdm_is_ioc_present(ioc->ioc_guid, gid_info, flag) != NULL) { 2823 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:" 2824 "IOC guid %llx is present", ioc->ioc_guid); 2825 return; 2826 } 2827 ioc_no = IBDM_IN_IBMFMSG_ATTRMOD(msg); 2828 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile: ioc_no = %d", ioc_no-1); 2829 2830 /* Make sure that IOC index is with the valid range */ 2831 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) { 2832 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: " 2833 "IOC index Out of range, index %d", ioc); 2834 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2835 return; 2836 } 2837 ioc_info = &gid_info->gl_iou->iou_ioc_info[ioc_no - 1]; 2838 ioc_info->ioc_iou_info = gid_info->gl_iou; 2839 2840 mutex_enter(&gid_info->gl_mutex); 2841 if (ioc_info->ioc_state == IBDM_IOC_STATE_REPROBE_PROGRESS) { 2842 reprobe = 1; 2843 ioc_info->ioc_prev_serv = ioc_info->ioc_serv; 2844 ioc_info->ioc_serv = NULL; 2845 ioc_info->ioc_prev_serv_cnt = 2846 ioc_info->ioc_profile.ioc_service_entries; 2847 } else if (ioc_info->ioc_state != IBDM_IOC_STATE_PROBE_INVALID) { 2848 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: DUP response" 2849 "ioc %d, ioc_state %x", ioc_no - 1, ioc_info->ioc_state); 2850 mutex_exit(&gid_info->gl_mutex); 2851 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2852 return; 2853 } 2854 ioc_info->ioc_cb_args.cb_req_type = 0; 2855 if (ioc_info->ioc_timeout_id) { 2856 timeout_id = ioc_info->ioc_timeout_id; 2857 ioc_info->ioc_timeout_id = 0; 2858 mutex_exit(&gid_info->gl_mutex); 2859 IBTF_DPRINTF_L5("ibdm", "handle_ioc_profile: " 2860 "ioc_timeout_id = 0x%x", timeout_id); 2861 if (untimeout(timeout_id) == -1) { 2862 IBTF_DPRINTF_L2("ibdm", 
"handle_ioc_profile: " 2863 "untimeout ioc_timeout_id failed"); 2864 } 2865 mutex_enter(&gid_info->gl_mutex); 2866 } 2867 2868 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_SUCCESS; 2869 if (reprobe == 0) { 2870 ioc_info->ioc_iou_guid = gid_info->gl_nodeguid; 2871 ioc_info->ioc_nodeguid = gid_info->gl_nodeguid; 2872 } 2873 2874 /* 2875 * Save all the IOC information in the global structures. 2876 * Note the wire format is Big Endian and the Sparc process also 2877 * big endian. So, there is no need to convert the data fields 2878 * The conversion routines used below are ineffective on Sparc 2879 * machines where as they will be effective on little endian 2880 * machines such as Intel processors. 2881 */ 2882 gioc = (ib_dm_ioc_ctrl_profile_t *)&ioc_info->ioc_profile; 2883 2884 /* 2885 * Restrict updates to onlyport GIDs and service entries during reprobe 2886 */ 2887 if (reprobe == 0) { 2888 gioc->ioc_guid = b2h64(ioc->ioc_guid); 2889 gioc->ioc_vendorid = 2890 ((b2h32(ioc->ioc_vendorid) & IB_DM_VENDORID_MASK) 2891 >> IB_DM_VENDORID_SHIFT); 2892 gioc->ioc_deviceid = b2h32(ioc->ioc_deviceid); 2893 gioc->ioc_device_ver = b2h16(ioc->ioc_device_ver); 2894 gioc->ioc_subsys_vendorid = 2895 ((b2h32(ioc->ioc_subsys_vendorid) & IB_DM_VENDORID_MASK) 2896 >> IB_DM_VENDORID_SHIFT); 2897 gioc->ioc_subsys_id = b2h32(ioc->ioc_subsys_id); 2898 gioc->ioc_io_class = b2h16(ioc->ioc_io_class); 2899 gioc->ioc_io_subclass = b2h16(ioc->ioc_io_subclass); 2900 gioc->ioc_protocol = b2h16(ioc->ioc_protocol); 2901 gioc->ioc_protocol_ver = b2h16(ioc->ioc_protocol_ver); 2902 gioc->ioc_send_msg_qdepth = 2903 b2h16(ioc->ioc_send_msg_qdepth); 2904 gioc->ioc_rdma_read_qdepth = 2905 b2h16(ioc->ioc_rdma_read_qdepth); 2906 gioc->ioc_send_msg_sz = b2h32(ioc->ioc_send_msg_sz); 2907 gioc->ioc_rdma_xfer_sz = b2h32(ioc->ioc_rdma_xfer_sz); 2908 gioc->ioc_ctrl_opcap_mask = ioc->ioc_ctrl_opcap_mask; 2909 bcopy(ioc->ioc_id_string, gioc->ioc_id_string, 2910 IB_DM_IOC_ID_STRING_LEN); 2911 2912 
ioc_info->ioc_iou_diagcode = gid_info->gl_iou->iou_diagcode; 2913 ioc_info->ioc_iou_dc_valid = gid_info->gl_iou->iou_dc_valid; 2914 ioc_info->ioc_diagdeviceid = (IB_DM_IOU_DEVICEID_MASK & 2915 gid_info->gl_iou->iou_info.iou_flag) ? B_TRUE : B_FALSE; 2916 2917 if (ioc_info->ioc_diagdeviceid == B_TRUE) { 2918 gid_info->gl_pending_cmds++; 2919 IBTF_DPRINTF_L3(ibdm_string, 2920 "\tibdm_handle_ioc_profile: " 2921 "%d: gid_info %p gl_state %d pending_cmds %d", 2922 __LINE__, gid_info, gid_info->gl_state, 2923 gid_info->gl_pending_cmds); 2924 } 2925 } 2926 gioc->ioc_service_entries = ioc->ioc_service_entries; 2927 mutex_exit(&gid_info->gl_mutex); 2928 2929 ibdm_dump_ioc_profile(gioc); 2930 2931 if ((ioc_info->ioc_diagdeviceid == B_TRUE) && (reprobe == 0)) { 2932 if (ibdm_get_diagcode(gid_info, ioc_no) != IBDM_SUCCESS) { 2933 mutex_enter(&gid_info->gl_mutex); 2934 gid_info->gl_pending_cmds--; 2935 mutex_exit(&gid_info->gl_mutex); 2936 } 2937 } 2938 ioc_info->ioc_serv = (ibdm_srvents_info_t *)kmem_zalloc( 2939 (gioc->ioc_service_entries * sizeof (ibdm_srvents_info_t)), 2940 KM_SLEEP); 2941 2942 /* 2943 * In one single request, maximum number of requests that can be 2944 * obtained is 4. If number of service entries are more than four, 2945 * calculate number requests needed and send them parallelly. 
2946 */ 2947 nserv_entries = ioc->ioc_service_entries; 2948 ii = 0; 2949 while (nserv_entries) { 2950 mutex_enter(&gid_info->gl_mutex); 2951 gid_info->gl_pending_cmds++; 2952 ibdm_bump_transactionID(gid_info); 2953 mutex_exit(&gid_info->gl_mutex); 2954 2955 if (first != B_TRUE) { 2956 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2957 &msg) != IBMF_SUCCESS) { 2958 continue; 2959 } 2960 2961 } 2962 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2963 ibdm_alloc_send_buffers(msg); 2964 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2965 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2966 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2967 if (gid_info->gl_redirected == B_TRUE) { 2968 if (gid_info->gl_redirect_dlid != 0) { 2969 msg->im_local_addr.ia_remote_lid = 2970 gid_info->gl_redirect_dlid; 2971 } 2972 msg->im_local_addr.ia_remote_qno = 2973 gid_info->gl_redirect_QP; 2974 msg->im_local_addr.ia_p_key = 2975 gid_info->gl_redirect_pkey; 2976 msg->im_local_addr.ia_q_key = 2977 gid_info->gl_redirect_qkey; 2978 msg->im_local_addr.ia_service_level = 2979 gid_info->gl_redirectSL; 2980 } else { 2981 msg->im_local_addr.ia_remote_qno = 1; 2982 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2983 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2984 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2985 } 2986 2987 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2988 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2989 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2990 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2991 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2992 hdr->Status = 0; 2993 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2994 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 2995 2996 srv_start = ii * 4; 2997 cb_args = &ioc_info->ioc_serv[srv_start].se_cb_args; 2998 cb_args->cb_gid_info = gid_info; 2999 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 3000 cb_args->cb_req_type = IBDM_REQ_TYPE_SRVENTS; 3001 cb_args->cb_srvents_start = srv_start; 3002 cb_args->cb_ioc_num = 
ioc_no - 1; 3003 3004 if (nserv_entries >= IBDM_MAX_SERV_ENTRIES_PER_REQ) { 3005 nserv_entries -= IBDM_MAX_SERV_ENTRIES_PER_REQ; 3006 cb_args->cb_srvents_end = (cb_args->cb_srvents_start + 3007 IBDM_MAX_SERV_ENTRIES_PER_REQ - 1); 3008 } else { 3009 cb_args->cb_srvents_end = 3010 (cb_args->cb_srvents_start + nserv_entries - 1); 3011 nserv_entries = 0; 3012 } 3013 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 3014 ibdm_fill_srv_attr_mod(hdr, cb_args); 3015 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 3016 3017 mutex_enter(&gid_info->gl_mutex); 3018 ioc_info->ioc_serv[srv_start].se_timeout_id = timeout( 3019 ibdm_pkt_timeout_hdlr, cb_args, 3020 IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3021 mutex_exit(&gid_info->gl_mutex); 3022 3023 IBTF_DPRINTF_L5("ibdm", "\thandle_ioc_profile:" 3024 "timeout %x, ioc %d srv %d", 3025 ioc_info->ioc_serv[srv_start].se_timeout_id, 3026 ioc_no - 1, srv_start); 3027 3028 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 3029 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3030 IBTF_DPRINTF_L2("ibdm", 3031 "\thandle_ioc_profile: msg send failed"); 3032 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 3033 } 3034 (*flag) |= IBDM_IBMF_PKT_REUSED; 3035 first = B_FALSE; 3036 ii++; 3037 } 3038 } 3039 3040 3041 /* 3042 * ibdm_handle_srventry_mad() 3043 */ 3044 static void 3045 ibdm_handle_srventry_mad(ibmf_msg_t *msg, 3046 ibdm_dp_gidinfo_t *gid_info, int *flag) 3047 { 3048 uint_t ii, ioc_no, attrmod; 3049 uint_t nentries, start, end; 3050 timeout_id_t timeout_id; 3051 ib_dm_srv_t *srv_ents; 3052 ibdm_ioc_info_t *ioc_info; 3053 ibdm_srvents_info_t *gsrv_ents; 3054 3055 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad:" 3056 " IBMF msg %p gid info %p", msg, gid_info); 3057 3058 srv_ents = IBDM_IN_IBMFMSG2SRVENT(msg); 3059 /* 3060 * Get the start and end index of the service entries 3061 * Upper 16 bits identify the IOC 3062 * Lower 16 bits specify the range of service entries 3063 * LSB specifies (Big endian) end of the range 3064 * MSB 
specifies (Big endian) start of the range 3065 */ 3066 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 3067 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 3068 end = ((attrmod >> 8) & IBDM_8_BIT_MASK); 3069 start = (attrmod & IBDM_8_BIT_MASK); 3070 3071 /* Make sure that IOC index is with the valid range */ 3072 if ((ioc_no < 1) | 3073 (ioc_no > gid_info->gl_iou->iou_info.iou_num_ctrl_slots)) { 3074 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 3075 "IOC index Out of range, index %d", ioc_no); 3076 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3077 return; 3078 } 3079 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 3080 3081 /* 3082 * Make sure that the "start" and "end" service indexes are 3083 * with in the valid range 3084 */ 3085 nentries = ioc_info->ioc_profile.ioc_service_entries; 3086 if ((start > end) | (start >= nentries) | (end >= nentries)) { 3087 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 3088 "Attr modifier 0x%x, #Serv entries %d", attrmod, nentries); 3089 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3090 return; 3091 } 3092 gsrv_ents = &ioc_info->ioc_serv[start]; 3093 mutex_enter(&gid_info->gl_mutex); 3094 if (gsrv_ents->se_state != IBDM_SE_INVALID) { 3095 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 3096 "already known, ioc %d, srv %d, se_state %x", 3097 ioc_no - 1, start, gsrv_ents->se_state); 3098 mutex_exit(&gid_info->gl_mutex); 3099 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 3100 return; 3101 } 3102 ioc_info->ioc_serv[start].se_cb_args.cb_req_type = 0; 3103 if (ioc_info->ioc_serv[start].se_timeout_id) { 3104 IBTF_DPRINTF_L2("ibdm", 3105 "\thandle_srventry_mad: ioc %d start %d", ioc_no, start); 3106 timeout_id = ioc_info->ioc_serv[start].se_timeout_id; 3107 ioc_info->ioc_serv[start].se_timeout_id = 0; 3108 mutex_exit(&gid_info->gl_mutex); 3109 IBTF_DPRINTF_L5("ibdm", "handle_srverntry_mad: " 3110 "se_timeout_id = 0x%x", timeout_id); 3111 if (untimeout(timeout_id) == -1) { 3112 IBTF_DPRINTF_L2("ibdm", "handle_srventry_mad: " 3113 "untimeout se_timeout_id 
failed"); 3114 } 3115 mutex_enter(&gid_info->gl_mutex); 3116 } 3117 3118 gsrv_ents->se_state = IBDM_SE_VALID; 3119 mutex_exit(&gid_info->gl_mutex); 3120 for (ii = start; ii <= end; ii++, srv_ents++, gsrv_ents++) { 3121 gsrv_ents->se_attr.srv_id = b2h64(srv_ents->srv_id); 3122 bcopy(srv_ents->srv_name, 3123 gsrv_ents->se_attr.srv_name, IB_DM_MAX_SVC_NAME_LEN); 3124 ibdm_dump_service_entries(&gsrv_ents->se_attr); 3125 } 3126 } 3127 3128 3129 /* 3130 * ibdm_get_diagcode: 3131 * Send request to get IOU/IOC diag code 3132 * Returns IBDM_SUCCESS/IBDM_FAILURE 3133 */ 3134 static int 3135 ibdm_get_diagcode(ibdm_dp_gidinfo_t *gid_info, int attr) 3136 { 3137 ibmf_msg_t *msg; 3138 ib_mad_hdr_t *hdr; 3139 ibdm_ioc_info_t *ioc; 3140 ibdm_timeout_cb_args_t *cb_args; 3141 timeout_id_t *timeout_id; 3142 3143 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: gid info %p, attr = %d", 3144 gid_info, attr); 3145 3146 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 3147 &msg) != IBMF_SUCCESS) { 3148 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: pkt alloc fail"); 3149 return (IBDM_FAILURE); 3150 } 3151 3152 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 3153 ibdm_alloc_send_buffers(msg); 3154 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 3155 3156 mutex_enter(&gid_info->gl_mutex); 3157 ibdm_bump_transactionID(gid_info); 3158 mutex_exit(&gid_info->gl_mutex); 3159 3160 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 3161 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 3162 if (gid_info->gl_redirected == B_TRUE) { 3163 if (gid_info->gl_redirect_dlid != 0) { 3164 msg->im_local_addr.ia_remote_lid = 3165 gid_info->gl_redirect_dlid; 3166 } 3167 3168 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3169 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3170 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3171 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 3172 } else { 3173 msg->im_local_addr.ia_remote_qno = 1; 3174 msg->im_local_addr.ia_p_key = 
gid_info->gl_p_key; 3175 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 3176 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 3177 } 3178 3179 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3180 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3181 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3182 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3183 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3184 hdr->Status = 0; 3185 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3186 3187 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3188 hdr->AttributeModifier = h2b32(attr); 3189 3190 if (attr == 0) { 3191 cb_args = &gid_info->gl_iou_cb_args; 3192 gid_info->gl_iou->iou_dc_valid = B_FALSE; 3193 cb_args->cb_ioc_num = 0; 3194 cb_args->cb_req_type = IBDM_REQ_TYPE_IOU_DIAGCODE; 3195 timeout_id = &gid_info->gl_timeout_id; 3196 } else { 3197 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attr - 1)); 3198 ioc->ioc_dc_valid = B_FALSE; 3199 cb_args = &ioc->ioc_dc_cb_args; 3200 cb_args->cb_ioc_num = attr - 1; 3201 cb_args->cb_req_type = IBDM_REQ_TYPE_IOC_DIAGCODE; 3202 timeout_id = &ioc->ioc_dc_timeout_id; 3203 } 3204 cb_args->cb_gid_info = gid_info; 3205 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 3206 cb_args->cb_srvents_start = 0; 3207 3208 mutex_enter(&gid_info->gl_mutex); 3209 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3210 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3211 mutex_exit(&gid_info->gl_mutex); 3212 3213 IBTF_DPRINTF_L5("ibdm", "\tget_diagcode:" 3214 "timeout %x, ioc %d", *timeout_id, cb_args->cb_ioc_num); 3215 3216 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 3217 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3218 IBTF_DPRINTF_L2("ibdm", "\tget_diagcode: ibmf send failed"); 3219 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3220 } 3221 return (IBDM_SUCCESS); 3222 } 3223 3224 /* 3225 * ibdm_handle_diagcode: 3226 * Process the DiagCode MAD response and update local DM 3227 * data structure. 
 */
static void
ibdm_handle_diagcode(ibmf_msg_t *ibmf_msg,
    ibdm_dp_gidinfo_t *gid_info, int *flag)
{
	uint16_t	attrmod, *diagcode;
	ibdm_iou_info_t	*iou;
	ibdm_ioc_info_t	*ioc;
	timeout_id_t	timeout_id;
	ibdm_timeout_cb_args_t	*cb_args;

	/* The DiagCode is the first 16 bits of the class data */
	diagcode = (uint16_t *)ibmf_msg->im_msgbufs_recv.im_bufs_cl_data;

	mutex_enter(&gid_info->gl_mutex);
	attrmod = IBDM_IN_IBMFMSG_ATTRMOD(ibmf_msg);
	iou = gid_info->gl_iou;
	if (attrmod == 0) {
		/* Attribute modifier 0: this is the IOU diagcode */
		if (iou->iou_dc_valid != B_FALSE) {
			(*flag) |= IBDM_IBMF_PKT_DUP_RESP;
			IBTF_DPRINTF_L4("ibdm",
			    "\thandle_diagcode: Duplicate IOU DiagCode");
			mutex_exit(&gid_info->gl_mutex);
			return;
		}
		cb_args = &gid_info->gl_iou_cb_args;
		cb_args->cb_req_type = 0;
		iou->iou_diagcode = b2h16(*diagcode);
		iou->iou_dc_valid = B_TRUE;
		if (gid_info->gl_timeout_id) {
			/* gl_mutex is dropped while cancelling the timeout */
			timeout_id = gid_info->gl_timeout_id;
			mutex_exit(&gid_info->gl_mutex);
			IBTF_DPRINTF_L5("ibdm", "\thandle_diagcode: "
			    "gl_timeout_id = 0x%x", timeout_id);
			if (untimeout(timeout_id) == -1) {
				IBTF_DPRINTF_L2("ibdm", "handle_diagcode: "
				    "untimeout gl_timeout_id failed");
			}
			mutex_enter(&gid_info->gl_mutex);
			gid_info->gl_timeout_id = 0;
		}
	} else {
		/* Non-zero attribute modifier: 1-based IOC index */
		ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod - 1));
		if (ioc->ioc_dc_valid != B_FALSE) {
			(*flag) |= IBDM_IBMF_PKT_DUP_RESP;
			IBTF_DPRINTF_L4("ibdm",
			    "\thandle_diagcode: Duplicate IOC DiagCode");
			mutex_exit(&gid_info->gl_mutex);
			return;
		}
		cb_args = &ioc->ioc_dc_cb_args;
		cb_args->cb_req_type = 0;
		ioc->ioc_diagcode = b2h16(*diagcode);
		ioc->ioc_dc_valid = B_TRUE;
		timeout_id = iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id;
		if (timeout_id) {
			/* gl_mutex is dropped while cancelling the timeout */
			iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id = 0;
			mutex_exit(&gid_info->gl_mutex);
			IBTF_DPRINTF_L5("ibdm", "handle_diagcode: "
			    "timeout_id = 0x%x", timeout_id);
			if (untimeout(timeout_id) == -1) {
				IBTF_DPRINTF_L2("ibdm", "\thandle_diagcode: "
				    "untimeout ioc_dc_timeout_id failed");
			}
			mutex_enter(&gid_info->gl_mutex);
		}
	}
	mutex_exit(&gid_info->gl_mutex);

	IBTF_DPRINTF_L4("ibdm", "\thandle_diagcode: DiagCode : 0x%x"
	    "attrmod : 0x%x", b2h16(*diagcode), attrmod);
}


/*
 * ibdm_is_ioc_present()
 *	Return ibdm_ioc_info_t if IOC guid is found in the global gid list
 */
static ibdm_ioc_info_t *
ibdm_is_ioc_present(ib_guid_t ioc_guid,
    ibdm_dp_gidinfo_t *gid_info, int *flag)
{
	int			ii;
	ibdm_ioc_info_t		*ioc;
	ibdm_dp_gidinfo_t	*head;
	ib_dm_io_unitinfo_t	*iou;

	/* Walk every known GID, scanning its IOC slots for a GUID match */
	mutex_enter(&ibdm.ibdm_mutex);
	head = ibdm.ibdm_dp_gidlist_head;
	while (head) {
		mutex_enter(&head->gl_mutex);
		if (head->gl_iou == NULL) {
			/* No IOU information discovered for this GID yet */
			mutex_exit(&head->gl_mutex);
			head = head->gl_next;
			continue;
		}
		iou = &head->gl_iou->iou_info;
		for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) {
			ioc = IBDM_GIDINFO2IOCINFO(head, ii);
			if ((ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) &&
			    (ioc->ioc_profile.ioc_guid == ioc_guid)) {
				if (gid_info == head) {
					/* Same GID answered twice */
					*flag |= IBDM_IBMF_PKT_DUP_RESP;
				} else if (ibdm_check_dgid(head->gl_dgid_lo,
				    head->gl_dgid_hi) != NULL) {
					IBTF_DPRINTF_L4("ibdm", "\tis_ioc_"
					    "present: gid not present");
					ibdm_add_to_gl_gid(gid_info, head);
				}
				mutex_exit(&head->gl_mutex);
				mutex_exit(&ibdm.ibdm_mutex);
				return (ioc);
			}
		}
		mutex_exit(&head->gl_mutex);
		head = head->gl_next;
	}
	mutex_exit(&ibdm.ibdm_mutex);
	return (NULL);
}


/*
 * ibdm_ibmf_send_cb()
 *	IBMF invokes this callback routine after posting the DM MAD to
 *	the HCA.
3353 */ 3354 /*ARGSUSED*/ 3355 static void 3356 ibdm_ibmf_send_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *ibmf_msg, void *arg) 3357 { 3358 ibdm_dump_ibmf_msg(ibmf_msg, 1); 3359 ibdm_free_send_buffers(ibmf_msg); 3360 if (ibmf_free_msg(ibmf_hdl, &ibmf_msg) != IBMF_SUCCESS) { 3361 IBTF_DPRINTF_L4("ibdm", 3362 "\tibmf_send_cb: IBMF free msg failed"); 3363 } 3364 } 3365 3366 3367 /* 3368 * ibdm_ibmf_recv_cb() 3369 * Invoked by the IBMF when a response to the one of the DM requests 3370 * is received. 3371 */ 3372 /*ARGSUSED*/ 3373 static void 3374 ibdm_ibmf_recv_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 3375 { 3376 ibdm_taskq_args_t *taskq_args; 3377 3378 /* 3379 * If the taskq enable is set then dispatch a taskq to process 3380 * the MAD, otherwise just process it on this thread 3381 */ 3382 if (ibdm_taskq_enable != IBDM_ENABLE_TASKQ_HANDLING) { 3383 ibdm_process_incoming_mad(ibmf_hdl, msg, arg); 3384 return; 3385 } 3386 3387 /* 3388 * create a taskq and dispatch it to process the incoming MAD 3389 */ 3390 taskq_args = kmem_alloc(sizeof (ibdm_taskq_args_t), KM_NOSLEEP); 3391 if (taskq_args == NULL) { 3392 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: kmem_alloc failed for" 3393 "taskq_args"); 3394 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3395 IBTF_DPRINTF_L4("ibmf_recv_cb", 3396 "\tibmf_recv_cb: IBMF free msg failed"); 3397 } 3398 return; 3399 } 3400 taskq_args->tq_ibmf_handle = ibmf_hdl; 3401 taskq_args->tq_ibmf_msg = msg; 3402 taskq_args->tq_args = arg; 3403 3404 if (taskq_dispatch(system_taskq, ibdm_recv_incoming_mad, taskq_args, 3405 TQ_NOSLEEP) == 0) { 3406 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: taskq_dispatch failed"); 3407 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3408 IBTF_DPRINTF_L4("ibmf_recv_cb", 3409 "\tibmf_recv_cb: IBMF free msg failed"); 3410 } 3411 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 3412 return; 3413 } 3414 3415 /* taskq_args are deleted in ibdm_recv_incoming_mad() */ 3416 } 3417 3418 3419 void 3420 
ibdm_recv_incoming_mad(void *args)
{
	ibdm_taskq_args_t	*taskq_args;

	taskq_args = (ibdm_taskq_args_t *)args;

	IBTF_DPRINTF_L4("ibdm", "\tibdm_recv_incoming_mad: "
	    "Processing incoming MAD via taskq");

	ibdm_process_incoming_mad(taskq_args->tq_ibmf_handle,
	    taskq_args->tq_ibmf_msg, taskq_args->tq_args);

	/* taskq_args were allocated by ibdm_ibmf_recv_cb() */
	kmem_free(taskq_args, sizeof (ibdm_taskq_args_t));
}


/*
 * Calls ibdm_process_incoming_mad with all function arguments extracted
 * from args
 */
/*ARGSUSED*/
static void
ibdm_process_incoming_mad(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg)
{
	int		flag = 0;
	int		ret;
	uint64_t	transaction_id;
	ib_mad_hdr_t	*hdr;
	ibdm_dp_gidinfo_t	*gid_info = NULL;

	IBTF_DPRINTF_L4("ibdm",
	    "\tprocess_incoming_mad: ibmf hdl %p pkt %p", ibmf_hdl, msg);
	ibdm_dump_ibmf_msg(msg, 0);

	/*
	 * IBMF calls this routine for every DM MAD that arrives at this port.
	 * But we handle only the responses for requests we sent. We drop all
	 * the DM packets that does not have response bit set in the MAD
	 * header(this eliminates all the requests sent to this port).
	 * We handle only DM class version 1 MAD's
	 */
	hdr = IBDM_IN_IBMFMSG_MADHDR(msg);
	if (ibdm_verify_mad_status(hdr) != IBDM_SUCCESS) {
		if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: "
			    "IBMF free msg failed DM request drop it");
		}
		return;
	}

	transaction_id = b2h64(hdr->TransactionID);

	/* Find the GID this response belongs to via its transaction ID */
	mutex_enter(&ibdm.ibdm_mutex);
	gid_info = ibdm.ibdm_dp_gidlist_head;
	while (gid_info) {
		if ((gid_info->gl_transactionID &
		    IBDM_GID_TRANSACTIONID_MASK) ==
		    (transaction_id & IBDM_GID_TRANSACTIONID_MASK))
			break;
		gid_info = gid_info->gl_next;
	}
	mutex_exit(&ibdm.ibdm_mutex);

	if (gid_info == NULL) {
		/* Drop the packet */
		IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: transaction ID"
		    " does not match: 0x%llx", transaction_id);
		if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: "
			    "IBMF free msg failed DM request drop it");
		}
		return;
	}

	/* Handle redirection for all the MAD's, except ClassPortInfo */
	if (((IBDM_IN_IBMFMSG_STATUS(msg) & MAD_STATUS_REDIRECT_REQUIRED)) &&
	    (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO)) {
		ret = ibdm_handle_redirection(msg, gid_info, &flag);
		if (ret == IBDM_SUCCESS) {
			/* Request was re-sent to the redirected agent */
			return;
		}
	} else {
		uint_t gl_state;

		mutex_enter(&gid_info->gl_mutex);
		gl_state = gid_info->gl_state;
		mutex_exit(&gid_info->gl_mutex);

		/* Dispatch on the probe state of this GID */
		switch (gl_state) {

		case IBDM_SET_CLASSPORTINFO:
			ibdm_handle_setclassportinfo(
			    ibmf_hdl, msg, gid_info, &flag);
			break;

		case IBDM_GET_CLASSPORTINFO:
			ibdm_handle_classportinfo(
			    ibmf_hdl, msg, gid_info, &flag);
			break;

		case IBDM_GET_IOUNITINFO:
			ibdm_handle_iounitinfo(ibmf_hdl, msg, gid_info, &flag);
			break;

		case IBDM_GET_IOC_DETAILS:
			/* Several attributes are outstanding in this state */
			switch (IBDM_IN_IBMFMSG_ATTR(msg)) {

			case IB_DM_ATTR_SERVICE_ENTRIES:
				ibdm_handle_srventry_mad(msg, gid_info, &flag);
				break;

			case IB_DM_ATTR_IOC_CTRL_PROFILE:
				ibdm_handle_ioc_profile(
				    ibmf_hdl, msg, gid_info, &flag);
				break;

			case IB_DM_ATTR_DIAG_CODE:
				ibdm_handle_diagcode(msg, gid_info, &flag);
				break;

			default:
				IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: "
				    "Error state, wrong attribute :-(");
				(void) ibmf_free_msg(ibmf_hdl, &msg);
				return;
			}
			break;
		default:
			IBTF_DPRINTF_L2("ibdm",
			    "process_incoming_mad: Dropping the packet"
			    " gl_state %x", gl_state);
			if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
				IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: "
				    "IBMF free msg failed DM request drop it");
			}
			return;
		}
	}

	/* Duplicate or unexpected responses do not retire a pending command */
	if ((flag & IBDM_IBMF_PKT_DUP_RESP) ||
	    (flag & IBDM_IBMF_PKT_UNEXP_RESP)) {
		IBTF_DPRINTF_L2("ibdm",
		    "\tprocess_incoming_mad:Dup/unexp resp : 0x%x", flag);
		if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: "
			    "IBMF free msg failed DM request drop it");
		}
		return;
	}

	mutex_enter(&gid_info->gl_mutex);
	if (gid_info->gl_pending_cmds < 1) {
		IBTF_DPRINTF_L2("ibdm",
		    "\tprocess_incoming_mad: pending commands negative");
	}
	if (--gid_info->gl_pending_cmds) {
		/* More responses outstanding for this GID */
		IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: "
		    "gid_info %p pending cmds %d",
		    gid_info, gid_info->gl_pending_cmds);
		mutex_exit(&gid_info->gl_mutex);
	} else {
		uint_t prev_state;
		IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: Probing DONE");
		prev_state = gid_info->gl_state;
		gid_info->gl_state = IBDM_GID_PROBING_COMPLETE;
		if (prev_state == IBDM_SET_CLASSPORTINFO) {
			IBTF_DPRINTF_L4("ibdm",
			    "\tprocess_incoming_mad: "
			    "Setclassportinfo for Cisco FC GW is done.");
			gid_info->gl_flag &= ~IBDM_CISCO_PROBE;
			gid_info->gl_flag |= IBDM_CISCO_PROBE_DONE;
			mutex_exit(&gid_info->gl_mutex);
			cv_broadcast(&gid_info->gl_probe_cv);
		} else {
			mutex_exit(&gid_info->gl_mutex);
			ibdm_notify_newgid_iocs(gid_info);
			mutex_enter(&ibdm.ibdm_mutex);
			if (--ibdm.ibdm_ngid_probes_in_progress == 0) {
				/* Last probe finished: wake up the sweeper */
				IBTF_DPRINTF_L4("ibdm",
				    "\tprocess_incoming_mad: Wakeup");
				ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
				cv_broadcast(&ibdm.ibdm_probe_cv);
			}
			mutex_exit(&ibdm.ibdm_mutex);
		}
	}

	/*
	 * Do not deallocate the IBMF packet if atleast one request
	 * is posted. IBMF packet is reused.
	 */
	if (!(flag & IBDM_IBMF_PKT_REUSED)) {
		if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: "
			    "IBMF free msg failed DM request drop it");
		}
	}
}


/*
 * ibdm_verify_mad_status()
 *	Verifies the MAD status
 *	Returns IBDM_SUCCESS if status is correct
 *	Returns IBDM_FAILURE for bogus MAD status
 */
static int
ibdm_verify_mad_status(ib_mad_hdr_t *hdr)
{
	int	ret = 0;

	/* Accept only GetResp MADs of DM class version 1 */
	if ((hdr->R_Method != IB_DM_DEVMGT_METHOD_GET_RESP) ||
	    (hdr->ClassVersion != IB_DM_CLASS_VERSION_1)) {
		return (IBDM_FAILURE);
	}

	/* Status 0 is OK; a redirect-required status is also acceptable */
	if (b2h16(hdr->Status) == 0)
		ret = IBDM_SUCCESS;
	else if ((b2h16(hdr->Status) & 0x1f) == MAD_STATUS_REDIRECT_REQUIRED)
		ret = IBDM_SUCCESS;
	else {
		IBTF_DPRINTF_L2("ibdm",
		    "\tverify_mad_status: Status : 0x%x", b2h16(hdr->Status));
		ret = IBDM_FAILURE;
	}
	return (ret);
}



/*
 * ibdm_handle_redirection()
 *	Returns IBDM_SUCCESS/IBDM_FAILURE
 */
static int
ibdm_handle_redirection(ibmf_msg_t *msg,
    ibdm_dp_gidinfo_t *gid_info, int *flag)
{
	int			attrmod, ioc_no, start;
	void			*data;
	timeout_id_t		*timeout_id;
	ib_mad_hdr_t		*hdr;
	ibdm_ioc_info_t		*ioc = NULL;
	ibdm_timeout_cb_args_t	*cb_args;
	ib_mad_classportinfo_t	*cpi;

	IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Enter");
	mutex_enter(&gid_info->gl_mutex);
	/* Locate the cb_args and timeout for the request being redirected */
	switch (gid_info->gl_state) {
	case IBDM_GET_IOUNITINFO:
		cb_args = &gid_info->gl_iou_cb_args;
		timeout_id = &gid_info->gl_timeout_id;
		break;

	case IBDM_GET_IOC_DETAILS:
		attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg);
		switch (IBDM_IN_IBMFMSG_ATTR(msg)) {

		case IB_DM_ATTR_DIAG_CODE:
			if (attrmod == 0) {
				/* Attribute modifier 0: IOU diagcode */
				cb_args = &gid_info->gl_iou_cb_args;
				timeout_id = &gid_info->gl_timeout_id;
				break;
			}
			if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) {
				IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:"
				    "IOC# Out of range %d", attrmod);
				(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
				mutex_exit(&gid_info->gl_mutex);
				return (IBDM_FAILURE);
			}
			ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1));
			cb_args = &ioc->ioc_dc_cb_args;
			timeout_id = &ioc->ioc_dc_timeout_id;
			break;

		case IB_DM_ATTR_IOC_CTRL_PROFILE:
			if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) {
				IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:"
				    "IOC# Out of range %d", attrmod);
				(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
				mutex_exit(&gid_info->gl_mutex);
				return (IBDM_FAILURE);
			}
			ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1));
			cb_args = &ioc->ioc_cb_args;
			timeout_id = &ioc->ioc_timeout_id;
			break;

		case IB_DM_ATTR_SERVICE_ENTRIES:
			/* Upper 16 bits: IOC index; low byte: start index */
			ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK);
			if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) {
				IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:"
				    "IOC# Out of range %d", ioc_no);
				(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
				mutex_exit(&gid_info->gl_mutex);
				return (IBDM_FAILURE);
			}
			start = (attrmod & IBDM_8_BIT_MASK);
			ioc = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1));
			if (start > ioc->ioc_profile.ioc_service_entries) {
				IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:"
				    " SE index Out of range %d", start);
				(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
				mutex_exit(&gid_info->gl_mutex);
				return (IBDM_FAILURE);
			}
			cb_args = &ioc->ioc_serv[start].se_cb_args;
			timeout_id = &ioc->ioc_serv[start].se_timeout_id;
			break;

		default:
			/* ERROR State */
			IBTF_DPRINTF_L2("ibdm",
			    "\thandle_redirection: wrong attribute :-(");
			(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
			mutex_exit(&gid_info->gl_mutex);
			return (IBDM_FAILURE);
		}
		break;
	default:
		/* ERROR State */
		IBTF_DPRINTF_L2("ibdm",
		    "\thandle_redirection: Error state :-(");
		(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
		mutex_exit(&gid_info->gl_mutex);
		return (IBDM_FAILURE);
	}
	if ((*timeout_id) != 0) {
		/* Cancel the original request's timeout before re-sending */
		mutex_exit(&gid_info->gl_mutex);
		if (untimeout(*timeout_id) == -1) {
			IBTF_DPRINTF_L2("ibdm", "\thandle_redirection: "
			    "untimeout failed %x", *timeout_id);
		} else {
			IBTF_DPRINTF_L5("ibdm",
			    "\thandle_redirection: timeout %x", *timeout_id);
		}
		mutex_enter(&gid_info->gl_mutex);
		*timeout_id = 0;
	}

	/* The response payload is a ClassPortInfo describing the new agent */
	data = msg->im_msgbufs_recv.im_bufs_cl_data;
	cpi = (ib_mad_classportinfo_t *)data;

	gid_info->gl_resp_timeout =
	    (b2h32(cpi->RespTimeValue) & 0x1F);

	gid_info->gl_redirected = B_TRUE;
	gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID);
	gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff);
	gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key);
	gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key);
	gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi);
	gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo);
	gid_info->gl_redirectSL = cpi->RedirectSL;

	if (gid_info->gl_redirect_dlid != 0) {
		msg->im_local_addr.ia_remote_lid =
		    gid_info->gl_redirect_dlid;
	}
	ibdm_bump_transactionID(gid_info);
	mutex_exit(&gid_info->gl_mutex);

	/* Rebuild the MAD header and re-send to the redirected agent */
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg, *hdr))
	ibdm_alloc_send_buffers(msg);

	hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
	hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
	hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
	hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
	hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
	hdr->Status = 0;
	hdr->TransactionID = h2b64(gid_info->gl_transactionID);
	hdr->AttributeID =
	    msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeID;
	hdr->AttributeModifier =
	    msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier;
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg, *hdr))

	msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
	msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
	msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
	msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;

	mutex_enter(&gid_info->gl_mutex);
	*timeout_id = timeout(ibdm_pkt_timeout_hdlr,
	    cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
	mutex_exit(&gid_info->gl_mutex);

	IBTF_DPRINTF_L5("ibdm", "\thandle_redirect:"
	    "timeout %x", *timeout_id);

	if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl,
	    msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L4("ibdm", "\thandle_redirection:"
		    "message transport failed");
		ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
	}
	(*flag) |= IBDM_IBMF_PKT_REUSED;
	IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Exit");
	return (IBDM_SUCCESS);
}


/*
 * ibdm_pkt_timeout_hdlr
 *	This timeout handler is registered for every IBMF packet that is
 *	sent through the IBMF. It gets called when no response is received
 *	within the specified time for the packet. No retries for the failed
 *	commands currently.
Drops the failed IBMF packet and update the
 *	pending list commands.
 */
static void
ibdm_pkt_timeout_hdlr(void *arg)
{
	ibdm_iou_info_t		*iou;
	ibdm_ioc_info_t		*ioc;
	ibdm_timeout_cb_args_t	*cb_args = arg;
	ibdm_dp_gidinfo_t	*gid_info;
	int			srv_ent;
	uint_t			new_gl_state;

	IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: gid_info: %p "
	    "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info,
	    cb_args->cb_req_type, cb_args->cb_ioc_num,
	    cb_args->cb_srvents_start);

	gid_info = cb_args->cb_gid_info;
	mutex_enter(&gid_info->gl_mutex);

	/* Response already arrived or request was retired: nothing to do */
	if ((gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) ||
	    (cb_args->cb_req_type == 0)) {

		IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: req completed"
		    "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_req_type,
		    cb_args->cb_ioc_num, cb_args->cb_srvents_start);

		if (gid_info->gl_timeout_id)
			gid_info->gl_timeout_id = 0;
		mutex_exit(&gid_info->gl_mutex);
		return;
	}
	if (cb_args->cb_retry_count) {
		cb_args->cb_retry_count--;
		/*
		 * A new timeout_id is set inside ibdm_retry_command().
		 * When the function returns an error, the timeout_id
		 * is reset (to zero) in the switch statement below.
		 */
		if (ibdm_retry_command(cb_args) == IBDM_SUCCESS) {
			mutex_exit(&gid_info->gl_mutex);
			return;
		}
		cb_args->cb_retry_count = 0;
	}

	IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: command failed: gid %p"
	    " rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info,
	    cb_args->cb_req_type, cb_args->cb_ioc_num,
	    cb_args->cb_srvents_start);

	/* Mark the failed request and clear its stored timeout id */
	switch (cb_args->cb_req_type) {

	case IBDM_REQ_TYPE_CLASSPORTINFO:
	case IBDM_REQ_TYPE_IOUINFO:
		new_gl_state = IBDM_GID_PROBING_FAILED;
		if (gid_info->gl_timeout_id)
			gid_info->gl_timeout_id = 0;
		break;

	case IBDM_REQ_TYPE_IOCINFO:
		new_gl_state = IBDM_GID_PROBING_COMPLETE;
		iou = gid_info->gl_iou;
		ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num];
		ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED;
		if (ioc->ioc_timeout_id)
			ioc->ioc_timeout_id = 0;
		break;

	case IBDM_REQ_TYPE_SRVENTS:
		new_gl_state = IBDM_GID_PROBING_COMPLETE;
		iou = gid_info->gl_iou;
		ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num];
		ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED;
		srv_ent = cb_args->cb_srvents_start;
		if (ioc->ioc_serv[srv_ent].se_timeout_id)
			ioc->ioc_serv[srv_ent].se_timeout_id = 0;
		break;

	case IBDM_REQ_TYPE_IOU_DIAGCODE:
		new_gl_state = IBDM_GID_PROBING_COMPLETE;
		iou = gid_info->gl_iou;
		iou->iou_dc_valid = B_FALSE;
		if (gid_info->gl_timeout_id)
			gid_info->gl_timeout_id = 0;
		break;

	case IBDM_REQ_TYPE_IOC_DIAGCODE:
		new_gl_state = IBDM_GID_PROBING_COMPLETE;
		iou = gid_info->gl_iou;
		ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num];
		ioc->ioc_dc_valid = B_FALSE;
		if (ioc->ioc_dc_timeout_id)
			ioc->ioc_dc_timeout_id = 0;
		break;

	default: /* ERROR State */
		new_gl_state = IBDM_GID_PROBING_FAILED;
		if (gid_info->gl_timeout_id)
			gid_info->gl_timeout_id = 0;
		IBTF_DPRINTF_L2("ibdm",
		    "\tpkt_timeout_hdlr: wrong request type.");
		break;
	}

	--gid_info->gl_pending_cmds; /* decrease the counter */

	if (gid_info->gl_pending_cmds == 0) {
		/* Last outstanding command for this GID: finish the probe */
		gid_info->gl_state = new_gl_state;
		mutex_exit(&gid_info->gl_mutex);
		/*
		 * Delete this gid_info if the gid probe fails.
		 */
		if (new_gl_state == IBDM_GID_PROBING_FAILED) {
			ibdm_delete_glhca_list(gid_info);
		}
		ibdm_notify_newgid_iocs(gid_info);
		mutex_enter(&ibdm.ibdm_mutex);
		if (--ibdm.ibdm_ngid_probes_in_progress == 0) {
			IBTF_DPRINTF_L4("ibdm", "\tpkt_timeout_hdlr: Wakeup");
			ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
			cv_broadcast(&ibdm.ibdm_probe_cv);
		}
		mutex_exit(&ibdm.ibdm_mutex);
	} else {
		/*
		 * Reset gl_pending_cmd if the extra timeout happens since
		 * gl_pending_cmd becomes negative as a result.
		 */
		if (gid_info->gl_pending_cmds < 0) {
			gid_info->gl_pending_cmds = 0;
			IBTF_DPRINTF_L2("ibdm",
			    "\tpkt_timeout_hdlr: extra timeout request."
			    " reset gl_pending_cmds");
		}
		mutex_exit(&gid_info->gl_mutex);
		/*
		 * Delete this gid_info if the gid probe fails.
		 */
		if (new_gl_state == IBDM_GID_PROBING_FAILED) {
			ibdm_delete_glhca_list(gid_info);
		}
	}
}


/*
 * ibdm_retry_command()
 *	Retries the failed command.
3979 * Returns IBDM_FAILURE/IBDM_SUCCESS 3980 */ 3981 static int 3982 ibdm_retry_command(ibdm_timeout_cb_args_t *cb_args) 3983 { 3984 int ret; 3985 ibmf_msg_t *msg; 3986 ib_mad_hdr_t *hdr; 3987 ibdm_dp_gidinfo_t *gid_info = cb_args->cb_gid_info; 3988 timeout_id_t *timeout_id; 3989 ibdm_ioc_info_t *ioc; 3990 int ioc_no; 3991 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 3992 3993 IBTF_DPRINTF_L2("ibdm", "\tretry_command: gid_info: %p " 3994 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3995 cb_args->cb_req_type, cb_args->cb_ioc_num, 3996 cb_args->cb_srvents_start); 3997 3998 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, &msg); 3999 4000 4001 /* 4002 * Reset the gid if alloc_msg failed with BAD_HANDLE 4003 * ibdm_reset_gidinfo reinits the gid_info 4004 */ 4005 if (ret == IBMF_BAD_HANDLE) { 4006 IBTF_DPRINTF_L3(ibdm_string, "\tretry_command: gid %p hdl bad", 4007 gid_info); 4008 4009 mutex_exit(&gid_info->gl_mutex); 4010 ibdm_reset_gidinfo(gid_info); 4011 mutex_enter(&gid_info->gl_mutex); 4012 4013 /* Retry alloc */ 4014 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, 4015 &msg); 4016 } 4017 4018 if (ret != IBDM_SUCCESS) { 4019 IBTF_DPRINTF_L2("ibdm", "\tretry_command: alloc failed: %p " 4020 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 4021 cb_args->cb_req_type, cb_args->cb_ioc_num, 4022 cb_args->cb_srvents_start); 4023 return (IBDM_FAILURE); 4024 } 4025 4026 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 4027 ibdm_alloc_send_buffers(msg); 4028 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 4029 4030 ibdm_bump_transactionID(gid_info); 4031 4032 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 4033 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 4034 if (gid_info->gl_redirected == B_TRUE) { 4035 if (gid_info->gl_redirect_dlid != 0) { 4036 msg->im_local_addr.ia_remote_lid = 4037 gid_info->gl_redirect_dlid; 4038 } 4039 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 4040 msg->im_local_addr.ia_p_key = 
gid_info->gl_redirect_pkey; 4041 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 4042 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 4043 } else { 4044 msg->im_local_addr.ia_remote_qno = 1; 4045 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 4046 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 4047 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 4048 } 4049 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 4050 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 4051 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 4052 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 4053 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 4054 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 4055 hdr->Status = 0; 4056 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 4057 4058 switch (cb_args->cb_req_type) { 4059 case IBDM_REQ_TYPE_CLASSPORTINFO: 4060 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 4061 hdr->AttributeModifier = 0; 4062 timeout_id = &gid_info->gl_timeout_id; 4063 break; 4064 case IBDM_REQ_TYPE_IOUINFO: 4065 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 4066 hdr->AttributeModifier = 0; 4067 timeout_id = &gid_info->gl_timeout_id; 4068 break; 4069 case IBDM_REQ_TYPE_IOCINFO: 4070 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 4071 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 4072 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 4073 timeout_id = &ioc->ioc_timeout_id; 4074 break; 4075 case IBDM_REQ_TYPE_SRVENTS: 4076 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 4077 ibdm_fill_srv_attr_mod(hdr, cb_args); 4078 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 4079 timeout_id = 4080 &ioc->ioc_serv[cb_args->cb_srvents_start].se_timeout_id; 4081 break; 4082 case IBDM_REQ_TYPE_IOU_DIAGCODE: 4083 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 4084 hdr->AttributeModifier = 0; 4085 timeout_id = &gid_info->gl_timeout_id; 4086 break; 4087 case IBDM_REQ_TYPE_IOC_DIAGCODE: 4088 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 4089 
hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 4090 ioc_no = cb_args->cb_ioc_num; 4091 ioc = &gid_info->gl_iou->iou_ioc_info[ioc_no]; 4092 timeout_id = &ioc->ioc_dc_timeout_id; 4093 break; 4094 } 4095 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*hdr)) 4096 4097 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 4098 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 4099 4100 mutex_exit(&gid_info->gl_mutex); 4101 4102 IBTF_DPRINTF_L5("ibdm", "\tretry_command: %p,%x,%d,%d:" 4103 "timeout %x", cb_args->cb_req_type, cb_args->cb_ioc_num, 4104 cb_args->cb_srvents_start, *timeout_id); 4105 4106 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, 4107 gid_info->gl_qp_hdl, msg, NULL, ibdm_ibmf_send_cb, 4108 cb_args, 0) != IBMF_SUCCESS) { 4109 IBTF_DPRINTF_L2("ibdm", "\tretry_command: send failed: %p " 4110 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 4111 cb_args->cb_req_type, cb_args->cb_ioc_num, 4112 cb_args->cb_srvents_start); 4113 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 4114 } 4115 mutex_enter(&gid_info->gl_mutex); 4116 return (IBDM_SUCCESS); 4117 } 4118 4119 4120 /* 4121 * ibdm_update_ioc_port_gidlist() 4122 */ 4123 static void 4124 ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *dest, 4125 ibdm_dp_gidinfo_t *gid_info) 4126 { 4127 int ii, ngid_ents; 4128 ibdm_gid_t *tmp; 4129 ibdm_hca_list_t *gid_hca_head, *temp; 4130 ibdm_hca_list_t *ioc_head = NULL; 4131 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 4132 4133 IBTF_DPRINTF_L5("ibdm", "\tupdate_ioc_port_gidlist: Enter"); 4134 4135 ngid_ents = gid_info->gl_ngids; 4136 dest->ioc_nportgids = ngid_ents; 4137 dest->ioc_gid_list = kmem_zalloc(sizeof (ibdm_gid_t) * 4138 ngid_ents, KM_SLEEP); 4139 tmp = gid_info->gl_gid; 4140 for (ii = 0; (ii < ngid_ents) && (tmp); ii++) { 4141 dest->ioc_gid_list[ii].gid_dgid_hi = tmp->gid_dgid_hi; 4142 dest->ioc_gid_list[ii].gid_dgid_lo = tmp->gid_dgid_lo; 4143 tmp = tmp->gid_next; 4144 } 4145 4146 gid_hca_head = gid_info->gl_hca_list; 4147 while (gid_hca_head) { 4148 temp = 
ibdm_dup_hca_attr(gid_hca_head); 4149 temp->hl_next = ioc_head; 4150 ioc_head = temp; 4151 gid_hca_head = gid_hca_head->hl_next; 4152 } 4153 dest->ioc_hca_list = ioc_head; 4154 } 4155 4156 4157 /* 4158 * ibdm_alloc_send_buffers() 4159 * Allocates memory for the IBMF send buffer to send and/or receive 4160 * the Device Management MAD packet. 4161 */ 4162 static void 4163 ibdm_alloc_send_buffers(ibmf_msg_t *msgp) 4164 { 4165 msgp->im_msgbufs_send.im_bufs_mad_hdr = 4166 kmem_zalloc(IBDM_MAD_SIZE, KM_SLEEP); 4167 4168 msgp->im_msgbufs_send.im_bufs_cl_hdr = (uchar_t *) 4169 msgp->im_msgbufs_send.im_bufs_mad_hdr + sizeof (ib_mad_hdr_t); 4170 msgp->im_msgbufs_send.im_bufs_cl_hdr_len = IBDM_DM_MAD_HDR_SZ; 4171 4172 msgp->im_msgbufs_send.im_bufs_cl_data = 4173 ((char *)msgp->im_msgbufs_send.im_bufs_cl_hdr + IBDM_DM_MAD_HDR_SZ); 4174 msgp->im_msgbufs_send.im_bufs_cl_data_len = 4175 IBDM_MAD_SIZE - sizeof (ib_mad_hdr_t) - IBDM_DM_MAD_HDR_SZ; 4176 } 4177 4178 4179 /* 4180 * ibdm_alloc_send_buffers() 4181 * De-allocates memory for the IBMF send buffer 4182 */ 4183 static void 4184 ibdm_free_send_buffers(ibmf_msg_t *msgp) 4185 { 4186 if (msgp->im_msgbufs_send.im_bufs_mad_hdr != NULL) 4187 kmem_free(msgp->im_msgbufs_send.im_bufs_mad_hdr, IBDM_MAD_SIZE); 4188 } 4189 4190 /* 4191 * ibdm_probe_ioc() 4192 * 1. Gets the node records for the port GUID. This detects all the port 4193 * to the IOU. 4194 * 2. Selectively probes all the IOC, given it's node GUID 4195 * 3. 
In case of reprobe, only the IOC to be reprobed is send the IOC 4196 * Controller Profile asynchronously 4197 */ 4198 /*ARGSUSED*/ 4199 static void 4200 ibdm_probe_ioc(ib_guid_t nodeguid, ib_guid_t ioc_guid, int reprobe_flag) 4201 { 4202 int ii, nrecords; 4203 size_t nr_len = 0, pi_len = 0; 4204 ib_gid_t sgid, dgid; 4205 ibdm_hca_list_t *hca_list = NULL; 4206 sa_node_record_t *nr, *tmp; 4207 ibdm_port_attr_t *port = NULL; 4208 ibdm_dp_gidinfo_t *reprobe_gid, *new_gid, *node_gid; 4209 ibdm_dp_gidinfo_t *temp_gidinfo; 4210 ibdm_gid_t *temp_gid; 4211 sa_portinfo_record_t *pi; 4212 4213 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc(%llx, %llx, %x): Begin", 4214 nodeguid, ioc_guid, reprobe_flag); 4215 4216 /* Rescan the GID list for any removed GIDs for reprobe */ 4217 if (reprobe_flag) 4218 ibdm_rescan_gidlist(&ioc_guid); 4219 4220 mutex_enter(&ibdm.ibdm_hl_mutex); 4221 for (ibdm_get_next_port(&hca_list, &port, 1); port; 4222 ibdm_get_next_port(&hca_list, &port, 1)) { 4223 reprobe_gid = new_gid = node_gid = NULL; 4224 4225 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, nodeguid); 4226 if (nr == NULL) { 4227 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc: no records"); 4228 continue; 4229 } 4230 nrecords = (nr_len / sizeof (sa_node_record_t)); 4231 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) { 4232 if ((pi = ibdm_get_portinfo( 4233 port->pa_sa_hdl, &pi_len, tmp->LID)) == NULL) { 4234 IBTF_DPRINTF_L4("ibdm", 4235 "\tibdm_get_portinfo: no portinfo recs"); 4236 continue; 4237 } 4238 4239 /* 4240 * If Device Management is not supported on 4241 * this port, skip the rest. 4242 */ 4243 if (!(pi->PortInfo.CapabilityMask & 4244 SM_CAP_MASK_IS_DM_SUPPD)) { 4245 kmem_free(pi, pi_len); 4246 continue; 4247 } 4248 4249 /* 4250 * For reprobes: Check if GID, already in 4251 * the list. 
If so, set the state to SKIPPED 4252 */ 4253 if (((temp_gidinfo = ibdm_find_gid(nodeguid, 4254 tmp->NodeInfo.PortGUID)) != NULL) && 4255 temp_gidinfo->gl_state == 4256 IBDM_GID_PROBING_COMPLETE) { 4257 ASSERT(reprobe_gid == NULL); 4258 ibdm_addto_glhcalist(temp_gidinfo, 4259 hca_list); 4260 reprobe_gid = temp_gidinfo; 4261 kmem_free(pi, pi_len); 4262 continue; 4263 } else if (temp_gidinfo != NULL) { 4264 kmem_free(pi, pi_len); 4265 ibdm_addto_glhcalist(temp_gidinfo, 4266 hca_list); 4267 continue; 4268 } 4269 4270 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : " 4271 "create_gid : prefix %llx, guid %llx\n", 4272 pi->PortInfo.GidPrefix, 4273 tmp->NodeInfo.PortGUID); 4274 4275 sgid.gid_prefix = port->pa_sn_prefix; 4276 sgid.gid_guid = port->pa_port_guid; 4277 dgid.gid_prefix = pi->PortInfo.GidPrefix; 4278 dgid.gid_guid = tmp->NodeInfo.PortGUID; 4279 new_gid = ibdm_create_gid_info(port, sgid, 4280 dgid); 4281 if (new_gid == NULL) { 4282 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4283 "create_gid_info failed\n"); 4284 kmem_free(pi, pi_len); 4285 continue; 4286 } 4287 if (node_gid == NULL) { 4288 node_gid = new_gid; 4289 ibdm_add_to_gl_gid(node_gid, node_gid); 4290 } else { 4291 IBTF_DPRINTF_L4("ibdm", 4292 "\tprobe_ioc: new gid"); 4293 temp_gid = kmem_zalloc( 4294 sizeof (ibdm_gid_t), KM_SLEEP); 4295 temp_gid->gid_dgid_hi = 4296 new_gid->gl_dgid_hi; 4297 temp_gid->gid_dgid_lo = 4298 new_gid->gl_dgid_lo; 4299 temp_gid->gid_next = node_gid->gl_gid; 4300 node_gid->gl_gid = temp_gid; 4301 node_gid->gl_ngids++; 4302 } 4303 new_gid->gl_is_dm_capable = B_TRUE; 4304 new_gid->gl_nodeguid = nodeguid; 4305 new_gid->gl_portguid = dgid.gid_guid; 4306 ibdm_addto_glhcalist(new_gid, hca_list); 4307 4308 /* 4309 * Set the state to skipped as all these 4310 * gids point to the same node. 
4311 * We (re)probe only one GID below and reset 4312 * state appropriately 4313 */ 4314 new_gid->gl_state = IBDM_GID_PROBING_SKIPPED; 4315 new_gid->gl_devid = (*tmp).NodeInfo.DeviceID; 4316 kmem_free(pi, pi_len); 4317 } 4318 kmem_free(nr, nr_len); 4319 4320 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : reprobe_flag %d " 4321 "reprobe_gid %p new_gid %p node_gid %p", 4322 reprobe_flag, reprobe_gid, new_gid, node_gid); 4323 4324 if (reprobe_flag != 0 && reprobe_gid != NULL) { 4325 int niocs, jj; 4326 ibdm_ioc_info_t *tmp_ioc; 4327 int ioc_matched = 0; 4328 4329 mutex_exit(&ibdm.ibdm_hl_mutex); 4330 mutex_enter(&reprobe_gid->gl_mutex); 4331 reprobe_gid->gl_state = IBDM_GET_IOC_DETAILS; 4332 niocs = 4333 reprobe_gid->gl_iou->iou_info.iou_num_ctrl_slots; 4334 reprobe_gid->gl_pending_cmds++; 4335 mutex_exit(&reprobe_gid->gl_mutex); 4336 4337 for (jj = 0; jj < niocs; jj++) { 4338 tmp_ioc = 4339 IBDM_GIDINFO2IOCINFO(reprobe_gid, jj); 4340 if (tmp_ioc->ioc_profile.ioc_guid != ioc_guid) 4341 continue; 4342 4343 ioc_matched = 1; 4344 4345 /* 4346 * Explicitly set gl_reprobe_flag to 0 so that 4347 * IBnex is not notified on completion 4348 */ 4349 mutex_enter(&reprobe_gid->gl_mutex); 4350 reprobe_gid->gl_reprobe_flag = 0; 4351 mutex_exit(&reprobe_gid->gl_mutex); 4352 4353 mutex_enter(&ibdm.ibdm_mutex); 4354 ibdm.ibdm_ngid_probes_in_progress++; 4355 mutex_exit(&ibdm.ibdm_mutex); 4356 if (ibdm_send_ioc_profile(reprobe_gid, jj) != 4357 IBDM_SUCCESS) { 4358 IBTF_DPRINTF_L4("ibdm", 4359 "\tprobe_ioc: " 4360 "send_ioc_profile failed " 4361 "for ioc %d", jj); 4362 ibdm_gid_decr_pending(reprobe_gid); 4363 break; 4364 } 4365 mutex_enter(&ibdm.ibdm_mutex); 4366 ibdm_wait_probe_completion(); 4367 mutex_exit(&ibdm.ibdm_mutex); 4368 break; 4369 } 4370 if (ioc_matched == 0) 4371 ibdm_gid_decr_pending(reprobe_gid); 4372 else { 4373 mutex_enter(&ibdm.ibdm_hl_mutex); 4374 break; 4375 } 4376 } else if (new_gid != NULL) { 4377 mutex_exit(&ibdm.ibdm_hl_mutex); 4378 node_gid = node_gid ? 
node_gid : new_gid; 4379 4380 /* 4381 * New or reinserted GID : Enable notification 4382 * to IBnex 4383 */ 4384 mutex_enter(&node_gid->gl_mutex); 4385 node_gid->gl_reprobe_flag = 1; 4386 mutex_exit(&node_gid->gl_mutex); 4387 4388 ibdm_probe_gid(node_gid); 4389 4390 mutex_enter(&ibdm.ibdm_hl_mutex); 4391 } 4392 } 4393 mutex_exit(&ibdm.ibdm_hl_mutex); 4394 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : End\n"); 4395 } 4396 4397 4398 /* 4399 * ibdm_probe_gid() 4400 * Selectively probes the GID 4401 */ 4402 static void 4403 ibdm_probe_gid(ibdm_dp_gidinfo_t *gid_info) 4404 { 4405 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid:"); 4406 4407 /* 4408 * A Cisco FC GW needs the special handling to get IOUnitInfo. 4409 */ 4410 mutex_enter(&gid_info->gl_mutex); 4411 if (ibdm_is_cisco_switch(gid_info)) { 4412 gid_info->gl_pending_cmds++; 4413 gid_info->gl_state = IBDM_SET_CLASSPORTINFO; 4414 mutex_exit(&gid_info->gl_mutex); 4415 4416 if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) { 4417 4418 mutex_enter(&gid_info->gl_mutex); 4419 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 4420 --gid_info->gl_pending_cmds; 4421 mutex_exit(&gid_info->gl_mutex); 4422 4423 /* free the hca_list on this gid_info */ 4424 ibdm_delete_glhca_list(gid_info); 4425 gid_info = gid_info->gl_next; 4426 return; 4427 } 4428 4429 mutex_enter(&gid_info->gl_mutex); 4430 ibdm_wait_cisco_probe_completion(gid_info); 4431 4432 IBTF_DPRINTF_L4("ibdm", 4433 "\tprobe_gid: CISCO Wakeup signal received"); 4434 } 4435 4436 /* move on to the 'GET_CLASSPORTINFO' stage */ 4437 gid_info->gl_pending_cmds++; 4438 gid_info->gl_state = IBDM_GET_CLASSPORTINFO; 4439 mutex_exit(&gid_info->gl_mutex); 4440 4441 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) { 4442 4443 mutex_enter(&gid_info->gl_mutex); 4444 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 4445 --gid_info->gl_pending_cmds; 4446 mutex_exit(&gid_info->gl_mutex); 4447 4448 /* free the hca_list on this gid_info */ 4449 ibdm_delete_glhca_list(gid_info); 4450 gid_info = 
gid_info->gl_next; 4451 return; 4452 } 4453 4454 mutex_enter(&ibdm.ibdm_mutex); 4455 ibdm.ibdm_ngid_probes_in_progress++; 4456 gid_info = gid_info->gl_next; 4457 ibdm_wait_probe_completion(); 4458 mutex_exit(&ibdm.ibdm_mutex); 4459 4460 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid: Wakeup signal received"); 4461 } 4462 4463 4464 /* 4465 * ibdm_create_gid_info() 4466 * Allocates a gid_info structure and initializes 4467 * Returns pointer to the structure on success 4468 * and NULL on failure 4469 */ 4470 static ibdm_dp_gidinfo_t * 4471 ibdm_create_gid_info(ibdm_port_attr_t *port, ib_gid_t sgid, ib_gid_t dgid) 4472 { 4473 uint8_t ii, npaths; 4474 sa_path_record_t *path; 4475 size_t len; 4476 ibdm_pkey_tbl_t *pkey_tbl; 4477 ibdm_dp_gidinfo_t *gid_info = NULL; 4478 int ret; 4479 4480 IBTF_DPRINTF_L4("ibdm", "\tcreate_gid_info: Begin"); 4481 npaths = 1; 4482 4483 /* query for reversible paths */ 4484 if (port->pa_sa_hdl) 4485 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, 4486 sgid, dgid, IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, 4487 &len, &path); 4488 else 4489 return (NULL); 4490 4491 if (ret == IBMF_SUCCESS && path) { 4492 ibdm_dump_path_info(path); 4493 4494 gid_info = kmem_zalloc( 4495 sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 4496 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 4497 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, NULL); 4498 gid_info->gl_dgid_hi = path->DGID.gid_prefix; 4499 gid_info->gl_dgid_lo = path->DGID.gid_guid; 4500 gid_info->gl_sgid_hi = path->SGID.gid_prefix; 4501 gid_info->gl_sgid_lo = path->SGID.gid_guid; 4502 gid_info->gl_p_key = path->P_Key; 4503 gid_info->gl_sa_hdl = port->pa_sa_hdl; 4504 gid_info->gl_ibmf_hdl = port->pa_ibmf_hdl; 4505 gid_info->gl_slid = path->SLID; 4506 gid_info->gl_dlid = path->DLID; 4507 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID) 4508 << IBDM_GID_TRANSACTIONID_SHIFT; 4509 gid_info->gl_min_transactionID = gid_info->gl_transactionID; 4510 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID 
+1) 4511 << IBDM_GID_TRANSACTIONID_SHIFT; 4512 gid_info->gl_SL = path->SL; 4513 4514 gid_info->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 4515 for (ii = 0; ii < port->pa_npkeys; ii++) { 4516 if (port->pa_pkey_tbl == NULL) 4517 break; 4518 4519 pkey_tbl = &port->pa_pkey_tbl[ii]; 4520 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 4521 (pkey_tbl->pt_qp_hdl != NULL)) { 4522 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 4523 break; 4524 } 4525 } 4526 kmem_free(path, len); 4527 4528 /* 4529 * QP handle for GID not initialized. No matching Pkey 4530 * was found!! ibdm should *not* hit this case. Flag an 4531 * error and drop the GID if ibdm does encounter this. 4532 */ 4533 if (gid_info->gl_qp_hdl == NULL) { 4534 IBTF_DPRINTF_L2(ibdm_string, 4535 "\tcreate_gid_info: No matching Pkey"); 4536 ibdm_delete_gidinfo(gid_info); 4537 return (NULL); 4538 } 4539 4540 ibdm.ibdm_ngids++; 4541 if (ibdm.ibdm_dp_gidlist_head == NULL) { 4542 ibdm.ibdm_dp_gidlist_head = gid_info; 4543 ibdm.ibdm_dp_gidlist_tail = gid_info; 4544 } else { 4545 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 4546 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 4547 ibdm.ibdm_dp_gidlist_tail = gid_info; 4548 } 4549 } 4550 4551 return (gid_info); 4552 } 4553 4554 4555 /* 4556 * ibdm_get_node_records 4557 * Sends a SA query to get the NODE record 4558 * Returns pointer to the sa_node_record_t on success 4559 * and NULL on failure 4560 */ 4561 static sa_node_record_t * 4562 ibdm_get_node_records(ibmf_saa_handle_t sa_hdl, size_t *length, ib_guid_t guid) 4563 { 4564 sa_node_record_t req, *resp = NULL; 4565 ibmf_saa_access_args_t args; 4566 int ret; 4567 4568 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: Begin"); 4569 4570 bzero(&req, sizeof (sa_node_record_t)); 4571 req.NodeInfo.NodeGUID = guid; 4572 4573 args.sq_attr_id = SA_NODERECORD_ATTRID; 4574 args.sq_access_type = IBMF_SAA_RETRIEVE; 4575 args.sq_component_mask = SA_NODEINFO_COMPMASK_NODEGUID; 4576 args.sq_template = &req; 4577 args.sq_callback = NULL; 4578 
args.sq_callback_arg = NULL; 4579 4580 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 4581 if (ret != IBMF_SUCCESS) { 4582 IBTF_DPRINTF_L2("ibdm", "\tget_node_records:" 4583 " SA Retrieve Failed: %d", ret); 4584 return (NULL); 4585 } 4586 if ((resp == NULL) || (*length == 0)) { 4587 IBTF_DPRINTF_L2("ibdm", "\tget_node_records: No records"); 4588 return (NULL); 4589 } 4590 4591 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: NodeGuid %llx " 4592 "PortGUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID); 4593 4594 return (resp); 4595 } 4596 4597 4598 /* 4599 * ibdm_get_portinfo() 4600 * Sends a SA query to get the PortInfo record 4601 * Returns pointer to the sa_portinfo_record_t on success 4602 * and NULL on failure 4603 */ 4604 static sa_portinfo_record_t * 4605 ibdm_get_portinfo(ibmf_saa_handle_t sa_hdl, size_t *length, ib_lid_t lid) 4606 { 4607 sa_portinfo_record_t req, *resp = NULL; 4608 ibmf_saa_access_args_t args; 4609 int ret; 4610 4611 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: Begin"); 4612 4613 bzero(&req, sizeof (sa_portinfo_record_t)); 4614 req.EndportLID = lid; 4615 4616 args.sq_attr_id = SA_PORTINFORECORD_ATTRID; 4617 args.sq_access_type = IBMF_SAA_RETRIEVE; 4618 args.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID; 4619 args.sq_template = &req; 4620 args.sq_callback = NULL; 4621 args.sq_callback_arg = NULL; 4622 4623 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 4624 if (ret != IBMF_SUCCESS) { 4625 IBTF_DPRINTF_L2("ibdm", "\tget_portinfo:" 4626 " SA Retrieve Failed: 0x%X", ret); 4627 return (NULL); 4628 } 4629 if ((*length == 0) || (resp == NULL)) 4630 return (NULL); 4631 4632 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: GidPrefix %llx Cap 0x%x", 4633 resp->PortInfo.GidPrefix, resp->PortInfo.CapabilityMask); 4634 return (resp); 4635 } 4636 4637 4638 /* 4639 * ibdm_ibnex_register_callback 4640 * IB nexus callback routine for HCA attach and detach notification 4641 */ 4642 void 4643 
ibdm_ibnex_register_callback(ibdm_callback_t ibnex_dm_callback) 4644 { 4645 IBTF_DPRINTF_L4("ibdm", "\tibnex_register_callbacks"); 4646 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4647 ibdm.ibdm_ibnex_callback = ibnex_dm_callback; 4648 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4649 } 4650 4651 4652 /* 4653 * ibdm_ibnex_unregister_callbacks 4654 */ 4655 void 4656 ibdm_ibnex_unregister_callback() 4657 { 4658 IBTF_DPRINTF_L4("ibdm", "\tibnex_unregister_callbacks"); 4659 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4660 ibdm.ibdm_ibnex_callback = NULL; 4661 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4662 } 4663 4664 /* 4665 * ibdm_get_waittime() 4666 * Calculates the wait time based on the last HCA attach time 4667 */ 4668 static time_t 4669 ibdm_get_waittime(ib_guid_t hca_guid, int dft_wait) 4670 { 4671 int ii; 4672 time_t temp, wait_time = 0; 4673 ibdm_hca_list_t *hca; 4674 4675 IBTF_DPRINTF_L4("ibdm", "\tget_waittime hcaguid:%llx" 4676 "\tport settling time %d", hca_guid, dft_wait); 4677 4678 ASSERT(mutex_owned(&ibdm.ibdm_hl_mutex)); 4679 4680 hca = ibdm.ibdm_hca_list_head; 4681 4682 if (hca_guid) { 4683 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4684 if ((hca_guid == hca->hl_hca_guid) && 4685 (hca->hl_nports != hca->hl_nports_active)) { 4686 wait_time = 4687 ddi_get_time() - hca->hl_attach_time; 4688 wait_time = ((wait_time >= dft_wait) ? 4689 0 : (dft_wait - wait_time)); 4690 break; 4691 } 4692 hca = hca->hl_next; 4693 } 4694 IBTF_DPRINTF_L4("ibdm", "\tget_waittime %llx", wait_time); 4695 return (wait_time); 4696 } 4697 4698 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4699 if (hca->hl_nports != hca->hl_nports_active) { 4700 temp = ddi_get_time() - hca->hl_attach_time; 4701 temp = ((temp >= dft_wait) ? 0 : (dft_wait - temp)); 4702 wait_time = (temp > wait_time) ? 
temp : wait_time; 4703 } 4704 } 4705 IBTF_DPRINTF_L4("ibdm", "\tget_waittime %llx", wait_time); 4706 return (wait_time); 4707 } 4708 4709 void 4710 ibdm_ibnex_port_settle_wait(ib_guid_t hca_guid, int dft_wait) 4711 { 4712 time_t wait_time; 4713 clock_t delta; 4714 4715 mutex_enter(&ibdm.ibdm_hl_mutex); 4716 4717 while ((wait_time = ibdm_get_waittime(hca_guid, dft_wait)) > 0) { 4718 delta = drv_usectohz(wait_time * 1000000); 4719 (void) cv_reltimedwait(&ibdm.ibdm_port_settle_cv, 4720 &ibdm.ibdm_hl_mutex, delta, TR_CLOCK_TICK); 4721 } 4722 4723 mutex_exit(&ibdm.ibdm_hl_mutex); 4724 } 4725 4726 4727 /* 4728 * ibdm_ibnex_probe_hcaport 4729 * Probes the presence of HCA port (with HCA dip and port number) 4730 * Returns port attributes structure on SUCCESS 4731 */ 4732 ibdm_port_attr_t * 4733 ibdm_ibnex_probe_hcaport(ib_guid_t hca_guid, uint8_t port_num) 4734 { 4735 int ii, jj; 4736 ibdm_hca_list_t *hca_list; 4737 ibdm_port_attr_t *port_attr; 4738 4739 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_hcaport:"); 4740 4741 mutex_enter(&ibdm.ibdm_hl_mutex); 4742 hca_list = ibdm.ibdm_hca_list_head; 4743 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4744 if (hca_list->hl_hca_guid == hca_guid) { 4745 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4746 if (hca_list->hl_port_attr[jj].pa_port_num == 4747 port_num) { 4748 break; 4749 } 4750 } 4751 if (jj != hca_list->hl_nports) 4752 break; 4753 } 4754 hca_list = hca_list->hl_next; 4755 } 4756 if (ii == ibdm.ibdm_hca_count) { 4757 IBTF_DPRINTF_L2("ibdm", "\tibnex_probe_hcaport: not found"); 4758 mutex_exit(&ibdm.ibdm_hl_mutex); 4759 return (NULL); 4760 } 4761 port_attr = (ibdm_port_attr_t *)kmem_zalloc( 4762 sizeof (ibdm_port_attr_t), KM_SLEEP); 4763 bcopy((char *)&hca_list->hl_port_attr[jj], 4764 port_attr, sizeof (ibdm_port_attr_t)); 4765 ibdm_update_port_attr(port_attr); 4766 4767 mutex_exit(&ibdm.ibdm_hl_mutex); 4768 return (port_attr); 4769 } 4770 4771 4772 /* 4773 * ibdm_ibnex_get_port_attrs 4774 * Scan all HCAs for a matching 
port_guid. 4775 * Returns "port attributes" structure on success. 4776 */ 4777 ibdm_port_attr_t * 4778 ibdm_ibnex_get_port_attrs(ib_guid_t port_guid) 4779 { 4780 int ii, jj; 4781 ibdm_hca_list_t *hca_list; 4782 ibdm_port_attr_t *port_attr; 4783 4784 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_port_attrs:"); 4785 4786 mutex_enter(&ibdm.ibdm_hl_mutex); 4787 hca_list = ibdm.ibdm_hca_list_head; 4788 4789 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4790 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4791 if (hca_list->hl_port_attr[jj].pa_port_guid == 4792 port_guid) { 4793 break; 4794 } 4795 } 4796 if (jj != hca_list->hl_nports) 4797 break; 4798 hca_list = hca_list->hl_next; 4799 } 4800 4801 if (ii == ibdm.ibdm_hca_count) { 4802 IBTF_DPRINTF_L2("ibdm", "\tibnex_get_port_attrs: not found"); 4803 mutex_exit(&ibdm.ibdm_hl_mutex); 4804 return (NULL); 4805 } 4806 4807 port_attr = (ibdm_port_attr_t *)kmem_alloc(sizeof (ibdm_port_attr_t), 4808 KM_SLEEP); 4809 bcopy((char *)&hca_list->hl_port_attr[jj], port_attr, 4810 sizeof (ibdm_port_attr_t)); 4811 ibdm_update_port_attr(port_attr); 4812 4813 mutex_exit(&ibdm.ibdm_hl_mutex); 4814 return (port_attr); 4815 } 4816 4817 4818 /* 4819 * ibdm_ibnex_free_port_attr() 4820 */ 4821 void 4822 ibdm_ibnex_free_port_attr(ibdm_port_attr_t *port_attr) 4823 { 4824 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_port_attr:"); 4825 if (port_attr) { 4826 if (port_attr->pa_pkey_tbl != NULL) { 4827 kmem_free(port_attr->pa_pkey_tbl, 4828 (port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t))); 4829 } 4830 kmem_free(port_attr, sizeof (ibdm_port_attr_t)); 4831 } 4832 } 4833 4834 4835 /* 4836 * ibdm_ibnex_get_hca_list() 4837 * Returns portinfo for all the port for all the HCA's 4838 */ 4839 void 4840 ibdm_ibnex_get_hca_list(ibdm_hca_list_t **hca, int *count) 4841 { 4842 ibdm_hca_list_t *head = NULL, *temp, *temp1; 4843 int ii; 4844 4845 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_list:"); 4846 4847 mutex_enter(&ibdm.ibdm_hl_mutex); 4848 temp = ibdm.ibdm_hca_list_head; 4849 
for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4850 temp1 = ibdm_dup_hca_attr(temp); 4851 temp1->hl_next = head; 4852 head = temp1; 4853 temp = temp->hl_next; 4854 } 4855 *count = ibdm.ibdm_hca_count; 4856 *hca = head; 4857 mutex_exit(&ibdm.ibdm_hl_mutex); 4858 } 4859 4860 4861 /* 4862 * ibdm_ibnex_get_hca_info_by_guid() 4863 */ 4864 ibdm_hca_list_t * 4865 ibdm_ibnex_get_hca_info_by_guid(ib_guid_t hca_guid) 4866 { 4867 ibdm_hca_list_t *head = NULL, *hca = NULL; 4868 4869 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip"); 4870 4871 mutex_enter(&ibdm.ibdm_hl_mutex); 4872 head = ibdm.ibdm_hca_list_head; 4873 while (head) { 4874 if (head->hl_hca_guid == hca_guid) { 4875 hca = ibdm_dup_hca_attr(head); 4876 hca->hl_next = NULL; 4877 break; 4878 } 4879 head = head->hl_next; 4880 } 4881 mutex_exit(&ibdm.ibdm_hl_mutex); 4882 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip %p", hca); 4883 return (hca); 4884 } 4885 4886 4887 /* 4888 * ibdm_dup_hca_attr() 4889 * Allocate a new HCA attribute strucuture and initialize 4890 * hca attribute structure with the incoming HCA attributes 4891 * returned the allocated hca attributes. 
4892 */ 4893 static ibdm_hca_list_t * 4894 ibdm_dup_hca_attr(ibdm_hca_list_t *in_hca) 4895 { 4896 int len; 4897 ibdm_hca_list_t *out_hca; 4898 4899 len = sizeof (ibdm_hca_list_t) + 4900 (in_hca->hl_nports * sizeof (ibdm_port_attr_t)); 4901 IBTF_DPRINTF_L4("ibdm", "\tdup_hca_attr len %d", len); 4902 out_hca = (ibdm_hca_list_t *)kmem_alloc(len, KM_SLEEP); 4903 bcopy((char *)in_hca, 4904 (char *)out_hca, sizeof (ibdm_hca_list_t)); 4905 if (in_hca->hl_nports) { 4906 out_hca->hl_port_attr = (ibdm_port_attr_t *) 4907 ((char *)out_hca + sizeof (ibdm_hca_list_t)); 4908 bcopy((char *)in_hca->hl_port_attr, 4909 (char *)out_hca->hl_port_attr, 4910 (in_hca->hl_nports * sizeof (ibdm_port_attr_t))); 4911 for (len = 0; len < out_hca->hl_nports; len++) 4912 ibdm_update_port_attr(&out_hca->hl_port_attr[len]); 4913 } 4914 return (out_hca); 4915 } 4916 4917 4918 /* 4919 * ibdm_ibnex_free_hca_list() 4920 * Free one/more HCA lists 4921 */ 4922 void 4923 ibdm_ibnex_free_hca_list(ibdm_hca_list_t *hca_list) 4924 { 4925 int ii; 4926 size_t len; 4927 ibdm_hca_list_t *temp; 4928 ibdm_port_attr_t *port; 4929 4930 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_hca_list:"); 4931 ASSERT(hca_list); 4932 while (hca_list) { 4933 temp = hca_list; 4934 hca_list = hca_list->hl_next; 4935 for (ii = 0; ii < temp->hl_nports; ii++) { 4936 port = &temp->hl_port_attr[ii]; 4937 len = (port->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 4938 if (len != 0) 4939 kmem_free(port->pa_pkey_tbl, len); 4940 } 4941 len = sizeof (ibdm_hca_list_t) + (temp->hl_nports * 4942 sizeof (ibdm_port_attr_t)); 4943 kmem_free(temp, len); 4944 } 4945 } 4946 4947 4948 /* 4949 * ibdm_ibnex_probe_iocguid() 4950 * Probes the IOC on the fabric and returns the IOC information 4951 * if present. 
Otherwise, NULL is returned 4952 */ 4953 /* ARGSUSED */ 4954 ibdm_ioc_info_t * 4955 ibdm_ibnex_probe_ioc(ib_guid_t iou, ib_guid_t ioc_guid, int reprobe_flag) 4956 { 4957 int k; 4958 ibdm_ioc_info_t *ioc_info; 4959 ibdm_dp_gidinfo_t *gid_info; /* used as index and arg */ 4960 timeout_id_t *timeout_id; 4961 4962 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_ioc: (%llX, %llX, %d) Begin", 4963 iou, ioc_guid, reprobe_flag); 4964 /* Check whether we know this already */ 4965 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 4966 if (ioc_info == NULL) { 4967 mutex_enter(&ibdm.ibdm_mutex); 4968 while (ibdm.ibdm_busy & IBDM_BUSY) 4969 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4970 ibdm.ibdm_busy |= IBDM_BUSY; 4971 mutex_exit(&ibdm.ibdm_mutex); 4972 ibdm_probe_ioc(iou, ioc_guid, 0); 4973 mutex_enter(&ibdm.ibdm_mutex); 4974 ibdm.ibdm_busy &= ~IBDM_BUSY; 4975 cv_broadcast(&ibdm.ibdm_busy_cv); 4976 mutex_exit(&ibdm.ibdm_mutex); 4977 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 4978 } else if (reprobe_flag) { /* Handle Reprobe for the IOC */ 4979 ASSERT(gid_info != NULL); 4980 /* Free the ioc_list before reprobe; and cancel any timers */ 4981 mutex_enter(&ibdm.ibdm_mutex); 4982 mutex_enter(&gid_info->gl_mutex); 4983 if (ioc_info->ioc_timeout_id) { 4984 timeout_id = ioc_info->ioc_timeout_id; 4985 ioc_info->ioc_timeout_id = 0; 4986 mutex_exit(&gid_info->gl_mutex); 4987 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4988 "ioc_timeout_id = 0x%x", timeout_id); 4989 if (untimeout(timeout_id) == -1) { 4990 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4991 "untimeout ioc_timeout_id failed"); 4992 } 4993 mutex_enter(&gid_info->gl_mutex); 4994 } 4995 if (ioc_info->ioc_dc_timeout_id) { 4996 timeout_id = ioc_info->ioc_dc_timeout_id; 4997 ioc_info->ioc_dc_timeout_id = 0; 4998 mutex_exit(&gid_info->gl_mutex); 4999 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 5000 "ioc_dc_timeout_id = 0x%x", timeout_id); 5001 if (untimeout(timeout_id) == -1) { 5002 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 
5003 "untimeout ioc_dc_timeout_id failed"); 5004 } 5005 mutex_enter(&gid_info->gl_mutex); 5006 } 5007 for (k = 0; k < ioc_info->ioc_profile.ioc_service_entries; k++) 5008 if (ioc_info->ioc_serv[k].se_timeout_id) { 5009 timeout_id = ioc_info->ioc_serv[k]. 5010 se_timeout_id; 5011 ioc_info->ioc_serv[k].se_timeout_id = 0; 5012 mutex_exit(&gid_info->gl_mutex); 5013 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 5014 "ioc_info->ioc_serv[k].se_timeout_id = %x", 5015 k, timeout_id); 5016 if (untimeout(timeout_id) == -1) { 5017 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 5018 "untimeout se_timeout_id %d " 5019 "failed", k); 5020 } 5021 mutex_enter(&gid_info->gl_mutex); 5022 } 5023 mutex_exit(&gid_info->gl_mutex); 5024 mutex_exit(&ibdm.ibdm_mutex); 5025 ibdm_ibnex_free_ioc_list(ioc_info); 5026 5027 mutex_enter(&ibdm.ibdm_mutex); 5028 while (ibdm.ibdm_busy & IBDM_BUSY) 5029 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5030 ibdm.ibdm_busy |= IBDM_BUSY; 5031 mutex_exit(&ibdm.ibdm_mutex); 5032 5033 ibdm_probe_ioc(iou, ioc_guid, 1); 5034 5035 /* 5036 * Skip if gl_reprobe_flag is set, this will be 5037 * a re-inserted / new GID, for which notifications 5038 * have already been send. 
5039 */ 5040 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 5041 gid_info = gid_info->gl_next) { 5042 uint8_t ii, niocs; 5043 ibdm_ioc_info_t *ioc; 5044 5045 if (gid_info->gl_iou == NULL) 5046 continue; 5047 5048 if (gid_info->gl_reprobe_flag) { 5049 gid_info->gl_reprobe_flag = 0; 5050 continue; 5051 } 5052 5053 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 5054 for (ii = 0; ii < niocs; ii++) { 5055 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 5056 if (ioc->ioc_profile.ioc_guid == ioc_guid) { 5057 mutex_enter(&ibdm.ibdm_mutex); 5058 ibdm_reprobe_update_port_srv(ioc, 5059 gid_info); 5060 mutex_exit(&ibdm.ibdm_mutex); 5061 } 5062 } 5063 } 5064 mutex_enter(&ibdm.ibdm_mutex); 5065 ibdm.ibdm_busy &= ~IBDM_BUSY; 5066 cv_broadcast(&ibdm.ibdm_busy_cv); 5067 mutex_exit(&ibdm.ibdm_mutex); 5068 5069 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 5070 } 5071 return (ioc_info); 5072 } 5073 5074 5075 /* 5076 * ibdm_get_ioc_info_with_gid() 5077 * Returns pointer to ibdm_ioc_info_t if it finds 5078 * matching record for the ioc_guid. Otherwise NULL is returned. 5079 * The pointer to gid_info is set to the second argument in case that 5080 * the non-NULL value returns (and the second argument is not NULL). 5081 * 5082 * Note. use the same strings as "ibnex_get_ioc_info" in 5083 * IBTF_DPRINTF() to keep compatibility. 
 */
static ibdm_ioc_info_t *
ibdm_get_ioc_info_with_gid(ib_guid_t ioc_guid,
    ibdm_dp_gidinfo_t **gid_info)
{
	int			ii;
	ibdm_ioc_info_t		*ioc = NULL, *tmp = NULL;
	ibdm_dp_gidinfo_t	*gid_list;
	ib_dm_io_unitinfo_t	*iou;

	IBTF_DPRINTF_L4("ibdm", "\tibnex_get_ioc_info: GUID %llx", ioc_guid);

	/*
	 * Serialize with any in-progress sweep / probe: wait until the
	 * IBDM_BUSY flag is clear and then claim it for this lookup.
	 */
	mutex_enter(&ibdm.ibdm_mutex);
	while (ibdm.ibdm_busy & IBDM_BUSY)
		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
	ibdm.ibdm_busy |= IBDM_BUSY;

	if (gid_info)
		*gid_info = NULL; /* clear the value of gid_info */

	/*
	 * Walk every discovered GID; per-GID state is protected by
	 * gl_mutex, taken inside ibdm_mutex (same order as the rest
	 * of this file).
	 */
	gid_list = ibdm.ibdm_dp_gidlist_head;
	while (gid_list) {
		mutex_enter(&gid_list->gl_mutex);
		if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) {
			mutex_exit(&gid_list->gl_mutex);
			gid_list = gid_list->gl_next;
			continue;
		}
		if (gid_list->gl_iou == NULL) {
			IBTF_DPRINTF_L2("ibdm",
			    "\tget_ioc_info: No IOU info");
			mutex_exit(&gid_list->gl_mutex);
			gid_list = gid_list->gl_next;
			continue;
		}
		iou = &gid_list->gl_iou->iou_info;
		for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) {
			tmp = IBDM_GIDINFO2IOCINFO(gid_list, ii);
			if ((tmp->ioc_profile.ioc_guid == ioc_guid) &&
			    (tmp->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS)) {
				/*
				 * Match found: hand the caller a private
				 * copy so no IBDM lock is needed after
				 * return.
				 */
				ioc = ibdm_dup_ioc_info(tmp, gid_list);
				if (gid_info)
					*gid_info = gid_list; /* set this ptr */
				mutex_exit(&gid_list->gl_mutex);
				ibdm.ibdm_busy &= ~IBDM_BUSY;
				cv_broadcast(&ibdm.ibdm_busy_cv);
				mutex_exit(&ibdm.ibdm_mutex);
				IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: End");
				return (ioc);
			}
		}
		if (ii == iou->iou_num_ctrl_slots)
			ioc = NULL;

		mutex_exit(&gid_list->gl_mutex);
		gid_list = gid_list->gl_next;
	}

	/* Not found: drop the busy flag and wake any waiters */
	ibdm.ibdm_busy &= ~IBDM_BUSY;
	cv_broadcast(&ibdm.ibdm_busy_cv);
	mutex_exit(&ibdm.ibdm_mutex);
	IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: failure End");
	return (ioc);
}

/*
 * ibdm_ibnex_get_ioc_info()
 *	Returns pointer to ibdm_ioc_info_t if it finds
 *	matching record for the ioc_guid, otherwise NULL
 *	is returned
 *
 *	Note. this is a wrapper function to ibdm_get_ioc_info_with_gid() now.
 */
ibdm_ioc_info_t *
ibdm_ibnex_get_ioc_info(ib_guid_t ioc_guid)
{
	/* will not use the gid_info pointer, so the second arg is NULL */
	return (ibdm_get_ioc_info_with_gid(ioc_guid, NULL));
}

/*
 * ibdm_ibnex_get_ioc_count()
 *	Returns number of ibdm_ioc_info_t it finds
 */
int
ibdm_ibnex_get_ioc_count(void)
{
	int			count = 0, k;
	ibdm_ioc_info_t		*ioc;
	ibdm_dp_gidinfo_t	*gid_list;

	mutex_enter(&ibdm.ibdm_mutex);
	/* refresh the fabric view first (0 == no forced reprobe) */
	ibdm_sweep_fabric(0);

	while (ibdm.ibdm_busy & IBDM_BUSY)
		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
	ibdm.ibdm_busy |= IBDM_BUSY;

	for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
	    gid_list = gid_list->gl_next) {
		mutex_enter(&gid_list->gl_mutex);
		if ((gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) ||
		    (gid_list->gl_iou == NULL)) {
			mutex_exit(&gid_list->gl_mutex);
			continue;
		}
		/* count only IOCs whose probe completed successfully */
		for (k = 0; k < gid_list->gl_iou->iou_info.iou_num_ctrl_slots;
		    k++) {
			ioc = IBDM_GIDINFO2IOCINFO(gid_list, k);
			if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS)
				++count;
		}
		mutex_exit(&gid_list->gl_mutex);
	}
	ibdm.ibdm_busy &= ~IBDM_BUSY;
	cv_broadcast(&ibdm.ibdm_busy_cv);
	mutex_exit(&ibdm.ibdm_mutex);

	IBTF_DPRINTF_L4("ibdm", "\tget_ioc_count: count = %d", count);
	return (count);
}


/*
 * ibdm_ibnex_get_ioc_list()
 *	Returns information about all the IOCs present on the fabric.
 *	Reprobes the IOCs and the GID list if list_flag is set to REPROBE_ALL.
 *	Does not sweep fabric if DONOT_PROBE is set
 *
 *	Caller owns the returned singly-linked list and must release it
 *	with ibdm_ibnex_free_ioc_list().
 */
ibdm_ioc_info_t *
ibdm_ibnex_get_ioc_list(ibdm_ibnex_get_ioclist_mtd_t list_flag)
{
	int			ii;
	ibdm_ioc_info_t		*ioc_list = NULL, *tmp, *ioc;
	ibdm_dp_gidinfo_t	*gid_list;
	ib_dm_io_unitinfo_t	*iou;

	IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: Enter");

	mutex_enter(&ibdm.ibdm_mutex);
	if (list_flag != IBDM_IBNEX_DONOT_PROBE)
		ibdm_sweep_fabric(list_flag == IBDM_IBNEX_REPROBE_ALL);

	while (ibdm.ibdm_busy & IBDM_BUSY)
		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
	ibdm.ibdm_busy |= IBDM_BUSY;

	gid_list = ibdm.ibdm_dp_gidlist_head;
	while (gid_list) {
		mutex_enter(&gid_list->gl_mutex);
		if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) {
			mutex_exit(&gid_list->gl_mutex);
			gid_list = gid_list->gl_next;
			continue;
		}
		if (gid_list->gl_iou == NULL) {
			IBTF_DPRINTF_L2("ibdm",
			    "\tget_ioc_list: No IOU info");
			mutex_exit(&gid_list->gl_mutex);
			gid_list = gid_list->gl_next;
			continue;
		}
		iou = &gid_list->gl_iou->iou_info;
		/* prepend a private copy of each successfully probed IOC */
		for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) {
			ioc = IBDM_GIDINFO2IOCINFO(gid_list, ii);
			if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) {
				tmp = ibdm_dup_ioc_info(ioc, gid_list);
				tmp->ioc_next = ioc_list;
				ioc_list = tmp;
			}
		}
		mutex_exit(&gid_list->gl_mutex);
		gid_list = gid_list->gl_next;
	}
	ibdm.ibdm_busy &= ~IBDM_BUSY;
	cv_broadcast(&ibdm.ibdm_busy_cv);
	mutex_exit(&ibdm.ibdm_mutex);

	IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: End");
	return (ioc_list);
}

/*
 * ibdm_dup_ioc_info()
 *	Duplicate the IOC information and return the IOC
 *	information.
 */
static ibdm_ioc_info_t *
ibdm_dup_ioc_info(ibdm_ioc_info_t *in_ioc, ibdm_dp_gidinfo_t *gid_list)
{
	ibdm_ioc_info_t	*out_ioc;
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*out_ioc));
	/* caller must hold gl_mutex of the GID this IOC belongs to */
	ASSERT(MUTEX_HELD(&gid_list->gl_mutex));

	out_ioc = kmem_alloc(sizeof (ibdm_ioc_info_t), KM_SLEEP);
	bcopy(in_ioc, out_ioc, sizeof (ibdm_ioc_info_t));
	/* give the copy its own port GID list plus the IOU diag state */
	ibdm_update_ioc_port_gidlist(out_ioc, gid_list);
	out_ioc->ioc_iou_dc_valid = gid_list->gl_iou->iou_dc_valid;
	out_ioc->ioc_iou_diagcode = gid_list->gl_iou->iou_diagcode;

	return (out_ioc);
}


/*
 * ibdm_free_ioc_list()
 *	Deallocate memory for IOC list structure
 *	Frees every element of the ioc_next chain, including each
 *	element's GID list and (if present) its HCA list.
 */
void
ibdm_ibnex_free_ioc_list(ibdm_ioc_info_t *ioc)
{
	ibdm_ioc_info_t *temp;

	IBTF_DPRINTF_L4("ibdm", "\tibnex_free_ioc_list:");
	while (ioc) {
		temp = ioc;
		ioc = ioc->ioc_next;
		kmem_free(temp->ioc_gid_list,
		    (sizeof (ibdm_gid_t) * temp->ioc_nportgids));
		if (temp->ioc_hca_list)
			ibdm_ibnex_free_hca_list(temp->ioc_hca_list);
		kmem_free(temp, sizeof (ibdm_ioc_info_t));
	}
}


/*
 * ibdm_ibnex_update_pkey_tbls
 *	Updates the DM P_Key database.
 *	NOTE: Two cases are handled here: P_Key being added or removed.
 *
 *	Arguments	: NONE
 *	Return Values	: NONE
 */
void
ibdm_ibnex_update_pkey_tbls(void)
{
	int			h, pp, pidx;
	uint_t			nports;
	uint_t			size;
	ib_pkey_t		new_pkey;
	ib_pkey_t		*orig_pkey;
	ibdm_hca_list_t		*hca_list;
	ibdm_port_attr_t	*port;
	ibt_hca_portinfo_t	*pinfop;

	IBTF_DPRINTF_L4("ibdm", "\tibnex_update_pkey_tbls:");

	mutex_enter(&ibdm.ibdm_hl_mutex);
	hca_list = ibdm.ibdm_hca_list_head;

	for (h = 0; h < ibdm.ibdm_hca_count; h++) {

		/* This updates P_Key Tables for all ports of this HCA */
		(void) ibt_query_hca_ports(hca_list->hl_hca_hdl, 0, &pinfop,
		    &nports, &size);

		/* number of ports shouldn't have changed */
		ASSERT(nports == hca_list->hl_nports);

		for (pp = 0; pp < hca_list->hl_nports; pp++) {
			port = &hca_list->hl_port_attr[pp];

			/*
			 * First figure out the P_Keys from IBTL.
			 * Three things could have happened:
			 *	New P_Keys added
			 *	Existing P_Keys removed
			 *	Both of the above two
			 *
			 * Loop through the P_Key Indices and check if a
			 * give P_Key_Ix matches that of the one seen by
			 * IBDM. If they match no action is needed.
			 *
			 * If they don't match:
			 *	1. if orig_pkey is invalid and new_pkey is valid
			 *		---> add new_pkey to DM database
			 *	2. if orig_pkey is valid and new_pkey is invalid
			 *		---> remove orig_pkey from DM database
			 *	3. if orig_pkey and new_pkey are both valid:
			 *		---> remove orig_pkey from DM database
			 *		---> add new_pkey to DM database
			 *	4. if orig_pkey and new_pkey are both invalid:
			 *		---> do nothing. Updated DM database.
			 */

			for (pidx = 0; pidx < port->pa_npkeys; pidx++) {
				new_pkey = pinfop[pp].p_pkey_tbl[pidx];
				orig_pkey = &port->pa_pkey_tbl[pidx].pt_pkey;

				/* keys match - do nothing */
				if (*orig_pkey == new_pkey)
					continue;

				if (IBDM_INVALID_PKEY(*orig_pkey) &&
				    !IBDM_INVALID_PKEY(new_pkey)) {
					/* P_Key was added */
					IBTF_DPRINTF_L5("ibdm",
					    "\tibnex_update_pkey_tbls: new "
					    "P_Key added = 0x%x", new_pkey);
					*orig_pkey = new_pkey;
					ibdm_port_attr_ibmf_init(port,
					    new_pkey, pp);
				} else if (!IBDM_INVALID_PKEY(*orig_pkey) &&
				    IBDM_INVALID_PKEY(new_pkey)) {
					/* P_Key was removed */
					IBTF_DPRINTF_L5("ibdm",
					    "\tibnex_update_pkey_tbls: P_Key "
					    "removed = 0x%x", *orig_pkey);
					*orig_pkey = new_pkey;
					(void) ibdm_port_attr_ibmf_fini(port,
					    pidx);
				} else if (!IBDM_INVALID_PKEY(*orig_pkey) &&
				    !IBDM_INVALID_PKEY(new_pkey)) {
					/* P_Key were replaced */
					IBTF_DPRINTF_L5("ibdm",
					    "\tibnex_update_pkey_tbls: P_Key "
					    "replaced 0x%x with 0x%x",
					    *orig_pkey, new_pkey);
					(void) ibdm_port_attr_ibmf_fini(port,
					    pidx);
					*orig_pkey = new_pkey;
					ibdm_port_attr_ibmf_init(port,
					    new_pkey, pp);
				} else {
					/*
					 * P_Keys are invalid
					 * set anyway to reflect if
					 * INVALID_FULL was changed to
					 * INVALID_LIMITED or vice-versa.
					 */
					*orig_pkey = new_pkey;
				} /* end of else */

			} /* loop of p_key index */

		} /* loop of #ports of HCA */

		ibt_free_portinfo(pinfop, size);
		hca_list = hca_list->hl_next;

	} /* loop for all HCAs in the system */

	mutex_exit(&ibdm.ibdm_hl_mutex);
}


/*
 * ibdm_send_ioc_profile()
 *	Send IOC Controller Profile request. When the request is completed
 *	IBMF calls ibdm_process_incoming_mad routine to inform about
 *	the completion.
 */
static int
ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *gid_info, uint8_t ioc_no)
{
	ibmf_msg_t		*msg;
	ib_mad_hdr_t		*hdr;
	ibdm_ioc_info_t		*ioc_info = &(gid_info->gl_iou->iou_ioc_info[ioc_no]);
	ibdm_timeout_cb_args_t	*cb_args;

	IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: "
	    "gid info 0x%p, ioc_no = %d", gid_info, ioc_no);

	/*
	 * Send command to get IOC profile.
	 * Allocate a IBMF packet and initialize the packet.
	 */
	if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP,
	    &msg) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tsend_ioc_profile: pkt alloc fail");
		return (IBDM_FAILURE);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
	ibdm_alloc_send_buffers(msg);
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))

	mutex_enter(&gid_info->gl_mutex);
	ibdm_bump_transactionID(gid_info);
	mutex_exit(&gid_info->gl_mutex);

	/*
	 * Address the MAD: if the DM agent redirected us earlier, use the
	 * redirect LID/QP/keys; otherwise the GSI (QP1) defaults.
	 */
	msg->im_local_addr.ia_local_lid	= gid_info->gl_slid;
	msg->im_local_addr.ia_remote_lid	= gid_info->gl_dlid;
	if (gid_info->gl_redirected == B_TRUE) {
		if (gid_info->gl_redirect_dlid != 0) {
			msg->im_local_addr.ia_remote_lid =
			    gid_info->gl_redirect_dlid;
		}
		msg->im_local_addr.ia_remote_qno =
		    gid_info->gl_redirect_QP;
		msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
		msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
		msg->im_local_addr.ia_service_level =
		    gid_info->gl_redirectSL;
	} else {
		msg->im_local_addr.ia_remote_qno = 1;
		msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
		msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
		msg->im_local_addr.ia_service_level = gid_info->gl_SL;
	}

	hdr			= IBDM_OUT_IBMFMSG_MADHDR(msg);
	hdr->BaseVersion	= MAD_CLASS_BASE_VERS_1;
	hdr->MgmtClass		= MAD_MGMT_CLASS_DEV_MGT;
	hdr->ClassVersion	= IB_DM_CLASS_VERSION_1;
	hdr->R_Method		= IB_DM_DEVMGT_METHOD_GET;
	hdr->Status		= 0;
	hdr->TransactionID	= h2b64(gid_info->gl_transactionID);
	hdr->AttributeID	= h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE);
	/* AttributeModifier carries the 1-based controller slot number */
	hdr->AttributeModifier	= h2b32(ioc_no + 1);

	ioc_info->ioc_state	= IBDM_IOC_STATE_REPROBE_PROGRESS;
	cb_args			= &ioc_info->ioc_cb_args;
	cb_args->cb_gid_info	= gid_info;
	cb_args->cb_retry_count	= ibdm_dft_retry_cnt;
	cb_args->cb_req_type	= IBDM_REQ_TYPE_IOCINFO;
	cb_args->cb_ioc_num	= ioc_no;

	/* arm the response timeout before handing the MAD to IBMF */
	mutex_enter(&gid_info->gl_mutex);
	ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
	    cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
	mutex_exit(&gid_info->gl_mutex);

	IBTF_DPRINTF_L5("ibdm", "\tsend_ioc_profile:"
	    "timeout %x", ioc_info->ioc_timeout_id);

	if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg,
	    NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm",
		    "\tsend_ioc_profile: msg transport failed");
		/* invoke the send completion handler ourselves to clean up */
		ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
	}
	ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS;
	return (IBDM_SUCCESS);
}


/*
 * ibdm_port_reachable
 *	Returns B_TRUE if the port GID is reachable by sending
 *	a SA query to get the NODE record for this port GUID.
 */
static boolean_t
ibdm_port_reachable(ibmf_saa_handle_t sa_hdl, ib_guid_t guid)
{
	sa_node_record_t *resp;
	size_t length;

	/*
	 * Verify if it's reachable by getting the node record.
	 */
	if (ibdm_get_node_record_by_port(sa_hdl, guid, &resp, &length) ==
	    IBDM_SUCCESS) {
		kmem_free(resp, length);
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * ibdm_get_node_record_by_port
 *	Sends a SA query to get the NODE record for port GUID
 *	Returns IBDM_SUCCESS if the port GID is reachable.
 *
 *	Note: the caller must be responsible for freeing the resource
 *	by calling kmem_free(resp, length) later.
 */
static int
ibdm_get_node_record_by_port(ibmf_saa_handle_t sa_hdl, ib_guid_t guid,
    sa_node_record_t **resp, size_t *length)
{
	sa_node_record_t	req;
	ibmf_saa_access_args_t	args;
	int			ret;
	ASSERT(resp != NULL && length != NULL);

	IBTF_DPRINTF_L4("ibdm", "\tport_reachable: port_guid %llx",
	    guid);

	/* match NodeRecords on PortGUID only */
	bzero(&req, sizeof (sa_node_record_t));
	req.NodeInfo.PortGUID = guid;

	args.sq_attr_id		= SA_NODERECORD_ATTRID;
	args.sq_access_type	= IBMF_SAA_RETRIEVE;
	args.sq_component_mask	= SA_NODEINFO_COMPMASK_PORTGUID;
	args.sq_template	= &req;
	args.sq_callback	= NULL;	/* synchronous query */
	args.sq_callback_arg	= NULL;

	ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) resp);
	if (ret != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tport_reachable:"
		    " SA Retrieve Failed: %d", ret);
		return (IBDM_FAILURE);
	}
	if (*resp == NULL || *length == 0) {
		IBTF_DPRINTF_L2("ibdm", "\tport_reachable: No records");
		return (IBDM_FAILURE);
	}
	/*
	 * There is one NodeRecord on each endport on a subnet.
	 */
	ASSERT(*length == sizeof (sa_node_record_t));

	return (IBDM_SUCCESS);
}


/*
 * Update the gidlist for all affected IOCs when GID becomes
 * available/unavailable.
 *
 * Parameters :
 *	gidinfo - Incoming / Outgoing GID.
 *	add_flag - 1 for GID added, 0 for GID removed.
 *		- (-1) : IOC gid list updated, ioc_list required.
 *
 * This function gets the GID for the node GUID corresponding to the
 * port GID.
Gets the IOU info 5602 */ 5603 static ibdm_ioc_info_t * 5604 ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *gid_info, int avail_flag) 5605 { 5606 ibdm_dp_gidinfo_t *node_gid = NULL; 5607 uint8_t niocs, ii; 5608 ibdm_ioc_info_t *ioc, *ioc_list = NULL, *tmp; 5609 5610 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist"); 5611 5612 switch (avail_flag) { 5613 case 1 : 5614 node_gid = ibdm_check_dest_nodeguid(gid_info); 5615 break; 5616 case 0 : 5617 node_gid = ibdm_handle_gid_rm(gid_info); 5618 break; 5619 case -1 : 5620 node_gid = gid_info; 5621 break; 5622 default : 5623 break; 5624 } 5625 5626 if (node_gid == NULL) { 5627 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist: " 5628 "No node GID found, port gid 0x%p, avail_flag %d", 5629 gid_info, avail_flag); 5630 return (NULL); 5631 } 5632 5633 mutex_enter(&node_gid->gl_mutex); 5634 if ((node_gid->gl_state != IBDM_GID_PROBING_COMPLETE && 5635 node_gid->gl_state != IBDM_GID_PROBING_SKIPPED) || 5636 node_gid->gl_iou == NULL) { 5637 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist " 5638 "gl_state %x, gl_iou %p", node_gid->gl_state, 5639 node_gid->gl_iou); 5640 mutex_exit(&node_gid->gl_mutex); 5641 return (NULL); 5642 } 5643 5644 niocs = node_gid->gl_iou->iou_info.iou_num_ctrl_slots; 5645 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : niocs %x", 5646 niocs); 5647 for (ii = 0; ii < niocs; ii++) { 5648 ioc = IBDM_GIDINFO2IOCINFO(node_gid, ii); 5649 /* 5650 * Skip IOCs for which probe is not complete or 5651 * reprobe is progress 5652 */ 5653 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 5654 tmp = ibdm_dup_ioc_info(ioc, node_gid); 5655 tmp->ioc_info_updated.ib_gid_prop_updated = 1; 5656 tmp->ioc_next = ioc_list; 5657 ioc_list = tmp; 5658 } 5659 } 5660 mutex_exit(&node_gid->gl_mutex); 5661 5662 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : return %p", 5663 ioc_list); 5664 return (ioc_list); 5665 } 5666 5667 /* 5668 * ibdm_saa_event_cb : 5669 * Event handling which does *not* require ibdm_hl_mutex to be 5670 * held are executed 
in the same thread. This is to prevent
 *	deadlocks with HCA port down notifications which hold the
 *	ibdm_hl_mutex.
 *
 *	GID_AVAILABLE event is handled here. A taskq is spawned to
 *	handle GID_UNAVAILABLE.
 *
 *	A new mutex ibdm_ibnex_mutex has been introduced to protect
 *	ibnex_callback. This has been done to prevent any possible
 *	deadlock (described above) while handling GID_AVAILABLE.
 *
 *	IBMF calls the event callback for a HCA port. The SA handle
 *	for this port would be valid, till the callback returns.
 *	IBDM calling IBDM using the above SA handle should be valid.
 *
 *	IBDM will additionally check (SA handle != NULL), before
 *	calling IBMF.
 */
/*ARGSUSED*/
static void
ibdm_saa_event_cb(ibmf_saa_handle_t ibmf_saa_handle,
    ibmf_saa_subnet_event_t ibmf_saa_event,
    ibmf_saa_event_details_t *event_details, void *callback_arg)
{
	ibdm_saa_event_arg_t *event_arg;
	ib_gid_t sgid, dgid;
	ibdm_port_attr_t *hca_port;
	ibdm_dp_gidinfo_t *gid_info, *node_gid_info = NULL;
	sa_node_record_t *nrec;
	size_t length;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg));

	hca_port = (ibdm_port_attr_t *)callback_arg;

	IBTF_DPRINTF_L4("ibdm", "\tsaa_event_cb(%x, %x, %x, %x)\n",
	    ibmf_saa_handle, ibmf_saa_event, event_details,
	    callback_arg);
#ifdef DEBUG
	/* debug knob to suppress all SA event processing */
	if (ibdm_ignore_saa_event)
		return;
#endif

	if (ibmf_saa_event == IBMF_SAA_EVENT_GID_AVAILABLE) {
		/*
		 * Ensure no other probe / sweep fabric is in
		 * progress.
		 */
		mutex_enter(&ibdm.ibdm_mutex);
		while (ibdm.ibdm_busy & IBDM_BUSY)
			cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
		ibdm.ibdm_busy |= IBDM_BUSY;
		mutex_exit(&ibdm.ibdm_mutex);

		/*
		 * If we already know about this GID, return.
		 * GID_AVAILABLE may be reported for multiple HCA
		 * ports.
		 */
		if ((ibdm_check_dgid(event_details->ie_gid.gid_guid,
		    event_details->ie_gid.gid_prefix)) != NULL) {
			mutex_enter(&ibdm.ibdm_mutex);
			ibdm.ibdm_busy &= ~IBDM_BUSY;
			cv_broadcast(&ibdm.ibdm_busy_cv);
			mutex_exit(&ibdm.ibdm_mutex);
			return;
		}

		IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) "
		    "Insertion notified",
		    event_details->ie_gid.gid_prefix,
		    event_details->ie_gid.gid_guid);

		/* This is a new gid, insert it to GID list */
		sgid.gid_prefix = hca_port->pa_sn_prefix;
		sgid.gid_guid = hca_port->pa_port_guid;
		dgid.gid_prefix = event_details->ie_gid.gid_prefix;
		dgid.gid_guid = event_details->ie_gid.gid_guid;
		gid_info = ibdm_create_gid_info(hca_port, sgid, dgid);
		if (gid_info == NULL) {
			IBTF_DPRINTF_L4("ibdm", "\tGID_AVAILABLE: "
			    "create_gid_info returned NULL");
			mutex_enter(&ibdm.ibdm_mutex);
			ibdm.ibdm_busy &= ~IBDM_BUSY;
			cv_broadcast(&ibdm.ibdm_busy_cv);
			mutex_exit(&ibdm.ibdm_mutex);
			return;
		}
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_state = IBDM_GID_PROBING_SKIPPED;
		mutex_exit(&gid_info->gl_mutex);

		/* Get the node GUID */
		if (ibdm_get_node_record_by_port(ibmf_saa_handle, dgid.gid_guid,
		    &nrec, &length) != IBDM_SUCCESS) {
			/*
			 * Set the state to PROBE_NOT_DONE for the
			 * next sweep to probe it
			 */
			IBTF_DPRINTF_L2("ibdm", "\tsaa_event_taskq: "
			    "Skipping GID : port GUID not found");
			mutex_enter(&gid_info->gl_mutex);
			gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE;
			mutex_exit(&gid_info->gl_mutex);
			mutex_enter(&ibdm.ibdm_mutex);
			ibdm.ibdm_busy &= ~IBDM_BUSY;
			cv_broadcast(&ibdm.ibdm_busy_cv);
			mutex_exit(&ibdm.ibdm_mutex);
			return;
		}
		gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID;
		gid_info->gl_devid = nrec->NodeInfo.DeviceID;
		kmem_free(nrec, length);
		gid_info->gl_portguid = dgid.gid_guid;

		/*
5786 * Get the gid info with the same node GUID. 5787 */ 5788 mutex_enter(&ibdm.ibdm_mutex); 5789 node_gid_info = ibdm.ibdm_dp_gidlist_head; 5790 while (node_gid_info) { 5791 if (node_gid_info->gl_nodeguid == 5792 gid_info->gl_nodeguid && 5793 node_gid_info->gl_iou != NULL) { 5794 break; 5795 } 5796 node_gid_info = node_gid_info->gl_next; 5797 } 5798 mutex_exit(&ibdm.ibdm_mutex); 5799 5800 /* 5801 * Handling a new GID requires filling of gl_hca_list. 5802 * This require ibdm hca_list to be parsed and hence 5803 * holding the ibdm_hl_mutex. Spawning a new thread to 5804 * handle this. 5805 */ 5806 if (node_gid_info == NULL) { 5807 if (taskq_dispatch(system_taskq, 5808 ibdm_saa_handle_new_gid, (void *)gid_info, 5809 TQ_NOSLEEP) == NULL) { 5810 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5811 "new_gid taskq_dispatch failed"); 5812 return; 5813 } 5814 } 5815 5816 mutex_enter(&ibdm.ibdm_mutex); 5817 ibdm.ibdm_busy &= ~IBDM_BUSY; 5818 cv_broadcast(&ibdm.ibdm_busy_cv); 5819 mutex_exit(&ibdm.ibdm_mutex); 5820 return; 5821 } 5822 5823 if (ibmf_saa_event != IBMF_SAA_EVENT_GID_UNAVAILABLE) 5824 return; 5825 5826 /* 5827 * GID UNAVAIL EVENT: Try to locate the GID in the GID list. 5828 * If we don't find it we just return. 5829 */ 5830 mutex_enter(&ibdm.ibdm_mutex); 5831 gid_info = ibdm.ibdm_dp_gidlist_head; 5832 while (gid_info) { 5833 if (gid_info->gl_portguid == 5834 event_details->ie_gid.gid_guid) { 5835 break; 5836 } 5837 gid_info = gid_info->gl_next; 5838 } 5839 mutex_exit(&ibdm.ibdm_mutex); 5840 if (gid_info == NULL) { 5841 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5842 "GID for GUID %llX not found during GID UNAVAIL event", 5843 event_details->ie_gid.gid_guid); 5844 return; 5845 } 5846 5847 /* 5848 * If this GID is DM capable, we'll have to check whether this DGID 5849 * is reachable via another port. 
	 */
	if (gid_info->gl_is_dm_capable == B_TRUE) {
		/* heap-allocate the event details; taskq may outlive us */
		event_arg = (ibdm_saa_event_arg_t *)kmem_alloc(
		    sizeof (ibdm_saa_event_arg_t), KM_SLEEP);
		event_arg->ibmf_saa_handle = ibmf_saa_handle;
		event_arg->ibmf_saa_event = ibmf_saa_event;
		bcopy(event_details, &event_arg->event_details,
		    sizeof (ibmf_saa_event_details_t));
		event_arg->callback_arg = callback_arg;

		if (taskq_dispatch(system_taskq, ibdm_saa_event_taskq,
		    (void *)event_arg, TQ_NOSLEEP) == NULL) {
			IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: "
			    "taskq_dispatch failed");
			ibdm_free_saa_event_arg(event_arg);
			return;
		}
	}
}

/*
 * Handle a new GID discovered by GID_AVAILABLE saa event.
 *	Runs on the system taskq; arg is the ibdm_dp_gidinfo_t created
 *	by ibdm_saa_event_cb for the new GID.
 */
void
ibdm_saa_handle_new_gid(void *arg)
{
	ibdm_dp_gidinfo_t	*gid_info;
	ibdm_hca_list_t		*hca_list = NULL;
	ibdm_port_attr_t	*port = NULL;
	ibdm_ioc_info_t		*ioc_list = NULL;

	IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid(%p)", arg);

	gid_info = (ibdm_dp_gidinfo_t *)arg;

	/*
	 * Ensure that no other sweep / probe has completed
	 * probing this gid.
	 */
	mutex_enter(&gid_info->gl_mutex);
	if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) {
		mutex_exit(&gid_info->gl_mutex);
		return;
	}
	mutex_exit(&gid_info->gl_mutex);

	/*
	 * Parse HCAs to fill gl_hca_list
	 */
	mutex_enter(&ibdm.ibdm_hl_mutex);
	for (ibdm_get_next_port(&hca_list, &port, 1); port;
	    ibdm_get_next_port(&hca_list, &port, 1)) {
		if (ibdm_port_reachable(port->pa_sa_hdl,
		    gid_info->gl_portguid) == B_TRUE) {
			ibdm_addto_glhcalist(gid_info, hca_list);
		}
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);

	/*
	 * Ensure no other probe / sweep fabric is in
	 * progress.
	 */
	mutex_enter(&ibdm.ibdm_mutex);
	while (ibdm.ibdm_busy & IBDM_BUSY)
		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
	ibdm.ibdm_busy |= IBDM_BUSY;
	mutex_exit(&ibdm.ibdm_mutex);

	/*
	 * New IOU probe it, to check if new IOCs
	 */
	IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid: "
	    "new GID : probing");
	mutex_enter(&ibdm.ibdm_mutex);
	ibdm.ibdm_ngid_probes_in_progress++;
	mutex_exit(&ibdm.ibdm_mutex);
	mutex_enter(&gid_info->gl_mutex);
	gid_info->gl_reprobe_flag = 0;
	gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE;
	mutex_exit(&gid_info->gl_mutex);
	/* probe synchronously, then wait for all probes to drain */
	ibdm_probe_gid_thread((void *)gid_info);

	mutex_enter(&ibdm.ibdm_mutex);
	ibdm_wait_probe_completion();
	mutex_exit(&ibdm.ibdm_mutex);

	if (gid_info->gl_iou == NULL) {
		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_busy &= ~IBDM_BUSY;
		cv_broadcast(&ibdm.ibdm_busy_cv);
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}

	/*
	 * Update GID list in all IOCs affected by this
	 */
	ioc_list = ibdm_update_ioc_gidlist(gid_info, 1);

	/*
	 * Pass on the IOCs with updated GIDs to IBnexus
	 */
	if (ioc_list) {
		mutex_enter(&ibdm.ibdm_ibnex_mutex);
		if (ibdm.ibdm_ibnex_callback != NULL) {
			(*ibdm.ibdm_ibnex_callback)((void *)ioc_list,
			    IBDM_EVENT_IOC_PROP_UPDATE);
		}
		mutex_exit(&ibdm.ibdm_ibnex_mutex);
	}

	mutex_enter(&ibdm.ibdm_mutex);
	ibdm.ibdm_busy &= ~IBDM_BUSY;
	cv_broadcast(&ibdm.ibdm_busy_cv);
	mutex_exit(&ibdm.ibdm_mutex);
}

/*
 * ibdm_saa_event_taskq :
 *	GID_UNAVAILABLE Event handling requires ibdm_hl_mutex to be
 *	held. The GID_UNAVAILABLE handling is done in a taskq to
 *	prevent deadlocks with HCA port down notifications which hold
 *	ibdm_hl_mutex.
5974 */ 5975 void 5976 ibdm_saa_event_taskq(void *arg) 5977 { 5978 ibdm_saa_event_arg_t *event_arg; 5979 ibmf_saa_handle_t ibmf_saa_handle; 5980 ibmf_saa_subnet_event_t ibmf_saa_event; 5981 ibmf_saa_event_details_t *event_details; 5982 void *callback_arg; 5983 5984 ibdm_dp_gidinfo_t *gid_info; 5985 ibdm_port_attr_t *hca_port, *port = NULL; 5986 ibdm_hca_list_t *hca_list = NULL; 5987 int sa_handle_valid = 0; 5988 ibdm_ioc_info_t *ioc_list = NULL; 5989 5990 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 5991 5992 event_arg = (ibdm_saa_event_arg_t *)arg; 5993 ibmf_saa_handle = event_arg->ibmf_saa_handle; 5994 ibmf_saa_event = event_arg->ibmf_saa_event; 5995 event_details = &event_arg->event_details; 5996 callback_arg = event_arg->callback_arg; 5997 5998 ASSERT(callback_arg != NULL); 5999 ASSERT(ibmf_saa_event == IBMF_SAA_EVENT_GID_UNAVAILABLE); 6000 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_taskq(%x, %x, %x, %x)", 6001 ibmf_saa_handle, ibmf_saa_event, event_details, 6002 callback_arg); 6003 6004 hca_port = (ibdm_port_attr_t *)callback_arg; 6005 6006 /* Check if the port_attr is still valid */ 6007 mutex_enter(&ibdm.ibdm_hl_mutex); 6008 for (ibdm_get_next_port(&hca_list, &port, 0); port; 6009 ibdm_get_next_port(&hca_list, &port, 0)) { 6010 if (port == hca_port && port->pa_port_guid == 6011 hca_port->pa_port_guid) { 6012 if (ibmf_saa_handle == hca_port->pa_sa_hdl) 6013 sa_handle_valid = 1; 6014 break; 6015 } 6016 } 6017 mutex_exit(&ibdm.ibdm_hl_mutex); 6018 if (sa_handle_valid == 0) { 6019 ibdm_free_saa_event_arg(event_arg); 6020 return; 6021 } 6022 6023 if (hca_port && (hca_port->pa_sa_hdl == NULL || 6024 ibmf_saa_handle != hca_port->pa_sa_hdl)) { 6025 ibdm_free_saa_event_arg(event_arg); 6026 return; 6027 } 6028 hca_list = NULL; 6029 port = NULL; 6030 6031 /* 6032 * Check if the GID is visible to other HCA ports. 6033 * Return if so. 
6034 */ 6035 mutex_enter(&ibdm.ibdm_hl_mutex); 6036 for (ibdm_get_next_port(&hca_list, &port, 1); port; 6037 ibdm_get_next_port(&hca_list, &port, 1)) { 6038 if (ibdm_port_reachable(port->pa_sa_hdl, 6039 event_details->ie_gid.gid_guid) == B_TRUE) { 6040 mutex_exit(&ibdm.ibdm_hl_mutex); 6041 ibdm_free_saa_event_arg(event_arg); 6042 return; 6043 } 6044 } 6045 mutex_exit(&ibdm.ibdm_hl_mutex); 6046 6047 /* 6048 * Ensure no other probe / sweep fabric is in 6049 * progress. 6050 */ 6051 mutex_enter(&ibdm.ibdm_mutex); 6052 while (ibdm.ibdm_busy & IBDM_BUSY) 6053 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 6054 ibdm.ibdm_busy |= IBDM_BUSY; 6055 mutex_exit(&ibdm.ibdm_mutex); 6056 6057 /* 6058 * If this GID is no longer in GID list, return 6059 * GID_UNAVAILABLE may be reported for multiple HCA 6060 * ports. 6061 */ 6062 mutex_enter(&ibdm.ibdm_mutex); 6063 gid_info = ibdm.ibdm_dp_gidlist_head; 6064 while (gid_info) { 6065 if (gid_info->gl_portguid == 6066 event_details->ie_gid.gid_guid) { 6067 break; 6068 } 6069 gid_info = gid_info->gl_next; 6070 } 6071 mutex_exit(&ibdm.ibdm_mutex); 6072 if (gid_info == NULL) { 6073 mutex_enter(&ibdm.ibdm_mutex); 6074 ibdm.ibdm_busy &= ~IBDM_BUSY; 6075 cv_broadcast(&ibdm.ibdm_busy_cv); 6076 mutex_exit(&ibdm.ibdm_mutex); 6077 ibdm_free_saa_event_arg(event_arg); 6078 return; 6079 } 6080 6081 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 6082 "Unavailable notification", 6083 event_details->ie_gid.gid_prefix, 6084 event_details->ie_gid.gid_guid); 6085 6086 /* 6087 * Update GID list in all IOCs affected by this 6088 */ 6089 if (gid_info->gl_state == IBDM_GID_PROBING_SKIPPED || 6090 gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) 6091 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0); 6092 6093 /* 6094 * Remove GID from the global GID list 6095 * Handle the case where all port GIDs for an 6096 * IOU have been hot-removed. Check both gid_info 6097 * & ioc_info for checking ngids. 
6098 */ 6099 mutex_enter(&ibdm.ibdm_mutex); 6100 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) { 6101 mutex_enter(&gid_info->gl_mutex); 6102 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou); 6103 mutex_exit(&gid_info->gl_mutex); 6104 } 6105 if (gid_info->gl_prev != NULL) 6106 gid_info->gl_prev->gl_next = gid_info->gl_next; 6107 if (gid_info->gl_next != NULL) 6108 gid_info->gl_next->gl_prev = gid_info->gl_prev; 6109 6110 if (gid_info == ibdm.ibdm_dp_gidlist_head) 6111 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next; 6112 if (gid_info == ibdm.ibdm_dp_gidlist_tail) 6113 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev; 6114 ibdm.ibdm_ngids--; 6115 6116 ibdm.ibdm_busy &= ~IBDM_BUSY; 6117 cv_broadcast(&ibdm.ibdm_busy_cv); 6118 mutex_exit(&ibdm.ibdm_mutex); 6119 6120 /* free the hca_list on this gid_info */ 6121 ibdm_delete_glhca_list(gid_info); 6122 6123 mutex_destroy(&gid_info->gl_mutex); 6124 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t)); 6125 6126 /* 6127 * Pass on the IOCs with updated GIDs to IBnexus 6128 */ 6129 if (ioc_list) { 6130 IBTF_DPRINTF_L4("ibdm", "\tGID_UNAVAILABLE " 6131 "IOC_PROP_UPDATE for %p\n", ioc_list); 6132 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6133 if (ibdm.ibdm_ibnex_callback != NULL) { 6134 (*ibdm.ibdm_ibnex_callback)((void *) 6135 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6136 } 6137 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6138 } 6139 6140 ibdm_free_saa_event_arg(event_arg); 6141 } 6142 6143 6144 static int 6145 ibdm_cmp_gid_list(ibdm_gid_t *new, ibdm_gid_t *prev) 6146 { 6147 ibdm_gid_t *scan_new, *scan_prev; 6148 int cmp_failed = 0; 6149 6150 ASSERT(new != NULL); 6151 ASSERT(prev != NULL); 6152 6153 /* 6154 * Search for each new gid anywhere in the prev GID list. 6155 * Note that the gid list could have been re-ordered. 
	 */
	for (scan_new = new; scan_new; scan_new = scan_new->gid_next) {
		for (scan_prev = prev, cmp_failed = 1; scan_prev;
		    scan_prev = scan_prev->gid_next) {
			if (scan_prev->gid_dgid_hi == scan_new->gid_dgid_hi &&
			    scan_prev->gid_dgid_lo == scan_new->gid_dgid_lo) {
				cmp_failed = 0;
				break;
			}
		}

		/* A GID in "new" was not found anywhere in "prev" */
		if (cmp_failed)
			return (1);
	}
	return (0);
}

/*
 * This is always called in a single thread
 * This function updates the gid_list and serv_list of IOC
 * The current gid_list is in ioc_info_t (contains only port
 * guids for which probe is done) & gidinfo_t (other port gids)
 * The gids in both locations are used for comparison.
 *
 * Sets the ib_srv_prop_updated / ib_gid_prop_updated bits in
 * ioc_info_updated when the service entries or the port GID
 * list changed since the previous probe, then releases the
 * saved "previous" snapshots.
 */
static void
ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *ioc, ibdm_dp_gidinfo_t *gidinfo)
{
	ibdm_gid_t		*cur_gid_list;
	uint_t			cur_nportgids;

	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));

	ioc->ioc_info_updated.ib_prop_updated = 0;

	/* Current GID list in gid_info only */
	cur_gid_list = gidinfo->gl_gid;
	cur_nportgids = gidinfo->gl_ngids;

	/*
	 * Service properties changed if the entry count differs or
	 * any (id, name) pair differs from the previous snapshot.
	 */
	if (ioc->ioc_prev_serv_cnt !=
	    ioc->ioc_profile.ioc_service_entries ||
	    ibdm_serv_cmp(&ioc->ioc_serv[0], &ioc->ioc_prev_serv[0],
	    ioc->ioc_prev_serv_cnt))
		ioc->ioc_info_updated.ib_srv_prop_updated = 1;

	/*
	 * GID properties changed if the count differs, either list is
	 * missing, or some previous GID is absent from the current list.
	 */
	if (ioc->ioc_prev_nportgids != cur_nportgids ||
	    ioc->ioc_prev_gid_list == NULL || cur_gid_list == NULL) {
		ioc->ioc_info_updated.ib_gid_prop_updated = 1;
	} else if (ibdm_cmp_gid_list(ioc->ioc_prev_gid_list, cur_gid_list)) {
		ioc->ioc_info_updated.ib_gid_prop_updated = 1;
	}

	/* Zero out previous entries */
	ibdm_free_gid_list(ioc->ioc_prev_gid_list);
	if (ioc->ioc_prev_serv)
		kmem_free(ioc->ioc_prev_serv, ioc->ioc_prev_serv_cnt *
		    sizeof (ibdm_srvents_info_t));
	ioc->ioc_prev_serv_cnt = 0;
	ioc->ioc_prev_nportgids = 0;
	ioc->ioc_prev_serv = NULL;
	ioc->ioc_prev_gid_list = NULL;
}

/*
 * Handle GID removal. This returns gid_info of an GID for the same
 * node GUID, if found. For an GID with IOU information, the same
 * gid_info is returned if no gid_info with same node_guid is found.
 */
static ibdm_dp_gidinfo_t *
ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *rm_gid)
{
	ibdm_dp_gidinfo_t	*gid_list;

	IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm(0x%p)", rm_gid);

	if (rm_gid->gl_iou == NULL) {
		IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm NO iou");
		/*
		 * Search for a GID with same node_guid and
		 * gl_iou != NULL
		 */
		for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
		    gid_list = gid_list->gl_next) {
			if (gid_list->gl_iou != NULL && (gid_list->gl_nodeguid
			    == rm_gid->gl_nodeguid))
				break;
		}

		if (gid_list)
			ibdm_rmfrom_glgid_list(gid_list, rm_gid);

		IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list);
		return (gid_list);
	} else {
		/*
		 * Search for a GID with same node_guid and
		 * gl_iou == NULL
		 */
		IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm with iou");
		for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
		    gid_list = gid_list->gl_next) {
			if (gid_list->gl_iou == NULL && (gid_list->gl_nodeguid
			    == rm_gid->gl_nodeguid))
				break;
		}

		if (gid_list) {
			/*
			 * Copy the following fields from rm_gid :
			 *	1. gl_state
			 *	2. gl_iou
			 *	3. gl_gid & gl_ngids
			 *
			 * Note : Function is synchronized by
			 *	ibdm_busy flag.
			 *
			 * Note : Redirect info is initialized if
			 *	any MADs for the GID fail
			 */
			IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm "
			    "copying info to GID with gl_iou != NULl");
			gid_list->gl_state = rm_gid->gl_state;
			gid_list->gl_iou = rm_gid->gl_iou;
			gid_list->gl_gid = rm_gid->gl_gid;
			gid_list->gl_ngids = rm_gid->gl_ngids;

			/* Remove the GID from gl_gid list */
			ibdm_rmfrom_glgid_list(gid_list, rm_gid);
		} else {
			/*
			 * Handle a case where all GIDs to the IOU have
			 * been removed.
			 */
			IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm 0 GID "
			    "to IOU");

			ibdm_rmfrom_glgid_list(rm_gid, rm_gid);
			return (rm_gid);
		}
		IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list);
		return (gid_list);
	}
}

/*
 * Unlink (and free) the ibdm_gid_t entry matching rm_gid's DGID
 * from gid_info's gl_gid list, decrementing gl_ngids.  No-op if
 * the DGID is not present in the list.
 */
static void
ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *gid_info,
    ibdm_dp_gidinfo_t *rm_gid)
{
	ibdm_gid_t		*tmp, *prev;

	IBTF_DPRINTF_L4("ibdm", "\trmfrom_glgid (%p, %p)",
	    gid_info, rm_gid);

	for (tmp = gid_info->gl_gid, prev = NULL; tmp; ) {
		if (tmp->gid_dgid_hi == rm_gid->gl_dgid_hi &&
		    tmp->gid_dgid_lo == rm_gid->gl_dgid_lo) {
			if (prev == NULL)
				gid_info->gl_gid = tmp->gid_next;
			else
				prev->gid_next = tmp->gid_next;

			kmem_free(tmp, sizeof (ibdm_gid_t));
			gid_info->gl_ngids--;
			break;
		} else {
			prev = tmp;
			tmp = tmp->gid_next;
		}
	}
}

/*
 * Deep-copy the "dest" GID list and append the copy to *src_ptr.
 * The copy is built in reverse order of "dest"; callers do not
 * depend on ordering (see ibdm_cmp_gid_list).
 */
static void
ibdm_addto_gidlist(ibdm_gid_t **src_ptr, ibdm_gid_t *dest)
{
	ibdm_gid_t	*head = NULL, *new, *tail;

	/* First copy the destination */
	for (; dest; dest = dest->gid_next) {
		new = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP);
		new->gid_dgid_hi = dest->gid_dgid_hi;
		new->gid_dgid_lo = dest->gid_dgid_lo;
		new->gid_next = head;
		head = new;
	}

	/* Insert this to the source */
	if (*src_ptr == NULL)
		*src_ptr = head;
	else {
		for (tail = *src_ptr; tail->gid_next != NULL;
		    tail = tail->gid_next)
			;

		tail->gid_next = head;
	}
}

/*
 * Free every ibdm_gid_t entry in the list starting at head.
 */
static void
ibdm_free_gid_list(ibdm_gid_t	*head)
{
	ibdm_gid_t	*delete;

	for (delete = head; delete; ) {
		head = delete->gid_next;
		kmem_free(delete, sizeof (ibdm_gid_t));
		delete = head;
	}
}

/*
 * This function rescans the DM capable GIDs (gl_state is
 * GID_PROBE_COMPLETE or IBDM_GID_PROBING_SKIPPED). This
 * basically checks if the DM capable GID is reachable. If
 * not this is handled the same way as GID_UNAVAILABLE,
 * except that notifications are not sent to IBnexus.
 *
 * This function also initializes the ioc_prev_list for
 * a particular IOC (when called from probe_ioc, with
 * ioc_guidp != NULL) or all IOCs for the gid (called from
 * sweep_fabric, ioc_guidp == NULL).
 */
static void
ibdm_rescan_gidlist(ib_guid_t *ioc_guidp)
{
	ibdm_dp_gidinfo_t	*gid_info, *tmp;
	int			ii, niocs, found;
	ibdm_hca_list_t		*hca_list = NULL;
	ibdm_port_attr_t	*port = NULL;
	ibdm_ioc_info_t		*ioc_list;

	for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) {
		found = 0;
		/* Only DM capable GIDs are of interest here */
		if (gid_info->gl_state != IBDM_GID_PROBING_SKIPPED &&
		    gid_info->gl_state != IBDM_GID_PROBING_COMPLETE) {
			gid_info = gid_info->gl_next;
			continue;
		}

		/*
		 * Check if the GID is visible to any HCA ports.
		 * If so, only refresh its ioc_prev_gid_list and
		 * move on to the next GID.
		 */
		mutex_enter(&ibdm.ibdm_hl_mutex);
		for (ibdm_get_next_port(&hca_list, &port, 1); port;
		    ibdm_get_next_port(&hca_list, &port, 1)) {
			if (ibdm_port_reachable(port->pa_sa_hdl,
			    gid_info->gl_dgid_lo) == B_TRUE) {
				found = 1;
				break;
			}
		}
		mutex_exit(&ibdm.ibdm_hl_mutex);

		if (found) {
			if (gid_info->gl_iou == NULL) {
				gid_info = gid_info->gl_next;
				continue;
			}

			/* Intialize the ioc_prev_gid_list */
			niocs =
			    gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
			for (ii = 0; ii < niocs; ii++) {
				ioc_list = IBDM_GIDINFO2IOCINFO(gid_info, ii);

				if (ioc_guidp == NULL || (*ioc_guidp ==
				    ioc_list->ioc_profile.ioc_guid)) {
					/* Add info of GIDs in gid_info also */
					ibdm_addto_gidlist(
					    &ioc_list->ioc_prev_gid_list,
					    gid_info->gl_gid);
					ioc_list->ioc_prev_nportgids =
					    gid_info->gl_ngids;
				}
			}
			gid_info = gid_info->gl_next;
			continue;
		}

		IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist "
		    "deleted port GUID %llx",
		    gid_info->gl_dgid_lo);

		/*
		 * Update GID list in all IOCs affected by this
		 */
		ioc_list = ibdm_update_ioc_gidlist(gid_info, 0);

		/*
		 * Remove GID from the global GID list
		 * Handle the case where all port GIDs for an
		 * IOU have been hot-removed.
		 */
		mutex_enter(&ibdm.ibdm_mutex);
		if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) {
			mutex_enter(&gid_info->gl_mutex);
			(void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou);
			mutex_exit(&gid_info->gl_mutex);
		}

		/* Unlink gid_info from the global doubly-linked list */
		tmp = gid_info->gl_next;
		if (gid_info->gl_prev != NULL)
			gid_info->gl_prev->gl_next = gid_info->gl_next;
		if (gid_info->gl_next != NULL)
			gid_info->gl_next->gl_prev = gid_info->gl_prev;

		if (gid_info == ibdm.ibdm_dp_gidlist_head)
			ibdm.ibdm_dp_gidlist_head = gid_info->gl_next;
		if (gid_info == ibdm.ibdm_dp_gidlist_tail)
			ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev;
		ibdm.ibdm_ngids--;
		mutex_exit(&ibdm.ibdm_mutex);

		/* free the hca_list on this gid_info */
		ibdm_delete_glhca_list(gid_info);

		mutex_destroy(&gid_info->gl_mutex);
		kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t));

		gid_info = tmp;

		/*
		 * Pass on the IOCs with updated GIDs to IBnexus
		 */
		if (ioc_list) {
			IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist "
			    "IOC_PROP_UPDATE for %p\n", ioc_list);
			mutex_enter(&ibdm.ibdm_ibnex_mutex);
			if (ibdm.ibdm_ibnex_callback != NULL) {
				(*ibdm.ibdm_ibnex_callback)((void *)
				    ioc_list, IBDM_EVENT_IOC_PROP_UPDATE);
			}
			mutex_exit(&ibdm.ibdm_ibnex_mutex);
		}
	}
}

/*
 * This function notifies IBnex of IOCs on this GID.
 * Notification is for GIDs with gl_reprobe_flag set.
 * The flag is set when IOC probe / fabric sweep
 * probes a GID starting from CLASS port info.
 *
 * IBnexus will have information of a reconnected IOC
 * if it had probed it before. If this is a new IOC,
 * IBnexus ignores the notification.
 *
 * This function should be called with no locks held.
6504 */ 6505 static void 6506 ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *gid_info) 6507 { 6508 ibdm_ioc_info_t *ioc_list; 6509 6510 if (gid_info->gl_reprobe_flag == 0 || 6511 gid_info->gl_iou == NULL) 6512 return; 6513 6514 ioc_list = ibdm_update_ioc_gidlist(gid_info, -1); 6515 6516 /* 6517 * Pass on the IOCs with updated GIDs to IBnexus 6518 */ 6519 if (ioc_list) { 6520 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6521 if (ibdm.ibdm_ibnex_callback != NULL) { 6522 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list, 6523 IBDM_EVENT_IOC_PROP_UPDATE); 6524 } 6525 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6526 } 6527 } 6528 6529 6530 static void 6531 ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *arg) 6532 { 6533 if (arg != NULL) 6534 kmem_free(arg, sizeof (ibdm_saa_event_arg_t)); 6535 } 6536 6537 /* 6538 * This function parses the list of HCAs and HCA ports 6539 * to return the port_attr of the next HCA port. A port 6540 * connected to IB fabric (port_state active) is returned, 6541 * if connected_flag is set. 
6542 */ 6543 static void 6544 ibdm_get_next_port(ibdm_hca_list_t **inp_hcap, 6545 ibdm_port_attr_t **inp_portp, int connect_flag) 6546 { 6547 int ii; 6548 ibdm_port_attr_t *port, *next_port = NULL; 6549 ibdm_port_attr_t *inp_port; 6550 ibdm_hca_list_t *hca_list; 6551 int found = 0; 6552 6553 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 6554 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port(%p, %p, %x)", 6555 inp_hcap, inp_portp, connect_flag); 6556 6557 hca_list = *inp_hcap; 6558 inp_port = *inp_portp; 6559 6560 if (hca_list == NULL) 6561 hca_list = ibdm.ibdm_hca_list_head; 6562 6563 for (; hca_list; hca_list = hca_list->hl_next) { 6564 for (ii = 0; ii < hca_list->hl_nports; ii++) { 6565 port = &hca_list->hl_port_attr[ii]; 6566 6567 /* 6568 * inp_port != NULL; 6569 * Skip till we find the matching port 6570 */ 6571 if (inp_port && !found) { 6572 if (inp_port == port) 6573 found = 1; 6574 continue; 6575 } 6576 6577 if (!connect_flag) { 6578 next_port = port; 6579 break; 6580 } 6581 6582 if (port->pa_sa_hdl == NULL) 6583 ibdm_initialize_port(port); 6584 if (port->pa_sa_hdl == NULL) 6585 (void) ibdm_fini_port(port); 6586 else if (next_port == NULL && 6587 port->pa_sa_hdl != NULL && 6588 port->pa_state == IBT_PORT_ACTIVE) { 6589 next_port = port; 6590 break; 6591 } 6592 } 6593 6594 if (next_port) 6595 break; 6596 } 6597 6598 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port : " 6599 "returns hca_list %p port %p", hca_list, next_port); 6600 *inp_hcap = hca_list; 6601 *inp_portp = next_port; 6602 } 6603 6604 static void 6605 ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *nodegid, ibdm_dp_gidinfo_t *addgid) 6606 { 6607 ibdm_gid_t *tmp; 6608 6609 tmp = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP); 6610 tmp->gid_dgid_hi = addgid->gl_dgid_hi; 6611 tmp->gid_dgid_lo = addgid->gl_dgid_lo; 6612 6613 mutex_enter(&nodegid->gl_mutex); 6614 tmp->gid_next = nodegid->gl_gid; 6615 nodegid->gl_gid = tmp; 6616 nodegid->gl_ngids++; 6617 mutex_exit(&nodegid->gl_mutex); 6618 } 6619 6620 static void 6621 
ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *gid_info, 6622 ibdm_hca_list_t *hca) 6623 { 6624 ibdm_hca_list_t *head, *prev = NULL, *temp; 6625 6626 IBTF_DPRINTF_L4(ibdm_string, "\taddto_glhcalist(%p, %p) " 6627 ": gl_hca_list %p", gid_info, hca, gid_info->gl_hca_list); 6628 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 6629 6630 mutex_enter(&gid_info->gl_mutex); 6631 head = gid_info->gl_hca_list; 6632 if (head == NULL) { 6633 head = ibdm_dup_hca_attr(hca); 6634 head->hl_next = NULL; 6635 gid_info->gl_hca_list = head; 6636 mutex_exit(&gid_info->gl_mutex); 6637 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 6638 "gid %p, gl_hca_list %p", gid_info, 6639 gid_info->gl_hca_list); 6640 return; 6641 } 6642 6643 /* Check if already in the list */ 6644 while (head) { 6645 if (head->hl_hca_guid == hca->hl_hca_guid) { 6646 mutex_exit(&gid_info->gl_mutex); 6647 IBTF_DPRINTF_L4(ibdm_string, 6648 "\taddto_glhcalist : gid %p hca %p dup", 6649 gid_info, hca); 6650 return; 6651 } 6652 prev = head; 6653 head = head->hl_next; 6654 } 6655 6656 /* Add this HCA to gl_hca_list */ 6657 temp = ibdm_dup_hca_attr(hca); 6658 temp->hl_next = NULL; 6659 prev->hl_next = temp; 6660 mutex_exit(&gid_info->gl_mutex); 6661 6662 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 6663 "gid %p, gl_hca_list %p", gid_info, gid_info->gl_hca_list); 6664 } 6665 6666 static void 6667 ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *gid_info) 6668 { 6669 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 6670 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 6671 6672 mutex_enter(&gid_info->gl_mutex); 6673 if (gid_info->gl_hca_list) 6674 ibdm_ibnex_free_hca_list(gid_info->gl_hca_list); 6675 gid_info->gl_hca_list = NULL; 6676 mutex_exit(&gid_info->gl_mutex); 6677 } 6678 6679 6680 static void 6681 ibdm_reset_all_dgids(ibmf_saa_handle_t port_sa_hdl) 6682 { 6683 IBTF_DPRINTF_L4(ibdm_string, "\treset_all_dgids(%X)", 6684 port_sa_hdl); 6685 6686 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 6687 ASSERT(!MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 6688 6689 /* 
Check : Not busy in another probe / sweep */ 6690 mutex_enter(&ibdm.ibdm_mutex); 6691 if ((ibdm.ibdm_busy & IBDM_BUSY) == 0) { 6692 ibdm_dp_gidinfo_t *gid_info; 6693 6694 ibdm.ibdm_busy |= IBDM_BUSY; 6695 mutex_exit(&ibdm.ibdm_mutex); 6696 6697 /* 6698 * Check if any GID is using the SA & IBMF handle 6699 * of HCA port going down. Reset ibdm_dp_gidinfo_t 6700 * using another HCA port which can reach the GID. 6701 * This is for DM capable GIDs only, no need to do 6702 * this for others 6703 * 6704 * Delete the GID if no alternate HCA port to reach 6705 * it is found. 6706 */ 6707 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) { 6708 ibdm_dp_gidinfo_t *tmp; 6709 6710 IBTF_DPRINTF_L4(ibdm_string, "\tevent_hdlr " 6711 "checking gidinfo %p", gid_info); 6712 6713 if (gid_info->gl_sa_hdl == port_sa_hdl) { 6714 IBTF_DPRINTF_L3(ibdm_string, 6715 "\tevent_hdlr: down HCA port hdl " 6716 "matches gid %p", gid_info); 6717 6718 /* 6719 * The non-DM GIDs can come back 6720 * with a new subnet prefix, when 6721 * the HCA port commes up again. To 6722 * avoid issues, delete non-DM 6723 * capable GIDs, if the gid was 6724 * discovered using the HCA port 6725 * going down. This is ensured by 6726 * setting gl_disconnected to 1. 
6727 */ 6728 if (gid_info->gl_is_dm_capable == B_FALSE) 6729 gid_info->gl_disconnected = 1; 6730 else 6731 ibdm_reset_gidinfo(gid_info); 6732 6733 if (gid_info->gl_disconnected) { 6734 IBTF_DPRINTF_L3(ibdm_string, 6735 "\tevent_hdlr: deleting" 6736 " gid %p", gid_info); 6737 tmp = gid_info; 6738 gid_info = gid_info->gl_next; 6739 ibdm_delete_gidinfo(tmp); 6740 } else 6741 gid_info = gid_info->gl_next; 6742 } else 6743 gid_info = gid_info->gl_next; 6744 } 6745 6746 mutex_enter(&ibdm.ibdm_mutex); 6747 ibdm.ibdm_busy &= ~IBDM_BUSY; 6748 cv_signal(&ibdm.ibdm_busy_cv); 6749 } 6750 mutex_exit(&ibdm.ibdm_mutex); 6751 } 6752 6753 static void 6754 ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 6755 { 6756 ibdm_hca_list_t *hca_list = NULL; 6757 ibdm_port_attr_t *port = NULL; 6758 int gid_reinited = 0; 6759 sa_node_record_t *nr, *tmp; 6760 sa_portinfo_record_t *pi; 6761 size_t nr_len = 0, pi_len = 0; 6762 size_t path_len; 6763 ib_gid_t sgid, dgid; 6764 int ret, ii, nrecords; 6765 sa_path_record_t *path; 6766 uint8_t npaths = 1; 6767 ibdm_pkey_tbl_t *pkey_tbl; 6768 6769 IBTF_DPRINTF_L4(ibdm_string, "\treset_gidinfo(%p)", gidinfo); 6770 6771 /* 6772 * Get list of all the ports reachable from the local known HCA 6773 * ports which are active 6774 */ 6775 mutex_enter(&ibdm.ibdm_hl_mutex); 6776 for (ibdm_get_next_port(&hca_list, &port, 1); port; 6777 ibdm_get_next_port(&hca_list, &port, 1)) { 6778 6779 6780 /* 6781 * Get the path and re-populate the gidinfo. 
6782 * Getting the path is the same probe_ioc 6783 * Init the gid info as in ibdm_create_gidinfo() 6784 */ 6785 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, 6786 gidinfo->gl_nodeguid); 6787 if (nr == NULL) { 6788 IBTF_DPRINTF_L4(ibdm_string, 6789 "\treset_gidinfo : no records"); 6790 continue; 6791 } 6792 6793 nrecords = (nr_len / sizeof (sa_node_record_t)); 6794 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) { 6795 if (tmp->NodeInfo.PortGUID == gidinfo->gl_portguid) 6796 break; 6797 } 6798 6799 if (ii == nrecords) { 6800 IBTF_DPRINTF_L4(ibdm_string, 6801 "\treset_gidinfo : no record for portguid"); 6802 kmem_free(nr, nr_len); 6803 continue; 6804 } 6805 6806 pi = ibdm_get_portinfo(port->pa_sa_hdl, &pi_len, tmp->LID); 6807 if (pi == NULL) { 6808 IBTF_DPRINTF_L4(ibdm_string, 6809 "\treset_gidinfo : no portinfo"); 6810 kmem_free(nr, nr_len); 6811 continue; 6812 } 6813 6814 sgid.gid_prefix = port->pa_sn_prefix; 6815 sgid.gid_guid = port->pa_port_guid; 6816 dgid.gid_prefix = pi->PortInfo.GidPrefix; 6817 dgid.gid_guid = tmp->NodeInfo.PortGUID; 6818 6819 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, sgid, dgid, 6820 IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, &path_len, &path); 6821 6822 if ((ret != IBMF_SUCCESS) || path == NULL) { 6823 IBTF_DPRINTF_L4(ibdm_string, 6824 "\treset_gidinfo : no paths"); 6825 kmem_free(pi, pi_len); 6826 kmem_free(nr, nr_len); 6827 continue; 6828 } 6829 6830 gidinfo->gl_dgid_hi = path->DGID.gid_prefix; 6831 gidinfo->gl_dgid_lo = path->DGID.gid_guid; 6832 gidinfo->gl_sgid_hi = path->SGID.gid_prefix; 6833 gidinfo->gl_sgid_lo = path->SGID.gid_guid; 6834 gidinfo->gl_p_key = path->P_Key; 6835 gidinfo->gl_sa_hdl = port->pa_sa_hdl; 6836 gidinfo->gl_ibmf_hdl = port->pa_ibmf_hdl; 6837 gidinfo->gl_slid = path->SLID; 6838 gidinfo->gl_dlid = path->DLID; 6839 /* Reset redirect info, next MAD will set if redirected */ 6840 gidinfo->gl_redirected = 0; 6841 gidinfo->gl_devid = (*tmp).NodeInfo.DeviceID; 6842 gidinfo->gl_SL = path->SL; 6843 6844 
gidinfo->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 6845 for (ii = 0; ii < port->pa_npkeys; ii++) { 6846 if (port->pa_pkey_tbl == NULL) 6847 break; 6848 6849 pkey_tbl = &port->pa_pkey_tbl[ii]; 6850 if ((gidinfo->gl_p_key == pkey_tbl->pt_pkey) && 6851 (pkey_tbl->pt_qp_hdl != NULL)) { 6852 gidinfo->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 6853 break; 6854 } 6855 } 6856 6857 if (gidinfo->gl_qp_hdl == NULL) 6858 IBTF_DPRINTF_L2(ibdm_string, 6859 "\treset_gid_info: No matching Pkey"); 6860 else 6861 gid_reinited = 1; 6862 6863 kmem_free(path, path_len); 6864 kmem_free(pi, pi_len); 6865 kmem_free(nr, nr_len); 6866 break; 6867 } 6868 mutex_exit(&ibdm.ibdm_hl_mutex); 6869 6870 if (!gid_reinited) 6871 gidinfo->gl_disconnected = 1; 6872 } 6873 6874 static void 6875 ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 6876 { 6877 ibdm_ioc_info_t *ioc_list; 6878 int in_gidlist = 0; 6879 6880 /* 6881 * Check if gidinfo has been inserted into the 6882 * ibdm_dp_gidlist_head list. gl_next or gl_prev 6883 * != NULL, if gidinfo is the list. 6884 */ 6885 if (gidinfo->gl_prev != NULL || 6886 gidinfo->gl_next != NULL || 6887 ibdm.ibdm_dp_gidlist_head == gidinfo) 6888 in_gidlist = 1; 6889 6890 ioc_list = ibdm_update_ioc_gidlist(gidinfo, 0); 6891 6892 /* 6893 * Remove GID from the global GID list 6894 * Handle the case where all port GIDs for an 6895 * IOU have been hot-removed. 
6896 */ 6897 mutex_enter(&ibdm.ibdm_mutex); 6898 if (gidinfo->gl_iou != NULL && gidinfo->gl_ngids == 0) { 6899 mutex_enter(&gidinfo->gl_mutex); 6900 (void) ibdm_free_iou_info(gidinfo, &gidinfo->gl_iou); 6901 mutex_exit(&gidinfo->gl_mutex); 6902 } 6903 6904 /* Delete gl_hca_list */ 6905 mutex_exit(&ibdm.ibdm_mutex); 6906 ibdm_delete_glhca_list(gidinfo); 6907 mutex_enter(&ibdm.ibdm_mutex); 6908 6909 if (in_gidlist) { 6910 if (gidinfo->gl_prev != NULL) 6911 gidinfo->gl_prev->gl_next = gidinfo->gl_next; 6912 if (gidinfo->gl_next != NULL) 6913 gidinfo->gl_next->gl_prev = gidinfo->gl_prev; 6914 6915 if (gidinfo == ibdm.ibdm_dp_gidlist_head) 6916 ibdm.ibdm_dp_gidlist_head = gidinfo->gl_next; 6917 if (gidinfo == ibdm.ibdm_dp_gidlist_tail) 6918 ibdm.ibdm_dp_gidlist_tail = gidinfo->gl_prev; 6919 ibdm.ibdm_ngids--; 6920 } 6921 mutex_exit(&ibdm.ibdm_mutex); 6922 6923 mutex_destroy(&gidinfo->gl_mutex); 6924 cv_destroy(&gidinfo->gl_probe_cv); 6925 kmem_free(gidinfo, sizeof (ibdm_dp_gidinfo_t)); 6926 6927 /* 6928 * Pass on the IOCs with updated GIDs to IBnexus 6929 */ 6930 if (ioc_list) { 6931 IBTF_DPRINTF_L4("ibdm", "\tdelete_gidinfo " 6932 "IOC_PROP_UPDATE for %p\n", ioc_list); 6933 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6934 if (ibdm.ibdm_ibnex_callback != NULL) { 6935 (*ibdm.ibdm_ibnex_callback)((void *) 6936 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6937 } 6938 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6939 } 6940 } 6941 6942 6943 static void 6944 ibdm_fill_srv_attr_mod(ib_mad_hdr_t *hdr, ibdm_timeout_cb_args_t *cb_args) 6945 { 6946 uint32_t attr_mod; 6947 6948 attr_mod = (cb_args->cb_ioc_num + 1) << 16; 6949 attr_mod |= cb_args->cb_srvents_start; 6950 attr_mod |= (cb_args->cb_srvents_end) << 8; 6951 hdr->AttributeModifier = h2b32(attr_mod); 6952 } 6953 6954 static void 6955 ibdm_bump_transactionID(ibdm_dp_gidinfo_t *gid_info) 6956 { 6957 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 6958 gid_info->gl_transactionID++; 6959 if (gid_info->gl_transactionID == gid_info->gl_max_transactionID) { 
6960 IBTF_DPRINTF_L4(ibdm_string, 6961 "\tbump_transactionID(%p), wrapup", gid_info); 6962 gid_info->gl_transactionID = gid_info->gl_min_transactionID; 6963 } 6964 } 6965 6966 /* 6967 * gl_prev_iou is set for *non-reprobe* sweeep requests, which 6968 * detected that ChangeID in IOU info has changed. The service 6969 * entry also may have changed. Check if service entry in IOC 6970 * has changed wrt the prev iou, if so notify to IB Nexus. 6971 */ 6972 static ibdm_ioc_info_t * 6973 ibdm_handle_prev_iou() 6974 { 6975 ibdm_dp_gidinfo_t *gid_info; 6976 ibdm_ioc_info_t *ioc_list_head = NULL, *ioc_list; 6977 ibdm_ioc_info_t *prev_ioc, *ioc; 6978 int ii, jj, niocs, prev_niocs; 6979 6980 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 6981 6982 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou enter"); 6983 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 6984 gid_info = gid_info->gl_next) { 6985 if (gid_info->gl_prev_iou == NULL) 6986 continue; 6987 6988 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou gid %p", 6989 gid_info); 6990 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 6991 prev_niocs = 6992 gid_info->gl_prev_iou->iou_info.iou_num_ctrl_slots; 6993 for (ii = 0; ii < niocs; ii++) { 6994 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 6995 6996 /* Find matching IOC */ 6997 for (jj = 0; jj < prev_niocs; jj++) { 6998 prev_ioc = (ibdm_ioc_info_t *) 6999 &gid_info->gl_prev_iou->iou_ioc_info[jj]; 7000 if (prev_ioc->ioc_profile.ioc_guid == 7001 ioc->ioc_profile.ioc_guid) 7002 break; 7003 } 7004 if (jj == prev_niocs) 7005 prev_ioc = NULL; 7006 if (ioc == NULL || prev_ioc == NULL) 7007 continue; 7008 if ((ioc->ioc_profile.ioc_service_entries != 7009 prev_ioc->ioc_profile.ioc_service_entries) || 7010 ibdm_serv_cmp(&ioc->ioc_serv[0], 7011 &prev_ioc->ioc_serv[0], 7012 ioc->ioc_profile.ioc_service_entries) != 0) { 7013 IBTF_DPRINTF_L4(ibdm_string, 7014 "/thandle_prev_iou modified IOC: " 7015 "current ioc %p, old ioc %p", 7016 ioc, prev_ioc); 7017 mutex_enter(&gid_info->gl_mutex); 7018 
ioc_list = ibdm_dup_ioc_info(ioc, gid_info); 7019 mutex_exit(&gid_info->gl_mutex); 7020 ioc_list->ioc_info_updated.ib_prop_updated 7021 = 0; 7022 ioc_list->ioc_info_updated.ib_srv_prop_updated 7023 = 1; 7024 7025 if (ioc_list_head == NULL) 7026 ioc_list_head = ioc_list; 7027 else { 7028 ioc_list_head->ioc_next = ioc_list; 7029 ioc_list_head = ioc_list; 7030 } 7031 } 7032 } 7033 7034 mutex_enter(&gid_info->gl_mutex); 7035 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_prev_iou); 7036 mutex_exit(&gid_info->gl_mutex); 7037 } 7038 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iouret %p", 7039 ioc_list_head); 7040 return (ioc_list_head); 7041 } 7042 7043 /* 7044 * Compares two service entries lists, returns 0 if same, returns 1 7045 * if no match. 7046 */ 7047 static int 7048 ibdm_serv_cmp(ibdm_srvents_info_t *serv1, ibdm_srvents_info_t *serv2, 7049 int nserv) 7050 { 7051 int ii; 7052 7053 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: enter"); 7054 for (ii = 0; ii < nserv; ii++, serv1++, serv2++) { 7055 if (serv1->se_attr.srv_id != serv2->se_attr.srv_id || 7056 bcmp(serv1->se_attr.srv_name, 7057 serv2->se_attr.srv_name, 7058 IB_DM_MAX_SVC_NAME_LEN) != 0) { 7059 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 1"); 7060 return (1); 7061 } 7062 } 7063 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 0"); 7064 return (0); 7065 } 7066 7067 /* For debugging purpose only */ 7068 #ifdef DEBUG 7069 void 7070 ibdm_dump_mad_hdr(ib_mad_hdr_t *mad_hdr) 7071 { 7072 IBTF_DPRINTF_L4("ibdm", "\t\t MAD Header info"); 7073 IBTF_DPRINTF_L4("ibdm", "\t\t ---------------"); 7074 7075 IBTF_DPRINTF_L4("ibdm", "\tBase version : 0x%x" 7076 "\tMgmt Class : 0x%x", mad_hdr->BaseVersion, mad_hdr->MgmtClass); 7077 IBTF_DPRINTF_L4("ibdm", "\tClass version : 0x%x" 7078 "\tR Method : 0x%x", 7079 mad_hdr->ClassVersion, mad_hdr->R_Method); 7080 IBTF_DPRINTF_L4("ibdm", "\tMAD Status : 0x%x" 7081 "\tTransaction ID : 0x%llx", 7082 b2h16(mad_hdr->Status), b2h64(mad_hdr->TransactionID)); 7083 IBTF_DPRINTF_L4("ibdm", 
"\t Attribute ID : 0x%x" 7084 "\tAttribute Modified : 0x%lx", 7085 b2h16(mad_hdr->AttributeID), b2h32(mad_hdr->AttributeModifier)); 7086 } 7087 7088 7089 void 7090 ibdm_dump_ibmf_msg(ibmf_msg_t *ibmf_msg, int flag) 7091 { 7092 ib_mad_hdr_t *mad_hdr; 7093 7094 IBTF_DPRINTF_L4("ibdm", "\t\t(IBMF_PKT): Local address info"); 7095 IBTF_DPRINTF_L4("ibdm", "\t\t ------------------"); 7096 7097 IBTF_DPRINTF_L4("ibdm", "\tLocal Lid : 0x%x\tRemote Lid : 0x%x" 7098 " Remote Qp : 0x%x", ibmf_msg->im_local_addr.ia_local_lid, 7099 ibmf_msg->im_local_addr.ia_remote_lid, 7100 ibmf_msg->im_local_addr.ia_remote_qno); 7101 IBTF_DPRINTF_L4("ibdm", "\tP_key : 0x%x\tQ_key : 0x%x" 7102 " SL : 0x%x", ibmf_msg->im_local_addr.ia_p_key, 7103 ibmf_msg->im_local_addr.ia_q_key, 7104 ibmf_msg->im_local_addr.ia_service_level); 7105 7106 if (flag) 7107 mad_hdr = (ib_mad_hdr_t *)IBDM_OUT_IBMFMSG_MADHDR(ibmf_msg); 7108 else 7109 mad_hdr = IBDM_IN_IBMFMSG_MADHDR(ibmf_msg); 7110 7111 ibdm_dump_mad_hdr(mad_hdr); 7112 } 7113 7114 7115 void 7116 ibdm_dump_path_info(sa_path_record_t *path) 7117 { 7118 IBTF_DPRINTF_L4("ibdm", "\t\t Path information"); 7119 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------"); 7120 7121 IBTF_DPRINTF_L4("ibdm", "\t DGID hi : %llx\tDGID lo : %llx", 7122 path->DGID.gid_prefix, path->DGID.gid_guid); 7123 IBTF_DPRINTF_L4("ibdm", "\t SGID hi : %llx\tSGID lo : %llx", 7124 path->SGID.gid_prefix, path->SGID.gid_guid); 7125 IBTF_DPRINTF_L4("ibdm", "\t SLID : %x\t\tDlID : %x", 7126 path->SLID, path->DLID); 7127 IBTF_DPRINTF_L4("ibdm", "\t P Key : %x\t\tSL : %x", 7128 path->P_Key, path->SL); 7129 } 7130 7131 7132 void 7133 ibdm_dump_classportinfo(ib_mad_classportinfo_t *classportinfo) 7134 { 7135 IBTF_DPRINTF_L4("ibdm", "\t\t CLASSPORT INFO"); 7136 IBTF_DPRINTF_L4("ibdm", "\t\t --------------"); 7137 7138 IBTF_DPRINTF_L4("ibdm", "\t Response Time Value : 0x%x", 7139 ((b2h32(classportinfo->RespTimeValue)) & 0x1F)); 7140 7141 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID hi : 0x%llx", 7142 
b2h64(classportinfo->RedirectGID_hi)); 7143 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID lo : 0x%llx", 7144 b2h64(classportinfo->RedirectGID_lo)); 7145 IBTF_DPRINTF_L4("ibdm", "\t Redirected TC : 0x%x", 7146 classportinfo->RedirectTC); 7147 IBTF_DPRINTF_L4("ibdm", "\t Redirected SL : 0x%x", 7148 classportinfo->RedirectSL); 7149 IBTF_DPRINTF_L4("ibdm", "\t Redirected FL : 0x%x", 7150 classportinfo->RedirectFL); 7151 IBTF_DPRINTF_L4("ibdm", "\t Redirected LID : 0x%x", 7152 b2h16(classportinfo->RedirectLID)); 7153 IBTF_DPRINTF_L4("ibdm", "\t Redirected P KEY : 0x%x", 7154 b2h16(classportinfo->RedirectP_Key)); 7155 IBTF_DPRINTF_L4("ibdm", "\t Redirected QP : 0x%x", 7156 classportinfo->RedirectQP); 7157 IBTF_DPRINTF_L4("ibdm", "\t Redirected Q KEY : 0x%x", 7158 b2h32(classportinfo->RedirectQ_Key)); 7159 IBTF_DPRINTF_L4("ibdm", "\t Trap GID hi : 0x%llx", 7160 b2h64(classportinfo->TrapGID_hi)); 7161 IBTF_DPRINTF_L4("ibdm", "\t Trap GID lo : 0x%llx", 7162 b2h64(classportinfo->TrapGID_lo)); 7163 IBTF_DPRINTF_L4("ibdm", "\t Trap TC : 0x%x", 7164 classportinfo->TrapTC); 7165 IBTF_DPRINTF_L4("ibdm", "\t Trap SL : 0x%x", 7166 classportinfo->TrapSL); 7167 IBTF_DPRINTF_L4("ibdm", "\t Trap FL : 0x%x", 7168 classportinfo->TrapFL); 7169 IBTF_DPRINTF_L4("ibdm", "\t Trap LID : 0x%x", 7170 b2h16(classportinfo->TrapLID)); 7171 IBTF_DPRINTF_L4("ibdm", "\t Trap P_Key : 0x%x", 7172 b2h16(classportinfo->TrapP_Key)); 7173 IBTF_DPRINTF_L4("ibdm", "\t Trap HL : 0x%x", 7174 classportinfo->TrapHL); 7175 IBTF_DPRINTF_L4("ibdm", "\t Trap QP : 0x%x", 7176 classportinfo->TrapQP); 7177 IBTF_DPRINTF_L4("ibdm", "\t Trap Q_Key : 0x%x", 7178 b2h32(classportinfo->TrapQ_Key)); 7179 } 7180 7181 7182 void 7183 ibdm_dump_iounitinfo(ib_dm_io_unitinfo_t *iou_info) 7184 { 7185 IBTF_DPRINTF_L4("ibdm", "\t\t I/O UnitInfo"); 7186 IBTF_DPRINTF_L4("ibdm", "\t\t ------------"); 7187 7188 IBTF_DPRINTF_L4("ibdm", "\tChange ID : 0x%x", 7189 b2h16(iou_info->iou_changeid)); 7190 IBTF_DPRINTF_L4("ibdm", "\t#of ctrl slots : 
%d", 7191 iou_info->iou_num_ctrl_slots); 7192 IBTF_DPRINTF_L4("ibdm", "\tIOU flag : 0x%x", 7193 iou_info->iou_flag); 7194 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 0 : 0x%x", 7195 iou_info->iou_ctrl_list[0]); 7196 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 1 : 0x%x", 7197 iou_info->iou_ctrl_list[1]); 7198 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 2 : 0x%x", 7199 iou_info->iou_ctrl_list[2]); 7200 } 7201 7202 7203 void 7204 ibdm_dump_ioc_profile(ib_dm_ioc_ctrl_profile_t *ioc) 7205 { 7206 IBTF_DPRINTF_L4("ibdm", "\t\t IOC Controller Profile"); 7207 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------------"); 7208 7209 IBTF_DPRINTF_L4("ibdm", "\tIOC Guid : %llx", ioc->ioc_guid); 7210 IBTF_DPRINTF_L4("ibdm", "\tVendorID : 0x%x", ioc->ioc_vendorid); 7211 IBTF_DPRINTF_L4("ibdm", "\tDevice Id : 0x%x", ioc->ioc_deviceid); 7212 IBTF_DPRINTF_L4("ibdm", "\tDevice Ver : 0x%x", ioc->ioc_device_ver); 7213 IBTF_DPRINTF_L4("ibdm", "\tSubsys ID : 0x%x", ioc->ioc_subsys_id); 7214 IBTF_DPRINTF_L4("ibdm", "\tIO class : 0x%x", ioc->ioc_io_class); 7215 IBTF_DPRINTF_L4("ibdm", "\tIO subclass : 0x%x", ioc->ioc_io_subclass); 7216 IBTF_DPRINTF_L4("ibdm", "\tProtocol : 0x%x", ioc->ioc_protocol); 7217 IBTF_DPRINTF_L4("ibdm", "\tProtocolV : 0x%x", ioc->ioc_protocol_ver); 7218 IBTF_DPRINTF_L4("ibdm", "\tmsg qdepth : %d", ioc->ioc_send_msg_qdepth); 7219 IBTF_DPRINTF_L4("ibdm", "\trdma qdepth : %d", 7220 ioc->ioc_rdma_read_qdepth); 7221 IBTF_DPRINTF_L4("ibdm", "\tsndmsg sz : %d", ioc->ioc_send_msg_sz); 7222 IBTF_DPRINTF_L4("ibdm", "\trdma xfersz : %d", ioc->ioc_rdma_xfer_sz); 7223 IBTF_DPRINTF_L4("ibdm", "\topcal mask : 0x%x", 7224 ioc->ioc_ctrl_opcap_mask); 7225 IBTF_DPRINTF_L4("ibdm", "\tsrventries : %x", ioc->ioc_service_entries); 7226 } 7227 7228 7229 void 7230 ibdm_dump_service_entries(ib_dm_srv_t *srv_ents) 7231 { 7232 IBTF_DPRINTF_L4("ibdm", 7233 "\thandle_srventry_mad: service id : %llx", srv_ents->srv_id); 7234 7235 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad: " 7236 "Service Name : 
%s", srv_ents->srv_name); 7237 } 7238 7239 int ibdm_allow_sweep_fabric_timestamp = 1; 7240 7241 void 7242 ibdm_dump_sweep_fabric_timestamp(int flag) 7243 { 7244 static hrtime_t x; 7245 if (flag) { 7246 if (ibdm_allow_sweep_fabric_timestamp) { 7247 IBTF_DPRINTF_L4("ibdm", "\tTime taken to complete " 7248 "sweep %lld ms", ((gethrtime() - x)/ 1000000)); 7249 } 7250 x = 0; 7251 } else 7252 x = gethrtime(); 7253 } 7254 #endif 7255