/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * ibdm.c
 *
 * This file contains the InfiniBand Device Manager (IBDM) support functions.
 * The IB nexus driver is the only client of the IBDM module.
 *
 * IBDM registers with IBTF for HCA arrival/removal notification.
 * IBDM registers with SA access to send DM MADs to discover the IOCs behind
 * the IOUs.
 *
 * The IB nexus driver registers with IBDM to find information about the
 * HCAs and IOCs (behind the IOUs) present on the IB fabric.
 */

#include <sys/systm.h>
#include <sys/taskq.h>
#include <sys/ib/mgt/ibdm/ibdm_impl.h>
#include <sys/ib/mgt/ibmf/ibmf_impl.h>
#include <sys/modctl.h>

/* Function Prototype declarations */
static int	ibdm_free_iou_info(ibdm_dp_gidinfo_t *, ibdm_iou_info_t **);
static int	ibdm_fini(void);
static int	ibdm_init(void);
static int	ibdm_get_reachable_ports(ibdm_port_attr_t *,
		    ibdm_hca_list_t *);
static ibdm_dp_gidinfo_t	*ibdm_check_dgid(ib_guid_t, ib_sn_prefix_t);
static ibdm_dp_gidinfo_t	*ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *);
static boolean_t	ibdm_is_cisco(ib_guid_t);
static boolean_t	ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *);
static void	ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *);
static int	ibdm_set_classportinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_send_classportinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *);
static int	ibdm_get_node_port_guids(ibmf_saa_handle_t, ib_lid_t,
		    ib_guid_t *, ib_guid_t *);
static int	ibdm_retry_command(ibdm_timeout_cb_args_t *);
static int	ibdm_get_diagcode(ibdm_dp_gidinfo_t *, int);
static int	ibdm_verify_mad_status(ib_mad_hdr_t *);
static int	ibdm_handle_redirection(ibmf_msg_t *,
		    ibdm_dp_gidinfo_t *, int *);
static void	ibdm_wait_probe_completion(void);
static void	ibdm_sweep_fabric(int);
static void	ibdm_probe_gid_thread(void *);
static void	ibdm_wakeup_probe_gid_cv(void);
static void	ibdm_port_attr_ibmf_init(ibdm_port_attr_t *, ib_pkey_t, int);
static int	ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *, int);
static void	ibdm_update_port_attr(ibdm_port_attr_t *);
static void	ibdm_handle_hca_attach(ib_guid_t);
static void	ibdm_handle_srventry_mad(ibmf_msg_t *,
		    ibdm_dp_gidinfo_t *, int *);
static void	ibdm_ibmf_recv_cb(ibmf_handle_t, ibmf_msg_t *, void *);
static void	ibdm_recv_incoming_mad(void *);
static void	ibdm_process_incoming_mad(ibmf_handle_t, ibmf_msg_t *, void *);
static void
ibdm_ibmf_send_cb(ibmf_handle_t, ibmf_msg_t *, void *); 82 static void ibdm_pkt_timeout_hdlr(void *arg); 83 static void ibdm_initialize_port(ibdm_port_attr_t *); 84 static void ibdm_handle_diagcode(ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 85 static void ibdm_probe_gid(ibdm_dp_gidinfo_t *); 86 static void ibdm_alloc_send_buffers(ibmf_msg_t *); 87 static void ibdm_free_send_buffers(ibmf_msg_t *); 88 static void ibdm_handle_hca_detach(ib_guid_t); 89 static int ibdm_fini_port(ibdm_port_attr_t *); 90 static int ibdm_uninit_hca(ibdm_hca_list_t *); 91 static void ibdm_handle_setclassportinfo(ibmf_handle_t, ibmf_msg_t *, 92 ibdm_dp_gidinfo_t *, int *); 93 static void ibdm_handle_iounitinfo(ibmf_handle_t, 94 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 95 static void ibdm_handle_ioc_profile(ibmf_handle_t, 96 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 97 static void ibdm_event_hdlr(void *, ibt_hca_hdl_t, 98 ibt_async_code_t, ibt_async_event_t *); 99 static void ibdm_handle_classportinfo(ibmf_handle_t, 100 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 101 static void ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *, 102 ibdm_dp_gidinfo_t *); 103 104 static ibdm_hca_list_t *ibdm_dup_hca_attr(ibdm_hca_list_t *); 105 static ibdm_ioc_info_t *ibdm_dup_ioc_info(ibdm_ioc_info_t *, 106 ibdm_dp_gidinfo_t *gid_list); 107 static void ibdm_probe_ioc(ib_guid_t, ib_guid_t, int); 108 static ibdm_ioc_info_t *ibdm_is_ioc_present(ib_guid_t, 109 ibdm_dp_gidinfo_t *, int *); 110 static ibdm_port_attr_t *ibdm_get_port_attr(ibt_async_event_t *, 111 ibdm_hca_list_t **); 112 static sa_node_record_t *ibdm_get_node_records(ibmf_saa_handle_t, 113 size_t *, ib_guid_t); 114 static int ibdm_get_node_record_by_port(ibmf_saa_handle_t, 115 ib_guid_t, sa_node_record_t **, size_t *); 116 static sa_portinfo_record_t *ibdm_get_portinfo(ibmf_saa_handle_t, size_t *, 117 ib_lid_t); 118 static ibdm_dp_gidinfo_t *ibdm_create_gid_info(ibdm_port_attr_t *, 119 ib_gid_t, ib_gid_t); 120 static ibdm_dp_gidinfo_t *ibdm_find_gid(ib_guid_t, ib_guid_t); 121 static int ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *, uint8_t); 122 static ibdm_ioc_info_t *ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *, int); 123 static void ibdm_saa_event_cb(ibmf_saa_handle_t, ibmf_saa_subnet_event_t, 124 ibmf_saa_event_details_t *, void *); 125 static void ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *, 126 ibdm_dp_gidinfo_t *); 127 static ibdm_dp_gidinfo_t *ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *); 128 static void ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *, 129 ibdm_dp_gidinfo_t *); 130 static void ibdm_addto_gidlist(ibdm_gid_t **, ibdm_gid_t *); 131 static void ibdm_free_gid_list(ibdm_gid_t *); 132 static void ibdm_rescan_gidlist(ib_guid_t *ioc_guid); 133 static void ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *); 134 static void ibdm_saa_event_taskq(void *); 135 static void ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *); 136 static void ibdm_get_next_port(ibdm_hca_list_t **, 137 ibdm_port_attr_t **, int); 138 static void ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *, 139 ibdm_dp_gidinfo_t *); 140 static void ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *, 141 ibdm_hca_list_t *); 142 static void ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *); 143 static void ibdm_saa_handle_new_gid(void *); 144 static void ibdm_reset_all_dgids(ibmf_saa_handle_t); 145 static void ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *); 146 static void ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *); 147 static void ibdm_fill_srv_attr_mod(ib_mad_hdr_t *, ibdm_timeout_cb_args_t *); 148 static void ibdm_bump_transactionID(ibdm_dp_gidinfo_t 
*); 149 static ibdm_ioc_info_t *ibdm_handle_prev_iou(); 150 static int ibdm_serv_cmp(ibdm_srvents_info_t *, ibdm_srvents_info_t *, 151 int); 152 static ibdm_ioc_info_t *ibdm_get_ioc_info_with_gid(ib_guid_t, 153 ibdm_dp_gidinfo_t **); 154 155 int ibdm_dft_timeout = IBDM_DFT_TIMEOUT; 156 int ibdm_dft_retry_cnt = IBDM_DFT_NRETRIES; 157 #ifdef DEBUG 158 int ibdm_ignore_saa_event = 0; 159 #endif 160 161 /* Modload support */ 162 static struct modlmisc ibdm_modlmisc = { 163 &mod_miscops, 164 "InfiniBand Device Manager" 165 }; 166 167 struct modlinkage ibdm_modlinkage = { 168 MODREV_1, 169 (void *)&ibdm_modlmisc, 170 NULL 171 }; 172 173 static ibt_clnt_modinfo_t ibdm_ibt_modinfo = { 174 IBTI_V2, 175 IBT_DM, 176 ibdm_event_hdlr, 177 NULL, 178 "ibdm" 179 }; 180 181 /* Global variables */ 182 ibdm_t ibdm; 183 int ibdm_taskq_enable = IBDM_ENABLE_TASKQ_HANDLING; 184 char *ibdm_string = "ibdm"; 185 186 _NOTE(SCHEME_PROTECTS_DATA("Serialized access by cv", 187 ibdm.ibdm_dp_gidlist_head)) 188 189 /* 190 * _init 191 * Loadable module init, called before any other module. 192 * Initialize mutex 193 * Register with IBTF 194 */ 195 int 196 _init(void) 197 { 198 int err; 199 200 IBTF_DPRINTF_L4("ibdm", "\t_init: addr of ibdm %p", &ibdm); 201 202 if ((err = ibdm_init()) != IBDM_SUCCESS) { 203 IBTF_DPRINTF_L2("ibdm", "_init: ibdm_init failed 0x%x", err); 204 (void) ibdm_fini(); 205 return (DDI_FAILURE); 206 } 207 208 if ((err = mod_install(&ibdm_modlinkage)) != 0) { 209 IBTF_DPRINTF_L2("ibdm", "_init: mod_install failed 0x%x", err); 210 (void) ibdm_fini(); 211 } 212 return (err); 213 } 214 215 216 int 217 _fini(void) 218 { 219 int err; 220 221 if ((err = ibdm_fini()) != IBDM_SUCCESS) { 222 IBTF_DPRINTF_L2("ibdm", "_fini: ibdm_fini failed 0x%x", err); 223 (void) ibdm_init(); 224 return (EBUSY); 225 } 226 227 if ((err = mod_remove(&ibdm_modlinkage)) != 0) { 228 IBTF_DPRINTF_L2("ibdm", "_fini: mod_remove failed 0x%x", err); 229 (void) ibdm_init(); 230 } 231 return (err); 232 } 233 234 235 int 236 _info(struct modinfo *modinfop) 237 { 238 return (mod_info(&ibdm_modlinkage, modinfop)); 239 } 240 241 242 /* 243 * ibdm_init(): 244 * Register with IBTF 245 * Allocate memory for the HCAs 246 * Allocate minor-nodes for the HCAs 247 */ 248 static int 249 ibdm_init(void) 250 { 251 int i, hca_count; 252 ib_guid_t *hca_guids; 253 ibt_status_t status; 254 255 IBTF_DPRINTF_L4("ibdm", "\tibdm_init:"); 256 if (!(ibdm.ibdm_state & IBDM_LOCKS_ALLOCED)) { 257 mutex_init(&ibdm.ibdm_mutex, NULL, MUTEX_DEFAULT, NULL); 258 mutex_init(&ibdm.ibdm_hl_mutex, NULL, MUTEX_DEFAULT, NULL); 259 mutex_init(&ibdm.ibdm_ibnex_mutex, NULL, MUTEX_DEFAULT, NULL); 260 cv_init(&ibdm.ibdm_port_settle_cv, NULL, CV_DRIVER, NULL); 261 mutex_enter(&ibdm.ibdm_mutex); 262 ibdm.ibdm_state |= IBDM_LOCKS_ALLOCED; 263 } 264 265 if (!(ibdm.ibdm_state & IBDM_IBT_ATTACHED)) { 266 if ((status = ibt_attach(&ibdm_ibt_modinfo, NULL, NULL, 267 (void *)&ibdm.ibdm_ibt_clnt_hdl)) != IBT_SUCCESS) { 268 IBTF_DPRINTF_L2("ibdm", "ibdm_init: ibt_attach " 269 "failed %x", status); 270 mutex_exit(&ibdm.ibdm_mutex); 271 return (IBDM_FAILURE); 272 } 273 274 ibdm.ibdm_state |= IBDM_IBT_ATTACHED; 275 mutex_exit(&ibdm.ibdm_mutex); 276 } 277 278 279 if (!(ibdm.ibdm_state & IBDM_HCA_ATTACHED)) { 280 hca_count = ibt_get_hca_list(&hca_guids); 281 IBTF_DPRINTF_L4("ibdm", "ibdm_init: num_hcas = %d", hca_count); 282 for (i = 0; i < hca_count; i++) 283 (void) ibdm_handle_hca_attach(hca_guids[i]); 284 if (hca_count) 285 ibt_free_hca_list(hca_guids, hca_count); 286 287 
mutex_enter(&ibdm.ibdm_mutex); 288 ibdm.ibdm_state |= IBDM_HCA_ATTACHED; 289 mutex_exit(&ibdm.ibdm_mutex); 290 } 291 292 if (!(ibdm.ibdm_state & IBDM_CVS_ALLOCED)) { 293 cv_init(&ibdm.ibdm_probe_cv, NULL, CV_DRIVER, NULL); 294 cv_init(&ibdm.ibdm_busy_cv, NULL, CV_DRIVER, NULL); 295 mutex_enter(&ibdm.ibdm_mutex); 296 ibdm.ibdm_state |= IBDM_CVS_ALLOCED; 297 mutex_exit(&ibdm.ibdm_mutex); 298 } 299 return (IBDM_SUCCESS); 300 } 301 302 303 static int 304 ibdm_free_iou_info(ibdm_dp_gidinfo_t *gid_info, ibdm_iou_info_t **ioup) 305 { 306 int ii, k, niocs; 307 size_t size; 308 ibdm_gid_t *delete, *head; 309 timeout_id_t timeout_id; 310 ibdm_ioc_info_t *ioc; 311 ibdm_iou_info_t *gl_iou = *ioup; 312 313 ASSERT(mutex_owned(&gid_info->gl_mutex)); 314 if (gl_iou == NULL) { 315 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: No IOU"); 316 return (0); 317 } 318 319 niocs = gl_iou->iou_info.iou_num_ctrl_slots; 320 IBTF_DPRINTF_L4("ibdm", "\tfree_iou_info: gid_info = %p, niocs %d", 321 gid_info, niocs); 322 323 for (ii = 0; ii < niocs; ii++) { 324 ioc = (ibdm_ioc_info_t *)&gl_iou->iou_ioc_info[ii]; 325 326 /* handle the case where an ioc_timeout_id is scheduled */ 327 if (ioc->ioc_timeout_id) { 328 timeout_id = ioc->ioc_timeout_id; 329 ioc->ioc_timeout_id = 0; 330 mutex_exit(&gid_info->gl_mutex); 331 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 332 "ioc_timeout_id = 0x%x", timeout_id); 333 if (untimeout(timeout_id) == -1) { 334 IBTF_DPRINTF_L2("ibdm", "free_iou_info: " 335 "untimeout ioc_timeout_id failed"); 336 mutex_enter(&gid_info->gl_mutex); 337 return (-1); 338 } 339 mutex_enter(&gid_info->gl_mutex); 340 } 341 342 /* handle the case where an ioc_dc_timeout_id is scheduled */ 343 if (ioc->ioc_dc_timeout_id) { 344 timeout_id = ioc->ioc_dc_timeout_id; 345 ioc->ioc_dc_timeout_id = 0; 346 mutex_exit(&gid_info->gl_mutex); 347 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 348 "ioc_dc_timeout_id = 0x%x", timeout_id); 349 if (untimeout(timeout_id) == -1) { 350 IBTF_DPRINTF_L2("ibdm", "free_iou_info: " 351 "untimeout ioc_dc_timeout_id failed"); 352 mutex_enter(&gid_info->gl_mutex); 353 return (-1); 354 } 355 mutex_enter(&gid_info->gl_mutex); 356 } 357 358 /* handle the case where serv[k].se_timeout_id is scheduled */ 359 for (k = 0; k < ioc->ioc_profile.ioc_service_entries; k++) { 360 if (ioc->ioc_serv[k].se_timeout_id) { 361 timeout_id = ioc->ioc_serv[k].se_timeout_id; 362 ioc->ioc_serv[k].se_timeout_id = 0; 363 mutex_exit(&gid_info->gl_mutex); 364 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 365 "ioc->ioc_serv[%d].se_timeout_id = 0x%x", 366 k, timeout_id); 367 if (untimeout(timeout_id) == -1) { 368 IBTF_DPRINTF_L2("ibdm", "free_iou_info:" 369 " untimeout se_timeout_id failed"); 370 mutex_enter(&gid_info->gl_mutex); 371 return (-1); 372 } 373 mutex_enter(&gid_info->gl_mutex); 374 } 375 } 376 377 /* delete GID list in IOC */ 378 head = ioc->ioc_gid_list; 379 while (head) { 380 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: " 381 "Deleting gid_list struct %p", head); 382 delete = head; 383 head = head->gid_next; 384 kmem_free(delete, sizeof (ibdm_gid_t)); 385 } 386 ioc->ioc_gid_list = NULL; 387 388 /* delete ioc_serv */ 389 size = ioc->ioc_profile.ioc_service_entries * 390 sizeof (ibdm_srvents_info_t); 391 if (ioc->ioc_serv && size) { 392 kmem_free(ioc->ioc_serv, size); 393 ioc->ioc_serv = NULL; 394 } 395 } 396 /* 397 * Clear the IBDM_CISCO_PROBE_DONE flag to get the IO Unit information 398 * via the switch during the probe process. 
399 */ 400 gid_info->gl_flag &= ~IBDM_CISCO_PROBE_DONE; 401 402 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: deleting IOU & IOC"); 403 size = sizeof (ibdm_iou_info_t) + niocs * sizeof (ibdm_ioc_info_t); 404 kmem_free(gl_iou, size); 405 *ioup = NULL; 406 return (0); 407 } 408 409 410 /* 411 * ibdm_fini(): 412 * Un-register with IBTF 413 * De allocate memory for the GID info 414 */ 415 static int 416 ibdm_fini() 417 { 418 int ii; 419 ibdm_hca_list_t *hca_list, *temp; 420 ibdm_dp_gidinfo_t *gid_info, *tmp; 421 ibdm_gid_t *head, *delete; 422 423 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini"); 424 425 mutex_enter(&ibdm.ibdm_hl_mutex); 426 if (ibdm.ibdm_state & IBDM_IBT_ATTACHED) { 427 if (ibt_detach(ibdm.ibdm_ibt_clnt_hdl) != IBT_SUCCESS) { 428 IBTF_DPRINTF_L2("ibdm", "\t_fini: ibt_detach failed"); 429 mutex_exit(&ibdm.ibdm_hl_mutex); 430 return (IBDM_FAILURE); 431 } 432 ibdm.ibdm_state &= ~IBDM_IBT_ATTACHED; 433 ibdm.ibdm_ibt_clnt_hdl = NULL; 434 } 435 436 hca_list = ibdm.ibdm_hca_list_head; 437 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: nhcas %d", ibdm.ibdm_hca_count); 438 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 439 temp = hca_list; 440 hca_list = hca_list->hl_next; 441 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: hca %p", temp); 442 if (ibdm_uninit_hca(temp) != IBDM_SUCCESS) { 443 IBTF_DPRINTF_L2("ibdm", "\tibdm_fini: " 444 "uninit_hca %p failed", temp); 445 mutex_exit(&ibdm.ibdm_hl_mutex); 446 return (IBDM_FAILURE); 447 } 448 } 449 mutex_exit(&ibdm.ibdm_hl_mutex); 450 451 mutex_enter(&ibdm.ibdm_mutex); 452 if (ibdm.ibdm_state & IBDM_HCA_ATTACHED) 453 ibdm.ibdm_state &= ~IBDM_HCA_ATTACHED; 454 455 gid_info = ibdm.ibdm_dp_gidlist_head; 456 while (gid_info) { 457 mutex_enter(&gid_info->gl_mutex); 458 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou); 459 mutex_exit(&gid_info->gl_mutex); 460 ibdm_delete_glhca_list(gid_info); 461 462 tmp = gid_info; 463 gid_info = gid_info->gl_next; 464 mutex_destroy(&tmp->gl_mutex); 465 head = tmp->gl_gid; 466 while (head) { 467 IBTF_DPRINTF_L4("ibdm", 468 "\tibdm_fini: Deleting gid structs"); 469 delete = head; 470 head = head->gid_next; 471 kmem_free(delete, sizeof (ibdm_gid_t)); 472 } 473 kmem_free(tmp, sizeof (ibdm_dp_gidinfo_t)); 474 } 475 mutex_exit(&ibdm.ibdm_mutex); 476 477 if (ibdm.ibdm_state & IBDM_LOCKS_ALLOCED) { 478 ibdm.ibdm_state &= ~IBDM_LOCKS_ALLOCED; 479 mutex_destroy(&ibdm.ibdm_mutex); 480 mutex_destroy(&ibdm.ibdm_hl_mutex); 481 mutex_destroy(&ibdm.ibdm_ibnex_mutex); 482 cv_destroy(&ibdm.ibdm_port_settle_cv); 483 } 484 if (ibdm.ibdm_state & IBDM_CVS_ALLOCED) { 485 ibdm.ibdm_state &= ~IBDM_CVS_ALLOCED; 486 cv_destroy(&ibdm.ibdm_probe_cv); 487 cv_destroy(&ibdm.ibdm_busy_cv); 488 } 489 return (IBDM_SUCCESS); 490 } 491 492 493 /* 494 * ibdm_event_hdlr() 495 * 496 * IBDM registers this asynchronous event handler at the time of 497 * ibt_attach. IBDM support the following async events. For other 498 * event, simply returns success. 499 * IBT_HCA_ATTACH_EVENT: 500 * Retrieves the information about all the port that are 501 * present on this HCA, allocates the port attributes 502 * structure and calls IB nexus callback routine with 503 * the port attributes structure as an input argument. 
504 * IBT_HCA_DETACH_EVENT: 505 * Retrieves the information about all the ports that are 506 * present on this HCA and calls IB nexus callback with 507 * port guid as an argument 508 * IBT_EVENT_PORT_UP: 509 * Register with IBMF and SA access 510 * Setup IBMF receive callback routine 511 * IBT_EVENT_PORT_DOWN: 512 * Un-Register with IBMF and SA access 513 * Teardown IBMF receive callback routine 514 */ 515 /*ARGSUSED*/ 516 static void 517 ibdm_event_hdlr(void *clnt_hdl, 518 ibt_hca_hdl_t hca_hdl, ibt_async_code_t code, ibt_async_event_t *event) 519 { 520 ibdm_hca_list_t *hca_list; 521 ibdm_port_attr_t *port; 522 ibmf_saa_handle_t port_sa_hdl; 523 524 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: async code 0x%x", code); 525 526 switch (code) { 527 case IBT_HCA_ATTACH_EVENT: /* New HCA registered with IBTF */ 528 ibdm_handle_hca_attach(event->ev_hca_guid); 529 break; 530 531 case IBT_HCA_DETACH_EVENT: /* HCA unregistered with IBTF */ 532 ibdm_handle_hca_detach(event->ev_hca_guid); 533 mutex_enter(&ibdm.ibdm_ibnex_mutex); 534 if (ibdm.ibdm_ibnex_callback != NULL) { 535 (*ibdm.ibdm_ibnex_callback)((void *) 536 &event->ev_hca_guid, IBDM_EVENT_HCA_REMOVED); 537 } 538 mutex_exit(&ibdm.ibdm_ibnex_mutex); 539 break; 540 541 case IBT_EVENT_PORT_UP: 542 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_UP"); 543 mutex_enter(&ibdm.ibdm_hl_mutex); 544 port = ibdm_get_port_attr(event, &hca_list); 545 if (port == NULL) { 546 IBTF_DPRINTF_L2("ibdm", 547 "\tevent_hdlr: HCA not present"); 548 mutex_exit(&ibdm.ibdm_hl_mutex); 549 break; 550 } 551 ibdm_initialize_port(port); 552 hca_list->hl_nports_active++; 553 cv_broadcast(&ibdm.ibdm_port_settle_cv); 554 mutex_exit(&ibdm.ibdm_hl_mutex); 555 break; 556 557 case IBT_ERROR_PORT_DOWN: 558 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_DOWN"); 559 mutex_enter(&ibdm.ibdm_hl_mutex); 560 port = ibdm_get_port_attr(event, &hca_list); 561 if (port == NULL) { 562 IBTF_DPRINTF_L2("ibdm", 563 "\tevent_hdlr: HCA not present"); 564 mutex_exit(&ibdm.ibdm_hl_mutex); 565 break; 566 } 567 hca_list->hl_nports_active--; 568 port_sa_hdl = port->pa_sa_hdl; 569 (void) ibdm_fini_port(port); 570 port->pa_state = IBT_PORT_DOWN; 571 cv_broadcast(&ibdm.ibdm_port_settle_cv); 572 mutex_exit(&ibdm.ibdm_hl_mutex); 573 ibdm_reset_all_dgids(port_sa_hdl); 574 break; 575 576 default: /* Ignore all other events/errors */ 577 break; 578 } 579 } 580 581 582 /* 583 * ibdm_initialize_port() 584 * Register with IBMF 585 * Register with SA access 586 * Register a receive callback routine with IBMF. IBMF invokes 587 * this routine whenever a MAD arrives at this port. 
588 * Update the port attributes 589 */ 590 static void 591 ibdm_initialize_port(ibdm_port_attr_t *port) 592 { 593 int ii; 594 uint_t nports, size; 595 uint_t pkey_idx; 596 ib_pkey_t pkey; 597 ibt_hca_portinfo_t *pinfop; 598 ibmf_register_info_t ibmf_reg; 599 ibmf_saa_subnet_event_args_t event_args; 600 601 IBTF_DPRINTF_L4("ibdm", "\tinitialize_port:"); 602 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 603 604 /* Check whether the port is active */ 605 if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL, 606 NULL) != IBT_SUCCESS) 607 return; 608 609 if (port->pa_sa_hdl != NULL) 610 return; 611 612 if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num, 613 &pinfop, &nports, &size) != IBT_SUCCESS) { 614 /* This should not occur */ 615 port->pa_npkeys = 0; 616 port->pa_pkey_tbl = NULL; 617 return; 618 } 619 port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix; 620 621 port->pa_state = pinfop->p_linkstate; 622 port->pa_npkeys = pinfop->p_pkey_tbl_sz; 623 port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc( 624 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP); 625 626 for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) 627 port->pa_pkey_tbl[pkey_idx].pt_pkey = 628 pinfop->p_pkey_tbl[pkey_idx]; 629 630 ibt_free_portinfo(pinfop, size); 631 632 event_args.is_event_callback = ibdm_saa_event_cb; 633 event_args.is_event_callback_arg = port; 634 if (ibmf_sa_session_open(port->pa_port_guid, 0, &event_args, 635 IBMF_VERSION, 0, &port->pa_sa_hdl) != IBMF_SUCCESS) { 636 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 637 "sa access registration failed"); 638 return; 639 } 640 ibmf_reg.ir_ci_guid = port->pa_hca_guid; 641 ibmf_reg.ir_port_num = port->pa_port_num; 642 ibmf_reg.ir_client_class = DEV_MGT_MANAGER; 643 644 if (ibmf_register(&ibmf_reg, IBMF_VERSION, 0, NULL, NULL, 645 &port->pa_ibmf_hdl, &port->pa_ibmf_caps) != IBMF_SUCCESS) { 646 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 647 "IBMF registration failed"); 648 (void) ibdm_fini_port(port); 649 return; 650 } 651 if (ibmf_setup_async_cb(port->pa_ibmf_hdl, IBMF_QP_HANDLE_DEFAULT, 652 ibdm_ibmf_recv_cb, 0, 0) != IBMF_SUCCESS) { 653 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 654 "IBMF setup recv cb failed"); 655 (void) ibdm_fini_port(port); 656 return; 657 } 658 659 for (ii = 0; ii < port->pa_npkeys; ii++) { 660 pkey = port->pa_pkey_tbl[ii].pt_pkey; 661 if (IBDM_INVALID_PKEY(pkey)) { 662 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 663 continue; 664 } 665 ibdm_port_attr_ibmf_init(port, pkey, ii); 666 } 667 } 668 669 670 /* 671 * ibdm_port_attr_ibmf_init: 672 * With IBMF - Alloc QP Handle and Setup Async callback 673 */ 674 static void 675 ibdm_port_attr_ibmf_init(ibdm_port_attr_t *port, ib_pkey_t pkey, int ii) 676 { 677 int ret; 678 679 if ((ret = ibmf_alloc_qp(port->pa_ibmf_hdl, pkey, IB_GSI_QKEY, 680 IBMF_ALT_QP_MAD_NO_RMPP, &port->pa_pkey_tbl[ii].pt_qp_hdl)) != 681 IBMF_SUCCESS) { 682 IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: " 683 "IBMF failed to alloc qp %d", ret); 684 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 685 return; 686 } 687 688 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_init: QP handle is %p", 689 port->pa_ibmf_hdl); 690 691 if ((ret = ibmf_setup_async_cb(port->pa_ibmf_hdl, 692 port->pa_pkey_tbl[ii].pt_qp_hdl, ibdm_ibmf_recv_cb, 0, 0)) != 693 IBMF_SUCCESS) { 694 IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: " 695 "IBMF setup recv cb failed %d", ret); 696 (void) ibmf_free_qp(port->pa_ibmf_hdl, 697 &port->pa_pkey_tbl[ii].pt_qp_hdl, 0); 698 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 699 } 700 } 701 702 703 /* 704 * 
ibdm_get_port_attr() 705 * Get port attributes from HCA guid and port number 706 * Return pointer to ibdm_port_attr_t on Success 707 * and NULL on failure 708 */ 709 static ibdm_port_attr_t * 710 ibdm_get_port_attr(ibt_async_event_t *event, ibdm_hca_list_t **retval) 711 { 712 ibdm_hca_list_t *hca_list; 713 ibdm_port_attr_t *port_attr; 714 int ii; 715 716 IBTF_DPRINTF_L4("ibdm", "\tget_port_attr: port# %d", event->ev_port); 717 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 718 hca_list = ibdm.ibdm_hca_list_head; 719 while (hca_list) { 720 if (hca_list->hl_hca_guid == event->ev_hca_guid) { 721 for (ii = 0; ii < hca_list->hl_nports; ii++) { 722 port_attr = &hca_list->hl_port_attr[ii]; 723 if (port_attr->pa_port_num == event->ev_port) { 724 *retval = hca_list; 725 return (port_attr); 726 } 727 } 728 } 729 hca_list = hca_list->hl_next; 730 } 731 return (NULL); 732 } 733 734 735 /* 736 * ibdm_update_port_attr() 737 * Update the port attributes 738 */ 739 static void 740 ibdm_update_port_attr(ibdm_port_attr_t *port) 741 { 742 uint_t nports, size; 743 uint_t pkey_idx; 744 ibt_hca_portinfo_t *portinfop; 745 746 IBTF_DPRINTF_L4("ibdm", "\tupdate_port_attr: Begin"); 747 if (ibt_query_hca_ports(port->pa_hca_hdl, 748 port->pa_port_num, &portinfop, &nports, &size) != IBT_SUCCESS) { 749 /* This should not occur */ 750 port->pa_npkeys = 0; 751 port->pa_pkey_tbl = NULL; 752 return; 753 } 754 port->pa_sn_prefix = portinfop->p_sgid_tbl[0].gid_prefix; 755 756 port->pa_state = portinfop->p_linkstate; 757 758 /* 759 * PKey information in portinfo valid only if port is 760 * ACTIVE. Bail out if not. 761 */ 762 if (port->pa_state != IBT_PORT_ACTIVE) { 763 port->pa_npkeys = 0; 764 port->pa_pkey_tbl = NULL; 765 ibt_free_portinfo(portinfop, size); 766 return; 767 } 768 769 port->pa_npkeys = portinfop->p_pkey_tbl_sz; 770 port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc( 771 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP); 772 773 for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) { 774 port->pa_pkey_tbl[pkey_idx].pt_pkey = 775 portinfop->p_pkey_tbl[pkey_idx]; 776 } 777 ibt_free_portinfo(portinfop, size); 778 } 779 780 781 /* 782 * ibdm_handle_hca_attach() 783 */ 784 static void 785 ibdm_handle_hca_attach(ib_guid_t hca_guid) 786 { 787 uint_t size; 788 uint_t ii, nports; 789 ibt_status_t status; 790 ibt_hca_hdl_t hca_hdl; 791 ibt_hca_attr_t *hca_attr; 792 ibdm_hca_list_t *hca_list, *temp; 793 ibdm_port_attr_t *port_attr; 794 ibt_hca_portinfo_t *portinfop; 795 796 IBTF_DPRINTF_L4("ibdm", 797 "\thandle_hca_attach: hca_guid = 0x%llX", hca_guid); 798 799 /* open the HCA first */ 800 if ((status = ibt_open_hca(ibdm.ibdm_ibt_clnt_hdl, hca_guid, 801 &hca_hdl)) != IBT_SUCCESS) { 802 IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: " 803 "open_hca failed, status 0x%x", status); 804 return; 805 } 806 807 hca_attr = (ibt_hca_attr_t *) 808 kmem_alloc(sizeof (ibt_hca_attr_t), KM_SLEEP); 809 /* ibt_query_hca always returns IBT_SUCCESS */ 810 (void) ibt_query_hca(hca_hdl, hca_attr); 811 812 IBTF_DPRINTF_L4("ibdm", "\tvid: 0x%x, pid: 0x%x, ver: 0x%x," 813 " #ports: %d", hca_attr->hca_vendor_id, hca_attr->hca_device_id, 814 hca_attr->hca_version_id, hca_attr->hca_nports); 815 816 if ((status = ibt_query_hca_ports(hca_hdl, 0, &portinfop, &nports, 817 &size)) != IBT_SUCCESS) { 818 IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: " 819 "ibt_query_hca_ports failed, status 0x%x", status); 820 kmem_free(hca_attr, sizeof (ibt_hca_attr_t)); 821 (void) ibt_close_hca(hca_hdl); 822 return; 823 } 824 hca_list = (ibdm_hca_list_t *) 825 
kmem_zalloc((sizeof (ibdm_hca_list_t)), KM_SLEEP); 826 hca_list->hl_port_attr = (ibdm_port_attr_t *)kmem_zalloc( 827 (sizeof (ibdm_port_attr_t) * hca_attr->hca_nports), KM_SLEEP); 828 hca_list->hl_hca_guid = hca_attr->hca_node_guid; 829 hca_list->hl_nports = hca_attr->hca_nports; 830 hca_list->hl_attach_time = ddi_get_time(); 831 hca_list->hl_hca_hdl = hca_hdl; 832 833 /* 834 * Init a dummy port attribute for the HCA node 835 * This is for Per-HCA Node. Initialize port_attr : 836 * hca_guid & port_guid -> hca_guid 837 * npkeys, pkey_tbl is NULL 838 * port_num, sn_prefix is 0 839 * vendorid, product_id, dev_version from HCA 840 * pa_state is IBT_PORT_ACTIVE 841 */ 842 hca_list->hl_hca_port_attr = (ibdm_port_attr_t *)kmem_zalloc( 843 sizeof (ibdm_port_attr_t), KM_SLEEP); 844 port_attr = hca_list->hl_hca_port_attr; 845 port_attr->pa_vendorid = hca_attr->hca_vendor_id; 846 port_attr->pa_productid = hca_attr->hca_device_id; 847 port_attr->pa_dev_version = hca_attr->hca_version_id; 848 port_attr->pa_hca_guid = hca_attr->hca_node_guid; 849 port_attr->pa_hca_hdl = hca_list->hl_hca_hdl; 850 port_attr->pa_port_guid = hca_attr->hca_node_guid; 851 port_attr->pa_state = IBT_PORT_ACTIVE; 852 853 854 for (ii = 0; ii < nports; ii++) { 855 port_attr = &hca_list->hl_port_attr[ii]; 856 port_attr->pa_vendorid = hca_attr->hca_vendor_id; 857 port_attr->pa_productid = hca_attr->hca_device_id; 858 port_attr->pa_dev_version = hca_attr->hca_version_id; 859 port_attr->pa_hca_guid = hca_attr->hca_node_guid; 860 port_attr->pa_hca_hdl = hca_list->hl_hca_hdl; 861 port_attr->pa_port_guid = portinfop[ii].p_sgid_tbl->gid_guid; 862 port_attr->pa_sn_prefix = portinfop[ii].p_sgid_tbl->gid_prefix; 863 port_attr->pa_port_num = portinfop[ii].p_port_num; 864 port_attr->pa_state = portinfop[ii].p_linkstate; 865 866 /* 867 * Register with IBMF, SA access when the port is in 868 * ACTIVE state. Also register a callback routine 869 * with IBMF to receive incoming DM MAD's. 870 * The IBDM event handler takes care of registration of 871 * port which are not active. 
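	 * (Such ports are picked up later: when an IBT_EVENT_PORT_UP
	 * event arrives, ibdm_event_hdlr() calls ibdm_initialize_port()
	 * for them.)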
872 */ 873 IBTF_DPRINTF_L4("ibdm", 874 "\thandle_hca_attach: port guid %llx Port state 0x%x", 875 port_attr->pa_port_guid, portinfop[ii].p_linkstate); 876 877 if (portinfop[ii].p_linkstate == IBT_PORT_ACTIVE) { 878 mutex_enter(&ibdm.ibdm_hl_mutex); 879 hca_list->hl_nports_active++; 880 ibdm_initialize_port(port_attr); 881 cv_broadcast(&ibdm.ibdm_port_settle_cv); 882 mutex_exit(&ibdm.ibdm_hl_mutex); 883 } 884 } 885 mutex_enter(&ibdm.ibdm_hl_mutex); 886 for (temp = ibdm.ibdm_hca_list_head; temp; temp = temp->hl_next) { 887 if (temp->hl_hca_guid == hca_guid) { 888 IBTF_DPRINTF_L2("ibdm", "hca_attach: HCA %llX " 889 "already seen by IBDM", hca_guid); 890 mutex_exit(&ibdm.ibdm_hl_mutex); 891 (void) ibdm_uninit_hca(hca_list); 892 return; 893 } 894 } 895 ibdm.ibdm_hca_count++; 896 if (ibdm.ibdm_hca_list_head == NULL) { 897 ibdm.ibdm_hca_list_head = hca_list; 898 ibdm.ibdm_hca_list_tail = hca_list; 899 } else { 900 ibdm.ibdm_hca_list_tail->hl_next = hca_list; 901 ibdm.ibdm_hca_list_tail = hca_list; 902 } 903 mutex_exit(&ibdm.ibdm_hl_mutex); 904 mutex_enter(&ibdm.ibdm_ibnex_mutex); 905 if (ibdm.ibdm_ibnex_callback != NULL) { 906 (*ibdm.ibdm_ibnex_callback)((void *) 907 &hca_guid, IBDM_EVENT_HCA_ADDED); 908 } 909 mutex_exit(&ibdm.ibdm_ibnex_mutex); 910 911 kmem_free(hca_attr, sizeof (ibt_hca_attr_t)); 912 ibt_free_portinfo(portinfop, size); 913 } 914 915 916 /* 917 * ibdm_handle_hca_detach() 918 */ 919 static void 920 ibdm_handle_hca_detach(ib_guid_t hca_guid) 921 { 922 ibdm_hca_list_t *head, *prev = NULL; 923 size_t len; 924 ibdm_dp_gidinfo_t *gidinfo; 925 926 IBTF_DPRINTF_L4("ibdm", 927 "\thandle_hca_detach: hca_guid = 0x%llx", hca_guid); 928 929 /* Make sure no probes are running */ 930 mutex_enter(&ibdm.ibdm_mutex); 931 while (ibdm.ibdm_busy & IBDM_BUSY) 932 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 933 ibdm.ibdm_busy |= IBDM_BUSY; 934 mutex_exit(&ibdm.ibdm_mutex); 935 936 mutex_enter(&ibdm.ibdm_hl_mutex); 937 head = ibdm.ibdm_hca_list_head; 938 while (head) { 939 if (head->hl_hca_guid == hca_guid) { 940 if (prev == NULL) 941 ibdm.ibdm_hca_list_head = head->hl_next; 942 else 943 prev->hl_next = head->hl_next; 944 if (ibdm.ibdm_hca_list_tail == head) 945 ibdm.ibdm_hca_list_tail = prev; 946 ibdm.ibdm_hca_count--; 947 break; 948 } 949 prev = head; 950 head = head->hl_next; 951 } 952 mutex_exit(&ibdm.ibdm_hl_mutex); 953 if (ibdm_uninit_hca(head) != IBDM_SUCCESS) 954 (void) ibdm_handle_hca_attach(hca_guid); 955 956 /* 957 * Now clean up the HCA lists in the gidlist. 
958 */ 959 for (gidinfo = ibdm.ibdm_dp_gidlist_head; gidinfo; gidinfo = 960 gidinfo->gl_next) { 961 prev = NULL; 962 head = gidinfo->gl_hca_list; 963 while (head) { 964 if (head->hl_hca_guid == hca_guid) { 965 if (prev == NULL) 966 gidinfo->gl_hca_list = 967 head->hl_next; 968 else 969 prev->hl_next = head->hl_next; 970 971 len = sizeof (ibdm_hca_list_t) + 972 (head->hl_nports * 973 sizeof (ibdm_port_attr_t)); 974 kmem_free(head, len); 975 976 break; 977 } 978 prev = head; 979 head = head->hl_next; 980 } 981 } 982 983 mutex_enter(&ibdm.ibdm_mutex); 984 ibdm.ibdm_busy &= ~IBDM_BUSY; 985 cv_broadcast(&ibdm.ibdm_busy_cv); 986 mutex_exit(&ibdm.ibdm_mutex); 987 } 988 989 990 static int 991 ibdm_uninit_hca(ibdm_hca_list_t *head) 992 { 993 int ii; 994 ibdm_port_attr_t *port_attr; 995 996 for (ii = 0; ii < head->hl_nports; ii++) { 997 port_attr = &head->hl_port_attr[ii]; 998 if (ibdm_fini_port(port_attr) != IBDM_SUCCESS) { 999 IBTF_DPRINTF_L2("ibdm", "uninit_hca: HCA %p port 0x%x " 1000 "ibdm_fini_port() failed", head, ii); 1001 return (IBDM_FAILURE); 1002 } 1003 } 1004 if (head->hl_hca_hdl) 1005 if (ibt_close_hca(head->hl_hca_hdl) != IBT_SUCCESS) 1006 return (IBDM_FAILURE); 1007 kmem_free(head->hl_port_attr, 1008 head->hl_nports * sizeof (ibdm_port_attr_t)); 1009 kmem_free(head->hl_hca_port_attr, sizeof (ibdm_port_attr_t)); 1010 kmem_free(head, sizeof (ibdm_hca_list_t)); 1011 return (IBDM_SUCCESS); 1012 } 1013 1014 1015 /* 1016 * For each port on the HCA, 1017 * 1) Teardown IBMF receive callback function 1018 * 2) Unregister with IBMF 1019 * 3) Unregister with SA access 1020 */ 1021 static int 1022 ibdm_fini_port(ibdm_port_attr_t *port_attr) 1023 { 1024 int ii, ibmf_status; 1025 1026 for (ii = 0; ii < port_attr->pa_npkeys; ii++) { 1027 if (port_attr->pa_pkey_tbl == NULL) 1028 break; 1029 if (!port_attr->pa_pkey_tbl[ii].pt_qp_hdl) 1030 continue; 1031 if (ibdm_port_attr_ibmf_fini(port_attr, ii) != IBDM_SUCCESS) { 1032 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1033 "ibdm_port_attr_ibmf_fini failed for " 1034 "port pkey 0x%x", ii); 1035 return (IBDM_FAILURE); 1036 } 1037 } 1038 1039 if (port_attr->pa_ibmf_hdl) { 1040 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl, 1041 IBMF_QP_HANDLE_DEFAULT, 0); 1042 if (ibmf_status != IBMF_SUCCESS) { 1043 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1044 "ibmf_tear_down_async_cb failed %d", ibmf_status); 1045 return (IBDM_FAILURE); 1046 } 1047 1048 ibmf_status = ibmf_unregister(&port_attr->pa_ibmf_hdl, 0); 1049 if (ibmf_status != IBMF_SUCCESS) { 1050 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1051 "ibmf_unregister failed %d", ibmf_status); 1052 return (IBDM_FAILURE); 1053 } 1054 1055 port_attr->pa_ibmf_hdl = NULL; 1056 } 1057 1058 if (port_attr->pa_sa_hdl) { 1059 ibmf_status = ibmf_sa_session_close(&port_attr->pa_sa_hdl, 0); 1060 if (ibmf_status != IBMF_SUCCESS) { 1061 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1062 "ibmf_sa_session_close failed %d", ibmf_status); 1063 return (IBDM_FAILURE); 1064 } 1065 port_attr->pa_sa_hdl = NULL; 1066 } 1067 1068 if (port_attr->pa_pkey_tbl != NULL) { 1069 kmem_free(port_attr->pa_pkey_tbl, 1070 port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 1071 port_attr->pa_pkey_tbl = NULL; 1072 port_attr->pa_npkeys = 0; 1073 } 1074 1075 return (IBDM_SUCCESS); 1076 } 1077 1078 1079 /* 1080 * ibdm_port_attr_ibmf_fini: 1081 * With IBMF - Tear down Async callback and free QP Handle 1082 */ 1083 static int 1084 ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *port_attr, int ii) 1085 { 1086 int ibmf_status; 1087 1088 IBTF_DPRINTF_L5("ibdm", 
"\tport_attr_ibmf_fini:"); 1089 1090 if (port_attr->pa_pkey_tbl[ii].pt_qp_hdl) { 1091 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl, 1092 port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0); 1093 if (ibmf_status != IBMF_SUCCESS) { 1094 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: " 1095 "ibmf_tear_down_async_cb failed %d", ibmf_status); 1096 return (IBDM_FAILURE); 1097 } 1098 ibmf_status = ibmf_free_qp(port_attr->pa_ibmf_hdl, 1099 &port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0); 1100 if (ibmf_status != IBMF_SUCCESS) { 1101 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: " 1102 "ibmf_free_qp failed %d", ibmf_status); 1103 return (IBDM_FAILURE); 1104 } 1105 port_attr->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 1106 } 1107 return (IBDM_SUCCESS); 1108 } 1109 1110 1111 /* 1112 * ibdm_gid_decr_pending: 1113 * decrement gl_pending_cmds. If zero wakeup sleeping threads 1114 */ 1115 static void 1116 ibdm_gid_decr_pending(ibdm_dp_gidinfo_t *gidinfo) 1117 { 1118 mutex_enter(&ibdm.ibdm_mutex); 1119 mutex_enter(&gidinfo->gl_mutex); 1120 if (--gidinfo->gl_pending_cmds == 0) { 1121 /* 1122 * Handle DGID getting removed. 1123 */ 1124 if (gidinfo->gl_disconnected) { 1125 mutex_exit(&gidinfo->gl_mutex); 1126 mutex_exit(&ibdm.ibdm_mutex); 1127 1128 IBTF_DPRINTF_L3(ibdm_string, "\tgid_decr_pending: " 1129 "gidinfo %p hot removal", gidinfo); 1130 ibdm_delete_gidinfo(gidinfo); 1131 1132 mutex_enter(&ibdm.ibdm_mutex); 1133 ibdm.ibdm_ngid_probes_in_progress--; 1134 ibdm_wait_probe_completion(); 1135 mutex_exit(&ibdm.ibdm_mutex); 1136 return; 1137 } 1138 mutex_exit(&gidinfo->gl_mutex); 1139 mutex_exit(&ibdm.ibdm_mutex); 1140 ibdm_notify_newgid_iocs(gidinfo); 1141 mutex_enter(&ibdm.ibdm_mutex); 1142 mutex_enter(&gidinfo->gl_mutex); 1143 1144 ibdm.ibdm_ngid_probes_in_progress--; 1145 ibdm_wait_probe_completion(); 1146 } 1147 mutex_exit(&gidinfo->gl_mutex); 1148 mutex_exit(&ibdm.ibdm_mutex); 1149 } 1150 1151 1152 /* 1153 * ibdm_wait_probe_completion: 1154 * wait for probing to complete 1155 */ 1156 static void 1157 ibdm_wait_probe_completion(void) 1158 { 1159 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 1160 if (ibdm.ibdm_ngid_probes_in_progress) { 1161 IBTF_DPRINTF_L4("ibdm", "\twait for probe complete"); 1162 ibdm.ibdm_busy |= IBDM_PROBE_IN_PROGRESS; 1163 while (ibdm.ibdm_busy & IBDM_PROBE_IN_PROGRESS) 1164 cv_wait(&ibdm.ibdm_probe_cv, &ibdm.ibdm_mutex); 1165 } 1166 } 1167 1168 1169 /* 1170 * ibdm_wait_cisco_probe_completion: 1171 * wait for the reply from the Cisco FC GW switch after a setclassportinfo 1172 * request is sent. This wait can be achieved on each gid. 1173 */ 1174 static void 1175 ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *gidinfo) 1176 { 1177 ASSERT(MUTEX_HELD(&gidinfo->gl_mutex)); 1178 IBTF_DPRINTF_L4("ibdm", "\twait for cisco probe complete"); 1179 gidinfo->gl_flag |= IBDM_CISCO_PROBE; 1180 while (gidinfo->gl_flag & IBDM_CISCO_PROBE) 1181 cv_wait(&gidinfo->gl_probe_cv, &gidinfo->gl_mutex); 1182 } 1183 1184 1185 /* 1186 * ibdm_wakeup_probe_gid_cv: 1187 * wakeup waiting threads (based on ibdm_ngid_probes_in_progress) 1188 */ 1189 static void 1190 ibdm_wakeup_probe_gid_cv(void) 1191 { 1192 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 1193 if (!ibdm.ibdm_ngid_probes_in_progress) { 1194 IBTF_DPRINTF_L4("ibdm", "wakeup_probe_gid_thread: Wakeup"); 1195 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 1196 cv_broadcast(&ibdm.ibdm_probe_cv); 1197 } 1198 1199 } 1200 1201 1202 /* 1203 * ibdm_sweep_fabric(reprobe_flag) 1204 * Find all possible Managed IOU's and their IOC's that are visible 1205 * to the host. 
The algorithm used is as follows 1206 * 1207 * Send a "bus walk" request for each port on the host HCA to SA access 1208 * SA returns complete set of GID's that are reachable from 1209 * source port. This is done in parallel. 1210 * 1211 * Initialize GID state to IBDM_GID_PROBE_NOT_DONE 1212 * 1213 * Sort the GID list and eliminate duplicate GID's 1214 * 1) Use DGID for sorting 1215 * 2) use PortGuid for sorting 1216 * Send SA query to retrieve NodeRecord and 1217 * extract PortGuid from that. 1218 * 1219 * Set GID state to IBDM_GID_PROBE_FAILED to all the ports that dont 1220 * support DM MAD's 1221 * Send a "Portinfo" query to get the port capabilities and 1222 * then check for DM MAD's support 1223 * 1224 * Send "ClassPortInfo" request for all the GID's in parallel, 1225 * set the GID state to IBDM_GET_CLASSPORTINFO and wait on the 1226 * cv_signal to complete. 1227 * 1228 * When DM agent on the remote GID sends back the response, IBMF 1229 * invokes DM callback routine. 1230 * 1231 * If the response is proper, send "IOUnitInfo" request and set 1232 * GID state to IBDM_GET_IOUNITINFO. 1233 * 1234 * If the response is proper, send "IocProfileInfo" request to 1235 * all the IOC simultaneously and set GID state to IBDM_GET_IOC_DETAILS. 1236 * 1237 * Send request to get Service entries simultaneously 1238 * 1239 * Signal the waiting thread when received response for all the commands. 1240 * 1241 * Set the GID state to IBDM_GID_PROBE_FAILED when received a error 1242 * response during the probing period. 1243 * 1244 * Note: 1245 * ibdm.ibdm_ngid_probes_in_progress and ibdm_gid_list_t:gl_pending_cmds 1246 * keep track of number commands in progress at any point of time. 1247 * MAD transaction ID is used to identify a particular GID 1248 * TBD: Consider registering the IBMF receive callback on demand 1249 * 1250 * Note: This routine must be called with ibdm.ibdm_mutex held 1251 * TBD: Re probe the failure GID (for certain failures) when requested 1252 * for fabric sweep next time 1253 * 1254 * Parameters : If reprobe_flag is set, All IOCs will be reprobed. 1255 */ 1256 static void 1257 ibdm_sweep_fabric(int reprobe_flag) 1258 { 1259 int ii; 1260 int new_paths = 0; 1261 uint8_t niocs; 1262 taskqid_t tid; 1263 ibdm_ioc_info_t *ioc; 1264 ibdm_hca_list_t *hca_list = NULL; 1265 ibdm_port_attr_t *port = NULL; 1266 ibdm_dp_gidinfo_t *gid_info; 1267 1268 IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: Enter"); 1269 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 1270 1271 /* 1272 * Check whether a sweep already in progress. If so, just 1273 * wait for the fabric sweep to complete 1274 */ 1275 while (ibdm.ibdm_busy & IBDM_BUSY) 1276 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 1277 ibdm.ibdm_busy |= IBDM_BUSY; 1278 mutex_exit(&ibdm.ibdm_mutex); 1279 1280 ibdm_dump_sweep_fabric_timestamp(0); 1281 1282 /* Rescan the GID list for any removed GIDs for reprobe */ 1283 if (reprobe_flag) 1284 ibdm_rescan_gidlist(NULL); 1285 1286 /* 1287 * Get list of all the ports reachable from the local known HCA 1288 * ports which are active 1289 */ 1290 mutex_enter(&ibdm.ibdm_hl_mutex); 1291 for (ibdm_get_next_port(&hca_list, &port, 1); port; 1292 ibdm_get_next_port(&hca_list, &port, 1)) { 1293 /* 1294 * Get PATHS to all the reachable ports from 1295 * SGID and update the global ibdm structure. 
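	 * (ibdm_get_reachable_ports() returns the number of new GIDs it
	 * added to the global GID list; the running total kept in
	 * ibdm.ibdm_ngids is what seeds ibdm_ngid_probes_in_progress
	 * below.)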
1296 */ 1297 new_paths = ibdm_get_reachable_ports(port, hca_list); 1298 ibdm.ibdm_ngids += new_paths; 1299 } 1300 mutex_exit(&ibdm.ibdm_hl_mutex); 1301 1302 mutex_enter(&ibdm.ibdm_mutex); 1303 ibdm.ibdm_ngid_probes_in_progress += ibdm.ibdm_ngids; 1304 mutex_exit(&ibdm.ibdm_mutex); 1305 1306 /* Send a request to probe GIDs asynchronously. */ 1307 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 1308 gid_info = gid_info->gl_next) { 1309 mutex_enter(&gid_info->gl_mutex); 1310 gid_info->gl_reprobe_flag = reprobe_flag; 1311 mutex_exit(&gid_info->gl_mutex); 1312 1313 /* process newly encountered GIDs */ 1314 tid = taskq_dispatch(system_taskq, ibdm_probe_gid_thread, 1315 (void *)gid_info, TQ_NOSLEEP); 1316 IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: gid_info = %p" 1317 " taskq_id = %x", gid_info, tid); 1318 /* taskq failed to dispatch call it directly */ 1319 if (tid == NULL) 1320 ibdm_probe_gid_thread((void *)gid_info); 1321 } 1322 1323 mutex_enter(&ibdm.ibdm_mutex); 1324 ibdm_wait_probe_completion(); 1325 1326 /* 1327 * Update the properties, if reprobe_flag is set 1328 * Skip if gl_reprobe_flag is set, this will be 1329 * a re-inserted / new GID, for which notifications 1330 * have already been send. 1331 */ 1332 if (reprobe_flag) { 1333 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 1334 gid_info = gid_info->gl_next) { 1335 if (gid_info->gl_iou == NULL) 1336 continue; 1337 if (gid_info->gl_reprobe_flag) { 1338 gid_info->gl_reprobe_flag = 0; 1339 continue; 1340 } 1341 1342 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 1343 for (ii = 0; ii < niocs; ii++) { 1344 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 1345 if (ioc) 1346 ibdm_reprobe_update_port_srv(ioc, 1347 gid_info); 1348 } 1349 } 1350 } else if (ibdm.ibdm_prev_iou) { 1351 ibdm_ioc_info_t *ioc_list; 1352 1353 /* 1354 * Get the list of IOCs which have changed. 1355 * If any IOCs have changed, Notify IBNexus 1356 */ 1357 ibdm.ibdm_prev_iou = 0; 1358 ioc_list = ibdm_handle_prev_iou(); 1359 if (ioc_list) { 1360 if (ibdm.ibdm_ibnex_callback != NULL) { 1361 (*ibdm.ibdm_ibnex_callback)( 1362 (void *)ioc_list, 1363 IBDM_EVENT_IOC_PROP_UPDATE); 1364 } 1365 } 1366 } 1367 1368 ibdm_dump_sweep_fabric_timestamp(1); 1369 1370 ibdm.ibdm_busy &= ~IBDM_BUSY; 1371 cv_broadcast(&ibdm.ibdm_busy_cv); 1372 IBTF_DPRINTF_L5("ibdm", "\tsweep_fabric: EXIT"); 1373 } 1374 1375 1376 /* 1377 * ibdm_is_cisco: 1378 * Check if this is a Cisco device or not. 1379 */ 1380 static boolean_t 1381 ibdm_is_cisco(ib_guid_t guid) 1382 { 1383 if ((guid >> IBDM_OUI_GUID_SHIFT) == IBDM_CISCO_COMPANY_ID) 1384 return (B_TRUE); 1385 return (B_FALSE); 1386 } 1387 1388 1389 /* 1390 * ibdm_is_cisco_switch: 1391 * Check if this switch is a CISCO switch or not. 1392 * Note that if this switch is already activated, ibdm_is_cisco_switch() 1393 * returns B_FALSE not to re-activate it again. 1394 */ 1395 static boolean_t 1396 ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *gid_info) 1397 { 1398 int company_id, device_id; 1399 ASSERT(gid_info != 0); 1400 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 1401 1402 /* 1403 * If this switch is already activated, don't re-activate it. 1404 */ 1405 if (gid_info->gl_flag & IBDM_CISCO_PROBE_DONE) 1406 return (B_FALSE); 1407 1408 /* 1409 * Check if this switch is a Cisco FC GW or not. 1410 * Use the node guid (the OUI part) instead of the vendor id 1411 * since the vendor id is zero in practice. 
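	 * (The OUI is the company identifier carried in the upper bits of
	 * the EUI-64 node GUID; here, as in ibdm_is_cisco() above, it is
	 * extracted by shifting the GUID right by IBDM_OUI_GUID_SHIFT.)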
1412 */ 1413 company_id = gid_info->gl_nodeguid >> IBDM_OUI_GUID_SHIFT; 1414 device_id = gid_info->gl_devid; 1415 1416 if (company_id == IBDM_CISCO_COMPANY_ID && 1417 device_id == IBDM_CISCO_DEVICE_ID) 1418 return (B_TRUE); 1419 return (B_FALSE); 1420 } 1421 1422 1423 /* 1424 * ibdm_probe_gid_thread: 1425 * thread that does the actual work for sweeping the fabric 1426 * for a given GID 1427 */ 1428 static void 1429 ibdm_probe_gid_thread(void *args) 1430 { 1431 int reprobe_flag; 1432 ib_guid_t node_guid; 1433 ib_guid_t port_guid; 1434 ibdm_dp_gidinfo_t *gid_info; 1435 1436 gid_info = (ibdm_dp_gidinfo_t *)args; 1437 reprobe_flag = gid_info->gl_reprobe_flag; 1438 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: gid_info = %p, flag = %d", 1439 gid_info, reprobe_flag); 1440 ASSERT(gid_info != NULL); 1441 ASSERT(gid_info->gl_pending_cmds == 0); 1442 1443 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE && 1444 reprobe_flag == 0) { 1445 /* 1446 * This GID may have been already probed. Send 1447 * in a CLP to check if IOUnitInfo changed? 1448 * Explicitly set gl_reprobe_flag to 0 so that 1449 * IBnex is not notified on completion 1450 */ 1451 if (gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) { 1452 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: " 1453 "get new IOCs information"); 1454 mutex_enter(&gid_info->gl_mutex); 1455 gid_info->gl_pending_cmds++; 1456 gid_info->gl_state = IBDM_GET_IOUNITINFO; 1457 gid_info->gl_reprobe_flag = 0; 1458 mutex_exit(&gid_info->gl_mutex); 1459 if (ibdm_send_iounitinfo(gid_info) != IBDM_SUCCESS) { 1460 mutex_enter(&gid_info->gl_mutex); 1461 --gid_info->gl_pending_cmds; 1462 mutex_exit(&gid_info->gl_mutex); 1463 mutex_enter(&ibdm.ibdm_mutex); 1464 --ibdm.ibdm_ngid_probes_in_progress; 1465 ibdm_wakeup_probe_gid_cv(); 1466 mutex_exit(&ibdm.ibdm_mutex); 1467 } 1468 } else { 1469 mutex_enter(&ibdm.ibdm_mutex); 1470 --ibdm.ibdm_ngid_probes_in_progress; 1471 ibdm_wakeup_probe_gid_cv(); 1472 mutex_exit(&ibdm.ibdm_mutex); 1473 } 1474 return; 1475 } else if (reprobe_flag && gid_info->gl_state == 1476 IBDM_GID_PROBING_COMPLETE) { 1477 /* 1478 * Reprobe all IOCs for the GID which has completed 1479 * probe. Skip other port GIDs to same IOU. 1480 * Explicitly set gl_reprobe_flag to 0 so that 1481 * IBnex is not notified on completion 1482 */ 1483 ibdm_ioc_info_t *ioc_info; 1484 uint8_t niocs, ii; 1485 1486 ASSERT(gid_info->gl_iou); 1487 mutex_enter(&gid_info->gl_mutex); 1488 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 1489 gid_info->gl_state = IBDM_GET_IOC_DETAILS; 1490 gid_info->gl_pending_cmds += niocs; 1491 gid_info->gl_reprobe_flag = 0; 1492 mutex_exit(&gid_info->gl_mutex); 1493 for (ii = 0; ii < niocs; ii++) { 1494 uchar_t slot_info; 1495 ib_dm_io_unitinfo_t *giou_info; 1496 1497 /* 1498 * Check whether IOC is present in the slot 1499 * Series of nibbles (in the field 1500 * iou_ctrl_list) represents a slot in the 1501 * IOU. 1502 * Byte format: 76543210 1503 * Bits 0-3 of first byte represent Slot 2 1504 * bits 4-7 of first byte represent slot 1, 1505 * bits 0-3 of second byte represent slot 4 1506 * and so on 1507 * Each 4-bit nibble has the following meaning 1508 * 0x0 : IOC not installed 1509 * 0x1 : IOC is present 1510 * 0xf : Slot does not exist 1511 * and all other values are reserved. 
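			 * For example (illustrative only): if
			 * iou_ctrl_list[0] were 0x1f, the high nibble (0x1)
			 * would mean slot 1 has an IOC installed and the low
			 * nibble (0xf) would mean slot 2 does not exist.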
1512 */ 1513 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii); 1514 giou_info = &gid_info->gl_iou->iou_info; 1515 slot_info = giou_info->iou_ctrl_list[(ii/2)]; 1516 if ((ii % 2) == 0) 1517 slot_info = (slot_info >> 4); 1518 1519 if ((slot_info & 0xf) != 1) { 1520 ioc_info->ioc_state = 1521 IBDM_IOC_STATE_PROBE_FAILED; 1522 ibdm_gid_decr_pending(gid_info); 1523 continue; 1524 } 1525 1526 if (ibdm_send_ioc_profile(gid_info, ii) != 1527 IBDM_SUCCESS) { 1528 ibdm_gid_decr_pending(gid_info); 1529 } 1530 } 1531 1532 return; 1533 } else if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) { 1534 mutex_enter(&ibdm.ibdm_mutex); 1535 --ibdm.ibdm_ngid_probes_in_progress; 1536 ibdm_wakeup_probe_gid_cv(); 1537 mutex_exit(&ibdm.ibdm_mutex); 1538 return; 1539 } 1540 1541 /* 1542 * Check whether the destination GID supports DM agents. If 1543 * not, stop probing the GID and continue with the next GID 1544 * in the list. 1545 */ 1546 if (ibdm_is_dev_mgt_supported(gid_info) != IBDM_SUCCESS) { 1547 mutex_enter(&gid_info->gl_mutex); 1548 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1549 mutex_exit(&gid_info->gl_mutex); 1550 ibdm_delete_glhca_list(gid_info); 1551 mutex_enter(&ibdm.ibdm_mutex); 1552 --ibdm.ibdm_ngid_probes_in_progress; 1553 ibdm_wakeup_probe_gid_cv(); 1554 mutex_exit(&ibdm.ibdm_mutex); 1555 return; 1556 } 1557 1558 /* Get the nodeguid and portguid of the port */ 1559 if (ibdm_get_node_port_guids(gid_info->gl_sa_hdl, gid_info->gl_dlid, 1560 &node_guid, &port_guid) != IBDM_SUCCESS) { 1561 mutex_enter(&gid_info->gl_mutex); 1562 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1563 mutex_exit(&gid_info->gl_mutex); 1564 ibdm_delete_glhca_list(gid_info); 1565 mutex_enter(&ibdm.ibdm_mutex); 1566 --ibdm.ibdm_ngid_probes_in_progress; 1567 ibdm_wakeup_probe_gid_cv(); 1568 mutex_exit(&ibdm.ibdm_mutex); 1569 return; 1570 } 1571 1572 /* 1573 * Check whether we already knew about this NodeGuid 1574 * If so, do not probe the GID and continue with the 1575 * next GID in the gid list. Set the GID state to 1576 * probing done. 1577 */ 1578 mutex_enter(&ibdm.ibdm_mutex); 1579 gid_info->gl_nodeguid = node_guid; 1580 gid_info->gl_portguid = port_guid; 1581 if (ibdm_check_dest_nodeguid(gid_info) != NULL) { 1582 mutex_exit(&ibdm.ibdm_mutex); 1583 mutex_enter(&gid_info->gl_mutex); 1584 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED; 1585 mutex_exit(&gid_info->gl_mutex); 1586 ibdm_delete_glhca_list(gid_info); 1587 mutex_enter(&ibdm.ibdm_mutex); 1588 --ibdm.ibdm_ngid_probes_in_progress; 1589 ibdm_wakeup_probe_gid_cv(); 1590 mutex_exit(&ibdm.ibdm_mutex); 1591 return; 1592 } 1593 ibdm_add_to_gl_gid(gid_info, gid_info); 1594 mutex_exit(&ibdm.ibdm_mutex); 1595 1596 /* 1597 * New or reinserted GID : Enable notification to IBnex 1598 */ 1599 mutex_enter(&gid_info->gl_mutex); 1600 gid_info->gl_reprobe_flag = 1; 1601 1602 /* 1603 * A Cisco FC GW needs the special handling to get IOUnitInfo. 
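	 * (The probe thread first sends a SetClassPortInfo request via
	 * ibdm_set_classportinfo() and then blocks in
	 * ibdm_wait_cisco_probe_completion() until the gateway responds;
	 * only then does it move on to the normal ClassPortInfo stage.)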
1604 */ 1605 if (ibdm_is_cisco_switch(gid_info)) { 1606 gid_info->gl_pending_cmds++; 1607 gid_info->gl_state = IBDM_SET_CLASSPORTINFO; 1608 mutex_exit(&gid_info->gl_mutex); 1609 1610 if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) { 1611 mutex_enter(&gid_info->gl_mutex); 1612 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1613 --gid_info->gl_pending_cmds; 1614 mutex_exit(&gid_info->gl_mutex); 1615 1616 /* free the hca_list on this gid_info */ 1617 ibdm_delete_glhca_list(gid_info); 1618 1619 mutex_enter(&ibdm.ibdm_mutex); 1620 --ibdm.ibdm_ngid_probes_in_progress; 1621 ibdm_wakeup_probe_gid_cv(); 1622 mutex_exit(&ibdm.ibdm_mutex); 1623 1624 return; 1625 } 1626 1627 mutex_enter(&gid_info->gl_mutex); 1628 ibdm_wait_cisco_probe_completion(gid_info); 1629 1630 IBTF_DPRINTF_L4("ibdm", "\tibdm_probe_gid_thread: " 1631 "CISCO Wakeup signal received"); 1632 } 1633 1634 /* move on to the 'GET_CLASSPORTINFO' stage */ 1635 gid_info->gl_pending_cmds++; 1636 gid_info->gl_state = IBDM_GET_CLASSPORTINFO; 1637 mutex_exit(&gid_info->gl_mutex); 1638 1639 IBTF_DPRINTF_L3(ibdm_string, "\tibdm_probe_gid_thread: " 1640 "%d: gid_info %p gl_state %d pending_cmds %d", 1641 __LINE__, gid_info, gid_info->gl_state, 1642 gid_info->gl_pending_cmds); 1643 1644 /* 1645 * Send ClassPortInfo request to the GID asynchronously. 1646 */ 1647 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) { 1648 1649 mutex_enter(&gid_info->gl_mutex); 1650 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1651 --gid_info->gl_pending_cmds; 1652 mutex_exit(&gid_info->gl_mutex); 1653 1654 /* free the hca_list on this gid_info */ 1655 ibdm_delete_glhca_list(gid_info); 1656 1657 mutex_enter(&ibdm.ibdm_mutex); 1658 --ibdm.ibdm_ngid_probes_in_progress; 1659 ibdm_wakeup_probe_gid_cv(); 1660 mutex_exit(&ibdm.ibdm_mutex); 1661 1662 return; 1663 } 1664 } 1665 1666 1667 /* 1668 * ibdm_check_dest_nodeguid 1669 * Searches for the NodeGuid in the GID list 1670 * Returns matching gid_info if found and otherwise NULL 1671 * 1672 * This function is called to handle new GIDs discovered 1673 * during device sweep / probe or for GID_AVAILABLE event. 1674 * 1675 * Parameter : 1676 * gid_info GID to check 1677 */ 1678 static ibdm_dp_gidinfo_t * 1679 ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *gid_info) 1680 { 1681 ibdm_dp_gidinfo_t *gid_list; 1682 ibdm_gid_t *tmp; 1683 1684 IBTF_DPRINTF_L4("ibdm", "\tcheck_dest_nodeguid"); 1685 1686 gid_list = ibdm.ibdm_dp_gidlist_head; 1687 while (gid_list) { 1688 if ((gid_list != gid_info) && 1689 (gid_info->gl_nodeguid == gid_list->gl_nodeguid)) { 1690 IBTF_DPRINTF_L4("ibdm", 1691 "\tcheck_dest_nodeguid: NodeGuid is present"); 1692 1693 /* Add to gid_list */ 1694 tmp = kmem_zalloc(sizeof (ibdm_gid_t), 1695 KM_SLEEP); 1696 tmp->gid_dgid_hi = gid_info->gl_dgid_hi; 1697 tmp->gid_dgid_lo = gid_info->gl_dgid_lo; 1698 tmp->gid_next = gid_list->gl_gid; 1699 gid_list->gl_gid = tmp; 1700 gid_list->gl_ngids++; 1701 return (gid_list); 1702 } 1703 1704 gid_list = gid_list->gl_next; 1705 } 1706 1707 return (NULL); 1708 } 1709 1710 1711 /* 1712 * ibdm_is_dev_mgt_supported 1713 * Get the PortInfo attribute (SA Query) 1714 * Check "CompatabilityMask" field in the Portinfo. 
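 *	(The field checked below is PortInfo.CapabilityMask; the code tests
 *	its SM_CAP_MASK_IS_DM_SUPPD bit.)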
 *	Return IBDM_SUCCESS if DM MADs are supported by the port
 *	(capability bit 19 set), otherwise IBDM_FAILURE
 */
static int
ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *gid_info)
{
	int			ret;
	size_t			length = 0;
	sa_portinfo_record_t	req, *resp = NULL;
	ibmf_saa_access_args_t	qargs;

	bzero(&req, sizeof (sa_portinfo_record_t));
	req.EndportLID = gid_info->gl_dlid;

	qargs.sq_attr_id	= SA_PORTINFORECORD_ATTRID;
	qargs.sq_access_type	= IBMF_SAA_RETRIEVE;
	qargs.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID;
	qargs.sq_template	= &req;
	qargs.sq_callback	= NULL;
	qargs.sq_callback_arg	= NULL;

	ret = ibmf_sa_access(gid_info->gl_sa_hdl,
	    &qargs, 0, &length, (void **)&resp);

	if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
		IBTF_DPRINTF_L2("ibdm", "\tis_dev_mgt_supported:"
		    "failed to get PORTINFO attribute %d", ret);
		return (IBDM_FAILURE);
	}

	if (resp->PortInfo.CapabilityMask & SM_CAP_MASK_IS_DM_SUPPD) {
		IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: SUPPD !!");
		ret = IBDM_SUCCESS;
	} else {
		IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: "
		    "Not SUPPD !!, cap 0x%x", resp->PortInfo.CapabilityMask);
		ret = IBDM_FAILURE;
	}
	kmem_free(resp, length);
	return (ret);
}


/*
 * ibdm_get_node_port_guids()
 *	Get the NodeInfoRecord of the port
 *	Save NodeGuid and PortGUID values in the GID list structure.
 *	Return IBDM_SUCCESS/IBDM_FAILURE
 */
static int
ibdm_get_node_port_guids(ibmf_saa_handle_t sa_hdl, ib_lid_t dlid,
    ib_guid_t *node_guid, ib_guid_t *port_guid)
{
	int			ret;
	size_t			length = 0;
	sa_node_record_t	req, *resp = NULL;
	ibmf_saa_access_args_t	qargs;

	IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids");

	bzero(&req, sizeof (sa_node_record_t));
	req.LID = dlid;

	qargs.sq_attr_id	= SA_NODERECORD_ATTRID;
	qargs.sq_access_type	= IBMF_SAA_RETRIEVE;
	qargs.sq_component_mask = SA_NODEINFO_COMPMASK_NODELID;
	qargs.sq_template	= &req;
	qargs.sq_callback	= NULL;
	qargs.sq_callback_arg	= NULL;

	ret = ibmf_sa_access(sa_hdl, &qargs, 0, &length, (void **)&resp);
	if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
		IBTF_DPRINTF_L2("ibdm", "\tget_node_port_guids:"
		    " SA Retrieve Failed: %d", ret);
		return (IBDM_FAILURE);
	}
	IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids: NodeGuid %llx Port"
	    "GUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID);

	*node_guid = resp->NodeInfo.NodeGUID;
	*port_guid = resp->NodeInfo.PortGUID;
	kmem_free(resp, length);
	return (IBDM_SUCCESS);
}


/*
 * ibdm_get_reachable_ports()
 *	Get list of the destination GID (and its path records) by
 *	querying the SA access.
1805 * 1806 * Returns Number paths 1807 */ 1808 static int 1809 ibdm_get_reachable_ports(ibdm_port_attr_t *portinfo, ibdm_hca_list_t *hca) 1810 { 1811 uint_t ii, jj, nrecs; 1812 uint_t npaths = 0; 1813 size_t length; 1814 ib_gid_t sgid; 1815 ibdm_pkey_tbl_t *pkey_tbl; 1816 sa_path_record_t *result; 1817 sa_path_record_t *precp; 1818 ibdm_dp_gidinfo_t *gid_info; 1819 1820 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 1821 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: portinfo %p", portinfo); 1822 1823 sgid.gid_prefix = portinfo->pa_sn_prefix; 1824 sgid.gid_guid = portinfo->pa_port_guid; 1825 1826 /* get reversible paths */ 1827 if (portinfo->pa_sa_hdl && ibmf_saa_paths_from_gid(portinfo->pa_sa_hdl, 1828 sgid, IBMF_SAA_PKEY_WC, B_TRUE, 0, &nrecs, &length, &result) 1829 != IBMF_SUCCESS) { 1830 IBTF_DPRINTF_L2("ibdm", 1831 "\tget_reachable_ports: Getting path records failed"); 1832 return (0); 1833 } 1834 1835 for (ii = 0; ii < nrecs; ii++) { 1836 sa_node_record_t *nrec; 1837 size_t length; 1838 1839 precp = &result[ii]; 1840 if ((gid_info = ibdm_check_dgid(precp->DGID.gid_guid, 1841 precp->DGID.gid_prefix)) != NULL) { 1842 IBTF_DPRINTF_L5("ibdm", "\tget_reachable_ports: " 1843 "Already exists nrecs %d, ii %d", nrecs, ii); 1844 ibdm_addto_glhcalist(gid_info, hca); 1845 continue; 1846 } 1847 /* 1848 * This is a new GID. Allocate a GID structure and 1849 * initialize the structure 1850 * gl_state is initialized to IBDM_GID_PROBE_NOT_DONE (0) 1851 * by kmem_zalloc call 1852 */ 1853 gid_info = kmem_zalloc(sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 1854 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 1855 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, NULL); 1856 gid_info->gl_dgid_hi = precp->DGID.gid_prefix; 1857 gid_info->gl_dgid_lo = precp->DGID.gid_guid; 1858 gid_info->gl_sgid_hi = precp->SGID.gid_prefix; 1859 gid_info->gl_sgid_lo = precp->SGID.gid_guid; 1860 gid_info->gl_p_key = precp->P_Key; 1861 gid_info->gl_sa_hdl = portinfo->pa_sa_hdl; 1862 gid_info->gl_ibmf_hdl = portinfo->pa_ibmf_hdl; 1863 gid_info->gl_slid = precp->SLID; 1864 gid_info->gl_dlid = precp->DLID; 1865 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID) 1866 << IBDM_GID_TRANSACTIONID_SHIFT; 1867 gid_info->gl_min_transactionID = gid_info->gl_transactionID; 1868 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1) 1869 << IBDM_GID_TRANSACTIONID_SHIFT; 1870 gid_info->gl_SL = precp->SL; 1871 1872 /* 1873 * get the node record with this guid if the destination 1874 * device is a Cisco one. 1875 */ 1876 if (ibdm_is_cisco(precp->DGID.gid_guid) && 1877 (gid_info->gl_nodeguid == 0 || gid_info->gl_devid == 0) && 1878 ibdm_get_node_record_by_port(portinfo->pa_sa_hdl, 1879 precp->DGID.gid_guid, &nrec, &length) == IBDM_SUCCESS) { 1880 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID; 1881 gid_info->gl_devid = nrec->NodeInfo.DeviceID; 1882 kmem_free(nrec, length); 1883 } 1884 1885 ibdm_addto_glhcalist(gid_info, hca); 1886 1887 ibdm_dump_path_info(precp); 1888 1889 gid_info->gl_qp_hdl = NULL; 1890 ASSERT(portinfo->pa_pkey_tbl != NULL && 1891 portinfo->pa_npkeys != 0); 1892 1893 for (jj = 0; jj < portinfo->pa_npkeys; jj++) { 1894 pkey_tbl = &portinfo->pa_pkey_tbl[jj]; 1895 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 1896 (pkey_tbl->pt_qp_hdl != NULL)) { 1897 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 1898 break; 1899 } 1900 } 1901 1902 /* 1903 * QP handle for GID not initialized. No matching Pkey 1904 * was found!! ibdm should *not* hit this case. Flag an 1905 * error and drop the GID if ibdm does encounter this. 
1906 */ 1907 if (gid_info->gl_qp_hdl == NULL) { 1908 IBTF_DPRINTF_L2(ibdm_string, 1909 "\tget_reachable_ports: No matching Pkey"); 1910 ibdm_delete_gidinfo(gid_info); 1911 continue; 1912 } 1913 if (ibdm.ibdm_dp_gidlist_head == NULL) { 1914 ibdm.ibdm_dp_gidlist_head = gid_info; 1915 ibdm.ibdm_dp_gidlist_tail = gid_info; 1916 } else { 1917 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 1918 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 1919 ibdm.ibdm_dp_gidlist_tail = gid_info; 1920 } 1921 npaths++; 1922 } 1923 kmem_free(result, length); 1924 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: npaths = %d", npaths); 1925 return (npaths); 1926 } 1927 1928 1929 /* 1930 * ibdm_check_dgid() 1931 * Look in the global list to check whether we know this DGID already 1932 * Return IBDM_GID_PRESENT/IBDM_GID_NOT_PRESENT 1933 */ 1934 static ibdm_dp_gidinfo_t * 1935 ibdm_check_dgid(ib_guid_t guid, ib_sn_prefix_t prefix) 1936 { 1937 ibdm_dp_gidinfo_t *gid_list; 1938 1939 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 1940 gid_list = gid_list->gl_next) { 1941 if ((guid == gid_list->gl_dgid_lo) && 1942 (prefix == gid_list->gl_dgid_hi)) { 1943 break; 1944 } 1945 } 1946 return (gid_list); 1947 } 1948 1949 1950 /* 1951 * ibdm_find_gid() 1952 * Look in the global list to find a GID entry with matching 1953 * port & node GUID. 1954 * Return pointer to gidinfo if found, else return NULL 1955 */ 1956 static ibdm_dp_gidinfo_t * 1957 ibdm_find_gid(ib_guid_t nodeguid, ib_guid_t portguid) 1958 { 1959 ibdm_dp_gidinfo_t *gid_list; 1960 1961 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid(%llx, %llx)\n", 1962 nodeguid, portguid); 1963 1964 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 1965 gid_list = gid_list->gl_next) { 1966 if ((portguid == gid_list->gl_portguid) && 1967 (nodeguid == gid_list->gl_nodeguid)) { 1968 break; 1969 } 1970 } 1971 1972 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid : returned %p\n", 1973 gid_list); 1974 return (gid_list); 1975 } 1976 1977 1978 /* 1979 * ibdm_set_classportinfo() 1980 * ibdm_set_classportinfo() is a function to activate a Cisco FC GW 1981 * by sending the setClassPortInfo request with the trapLID, trapGID 1982 * and etc. to the gateway since the gateway doesn't provide the IO 1983 * Unit Information othewise. This behavior is the Cisco specific one, 1984 * and this function is called to a Cisco FC GW only. 1985 * Returns IBDM_SUCCESS/IBDM_FAILURE 1986 */ 1987 static int 1988 ibdm_set_classportinfo(ibdm_dp_gidinfo_t *gid_info) 1989 { 1990 ibmf_msg_t *msg; 1991 ib_mad_hdr_t *hdr; 1992 ibdm_timeout_cb_args_t *cb_args; 1993 void *data; 1994 ib_mad_classportinfo_t *cpi; 1995 1996 IBTF_DPRINTF_L4("ibdm", 1997 "\tset_classportinfo: gid info 0x%p", gid_info); 1998 1999 /* 2000 * Send command to set classportinfo attribute. Allocate a IBMF 2001 * packet and initialize the packet. 
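 * The Set carries this HCA port's own GID, LID, P_Key and SL, plus the
 * QPN/Q_Key of the alternate QP, in the Trap* fields of the
 * ClassPortInfo (see the cpi setup below); the Cisco FC gateway needs
 * these before it will report its IO Unit information.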
2002 */ 2003 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2004 &msg) != IBMF_SUCCESS) { 2005 IBTF_DPRINTF_L4("ibdm", "\tset_classportinfo: pkt alloc fail"); 2006 return (IBDM_FAILURE); 2007 } 2008 2009 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2010 ibdm_alloc_send_buffers(msg); 2011 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2012 2013 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2014 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2015 msg->im_local_addr.ia_remote_qno = 1; 2016 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2017 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2018 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2019 2020 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2021 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2022 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2023 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2024 hdr->R_Method = IB_DM_DEVMGT_METHOD_SET; 2025 hdr->Status = 0; 2026 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2027 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 2028 hdr->AttributeModifier = 0; 2029 2030 data = msg->im_msgbufs_send.im_bufs_cl_data; 2031 cpi = (ib_mad_classportinfo_t *)data; 2032 2033 /* 2034 * Set the classportinfo values to activate this Cisco FC GW. 2035 */ 2036 cpi->TrapGID_hi = h2b64(gid_info->gl_sgid_hi); 2037 cpi->TrapGID_lo = h2b64(gid_info->gl_sgid_lo); 2038 cpi->TrapLID = h2b16(gid_info->gl_slid); 2039 cpi->TrapSL = gid_info->gl_SL; 2040 cpi->TrapP_Key = h2b16(gid_info->gl_p_key); 2041 cpi->TrapQP = h2b32((((ibmf_alt_qp_t *)gid_info->gl_qp_hdl)->isq_qpn)); 2042 cpi->TrapQ_Key = h2b32((((ibmf_alt_qp_t *) 2043 gid_info->gl_qp_hdl)->isq_qkey)); 2044 2045 cb_args = &gid_info->gl_cpi_cb_args; 2046 cb_args->cb_gid_info = gid_info; 2047 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2048 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO; 2049 2050 mutex_enter(&gid_info->gl_mutex); 2051 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2052 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2053 mutex_exit(&gid_info->gl_mutex); 2054 2055 IBTF_DPRINTF_L5("ibdm", "\tset_classportinfo: " 2056 "timeout id %x", gid_info->gl_timeout_id); 2057 2058 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 2059 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2060 IBTF_DPRINTF_L2("ibdm", 2061 "\tset_classportinfo: ibmf send failed"); 2062 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 2063 } 2064 2065 return (IBDM_SUCCESS); 2066 } 2067 2068 2069 /* 2070 * ibdm_send_classportinfo() 2071 * Send classportinfo request. When the request is completed 2072 * IBMF calls ibdm_classportinfo_cb routine to inform about 2073 * the completion. 2074 * Returns IBDM_SUCCESS/IBDM_FAILURE 2075 */ 2076 static int 2077 ibdm_send_classportinfo(ibdm_dp_gidinfo_t *gid_info) 2078 { 2079 ibmf_msg_t *msg; 2080 ib_mad_hdr_t *hdr; 2081 ibdm_timeout_cb_args_t *cb_args; 2082 2083 IBTF_DPRINTF_L4("ibdm", 2084 "\tsend_classportinfo: gid info 0x%p", gid_info); 2085 2086 /* 2087 * Send command to get classportinfo attribute. Allocate a IBMF 2088 * packet and initialize the packet. 
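 * (The completion is processed by ibdm_handle_classportinfo(), reached
 * through ibdm_ibmf_recv_cb()/ibdm_process_incoming_mad().)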
2089 */ 2090 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2091 &msg) != IBMF_SUCCESS) { 2092 IBTF_DPRINTF_L4("ibdm", "\tsend_classportinfo: pkt alloc fail"); 2093 return (IBDM_FAILURE); 2094 } 2095 2096 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2097 ibdm_alloc_send_buffers(msg); 2098 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2099 2100 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2101 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2102 msg->im_local_addr.ia_remote_qno = 1; 2103 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2104 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2105 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2106 2107 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2108 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2109 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2110 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2111 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2112 hdr->Status = 0; 2113 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2114 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 2115 hdr->AttributeModifier = 0; 2116 2117 cb_args = &gid_info->gl_cpi_cb_args; 2118 cb_args->cb_gid_info = gid_info; 2119 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2120 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO; 2121 2122 mutex_enter(&gid_info->gl_mutex); 2123 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2124 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2125 mutex_exit(&gid_info->gl_mutex); 2126 2127 IBTF_DPRINTF_L5("ibdm", "\tsend_classportinfo: " 2128 "timeout id %x", gid_info->gl_timeout_id); 2129 2130 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 2131 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2132 IBTF_DPRINTF_L2("ibdm", 2133 "\tsend_classportinfo: ibmf send failed"); 2134 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 2135 } 2136 2137 return (IBDM_SUCCESS); 2138 } 2139 2140 2141 /* 2142 * ibdm_handle_setclassportinfo() 2143 * Invoked by the IBMF when setClassPortInfo request is completed. 2144 */ 2145 static void 2146 ibdm_handle_setclassportinfo(ibmf_handle_t ibmf_hdl, 2147 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2148 { 2149 void *data; 2150 timeout_id_t timeout_id; 2151 ib_mad_classportinfo_t *cpi; 2152 2153 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo:ibmf hdl " 2154 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2155 2156 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) { 2157 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo: " 2158 "Not a ClassPortInfo resp"); 2159 *flag |= IBDM_IBMF_PKT_UNEXP_RESP; 2160 return; 2161 } 2162 2163 /* 2164 * Verify whether timeout handler is created/active. 
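 * (A note on the locking pattern used here and in the other response
 * handlers: gl_mutex is dropped around the untimeout() call, presumably
 * because untimeout() may have to wait for a running
 * ibdm_pkt_timeout_hdlr(), which itself acquires gl_mutex; gl_timeout_id
 * is cleared only after the mutex is re-taken.)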
2165 * If created/ active, cancel the timeout handler 2166 */ 2167 mutex_enter(&gid_info->gl_mutex); 2168 if (gid_info->gl_state != IBDM_SET_CLASSPORTINFO) { 2169 IBTF_DPRINTF_L2("ibdm", "\thandle_setclassportinfo:DUP resp"); 2170 *flag |= IBDM_IBMF_PKT_DUP_RESP; 2171 mutex_exit(&gid_info->gl_mutex); 2172 return; 2173 } 2174 ibdm_bump_transactionID(gid_info); 2175 2176 gid_info->gl_iou_cb_args.cb_req_type = 0; 2177 if (gid_info->gl_timeout_id) { 2178 timeout_id = gid_info->gl_timeout_id; 2179 mutex_exit(&gid_info->gl_mutex); 2180 IBTF_DPRINTF_L5("ibdm", "handle_setlassportinfo: " 2181 "gl_timeout_id = 0x%x", timeout_id); 2182 if (untimeout(timeout_id) == -1) { 2183 IBTF_DPRINTF_L2("ibdm", "handle_setclassportinfo: " 2184 "untimeout gl_timeout_id failed"); 2185 } 2186 mutex_enter(&gid_info->gl_mutex); 2187 gid_info->gl_timeout_id = 0; 2188 } 2189 mutex_exit(&gid_info->gl_mutex); 2190 2191 data = msg->im_msgbufs_recv.im_bufs_cl_data; 2192 cpi = (ib_mad_classportinfo_t *)data; 2193 2194 ibdm_dump_classportinfo(cpi); 2195 } 2196 2197 2198 /* 2199 * ibdm_handle_classportinfo() 2200 * Invoked by the IBMF when the classportinfo request is completed. 2201 */ 2202 static void 2203 ibdm_handle_classportinfo(ibmf_handle_t ibmf_hdl, 2204 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2205 { 2206 void *data; 2207 timeout_id_t timeout_id; 2208 ib_mad_hdr_t *hdr; 2209 ib_mad_classportinfo_t *cpi; 2210 2211 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo:ibmf hdl " 2212 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2213 2214 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) { 2215 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo: " 2216 "Not a ClassPortInfo resp"); 2217 *flag |= IBDM_IBMF_PKT_UNEXP_RESP; 2218 return; 2219 } 2220 2221 /* 2222 * Verify whether timeout handler is created/active. 2223 * If created/ active, cancel the timeout handler 2224 */ 2225 mutex_enter(&gid_info->gl_mutex); 2226 ibdm_bump_transactionID(gid_info); 2227 if (gid_info->gl_state != IBDM_GET_CLASSPORTINFO) { 2228 IBTF_DPRINTF_L2("ibdm", "\thandle_classportinfo:DUP resp"); 2229 *flag |= IBDM_IBMF_PKT_DUP_RESP; 2230 mutex_exit(&gid_info->gl_mutex); 2231 return; 2232 } 2233 gid_info->gl_iou_cb_args.cb_req_type = 0; 2234 if (gid_info->gl_timeout_id) { 2235 timeout_id = gid_info->gl_timeout_id; 2236 mutex_exit(&gid_info->gl_mutex); 2237 IBTF_DPRINTF_L5("ibdm", "handle_ioclassportinfo: " 2238 "gl_timeout_id = 0x%x", timeout_id); 2239 if (untimeout(timeout_id) == -1) { 2240 IBTF_DPRINTF_L2("ibdm", "handle_classportinfo: " 2241 "untimeout gl_timeout_id failed"); 2242 } 2243 mutex_enter(&gid_info->gl_mutex); 2244 gid_info->gl_timeout_id = 0; 2245 } 2246 gid_info->gl_state = IBDM_GET_IOUNITINFO; 2247 gid_info->gl_pending_cmds++; 2248 mutex_exit(&gid_info->gl_mutex); 2249 2250 data = msg->im_msgbufs_recv.im_bufs_cl_data; 2251 cpi = (ib_mad_classportinfo_t *)data; 2252 2253 /* 2254 * Cache the "RespTimeValue" and redirection information in the 2255 * global gid list data structure. This cached information will 2256 * be used to send any further requests to the GID. 2257 */ 2258 gid_info->gl_resp_timeout = 2259 (b2h32(cpi->RespTimeValue) & 0x1F); 2260 2261 gid_info->gl_redirected = ((IBDM_IN_IBMFMSG_STATUS(msg) & 2262 MAD_STATUS_REDIRECT_REQUIRED) ? 
B_TRUE: B_FALSE); 2263 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 2264 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 2265 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 2266 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 2267 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 2268 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 2269 gid_info->gl_redirectSL = cpi->RedirectSL; 2270 2271 ibdm_dump_classportinfo(cpi); 2272 2273 /* 2274 * Send IOUnitInfo request 2275 * Reuse previously allocated IBMF packet for sending ClassPortInfo 2276 * Check whether DM agent on the remote node requested redirection 2277 * If so, send the request to the redirect DGID/DLID/PKEY/QP. 2278 */ 2279 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2280 ibdm_alloc_send_buffers(msg); 2281 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2282 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2283 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2284 2285 if (gid_info->gl_redirected == B_TRUE) { 2286 if (gid_info->gl_redirect_dlid != 0) { 2287 msg->im_local_addr.ia_remote_lid = 2288 gid_info->gl_redirect_dlid; 2289 } 2290 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 2291 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 2292 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 2293 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 2294 } else { 2295 msg->im_local_addr.ia_remote_qno = 1; 2296 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2297 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2298 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2299 } 2300 2301 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2302 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2303 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2304 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2305 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2306 hdr->Status = 0; 2307 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2308 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 2309 hdr->AttributeModifier = 0; 2310 2311 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 2312 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 2313 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 2314 2315 mutex_enter(&gid_info->gl_mutex); 2316 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2317 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2318 mutex_exit(&gid_info->gl_mutex); 2319 2320 IBTF_DPRINTF_L5("ibdm", "handle_classportinfo:" 2321 "timeout %x", gid_info->gl_timeout_id); 2322 2323 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, NULL, 2324 ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != IBMF_SUCCESS) { 2325 IBTF_DPRINTF_L2("ibdm", 2326 "\thandle_classportinfo: msg transport failed"); 2327 ibdm_ibmf_send_cb(ibmf_hdl, msg, &gid_info->gl_iou_cb_args); 2328 } 2329 (*flag) |= IBDM_IBMF_PKT_REUSED; 2330 } 2331 2332 2333 /* 2334 * ibdm_send_iounitinfo: 2335 * Sends a DM request to get IOU unitinfo. 2336 */ 2337 static int 2338 ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *gid_info) 2339 { 2340 ibmf_msg_t *msg; 2341 ib_mad_hdr_t *hdr; 2342 2343 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: gid info 0x%p", gid_info); 2344 2345 /* 2346 * Send command to get iounitinfo attribute. Allocate a IBMF 2347 * packet and initialize the packet. 
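 * The response is delivered through ibdm_ibmf_recv_cb() and handled in
 * ibdm_handle_iounitinfo().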
2348 */ 2349 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, &msg) != 2350 IBMF_SUCCESS) { 2351 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: pkt alloc fail"); 2352 return (IBDM_FAILURE); 2353 } 2354 2355 mutex_enter(&gid_info->gl_mutex); 2356 ibdm_bump_transactionID(gid_info); 2357 mutex_exit(&gid_info->gl_mutex); 2358 2359 2360 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2361 ibdm_alloc_send_buffers(msg); 2362 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2363 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2364 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2365 msg->im_local_addr.ia_remote_qno = 1; 2366 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2367 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2368 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2369 2370 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2371 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2372 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2373 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2374 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2375 hdr->Status = 0; 2376 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2377 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 2378 hdr->AttributeModifier = 0; 2379 2380 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 2381 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 2382 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 2383 2384 mutex_enter(&gid_info->gl_mutex); 2385 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2386 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2387 mutex_exit(&gid_info->gl_mutex); 2388 2389 IBTF_DPRINTF_L5("ibdm", "send_iouunitinfo:" 2390 "timeout %x", gid_info->gl_timeout_id); 2391 2392 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 2393 NULL, ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != 2394 IBMF_SUCCESS) { 2395 IBTF_DPRINTF_L2("ibdm", "\tsend_iounitinfo: ibmf send failed"); 2396 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, 2397 msg, &gid_info->gl_iou_cb_args); 2398 } 2399 return (IBDM_SUCCESS); 2400 } 2401 2402 /* 2403 * ibdm_handle_iounitinfo() 2404 * Invoked by the IBMF when IO Unitinfo request is completed. 
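 *
 * In outline (an editorial summary of the code below): the pending
 * timeout is cancelled, the reported iou_changeid is compared against
 * the cached copy so that unchanged IOUs are skipped, gl_iou is
 * (re)allocated with one ibdm_ioc_info_t per controller slot, and an
 * IOCControllerProfile request is sent for every occupied slot (the
 * incoming message is reused for the first one).
 *
 * Slot occupancy is encoded two slots per byte of iou_ctrl_list; a
 * sketch of the decode used below, for 0-based slot index ii:
 *
 *	slot_info = giou_info->iou_ctrl_list[ii / 2];
 *	nibble = (ii % 2 == 0) ? (slot_info >> 4) & 0xf : slot_info & 0xf;
 *	occupied = (nibble == 0x1);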
2405 */ 2406 static void 2407 ibdm_handle_iounitinfo(ibmf_handle_t ibmf_hdl, 2408 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2409 { 2410 int ii, first = B_TRUE; 2411 int num_iocs; 2412 size_t size; 2413 uchar_t slot_info; 2414 timeout_id_t timeout_id; 2415 ib_mad_hdr_t *hdr; 2416 ibdm_ioc_info_t *ioc_info; 2417 ib_dm_io_unitinfo_t *iou_info; 2418 ib_dm_io_unitinfo_t *giou_info; 2419 ibdm_timeout_cb_args_t *cb_args; 2420 2421 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo:" 2422 " ibmf hdl %p pkt %p gid info %p", ibmf_hdl, msg, gid_info); 2423 2424 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_IO_UNITINFO) { 2425 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: " 2426 "Unexpected response"); 2427 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2428 return; 2429 } 2430 2431 mutex_enter(&gid_info->gl_mutex); 2432 if (gid_info->gl_state != IBDM_GET_IOUNITINFO) { 2433 IBTF_DPRINTF_L4("ibdm", 2434 "\thandle_iounitinfo: DUP resp"); 2435 mutex_exit(&gid_info->gl_mutex); 2436 (*flag) = IBDM_IBMF_PKT_DUP_RESP; 2437 return; 2438 } 2439 gid_info->gl_iou_cb_args.cb_req_type = 0; 2440 if (gid_info->gl_timeout_id) { 2441 timeout_id = gid_info->gl_timeout_id; 2442 mutex_exit(&gid_info->gl_mutex); 2443 IBTF_DPRINTF_L5("ibdm", "handle_iounitinfo: " 2444 "gl_timeout_id = 0x%x", timeout_id); 2445 if (untimeout(timeout_id) == -1) { 2446 IBTF_DPRINTF_L2("ibdm", "handle_iounitinfo: " 2447 "untimeout gl_timeout_id failed"); 2448 } 2449 mutex_enter(&gid_info->gl_mutex); 2450 gid_info->gl_timeout_id = 0; 2451 } 2452 gid_info->gl_state = IBDM_GET_IOC_DETAILS; 2453 2454 iou_info = IBDM_IN_IBMFMSG2IOU(msg); 2455 ibdm_dump_iounitinfo(iou_info); 2456 num_iocs = iou_info->iou_num_ctrl_slots; 2457 /* 2458 * check if number of IOCs reported is zero? if yes, return. 2459 * when num_iocs are reported zero internal IOC database needs 2460 * to be updated. To ensure that save the number of IOCs in 2461 * the new field "gl_num_iocs". Use a new field instead of 2462 * "giou_info->iou_num_ctrl_slots" as that would prevent 2463 * an unnecessary kmem_alloc/kmem_free when num_iocs is 0. 2464 */ 2465 if (num_iocs == 0 && gid_info->gl_num_iocs == 0) { 2466 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: no IOC's"); 2467 mutex_exit(&gid_info->gl_mutex); 2468 return; 2469 } 2470 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: num_iocs = %d", num_iocs); 2471 2472 /* 2473 * if there is an existing gl_iou (IOU has been probed before) 2474 * check if the "iou_changeid" is same as saved entry in 2475 * "giou_info->iou_changeid". 2476 * (note: this logic can prevent IOC enumeration if a given 2477 * vendor doesn't support setting iou_changeid field for its IOU) 2478 * 2479 * if there is an existing gl_iou and iou_changeid has changed : 2480 * free up existing gl_iou info and its related structures. 2481 * reallocate gl_iou info all over again. 2482 * if we donot free this up; then this leads to memory leaks 2483 */ 2484 if (gid_info->gl_iou) { 2485 giou_info = &gid_info->gl_iou->iou_info; 2486 if (b2h16(iou_info->iou_changeid) == 2487 giou_info->iou_changeid) { 2488 IBTF_DPRINTF_L3("ibdm", 2489 "\thandle_iounitinfo: no IOCs changed"); 2490 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 2491 mutex_exit(&gid_info->gl_mutex); 2492 return; 2493 } 2494 2495 /* 2496 * Store the iou info as prev_iou to be used after 2497 * sweep is done. 
2498 */ 2499 ASSERT(gid_info->gl_prev_iou == NULL); 2500 IBTF_DPRINTF_L4(ibdm_string, 2501 "\thandle_iounitinfo: setting gl_prev_iou %p", 2502 gid_info->gl_prev_iou); 2503 gid_info->gl_prev_iou = gid_info->gl_iou; 2504 ibdm.ibdm_prev_iou = 1; 2505 gid_info->gl_iou = NULL; 2506 } 2507 2508 size = sizeof (ibdm_iou_info_t) + num_iocs * sizeof (ibdm_ioc_info_t); 2509 gid_info->gl_iou = (ibdm_iou_info_t *)kmem_zalloc(size, KM_SLEEP); 2510 giou_info = &gid_info->gl_iou->iou_info; 2511 gid_info->gl_iou->iou_ioc_info = (ibdm_ioc_info_t *) 2512 ((char *)gid_info->gl_iou + sizeof (ibdm_iou_info_t)); 2513 2514 giou_info->iou_num_ctrl_slots = gid_info->gl_num_iocs = num_iocs; 2515 giou_info->iou_flag = iou_info->iou_flag; 2516 bcopy(iou_info->iou_ctrl_list, giou_info->iou_ctrl_list, 128); 2517 giou_info->iou_changeid = b2h16(iou_info->iou_changeid); 2518 gid_info->gl_pending_cmds++; /* for diag code */ 2519 mutex_exit(&gid_info->gl_mutex); 2520 2521 if (ibdm_get_diagcode(gid_info, 0) != IBDM_SUCCESS) { 2522 mutex_enter(&gid_info->gl_mutex); 2523 gid_info->gl_pending_cmds--; 2524 mutex_exit(&gid_info->gl_mutex); 2525 } 2526 /* 2527 * Parallelize getting IOC controller profiles from here. 2528 * Allocate IBMF packets and send commands to get IOC profile for 2529 * each IOC present on the IOU. 2530 */ 2531 for (ii = 0; ii < num_iocs; ii++) { 2532 /* 2533 * Check whether IOC is present in the slot 2534 * Series of nibbles (in the field iou_ctrl_list) represents 2535 * a slot in the IOU. 2536 * Byte format: 76543210 2537 * Bits 0-3 of first byte represent Slot 2 2538 * bits 4-7 of first byte represent slot 1, 2539 * bits 0-3 of second byte represent slot 4 and so on 2540 * Each 4-bit nibble has the following meaning 2541 * 0x0 : IOC not installed 2542 * 0x1 : IOC is present 2543 * 0xf : Slot does not exist 2544 * and all other values are reserved. 2545 */ 2546 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii); 2547 slot_info = giou_info->iou_ctrl_list[(ii/2)]; 2548 if ((ii % 2) == 0) 2549 slot_info = (slot_info >> 4); 2550 2551 if ((slot_info & 0xf) != 1) { 2552 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2553 "No IOC is present in the slot = %d", ii); 2554 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 2555 continue; 2556 } 2557 2558 mutex_enter(&gid_info->gl_mutex); 2559 ibdm_bump_transactionID(gid_info); 2560 mutex_exit(&gid_info->gl_mutex); 2561 2562 /* 2563 * Re use the already allocated packet (for IOUnitinfo) to 2564 * send the first IOC controller attribute. 
Allocate new 2565 * IBMF packets for the rest of the IOC's 2566 */ 2567 if (first != B_TRUE) { 2568 msg = NULL; 2569 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2570 &msg) != IBMF_SUCCESS) { 2571 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2572 "IBMF packet allocation failed"); 2573 continue; 2574 } 2575 2576 } 2577 2578 /* allocate send buffers for all messages */ 2579 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2580 ibdm_alloc_send_buffers(msg); 2581 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2582 2583 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2584 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2585 if (gid_info->gl_redirected == B_TRUE) { 2586 if (gid_info->gl_redirect_dlid != 0) { 2587 msg->im_local_addr.ia_remote_lid = 2588 gid_info->gl_redirect_dlid; 2589 } 2590 msg->im_local_addr.ia_remote_qno = 2591 gid_info->gl_redirect_QP; 2592 msg->im_local_addr.ia_p_key = 2593 gid_info->gl_redirect_pkey; 2594 msg->im_local_addr.ia_q_key = 2595 gid_info->gl_redirect_qkey; 2596 msg->im_local_addr.ia_service_level = 2597 gid_info->gl_redirectSL; 2598 } else { 2599 msg->im_local_addr.ia_remote_qno = 1; 2600 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2601 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2602 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2603 } 2604 2605 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2606 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2607 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2608 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2609 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2610 hdr->Status = 0; 2611 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2612 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 2613 hdr->AttributeModifier = h2b32(ii + 1); 2614 2615 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_INVALID; 2616 cb_args = &ioc_info->ioc_cb_args; 2617 cb_args->cb_gid_info = gid_info; 2618 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2619 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 2620 cb_args->cb_ioc_num = ii; 2621 2622 mutex_enter(&gid_info->gl_mutex); 2623 gid_info->gl_pending_cmds++; /* for diag code */ 2624 2625 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2626 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2627 mutex_exit(&gid_info->gl_mutex); 2628 2629 IBTF_DPRINTF_L5("ibdm", "\thandle_iounitinfo:" 2630 "timeout 0x%x, ioc_num %d", ioc_info->ioc_timeout_id, ii); 2631 2632 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 2633 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2634 IBTF_DPRINTF_L2("ibdm", 2635 "\thandle_iounitinfo: msg transport failed"); 2636 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 2637 } 2638 (*flag) |= IBDM_IBMF_PKT_REUSED; 2639 first = B_FALSE; 2640 gid_info->gl_iou->iou_niocs_probe_in_progress++; 2641 } 2642 } 2643 2644 2645 /* 2646 * ibdm_handle_ioc_profile() 2647 * Invoked by the IBMF when the IOCControllerProfile request 2648 * gets completed 2649 */ 2650 static void 2651 ibdm_handle_ioc_profile(ibmf_handle_t ibmf_hdl, 2652 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2653 { 2654 int first = B_TRUE, reprobe = 0; 2655 uint_t ii, ioc_no, srv_start; 2656 uint_t nserv_entries; 2657 timeout_id_t timeout_id; 2658 ib_mad_hdr_t *hdr; 2659 ibdm_ioc_info_t *ioc_info; 2660 ibdm_timeout_cb_args_t *cb_args; 2661 ib_dm_ioc_ctrl_profile_t *ioc, *gioc; 2662 2663 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:" 2664 " ibmf hdl %p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2665 2666 ioc = IBDM_IN_IBMFMSG2IOC(msg); 2667 /* 2668 * Check whether we know this IOC already 2669 * This 
will return NULL if a reprobe is in progress 2670 * (the IOC is then in the IBDM_IOC_STATE_REPROBE_PROGRESS state). 2671 * Do not hold mutexes here. 2672 */ 2673 if (ibdm_is_ioc_present(ioc->ioc_guid, gid_info, flag) != NULL) { 2674 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:" 2675 "IOC guid %llx is present", ioc->ioc_guid); 2676 return; 2677 } 2678 ioc_no = IBDM_IN_IBMFMSG_ATTRMOD(msg); 2679 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile: ioc_no = %d", ioc_no-1); 2680 2681 /* Make sure that the IOC index is within the valid range */ 2682 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) { 2683 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: " 2684 "IOC index Out of range, index %d", ioc_no); 2685 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2686 return; 2687 } 2688 ioc_info = &gid_info->gl_iou->iou_ioc_info[ioc_no - 1]; 2689 ioc_info->ioc_iou_info = gid_info->gl_iou; 2690 2691 mutex_enter(&gid_info->gl_mutex); 2692 if (ioc_info->ioc_state == IBDM_IOC_STATE_REPROBE_PROGRESS) { 2693 reprobe = 1; 2694 ioc_info->ioc_prev_serv = ioc_info->ioc_serv; 2695 ioc_info->ioc_serv = NULL; 2696 ioc_info->ioc_prev_serv_cnt = 2697 ioc_info->ioc_profile.ioc_service_entries; 2698 } else if (ioc_info->ioc_state != IBDM_IOC_STATE_PROBE_INVALID) { 2699 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: DUP response" 2700 "ioc %d, ioc_state %x", ioc_no - 1, ioc_info->ioc_state); 2701 mutex_exit(&gid_info->gl_mutex); 2702 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2703 return; 2704 } 2705 ioc_info->ioc_cb_args.cb_req_type = 0; 2706 if (ioc_info->ioc_timeout_id) { 2707 timeout_id = ioc_info->ioc_timeout_id; 2708 ioc_info->ioc_timeout_id = 0; 2709 mutex_exit(&gid_info->gl_mutex); 2710 IBTF_DPRINTF_L5("ibdm", "handle_ioc_profile: " 2711 "ioc_timeout_id = 0x%x", timeout_id); 2712 if (untimeout(timeout_id) == -1) { 2713 IBTF_DPRINTF_L2("ibdm", "handle_ioc_profile: " 2714 "untimeout ioc_timeout_id failed"); 2715 } 2716 mutex_enter(&gid_info->gl_mutex); 2717 } 2718 2719 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_SUCCESS; 2720 if (reprobe == 0) { 2721 ioc_info->ioc_iou_guid = gid_info->gl_nodeguid; 2722 ioc_info->ioc_nodeguid = gid_info->gl_nodeguid; 2723 } 2724 2725 /* 2726 * Save all the IOC information in the global structures. 2727 * Note that the wire format is big endian and SPARC processors are 2728 * also big endian, so there is no need to convert those data fields. 2729 * The conversion routines used below are no-ops on SPARC 2730 * machines, whereas they do byte-swap on little endian 2731 * machines such as Intel processors.
2732 */ 2733 gioc = (ib_dm_ioc_ctrl_profile_t *)&ioc_info->ioc_profile; 2734 2735 /* 2736 * Restrict updates to only the port GIDs and service entries during reprobe 2737 */ 2738 if (reprobe == 0) { 2739 gioc->ioc_guid = b2h64(ioc->ioc_guid); 2740 gioc->ioc_vendorid = 2741 ((b2h32(ioc->ioc_vendorid) & IB_DM_VENDORID_MASK) 2742 >> IB_DM_VENDORID_SHIFT); 2743 gioc->ioc_deviceid = b2h32(ioc->ioc_deviceid); 2744 gioc->ioc_device_ver = b2h16(ioc->ioc_device_ver); 2745 gioc->ioc_subsys_vendorid = 2746 ((b2h32(ioc->ioc_subsys_vendorid) & IB_DM_VENDORID_MASK) 2747 >> IB_DM_VENDORID_SHIFT); 2748 gioc->ioc_subsys_id = b2h32(ioc->ioc_subsys_id); 2749 gioc->ioc_io_class = b2h16(ioc->ioc_io_class); 2750 gioc->ioc_io_subclass = b2h16(ioc->ioc_io_subclass); 2751 gioc->ioc_protocol = b2h16(ioc->ioc_protocol); 2752 gioc->ioc_protocol_ver = b2h16(ioc->ioc_protocol_ver); 2753 gioc->ioc_send_msg_qdepth = 2754 b2h16(ioc->ioc_send_msg_qdepth); 2755 gioc->ioc_rdma_read_qdepth = 2756 b2h16(ioc->ioc_rdma_read_qdepth); 2757 gioc->ioc_send_msg_sz = b2h32(ioc->ioc_send_msg_sz); 2758 gioc->ioc_rdma_xfer_sz = b2h32(ioc->ioc_rdma_xfer_sz); 2759 gioc->ioc_ctrl_opcap_mask = ioc->ioc_ctrl_opcap_mask; 2760 bcopy(ioc->ioc_id_string, gioc->ioc_id_string, 2761 IB_DM_IOC_ID_STRING_LEN); 2762 2763 ioc_info->ioc_iou_diagcode = gid_info->gl_iou->iou_diagcode; 2764 ioc_info->ioc_iou_dc_valid = gid_info->gl_iou->iou_dc_valid; 2765 ioc_info->ioc_diagdeviceid = (IB_DM_IOU_DEVICEID_MASK & 2766 gid_info->gl_iou->iou_info.iou_flag) ? B_TRUE : B_FALSE; 2767 2768 if (ioc_info->ioc_diagdeviceid == B_TRUE) { 2769 gid_info->gl_pending_cmds++; 2770 IBTF_DPRINTF_L3(ibdm_string, 2771 "\tibdm_handle_ioc_profile: " 2772 "%d: gid_info %p gl_state %d pending_cmds %d", 2773 __LINE__, gid_info, gid_info->gl_state, 2774 gid_info->gl_pending_cmds); 2775 } 2776 } 2777 gioc->ioc_service_entries = ioc->ioc_service_entries; 2778 mutex_exit(&gid_info->gl_mutex); 2779 2780 ibdm_dump_ioc_profile(gioc); 2781 2782 if ((ioc_info->ioc_diagdeviceid == B_TRUE) && (reprobe == 0)) { 2783 if (ibdm_get_diagcode(gid_info, ioc_no) != IBDM_SUCCESS) { 2784 mutex_enter(&gid_info->gl_mutex); 2785 gid_info->gl_pending_cmds--; 2786 mutex_exit(&gid_info->gl_mutex); 2787 } 2788 } 2789 ioc_info->ioc_serv = (ibdm_srvents_info_t *)kmem_zalloc( 2790 (gioc->ioc_service_entries * sizeof (ibdm_srvents_info_t)), 2791 KM_SLEEP); 2792 2793 /* 2794 * In a single request, a maximum of four service entries can be 2795 * obtained. If there are more than four service entries, calculate 2796 * the number of requests needed and send them in parallel.
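 * For example (an illustrative reading of the loop below): an IOC that
 * reports ten service entries is fetched with three ServiceEntries
 * requests covering indexes 0-3, 4-7 and 8-9. ibdm_fill_srv_attr_mod()
 * encodes the IOC number in the upper 16 bits of the AttributeModifier
 * and the start/end indexes of the range in its two low-order bytes
 * (see the decode in ibdm_handle_srventry_mad()).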
2797 */ 2798 nserv_entries = ioc->ioc_service_entries; 2799 ii = 0; 2800 while (nserv_entries) { 2801 mutex_enter(&gid_info->gl_mutex); 2802 gid_info->gl_pending_cmds++; 2803 ibdm_bump_transactionID(gid_info); 2804 mutex_exit(&gid_info->gl_mutex); 2805 2806 if (first != B_TRUE) { 2807 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2808 &msg) != IBMF_SUCCESS) { 2809 continue; 2810 } 2811 2812 } 2813 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2814 ibdm_alloc_send_buffers(msg); 2815 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2816 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2817 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2818 if (gid_info->gl_redirected == B_TRUE) { 2819 if (gid_info->gl_redirect_dlid != 0) { 2820 msg->im_local_addr.ia_remote_lid = 2821 gid_info->gl_redirect_dlid; 2822 } 2823 msg->im_local_addr.ia_remote_qno = 2824 gid_info->gl_redirect_QP; 2825 msg->im_local_addr.ia_p_key = 2826 gid_info->gl_redirect_pkey; 2827 msg->im_local_addr.ia_q_key = 2828 gid_info->gl_redirect_qkey; 2829 msg->im_local_addr.ia_service_level = 2830 gid_info->gl_redirectSL; 2831 } else { 2832 msg->im_local_addr.ia_remote_qno = 1; 2833 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2834 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2835 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2836 } 2837 2838 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2839 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2840 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2841 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2842 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2843 hdr->Status = 0; 2844 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2845 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 2846 2847 srv_start = ii * 4; 2848 cb_args = &ioc_info->ioc_serv[srv_start].se_cb_args; 2849 cb_args->cb_gid_info = gid_info; 2850 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2851 cb_args->cb_req_type = IBDM_REQ_TYPE_SRVENTS; 2852 cb_args->cb_srvents_start = srv_start; 2853 cb_args->cb_ioc_num = ioc_no - 1; 2854 2855 if (nserv_entries >= IBDM_MAX_SERV_ENTRIES_PER_REQ) { 2856 nserv_entries -= IBDM_MAX_SERV_ENTRIES_PER_REQ; 2857 cb_args->cb_srvents_end = (cb_args->cb_srvents_start + 2858 IBDM_MAX_SERV_ENTRIES_PER_REQ - 1); 2859 } else { 2860 cb_args->cb_srvents_end = 2861 (cb_args->cb_srvents_start + nserv_entries - 1); 2862 nserv_entries = 0; 2863 } 2864 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 2865 ibdm_fill_srv_attr_mod(hdr, cb_args); 2866 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 2867 2868 mutex_enter(&gid_info->gl_mutex); 2869 ioc_info->ioc_serv[srv_start].se_timeout_id = timeout( 2870 ibdm_pkt_timeout_hdlr, cb_args, 2871 IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2872 mutex_exit(&gid_info->gl_mutex); 2873 2874 IBTF_DPRINTF_L5("ibdm", "\thandle_ioc_profile:" 2875 "timeout %x, ioc %d srv %d", 2876 ioc_info->ioc_serv[srv_start].se_timeout_id, 2877 ioc_no - 1, srv_start); 2878 2879 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 2880 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2881 IBTF_DPRINTF_L2("ibdm", 2882 "\thandle_ioc_profile: msg send failed"); 2883 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 2884 } 2885 (*flag) |= IBDM_IBMF_PKT_REUSED; 2886 first = B_FALSE; 2887 ii++; 2888 } 2889 } 2890 2891 2892 /* 2893 * ibdm_handle_srventry_mad() 2894 */ 2895 static void 2896 ibdm_handle_srventry_mad(ibmf_msg_t *msg, 2897 ibdm_dp_gidinfo_t *gid_info, int *flag) 2898 { 2899 uint_t ii, ioc_no, attrmod; 2900 uint_t nentries, start, end; 2901 timeout_id_t timeout_id; 2902 ib_dm_srv_t *srv_ents; 2903 
ibdm_ioc_info_t *ioc_info; 2904 ibdm_srvents_info_t *gsrv_ents; 2905 2906 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad:" 2907 " IBMF msg %p gid info %p", msg, gid_info); 2908 2909 srv_ents = IBDM_IN_IBMFMSG2SRVENT(msg); 2910 /* 2911 * Get the start and end index of the service entries 2912 * Upper 16 bits identify the IOC 2913 * Lower 16 bits specify the range of service entries 2914 * LSB specifies (Big endian) end of the range 2915 * MSB specifies (Big endian) start of the range 2916 */ 2917 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 2918 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 2919 end = ((attrmod >> 8) & IBDM_8_BIT_MASK); 2920 start = (attrmod & IBDM_8_BIT_MASK); 2921 2922 /* Make sure that IOC index is with the valid range */ 2923 if ((ioc_no < 1) | 2924 (ioc_no > gid_info->gl_iou->iou_info.iou_num_ctrl_slots)) { 2925 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2926 "IOC index Out of range, index %d", ioc_no); 2927 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2928 return; 2929 } 2930 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 2931 2932 /* 2933 * Make sure that the "start" and "end" service indexes are 2934 * with in the valid range 2935 */ 2936 nentries = ioc_info->ioc_profile.ioc_service_entries; 2937 if ((start > end) | (start >= nentries) | (end >= nentries)) { 2938 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2939 "Attr modifier 0x%x, #Serv entries %d", attrmod, nentries); 2940 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2941 return; 2942 } 2943 gsrv_ents = &ioc_info->ioc_serv[start]; 2944 mutex_enter(&gid_info->gl_mutex); 2945 if (gsrv_ents->se_state != IBDM_SE_INVALID) { 2946 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2947 "already known, ioc %d, srv %d, se_state %x", 2948 ioc_no - 1, start, gsrv_ents->se_state); 2949 mutex_exit(&gid_info->gl_mutex); 2950 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2951 return; 2952 } 2953 ioc_info->ioc_serv[start].se_cb_args.cb_req_type = 0; 2954 if (ioc_info->ioc_serv[start].se_timeout_id) { 2955 IBTF_DPRINTF_L2("ibdm", 2956 "\thandle_srventry_mad: ioc %d start %d", ioc_no, start); 2957 timeout_id = ioc_info->ioc_serv[start].se_timeout_id; 2958 ioc_info->ioc_serv[start].se_timeout_id = 0; 2959 mutex_exit(&gid_info->gl_mutex); 2960 IBTF_DPRINTF_L5("ibdm", "handle_srverntry_mad: " 2961 "se_timeout_id = 0x%x", timeout_id); 2962 if (untimeout(timeout_id) == -1) { 2963 IBTF_DPRINTF_L2("ibdm", "handle_srventry_mad: " 2964 "untimeout se_timeout_id failed"); 2965 } 2966 mutex_enter(&gid_info->gl_mutex); 2967 } 2968 2969 gsrv_ents->se_state = IBDM_SE_VALID; 2970 mutex_exit(&gid_info->gl_mutex); 2971 for (ii = start; ii <= end; ii++, srv_ents++, gsrv_ents++) { 2972 gsrv_ents->se_attr.srv_id = b2h64(srv_ents->srv_id); 2973 bcopy(srv_ents->srv_name, 2974 gsrv_ents->se_attr.srv_name, IB_DM_MAX_SVC_NAME_LEN); 2975 ibdm_dump_service_entries(&gsrv_ents->se_attr); 2976 } 2977 } 2978 2979 2980 /* 2981 * ibdm_get_diagcode: 2982 * Send request to get IOU/IOC diag code 2983 * Returns IBDM_SUCCESS/IBDM_FAILURE 2984 */ 2985 static int 2986 ibdm_get_diagcode(ibdm_dp_gidinfo_t *gid_info, int attr) 2987 { 2988 ibmf_msg_t *msg; 2989 ib_mad_hdr_t *hdr; 2990 ibdm_ioc_info_t *ioc; 2991 ibdm_timeout_cb_args_t *cb_args; 2992 timeout_id_t *timeout_id; 2993 2994 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: gid info %p, attr = %d", 2995 gid_info, attr); 2996 2997 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2998 &msg) != IBMF_SUCCESS) { 2999 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: pkt alloc fail"); 3000 return (IBDM_FAILURE); 3001 } 3002 3003 
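	/*
	 * Attribute modifier convention for this request (editorial note):
	 * attr == 0 asks for the IOU DiagCode, attr == n (n >= 1) asks for
	 * the DiagCode of IOC slot n - 1, e.g.
	 *
	 *	ibdm_get_diagcode(gid_info, 0);		IOU diag code
	 *	ibdm_get_diagcode(gid_info, ioc_no);	IOC ioc_no - 1
	 *
	 * The response is demultiplexed on the same modifier in
	 * ibdm_handle_diagcode().
	 */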
_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 3004 ibdm_alloc_send_buffers(msg); 3005 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 3006 3007 mutex_enter(&gid_info->gl_mutex); 3008 ibdm_bump_transactionID(gid_info); 3009 mutex_exit(&gid_info->gl_mutex); 3010 3011 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 3012 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 3013 if (gid_info->gl_redirected == B_TRUE) { 3014 if (gid_info->gl_redirect_dlid != 0) { 3015 msg->im_local_addr.ia_remote_lid = 3016 gid_info->gl_redirect_dlid; 3017 } 3018 3019 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3020 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3021 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3022 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 3023 } else { 3024 msg->im_local_addr.ia_remote_qno = 1; 3025 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 3026 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 3027 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 3028 } 3029 3030 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3031 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3032 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3033 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3034 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3035 hdr->Status = 0; 3036 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3037 3038 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3039 hdr->AttributeModifier = h2b32(attr); 3040 3041 if (attr == 0) { 3042 cb_args = &gid_info->gl_iou_cb_args; 3043 gid_info->gl_iou->iou_dc_valid = B_FALSE; 3044 cb_args->cb_ioc_num = 0; 3045 cb_args->cb_req_type = IBDM_REQ_TYPE_IOU_DIAGCODE; 3046 timeout_id = &gid_info->gl_timeout_id; 3047 } else { 3048 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attr - 1)); 3049 ioc->ioc_dc_valid = B_FALSE; 3050 cb_args = &ioc->ioc_dc_cb_args; 3051 cb_args->cb_ioc_num = attr - 1; 3052 cb_args->cb_req_type = IBDM_REQ_TYPE_IOC_DIAGCODE; 3053 timeout_id = &ioc->ioc_dc_timeout_id; 3054 } 3055 cb_args->cb_gid_info = gid_info; 3056 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 3057 cb_args->cb_srvents_start = 0; 3058 3059 mutex_enter(&gid_info->gl_mutex); 3060 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3061 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3062 mutex_exit(&gid_info->gl_mutex); 3063 3064 IBTF_DPRINTF_L5("ibdm", "\tget_diagcode:" 3065 "timeout %x, ioc %d", *timeout_id, cb_args->cb_ioc_num); 3066 3067 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 3068 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3069 IBTF_DPRINTF_L2("ibdm", "\tget_diagcode: ibmf send failed"); 3070 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3071 } 3072 return (IBDM_SUCCESS); 3073 } 3074 3075 /* 3076 * ibdm_handle_diagcode: 3077 * Process the DiagCode MAD response and update local DM 3078 * data structure. 
3079 */ 3080 static void 3081 ibdm_handle_diagcode(ibmf_msg_t *ibmf_msg, 3082 ibdm_dp_gidinfo_t *gid_info, int *flag) 3083 { 3084 uint16_t attrmod, *diagcode; 3085 ibdm_iou_info_t *iou; 3086 ibdm_ioc_info_t *ioc; 3087 timeout_id_t timeout_id; 3088 ibdm_timeout_cb_args_t *cb_args; 3089 3090 diagcode = (uint16_t *)ibmf_msg->im_msgbufs_recv.im_bufs_cl_data; 3091 3092 mutex_enter(&gid_info->gl_mutex); 3093 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(ibmf_msg); 3094 iou = gid_info->gl_iou; 3095 if (attrmod == 0) { 3096 if (iou->iou_dc_valid != B_FALSE) { 3097 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 3098 IBTF_DPRINTF_L4("ibdm", 3099 "\thandle_diagcode: Duplicate IOU DiagCode"); 3100 mutex_exit(&gid_info->gl_mutex); 3101 return; 3102 } 3103 cb_args = &gid_info->gl_iou_cb_args; 3104 cb_args->cb_req_type = 0; 3105 iou->iou_diagcode = b2h16(*diagcode); 3106 iou->iou_dc_valid = B_TRUE; 3107 if (gid_info->gl_timeout_id) { 3108 timeout_id = gid_info->gl_timeout_id; 3109 mutex_exit(&gid_info->gl_mutex); 3110 IBTF_DPRINTF_L5("ibdm", "\thandle_diagcode: " 3111 "gl_timeout_id = 0x%x", timeout_id); 3112 if (untimeout(timeout_id) == -1) { 3113 IBTF_DPRINTF_L2("ibdm", "handle_diagcode: " 3114 "untimeout gl_timeout_id failed"); 3115 } 3116 mutex_enter(&gid_info->gl_mutex); 3117 gid_info->gl_timeout_id = 0; 3118 } 3119 } else { 3120 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod - 1)); 3121 if (ioc->ioc_dc_valid != B_FALSE) { 3122 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 3123 IBTF_DPRINTF_L4("ibdm", 3124 "\thandle_diagcode: Duplicate IOC DiagCode"); 3125 mutex_exit(&gid_info->gl_mutex); 3126 return; 3127 } 3128 cb_args = &ioc->ioc_dc_cb_args; 3129 cb_args->cb_req_type = 0; 3130 ioc->ioc_diagcode = b2h16(*diagcode); 3131 ioc->ioc_dc_valid = B_TRUE; 3132 timeout_id = iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id; 3133 if (timeout_id) { 3134 iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id = 0; 3135 mutex_exit(&gid_info->gl_mutex); 3136 IBTF_DPRINTF_L5("ibdm", "handle_diagcode: " 3137 "timeout_id = 0x%x", timeout_id); 3138 if (untimeout(timeout_id) == -1) { 3139 IBTF_DPRINTF_L2("ibdm", "\thandle_diagcode: " 3140 "untimeout ioc_dc_timeout_id failed"); 3141 } 3142 mutex_enter(&gid_info->gl_mutex); 3143 } 3144 } 3145 mutex_exit(&gid_info->gl_mutex); 3146 3147 IBTF_DPRINTF_L4("ibdm", "\thandle_diagcode: DiagCode : 0x%x" 3148 "attrmod : 0x%x", b2h16(*diagcode), attrmod); 3149 } 3150 3151 3152 /* 3153 * ibdm_is_ioc_present() 3154 * Return ibdm_ioc_info_t if IOC guid is found in the global gid list 3155 */ 3156 static ibdm_ioc_info_t * 3157 ibdm_is_ioc_present(ib_guid_t ioc_guid, 3158 ibdm_dp_gidinfo_t *gid_info, int *flag) 3159 { 3160 int ii; 3161 ibdm_ioc_info_t *ioc; 3162 ibdm_dp_gidinfo_t *head; 3163 ib_dm_io_unitinfo_t *iou; 3164 3165 mutex_enter(&ibdm.ibdm_mutex); 3166 head = ibdm.ibdm_dp_gidlist_head; 3167 while (head) { 3168 mutex_enter(&head->gl_mutex); 3169 if (head->gl_iou == NULL) { 3170 mutex_exit(&head->gl_mutex); 3171 head = head->gl_next; 3172 continue; 3173 } 3174 iou = &head->gl_iou->iou_info; 3175 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 3176 ioc = IBDM_GIDINFO2IOCINFO(head, ii); 3177 if ((ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) && 3178 (ioc->ioc_profile.ioc_guid == ioc_guid)) { 3179 if (gid_info == head) { 3180 *flag |= IBDM_IBMF_PKT_DUP_RESP; 3181 } else if (ibdm_check_dgid(head->gl_dgid_lo, 3182 head->gl_dgid_hi) != NULL) { 3183 IBTF_DPRINTF_L4("ibdm", "\tis_ioc_" 3184 "present: gid not present"); 3185 ibdm_add_to_gl_gid(gid_info, head); 3186 } 3187 mutex_exit(&head->gl_mutex); 3188 
mutex_exit(&ibdm.ibdm_mutex); 3189 return (ioc); 3190 } 3191 } 3192 mutex_exit(&head->gl_mutex); 3193 head = head->gl_next; 3194 } 3195 mutex_exit(&ibdm.ibdm_mutex); 3196 return (NULL); 3197 } 3198 3199 3200 /* 3201 * ibdm_ibmf_send_cb() 3202 * IBMF invokes this callback routine after posting the DM MAD to 3203 * the HCA. 3204 */ 3205 /*ARGSUSED*/ 3206 static void 3207 ibdm_ibmf_send_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *ibmf_msg, void *arg) 3208 { 3209 ibdm_dump_ibmf_msg(ibmf_msg, 1); 3210 ibdm_free_send_buffers(ibmf_msg); 3211 if (ibmf_free_msg(ibmf_hdl, &ibmf_msg) != IBMF_SUCCESS) { 3212 IBTF_DPRINTF_L4("ibdm", 3213 "\tibmf_send_cb: IBMF free msg failed"); 3214 } 3215 } 3216 3217 3218 /* 3219 * ibdm_ibmf_recv_cb() 3220 * Invoked by the IBMF when a response to the one of the DM requests 3221 * is received. 3222 */ 3223 /*ARGSUSED*/ 3224 static void 3225 ibdm_ibmf_recv_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 3226 { 3227 ibdm_taskq_args_t *taskq_args; 3228 3229 /* 3230 * If the taskq enable is set then dispatch a taskq to process 3231 * the MAD, otherwise just process it on this thread 3232 */ 3233 if (ibdm_taskq_enable != IBDM_ENABLE_TASKQ_HANDLING) { 3234 ibdm_process_incoming_mad(ibmf_hdl, msg, arg); 3235 return; 3236 } 3237 3238 /* 3239 * create a taskq and dispatch it to process the incoming MAD 3240 */ 3241 taskq_args = kmem_alloc(sizeof (ibdm_taskq_args_t), KM_NOSLEEP); 3242 if (taskq_args == NULL) { 3243 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: kmem_alloc failed for" 3244 "taskq_args"); 3245 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3246 IBTF_DPRINTF_L4("ibmf_recv_cb", 3247 "\tibmf_recv_cb: IBMF free msg failed"); 3248 } 3249 return; 3250 } 3251 taskq_args->tq_ibmf_handle = ibmf_hdl; 3252 taskq_args->tq_ibmf_msg = msg; 3253 taskq_args->tq_args = arg; 3254 3255 if (taskq_dispatch(system_taskq, ibdm_recv_incoming_mad, taskq_args, 3256 TQ_NOSLEEP) == 0) { 3257 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: taskq_dispatch failed"); 3258 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3259 IBTF_DPRINTF_L4("ibmf_recv_cb", 3260 "\tibmf_recv_cb: IBMF free msg failed"); 3261 } 3262 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 3263 return; 3264 } 3265 3266 /* taskq_args are deleted in ibdm_recv_incoming_mad() */ 3267 } 3268 3269 3270 void 3271 ibdm_recv_incoming_mad(void *args) 3272 { 3273 ibdm_taskq_args_t *taskq_args; 3274 3275 taskq_args = (ibdm_taskq_args_t *)args; 3276 3277 IBTF_DPRINTF_L4("ibdm", "\tibdm_recv_incoming_mad: " 3278 "Processing incoming MAD via taskq"); 3279 3280 ibdm_process_incoming_mad(taskq_args->tq_ibmf_handle, 3281 taskq_args->tq_ibmf_msg, taskq_args->tq_args); 3282 3283 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 3284 } 3285 3286 3287 /* 3288 * Calls ibdm_process_incoming_mad with all function arguments extracted 3289 * from args 3290 */ 3291 /*ARGSUSED*/ 3292 static void 3293 ibdm_process_incoming_mad(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 3294 { 3295 int flag = 0; 3296 int ret; 3297 uint64_t transaction_id; 3298 ib_mad_hdr_t *hdr; 3299 ibdm_dp_gidinfo_t *gid_info = NULL; 3300 3301 IBTF_DPRINTF_L4("ibdm", 3302 "\tprocess_incoming_mad: ibmf hdl %p pkt %p", ibmf_hdl, msg); 3303 ibdm_dump_ibmf_msg(msg, 0); 3304 3305 /* 3306 * IBMF calls this routine for every DM MAD that arrives at this port. 3307 * But we handle only the responses for requests we sent. We drop all 3308 * the DM packets that does not have response bit set in the MAD 3309 * header(this eliminates all the requests sent to this port). 
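 * A response is matched back to its GID by masking the returned
 * TransactionID with IBDM_GID_TRANSACTIONID_MASK: each gid_info is
 * assigned its own transaction-ID window when it is created (see
 * gl_min_transactionID/gl_max_transactionID in
 * ibdm_get_reachable_ports()), so the masked value identifies the
 * originating GID.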
3310 * We handle only DM class version 1 MAD's 3311 */ 3312 hdr = IBDM_IN_IBMFMSG_MADHDR(msg); 3313 if (ibdm_verify_mad_status(hdr) != IBDM_SUCCESS) { 3314 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3315 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 3316 "IBMF free msg failed DM request drop it"); 3317 } 3318 return; 3319 } 3320 3321 transaction_id = b2h64(hdr->TransactionID); 3322 3323 mutex_enter(&ibdm.ibdm_mutex); 3324 gid_info = ibdm.ibdm_dp_gidlist_head; 3325 while (gid_info) { 3326 if ((gid_info->gl_transactionID & 3327 IBDM_GID_TRANSACTIONID_MASK) == 3328 (transaction_id & IBDM_GID_TRANSACTIONID_MASK)) 3329 break; 3330 gid_info = gid_info->gl_next; 3331 } 3332 mutex_exit(&ibdm.ibdm_mutex); 3333 3334 if (gid_info == NULL) { 3335 /* Drop the packet */ 3336 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: transaction ID" 3337 " does not match: 0x%llx", transaction_id); 3338 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3339 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3340 "IBMF free msg failed DM request drop it"); 3341 } 3342 return; 3343 } 3344 3345 /* Handle redirection for all the MAD's, except ClassPortInfo */ 3346 if (((IBDM_IN_IBMFMSG_STATUS(msg) & MAD_STATUS_REDIRECT_REQUIRED)) && 3347 (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO)) { 3348 ret = ibdm_handle_redirection(msg, gid_info, &flag); 3349 if (ret == IBDM_SUCCESS) { 3350 return; 3351 } 3352 } else { 3353 uint_t gl_state; 3354 3355 mutex_enter(&gid_info->gl_mutex); 3356 gl_state = gid_info->gl_state; 3357 mutex_exit(&gid_info->gl_mutex); 3358 3359 switch (gl_state) { 3360 3361 case IBDM_SET_CLASSPORTINFO: 3362 ibdm_handle_setclassportinfo( 3363 ibmf_hdl, msg, gid_info, &flag); 3364 break; 3365 3366 case IBDM_GET_CLASSPORTINFO: 3367 ibdm_handle_classportinfo( 3368 ibmf_hdl, msg, gid_info, &flag); 3369 break; 3370 3371 case IBDM_GET_IOUNITINFO: 3372 ibdm_handle_iounitinfo(ibmf_hdl, msg, gid_info, &flag); 3373 break; 3374 3375 case IBDM_GET_IOC_DETAILS: 3376 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 3377 3378 case IB_DM_ATTR_SERVICE_ENTRIES: 3379 ibdm_handle_srventry_mad(msg, gid_info, &flag); 3380 break; 3381 3382 case IB_DM_ATTR_IOC_CTRL_PROFILE: 3383 ibdm_handle_ioc_profile( 3384 ibmf_hdl, msg, gid_info, &flag); 3385 break; 3386 3387 case IB_DM_ATTR_DIAG_CODE: 3388 ibdm_handle_diagcode(msg, gid_info, &flag); 3389 break; 3390 3391 default: 3392 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3393 "Error state, wrong attribute :-("); 3394 (void) ibmf_free_msg(ibmf_hdl, &msg); 3395 return; 3396 } 3397 break; 3398 default: 3399 IBTF_DPRINTF_L2("ibdm", 3400 "process_incoming_mad: Dropping the packet" 3401 " gl_state %x", gl_state); 3402 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3403 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3404 "IBMF free msg failed DM request drop it"); 3405 } 3406 return; 3407 } 3408 } 3409 3410 if ((flag & IBDM_IBMF_PKT_DUP_RESP) || 3411 (flag & IBDM_IBMF_PKT_UNEXP_RESP)) { 3412 IBTF_DPRINTF_L2("ibdm", 3413 "\tprocess_incoming_mad:Dup/unexp resp : 0x%x", flag); 3414 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3415 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3416 "IBMF free msg failed DM request drop it"); 3417 } 3418 return; 3419 } 3420 3421 mutex_enter(&gid_info->gl_mutex); 3422 if (gid_info->gl_pending_cmds < 1) { 3423 IBTF_DPRINTF_L2("ibdm", 3424 "\tprocess_incoming_mad: pending commands negative"); 3425 } 3426 if (--gid_info->gl_pending_cmds) { 3427 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: " 3428 "gid_info %p pending cmds %d", 3429 
gid_info, gid_info->gl_pending_cmds); 3430 mutex_exit(&gid_info->gl_mutex); 3431 } else { 3432 uint_t prev_state; 3433 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: Probing DONE"); 3434 prev_state = gid_info->gl_state; 3435 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 3436 if (prev_state == IBDM_SET_CLASSPORTINFO) { 3437 IBTF_DPRINTF_L4("ibdm", 3438 "\tprocess_incoming_mad: " 3439 "Setclassportinfo for Cisco FC GW is done."); 3440 gid_info->gl_flag &= ~IBDM_CISCO_PROBE; 3441 gid_info->gl_flag |= IBDM_CISCO_PROBE_DONE; 3442 mutex_exit(&gid_info->gl_mutex); 3443 cv_broadcast(&gid_info->gl_probe_cv); 3444 } else { 3445 mutex_exit(&gid_info->gl_mutex); 3446 ibdm_notify_newgid_iocs(gid_info); 3447 mutex_enter(&ibdm.ibdm_mutex); 3448 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3449 IBTF_DPRINTF_L4("ibdm", 3450 "\tprocess_incoming_mad: Wakeup"); 3451 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3452 cv_broadcast(&ibdm.ibdm_probe_cv); 3453 } 3454 mutex_exit(&ibdm.ibdm_mutex); 3455 } 3456 } 3457 3458 /* 3459 * Do not deallocate the IBMF packet if atleast one request 3460 * is posted. IBMF packet is reused. 3461 */ 3462 if (!(flag & IBDM_IBMF_PKT_REUSED)) { 3463 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3464 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 3465 "IBMF free msg failed DM request drop it"); 3466 } 3467 } 3468 } 3469 3470 3471 /* 3472 * ibdm_verify_mad_status() 3473 * Verifies the MAD status 3474 * Returns IBDM_SUCCESS if status is correct 3475 * Returns IBDM_FAILURE for bogus MAD status 3476 */ 3477 static int 3478 ibdm_verify_mad_status(ib_mad_hdr_t *hdr) 3479 { 3480 int ret = 0; 3481 3482 if ((hdr->R_Method != IB_DM_DEVMGT_METHOD_GET_RESP) || 3483 (hdr->ClassVersion != IB_DM_CLASS_VERSION_1)) { 3484 return (IBDM_FAILURE); 3485 } 3486 3487 if (b2h16(hdr->Status) == 0) 3488 ret = IBDM_SUCCESS; 3489 else if ((b2h16(hdr->Status) & 0x1f) == MAD_STATUS_REDIRECT_REQUIRED) 3490 ret = IBDM_SUCCESS; 3491 else { 3492 IBTF_DPRINTF_L2("ibdm", 3493 "\tverify_mad_status: Status : 0x%x", b2h16(hdr->Status)); 3494 ret = IBDM_FAILURE; 3495 } 3496 return (ret); 3497 } 3498 3499 3500 3501 /* 3502 * ibdm_handle_redirection() 3503 * Returns IBDM_SUCCESS/IBDM_FAILURE 3504 */ 3505 static int 3506 ibdm_handle_redirection(ibmf_msg_t *msg, 3507 ibdm_dp_gidinfo_t *gid_info, int *flag) 3508 { 3509 int attrmod, ioc_no, start; 3510 void *data; 3511 timeout_id_t *timeout_id; 3512 ib_mad_hdr_t *hdr; 3513 ibdm_ioc_info_t *ioc = NULL; 3514 ibdm_timeout_cb_args_t *cb_args; 3515 ib_mad_classportinfo_t *cpi; 3516 3517 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Enter"); 3518 mutex_enter(&gid_info->gl_mutex); 3519 switch (gid_info->gl_state) { 3520 case IBDM_GET_IOUNITINFO: 3521 cb_args = &gid_info->gl_iou_cb_args; 3522 timeout_id = &gid_info->gl_timeout_id; 3523 break; 3524 3525 case IBDM_GET_IOC_DETAILS: 3526 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 3527 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 3528 3529 case IB_DM_ATTR_DIAG_CODE: 3530 if (attrmod == 0) { 3531 cb_args = &gid_info->gl_iou_cb_args; 3532 timeout_id = &gid_info->gl_timeout_id; 3533 break; 3534 } 3535 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3536 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3537 "IOC# Out of range %d", attrmod); 3538 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3539 mutex_exit(&gid_info->gl_mutex); 3540 return (IBDM_FAILURE); 3541 } 3542 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3543 cb_args = &ioc->ioc_dc_cb_args; 3544 timeout_id = &ioc->ioc_dc_timeout_id; 3545 break; 3546 3547 case 
IB_DM_ATTR_IOC_CTRL_PROFILE: 3548 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3549 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3550 "IOC# Out of range %d", attrmod); 3551 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3552 mutex_exit(&gid_info->gl_mutex); 3553 return (IBDM_FAILURE); 3554 } 3555 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3556 cb_args = &ioc->ioc_cb_args; 3557 timeout_id = &ioc->ioc_timeout_id; 3558 break; 3559 3560 case IB_DM_ATTR_SERVICE_ENTRIES: 3561 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 3562 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) { 3563 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3564 "IOC# Out of range %d", ioc_no); 3565 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3566 mutex_exit(&gid_info->gl_mutex); 3567 return (IBDM_FAILURE); 3568 } 3569 start = (attrmod & IBDM_8_BIT_MASK); 3570 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 3571 if (start > ioc->ioc_profile.ioc_service_entries) { 3572 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3573 " SE index Out of range %d", start); 3574 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3575 mutex_exit(&gid_info->gl_mutex); 3576 return (IBDM_FAILURE); 3577 } 3578 cb_args = &ioc->ioc_serv[start].se_cb_args; 3579 timeout_id = &ioc->ioc_serv[start].se_timeout_id; 3580 break; 3581 3582 default: 3583 /* ERROR State */ 3584 IBTF_DPRINTF_L2("ibdm", 3585 "\thandle_redirection: wrong attribute :-("); 3586 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3587 mutex_exit(&gid_info->gl_mutex); 3588 return (IBDM_FAILURE); 3589 } 3590 break; 3591 default: 3592 /* ERROR State */ 3593 IBTF_DPRINTF_L2("ibdm", 3594 "\thandle_redirection: Error state :-("); 3595 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3596 mutex_exit(&gid_info->gl_mutex); 3597 return (IBDM_FAILURE); 3598 } 3599 if ((*timeout_id) != 0) { 3600 mutex_exit(&gid_info->gl_mutex); 3601 if (untimeout(*timeout_id) == -1) { 3602 IBTF_DPRINTF_L2("ibdm", "\thandle_redirection: " 3603 "untimeout failed %x", *timeout_id); 3604 } else { 3605 IBTF_DPRINTF_L5("ibdm", 3606 "\thandle_redirection: timeout %x", *timeout_id); 3607 } 3608 mutex_enter(&gid_info->gl_mutex); 3609 *timeout_id = 0; 3610 } 3611 3612 data = msg->im_msgbufs_recv.im_bufs_cl_data; 3613 cpi = (ib_mad_classportinfo_t *)data; 3614 3615 gid_info->gl_resp_timeout = 3616 (b2h32(cpi->RespTimeValue) & 0x1F); 3617 3618 gid_info->gl_redirected = B_TRUE; 3619 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 3620 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 3621 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 3622 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 3623 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 3624 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 3625 gid_info->gl_redirectSL = cpi->RedirectSL; 3626 3627 if (gid_info->gl_redirect_dlid != 0) { 3628 msg->im_local_addr.ia_remote_lid = 3629 gid_info->gl_redirect_dlid; 3630 } 3631 ibdm_bump_transactionID(gid_info); 3632 mutex_exit(&gid_info->gl_mutex); 3633 3634 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg, *hdr)) 3635 ibdm_alloc_send_buffers(msg); 3636 3637 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3638 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3639 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3640 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3641 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3642 hdr->Status = 0; 3643 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3644 hdr->AttributeID = 3645 msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeID; 3646 hdr->AttributeModifier = 3647 
msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier; 3648 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg, *hdr)) 3649 3650 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3651 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3652 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3653 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 3654 3655 mutex_enter(&gid_info->gl_mutex); 3656 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3657 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3658 mutex_exit(&gid_info->gl_mutex); 3659 3660 IBTF_DPRINTF_L5("ibdm", "\thandle_redirection:" 3661 "timeout %x", *timeout_id); 3662 3663 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 3664 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3665 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection:" 3666 "message transport failed"); 3667 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3668 } 3669 (*flag) |= IBDM_IBMF_PKT_REUSED; 3670 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Exit"); 3671 return (IBDM_SUCCESS); 3672 } 3673 3674 3675 /* 3676 * ibdm_pkt_timeout_hdlr 3677 * This timeout handler is registered for every IBMF packet that is 3678 * sent through the IBMF. It gets called when no response is received 3679 * within the specified time for the packet. No retries for the failed 3680 * commands currently. Drops the failed IBMF packet and updates the 3681 * pending command count. 3682 */ 3683 static void 3684 ibdm_pkt_timeout_hdlr(void *arg) 3685 { 3686 ibdm_iou_info_t *iou; 3687 ibdm_ioc_info_t *ioc; 3688 ibdm_timeout_cb_args_t *cb_args = arg; 3689 ibdm_dp_gidinfo_t *gid_info; 3690 int srv_ent; 3691 uint_t new_gl_state; 3692 3693 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: gid_info: %p " 3694 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3695 cb_args->cb_req_type, cb_args->cb_ioc_num, 3696 cb_args->cb_srvents_start); 3697 3698 gid_info = cb_args->cb_gid_info; 3699 mutex_enter(&gid_info->gl_mutex); 3700 3701 if ((gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) || 3702 (cb_args->cb_req_type == 0)) { 3703 3704 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: req completed" 3705 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_req_type, 3706 cb_args->cb_ioc_num, cb_args->cb_srvents_start); 3707 3708 if (gid_info->gl_timeout_id) 3709 gid_info->gl_timeout_id = 0; 3710 mutex_exit(&gid_info->gl_mutex); 3711 return; 3712 } 3713 if (cb_args->cb_retry_count) { 3714 cb_args->cb_retry_count--; 3715 /* 3716 * A new timeout_id is set inside ibdm_retry_command(). 3717 * When the function returns an error, the timeout_id 3718 * is reset (to zero) in the switch statement below.
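 * On a successful retry this handler returns immediately without
 * touching gl_pending_cmds; the retried request arms its own timeout.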
3719 */ 3720 if (ibdm_retry_command(cb_args) == IBDM_SUCCESS) { 3721 mutex_exit(&gid_info->gl_mutex); 3722 return; 3723 } 3724 cb_args->cb_retry_count = 0; 3725 } 3726 3727 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: command failed: gid %p" 3728 " rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3729 cb_args->cb_req_type, cb_args->cb_ioc_num, 3730 cb_args->cb_srvents_start); 3731 3732 switch (cb_args->cb_req_type) { 3733 3734 case IBDM_REQ_TYPE_CLASSPORTINFO: 3735 case IBDM_REQ_TYPE_IOUINFO: 3736 new_gl_state = IBDM_GID_PROBING_FAILED; 3737 if (gid_info->gl_timeout_id) 3738 gid_info->gl_timeout_id = 0; 3739 break; 3740 3741 case IBDM_REQ_TYPE_IOCINFO: 3742 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3743 iou = gid_info->gl_iou; 3744 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3745 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 3746 if (ioc->ioc_timeout_id) 3747 ioc->ioc_timeout_id = 0; 3748 break; 3749 3750 case IBDM_REQ_TYPE_SRVENTS: 3751 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3752 iou = gid_info->gl_iou; 3753 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3754 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 3755 srv_ent = cb_args->cb_srvents_start; 3756 if (ioc->ioc_serv[srv_ent].se_timeout_id) 3757 ioc->ioc_serv[srv_ent].se_timeout_id = 0; 3758 break; 3759 3760 case IBDM_REQ_TYPE_IOU_DIAGCODE: 3761 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3762 iou = gid_info->gl_iou; 3763 iou->iou_dc_valid = B_FALSE; 3764 if (gid_info->gl_timeout_id) 3765 gid_info->gl_timeout_id = 0; 3766 break; 3767 3768 case IBDM_REQ_TYPE_IOC_DIAGCODE: 3769 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3770 iou = gid_info->gl_iou; 3771 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3772 ioc->ioc_dc_valid = B_FALSE; 3773 if (ioc->ioc_dc_timeout_id) 3774 ioc->ioc_dc_timeout_id = 0; 3775 break; 3776 3777 default: /* ERROR State */ 3778 new_gl_state = IBDM_GID_PROBING_FAILED; 3779 if (gid_info->gl_timeout_id) 3780 gid_info->gl_timeout_id = 0; 3781 IBTF_DPRINTF_L2("ibdm", 3782 "\tpkt_timeout_hdlr: wrong request type."); 3783 break; 3784 } 3785 3786 --gid_info->gl_pending_cmds; /* decrease the counter */ 3787 3788 if (gid_info->gl_pending_cmds == 0) { 3789 gid_info->gl_state = new_gl_state; 3790 mutex_exit(&gid_info->gl_mutex); 3791 /* 3792 * Delete this gid_info if the gid probe fails. 3793 */ 3794 if (new_gl_state == IBDM_GID_PROBING_FAILED) { 3795 ibdm_delete_glhca_list(gid_info); 3796 } 3797 ibdm_notify_newgid_iocs(gid_info); 3798 mutex_enter(&ibdm.ibdm_mutex); 3799 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3800 IBTF_DPRINTF_L4("ibdm", "\tpkt_timeout_hdlr: Wakeup"); 3801 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3802 cv_broadcast(&ibdm.ibdm_probe_cv); 3803 } 3804 mutex_exit(&ibdm.ibdm_mutex); 3805 } else { 3806 /* 3807 * Reset gl_pending_cmd if the extra timeout happens since 3808 * gl_pending_cmd becomes negative as a result. 3809 */ 3810 if (gid_info->gl_pending_cmds < 0) { 3811 gid_info->gl_pending_cmds = 0; 3812 IBTF_DPRINTF_L2("ibdm", 3813 "\tpkt_timeout_hdlr: extra timeout request." 3814 " reset gl_pending_cmds"); 3815 } 3816 mutex_exit(&gid_info->gl_mutex); 3817 /* 3818 * Delete this gid_info if the gid probe fails. 3819 */ 3820 if (new_gl_state == IBDM_GID_PROBING_FAILED) { 3821 ibdm_delete_glhca_list(gid_info); 3822 } 3823 } 3824 } 3825 3826 3827 /* 3828 * ibdm_retry_command() 3829 * Retries the failed command. 
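 * Called with gl_mutex held; the mutex is dropped and reacquired
 * around ibdm_reset_gidinfo() and the IBMF message transport.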
3830 * Returns IBDM_FAILURE/IBDM_SUCCESS 3831 */ 3832 static int 3833 ibdm_retry_command(ibdm_timeout_cb_args_t *cb_args) 3834 { 3835 int ret; 3836 ibmf_msg_t *msg; 3837 ib_mad_hdr_t *hdr; 3838 ibdm_dp_gidinfo_t *gid_info = cb_args->cb_gid_info; 3839 timeout_id_t *timeout_id; 3840 ibdm_ioc_info_t *ioc; 3841 int ioc_no; 3842 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 3843 3844 IBTF_DPRINTF_L2("ibdm", "\tretry_command: gid_info: %p " 3845 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3846 cb_args->cb_req_type, cb_args->cb_ioc_num, 3847 cb_args->cb_srvents_start); 3848 3849 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, &msg); 3850 3851 3852 /* 3853 * Reset the gid if alloc_msg failed with BAD_HANDLE 3854 * ibdm_reset_gidinfo reinits the gid_info 3855 */ 3856 if (ret == IBMF_BAD_HANDLE) { 3857 IBTF_DPRINTF_L3(ibdm_string, "\tretry_command: gid %p hdl bad", 3858 gid_info); 3859 3860 mutex_exit(&gid_info->gl_mutex); 3861 ibdm_reset_gidinfo(gid_info); 3862 mutex_enter(&gid_info->gl_mutex); 3863 3864 /* Retry alloc */ 3865 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, 3866 &msg); 3867 } 3868 3869 if (ret != IBDM_SUCCESS) { 3870 IBTF_DPRINTF_L2("ibdm", "\tretry_command: alloc failed: %p " 3871 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3872 cb_args->cb_req_type, cb_args->cb_ioc_num, 3873 cb_args->cb_srvents_start); 3874 return (IBDM_FAILURE); 3875 } 3876 3877 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 3878 ibdm_alloc_send_buffers(msg); 3879 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 3880 3881 ibdm_bump_transactionID(gid_info); 3882 3883 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 3884 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 3885 if (gid_info->gl_redirected == B_TRUE) { 3886 if (gid_info->gl_redirect_dlid != 0) { 3887 msg->im_local_addr.ia_remote_lid = 3888 gid_info->gl_redirect_dlid; 3889 } 3890 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3891 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3892 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3893 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 3894 } else { 3895 msg->im_local_addr.ia_remote_qno = 1; 3896 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 3897 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 3898 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 3899 } 3900 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3901 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 3902 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3903 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3904 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3905 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3906 hdr->Status = 0; 3907 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3908 3909 switch (cb_args->cb_req_type) { 3910 case IBDM_REQ_TYPE_CLASSPORTINFO: 3911 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 3912 hdr->AttributeModifier = 0; 3913 timeout_id = &gid_info->gl_timeout_id; 3914 break; 3915 case IBDM_REQ_TYPE_IOUINFO: 3916 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 3917 hdr->AttributeModifier = 0; 3918 timeout_id = &gid_info->gl_timeout_id; 3919 break; 3920 case IBDM_REQ_TYPE_IOCINFO: 3921 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 3922 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 3923 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 3924 timeout_id = &ioc->ioc_timeout_id; 3925 break; 3926 case IBDM_REQ_TYPE_SRVENTS: 3927 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 3928 ibdm_fill_srv_attr_mod(hdr, cb_args); 
3929 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 3930 timeout_id = 3931 &ioc->ioc_serv[cb_args->cb_srvents_start].se_timeout_id; 3932 break; 3933 case IBDM_REQ_TYPE_IOU_DIAGCODE: 3934 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3935 hdr->AttributeModifier = 0; 3936 timeout_id = &gid_info->gl_timeout_id; 3937 break; 3938 case IBDM_REQ_TYPE_IOC_DIAGCODE: 3939 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3940 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 3941 ioc_no = cb_args->cb_ioc_num; 3942 ioc = &gid_info->gl_iou->iou_ioc_info[ioc_no]; 3943 timeout_id = &ioc->ioc_dc_timeout_id; 3944 break; 3945 } 3946 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*hdr)) 3947 3948 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3949 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3950 3951 mutex_exit(&gid_info->gl_mutex); 3952 3953 IBTF_DPRINTF_L5("ibdm", "\tretry_command: %p,%x,%d,%d:" 3954 "timeout %x", cb_args->cb_req_type, cb_args->cb_ioc_num, 3955 cb_args->cb_srvents_start, *timeout_id); 3956 3957 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, 3958 gid_info->gl_qp_hdl, msg, NULL, ibdm_ibmf_send_cb, 3959 cb_args, 0) != IBMF_SUCCESS) { 3960 IBTF_DPRINTF_L2("ibdm", "\tretry_command: send failed: %p " 3961 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3962 cb_args->cb_req_type, cb_args->cb_ioc_num, 3963 cb_args->cb_srvents_start); 3964 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3965 } 3966 mutex_enter(&gid_info->gl_mutex); 3967 return (IBDM_SUCCESS); 3968 } 3969 3970 3971 /* 3972 * ibdm_update_ioc_port_gidlist() 3973 */ 3974 static void 3975 ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *dest, 3976 ibdm_dp_gidinfo_t *gid_info) 3977 { 3978 int ii, ngid_ents; 3979 ibdm_gid_t *tmp; 3980 ibdm_hca_list_t *gid_hca_head, *temp; 3981 ibdm_hca_list_t *ioc_head = NULL; 3982 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 3983 3984 IBTF_DPRINTF_L5("ibdm", "\tupdate_ioc_port_gidlist: Enter"); 3985 3986 ngid_ents = gid_info->gl_ngids; 3987 dest->ioc_nportgids = ngid_ents; 3988 dest->ioc_gid_list = kmem_zalloc(sizeof (ibdm_gid_t) * 3989 ngid_ents, KM_SLEEP); 3990 tmp = gid_info->gl_gid; 3991 for (ii = 0; (ii < ngid_ents) && (tmp); ii++) { 3992 dest->ioc_gid_list[ii].gid_dgid_hi = tmp->gid_dgid_hi; 3993 dest->ioc_gid_list[ii].gid_dgid_lo = tmp->gid_dgid_lo; 3994 tmp = tmp->gid_next; 3995 } 3996 3997 gid_hca_head = gid_info->gl_hca_list; 3998 while (gid_hca_head) { 3999 temp = ibdm_dup_hca_attr(gid_hca_head); 4000 temp->hl_next = ioc_head; 4001 ioc_head = temp; 4002 gid_hca_head = gid_hca_head->hl_next; 4003 } 4004 dest->ioc_hca_list = ioc_head; 4005 } 4006 4007 4008 /* 4009 * ibdm_alloc_send_buffers() 4010 * Allocates memory for the IBMF send buffer to send and/or receive 4011 * the Device Management MAD packet. 
4012 */ 4013 static void 4014 ibdm_alloc_send_buffers(ibmf_msg_t *msgp) 4015 { 4016 msgp->im_msgbufs_send.im_bufs_mad_hdr = 4017 kmem_zalloc(IBDM_MAD_SIZE, KM_SLEEP); 4018 4019 msgp->im_msgbufs_send.im_bufs_cl_hdr = (uchar_t *) 4020 msgp->im_msgbufs_send.im_bufs_mad_hdr + sizeof (ib_mad_hdr_t); 4021 msgp->im_msgbufs_send.im_bufs_cl_hdr_len = IBDM_DM_MAD_HDR_SZ; 4022 4023 msgp->im_msgbufs_send.im_bufs_cl_data = 4024 ((char *)msgp->im_msgbufs_send.im_bufs_cl_hdr + IBDM_DM_MAD_HDR_SZ); 4025 msgp->im_msgbufs_send.im_bufs_cl_data_len = 4026 IBDM_MAD_SIZE - sizeof (ib_mad_hdr_t) - IBDM_DM_MAD_HDR_SZ; 4027 } 4028 4029 4030 /* 4031 * ibdm_free_send_buffers() 4032 * De-allocates memory for the IBMF send buffer 4033 */ 4034 static void 4035 ibdm_free_send_buffers(ibmf_msg_t *msgp) 4036 { 4037 if (msgp->im_msgbufs_send.im_bufs_mad_hdr != NULL) 4038 kmem_free(msgp->im_msgbufs_send.im_bufs_mad_hdr, IBDM_MAD_SIZE); 4039 } 4040 4041 /* 4042 * ibdm_probe_ioc() 4043 * 1. Gets the node records for the given node GUID. This detects all 4044 * the ports of the IOU. 4045 * 2. Selectively probes all the IOCs, given the IOU's node GUID 4046 * 3. In case of reprobe, only the IOC to be reprobed is sent the IOC 4047 * Controller Profile request asynchronously 4048 */ 4049 /*ARGSUSED*/ 4050 static void 4051 ibdm_probe_ioc(ib_guid_t nodeguid, ib_guid_t ioc_guid, int reprobe_flag) 4052 { 4053 int ii, nrecords; 4054 size_t nr_len = 0, pi_len = 0; 4055 ib_gid_t sgid, dgid; 4056 ibdm_hca_list_t *hca_list = NULL; 4057 sa_node_record_t *nr, *tmp; 4058 ibdm_port_attr_t *port = NULL; 4059 ibdm_dp_gidinfo_t *reprobe_gid, *new_gid, *node_gid; 4060 ibdm_dp_gidinfo_t *temp_gidinfo; 4061 ibdm_gid_t *temp_gid; 4062 sa_portinfo_record_t *pi; 4063 4064 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc(%llx, %llx, %x): Begin", 4065 nodeguid, ioc_guid, reprobe_flag); 4066 4067 /* Rescan the GID list for any removed GIDs for reprobe */ 4068 if (reprobe_flag) 4069 ibdm_rescan_gidlist(&ioc_guid); 4070 4071 mutex_enter(&ibdm.ibdm_hl_mutex); 4072 for (ibdm_get_next_port(&hca_list, &port, 1); port; 4073 ibdm_get_next_port(&hca_list, &port, 1)) { 4074 reprobe_gid = new_gid = node_gid = NULL; 4075 4076 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, nodeguid); 4077 if (nr == NULL) { 4078 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc: no records"); 4079 continue; 4080 } 4081 nrecords = (nr_len / sizeof (sa_node_record_t)); 4082 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) { 4083 if ((pi = ibdm_get_portinfo( 4084 port->pa_sa_hdl, &pi_len, tmp->LID)) == NULL) { 4085 IBTF_DPRINTF_L4("ibdm", 4086 "\tibdm_get_portinfo: no portinfo recs"); 4087 continue; 4088 } 4089 4090 /* 4091 * If Device Management is not supported on 4092 * this port, skip the rest. 4093 */ 4094 if (!(pi->PortInfo.CapabilityMask & 4095 SM_CAP_MASK_IS_DM_SUPPD)) { 4096 kmem_free(pi, pi_len); 4097 continue; 4098 } 4099 4100 /* 4101 * For reprobes: check whether the GID is already in 4102 * the list.
If so, set the state to SKIPPED 4103 */ 4104 if (((temp_gidinfo = ibdm_find_gid(nodeguid, 4105 tmp->NodeInfo.PortGUID)) != NULL) && 4106 temp_gidinfo->gl_state == 4107 IBDM_GID_PROBING_COMPLETE) { 4108 ASSERT(reprobe_gid == NULL); 4109 ibdm_addto_glhcalist(temp_gidinfo, 4110 hca_list); 4111 reprobe_gid = temp_gidinfo; 4112 kmem_free(pi, pi_len); 4113 continue; 4114 } else if (temp_gidinfo != NULL) { 4115 kmem_free(pi, pi_len); 4116 ibdm_addto_glhcalist(temp_gidinfo, 4117 hca_list); 4118 continue; 4119 } 4120 4121 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : " 4122 "create_gid : prefix %llx, guid %llx\n", 4123 pi->PortInfo.GidPrefix, 4124 tmp->NodeInfo.PortGUID); 4125 4126 sgid.gid_prefix = port->pa_sn_prefix; 4127 sgid.gid_guid = port->pa_port_guid; 4128 dgid.gid_prefix = pi->PortInfo.GidPrefix; 4129 dgid.gid_guid = tmp->NodeInfo.PortGUID; 4130 new_gid = ibdm_create_gid_info(port, sgid, 4131 dgid); 4132 if (new_gid == NULL) { 4133 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4134 "create_gid_info failed\n"); 4135 kmem_free(pi, pi_len); 4136 continue; 4137 } 4138 if (node_gid == NULL) { 4139 node_gid = new_gid; 4140 ibdm_add_to_gl_gid(node_gid, node_gid); 4141 } else { 4142 IBTF_DPRINTF_L4("ibdm", 4143 "\tprobe_ioc: new gid"); 4144 temp_gid = kmem_zalloc( 4145 sizeof (ibdm_gid_t), KM_SLEEP); 4146 temp_gid->gid_dgid_hi = 4147 new_gid->gl_dgid_hi; 4148 temp_gid->gid_dgid_lo = 4149 new_gid->gl_dgid_lo; 4150 temp_gid->gid_next = node_gid->gl_gid; 4151 node_gid->gl_gid = temp_gid; 4152 node_gid->gl_ngids++; 4153 } 4154 new_gid->gl_nodeguid = nodeguid; 4155 new_gid->gl_portguid = dgid.gid_guid; 4156 ibdm_addto_glhcalist(new_gid, hca_list); 4157 4158 /* 4159 * Set the state to skipped as all these 4160 * gids point to the same node. 4161 * We (re)probe only one GID below and reset 4162 * state appropriately 4163 */ 4164 new_gid->gl_state = IBDM_GID_PROBING_SKIPPED; 4165 new_gid->gl_devid = (*tmp).NodeInfo.DeviceID; 4166 kmem_free(pi, pi_len); 4167 } 4168 kmem_free(nr, nr_len); 4169 4170 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : reprobe_flag %d " 4171 "reprobe_gid %p new_gid %p node_gid %p", 4172 reprobe_flag, reprobe_gid, new_gid, node_gid); 4173 4174 if (reprobe_flag != 0 && reprobe_gid != NULL) { 4175 int niocs, jj; 4176 ibdm_ioc_info_t *tmp_ioc; 4177 int ioc_matched = 0; 4178 4179 mutex_exit(&ibdm.ibdm_hl_mutex); 4180 mutex_enter(&reprobe_gid->gl_mutex); 4181 reprobe_gid->gl_state = IBDM_GET_IOC_DETAILS; 4182 niocs = 4183 reprobe_gid->gl_iou->iou_info.iou_num_ctrl_slots; 4184 reprobe_gid->gl_pending_cmds++; 4185 mutex_exit(&reprobe_gid->gl_mutex); 4186 4187 for (jj = 0; jj < niocs; jj++) { 4188 tmp_ioc = 4189 IBDM_GIDINFO2IOCINFO(reprobe_gid, jj); 4190 if (tmp_ioc->ioc_profile.ioc_guid != ioc_guid) 4191 continue; 4192 4193 ioc_matched = 1; 4194 4195 /* 4196 * Explicitly set gl_reprobe_flag to 0 so that 4197 * IBnex is not notified on completion 4198 */ 4199 mutex_enter(&reprobe_gid->gl_mutex); 4200 reprobe_gid->gl_reprobe_flag = 0; 4201 mutex_exit(&reprobe_gid->gl_mutex); 4202 4203 mutex_enter(&ibdm.ibdm_mutex); 4204 ibdm.ibdm_ngid_probes_in_progress++; 4205 mutex_exit(&ibdm.ibdm_mutex); 4206 if (ibdm_send_ioc_profile(reprobe_gid, jj) != 4207 IBDM_SUCCESS) { 4208 IBTF_DPRINTF_L4("ibdm", 4209 "\tprobe_ioc: " 4210 "send_ioc_profile failed " 4211 "for ioc %d", jj); 4212 ibdm_gid_decr_pending(reprobe_gid); 4213 break; 4214 } 4215 mutex_enter(&ibdm.ibdm_mutex); 4216 ibdm_wait_probe_completion(); 4217 mutex_exit(&ibdm.ibdm_mutex); 4218 break; 4219 } 4220 if (ioc_matched == 0) 4221 
ibdm_gid_decr_pending(reprobe_gid); 4222 else { 4223 mutex_enter(&ibdm.ibdm_hl_mutex); 4224 break; 4225 } 4226 } else if (new_gid != NULL) { 4227 mutex_exit(&ibdm.ibdm_hl_mutex); 4228 node_gid = node_gid ? node_gid : new_gid; 4229 4230 /* 4231 * New or reinserted GID : Enable notification 4232 * to IBnex 4233 */ 4234 mutex_enter(&node_gid->gl_mutex); 4235 node_gid->gl_reprobe_flag = 1; 4236 mutex_exit(&node_gid->gl_mutex); 4237 4238 ibdm_probe_gid(node_gid); 4239 4240 mutex_enter(&ibdm.ibdm_hl_mutex); 4241 } 4242 } 4243 mutex_exit(&ibdm.ibdm_hl_mutex); 4244 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : End\n"); 4245 } 4246 4247 4248 /* 4249 * ibdm_probe_gid() 4250 * Selectively probes the GID 4251 */ 4252 static void 4253 ibdm_probe_gid(ibdm_dp_gidinfo_t *gid_info) 4254 { 4255 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid:"); 4256 4257 /* 4258 * A Cisco FC GW needs the special handling to get IOUnitInfo. 4259 */ 4260 mutex_enter(&gid_info->gl_mutex); 4261 if (ibdm_is_cisco_switch(gid_info)) { 4262 gid_info->gl_pending_cmds++; 4263 gid_info->gl_state = IBDM_SET_CLASSPORTINFO; 4264 mutex_exit(&gid_info->gl_mutex); 4265 4266 if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) { 4267 4268 mutex_enter(&gid_info->gl_mutex); 4269 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 4270 --gid_info->gl_pending_cmds; 4271 mutex_exit(&gid_info->gl_mutex); 4272 4273 /* free the hca_list on this gid_info */ 4274 ibdm_delete_glhca_list(gid_info); 4275 gid_info = gid_info->gl_next; 4276 return; 4277 } 4278 4279 mutex_enter(&gid_info->gl_mutex); 4280 ibdm_wait_cisco_probe_completion(gid_info); 4281 4282 IBTF_DPRINTF_L4("ibdm", 4283 "\tprobe_gid: CISCO Wakeup signal received"); 4284 } 4285 4286 /* move on to the 'GET_CLASSPORTINFO' stage */ 4287 gid_info->gl_pending_cmds++; 4288 gid_info->gl_state = IBDM_GET_CLASSPORTINFO; 4289 mutex_exit(&gid_info->gl_mutex); 4290 4291 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) { 4292 4293 mutex_enter(&gid_info->gl_mutex); 4294 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 4295 --gid_info->gl_pending_cmds; 4296 mutex_exit(&gid_info->gl_mutex); 4297 4298 /* free the hca_list on this gid_info */ 4299 ibdm_delete_glhca_list(gid_info); 4300 gid_info = gid_info->gl_next; 4301 return; 4302 } 4303 4304 mutex_enter(&ibdm.ibdm_mutex); 4305 ibdm.ibdm_ngid_probes_in_progress++; 4306 gid_info = gid_info->gl_next; 4307 ibdm_wait_probe_completion(); 4308 mutex_exit(&ibdm.ibdm_mutex); 4309 4310 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid: Wakeup signal received"); 4311 } 4312 4313 4314 /* 4315 * ibdm_create_gid_info() 4316 * Allocates a gid_info structure and initializes 4317 * Returns pointer to the structure on success 4318 * and NULL on failure 4319 */ 4320 static ibdm_dp_gidinfo_t * 4321 ibdm_create_gid_info(ibdm_port_attr_t *port, ib_gid_t sgid, ib_gid_t dgid) 4322 { 4323 uint8_t ii, npaths; 4324 sa_path_record_t *path; 4325 size_t len; 4326 ibdm_pkey_tbl_t *pkey_tbl; 4327 ibdm_dp_gidinfo_t *gid_info = NULL; 4328 int ret; 4329 4330 IBTF_DPRINTF_L4("ibdm", "\tcreate_gid_info: Begin"); 4331 npaths = 1; 4332 4333 /* query for reversible paths */ 4334 if (port->pa_sa_hdl) 4335 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, 4336 sgid, dgid, IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, 4337 &len, &path); 4338 else 4339 return (NULL); 4340 4341 if (ret == IBMF_SUCCESS && path) { 4342 ibdm_dump_path_info(path); 4343 4344 gid_info = kmem_zalloc( 4345 sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 4346 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 4347 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, 
NULL); 4348 gid_info->gl_dgid_hi = path->DGID.gid_prefix; 4349 gid_info->gl_dgid_lo = path->DGID.gid_guid; 4350 gid_info->gl_sgid_hi = path->SGID.gid_prefix; 4351 gid_info->gl_sgid_lo = path->SGID.gid_guid; 4352 gid_info->gl_p_key = path->P_Key; 4353 gid_info->gl_sa_hdl = port->pa_sa_hdl; 4354 gid_info->gl_ibmf_hdl = port->pa_ibmf_hdl; 4355 gid_info->gl_slid = path->SLID; 4356 gid_info->gl_dlid = path->DLID; 4357 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID) 4358 << IBDM_GID_TRANSACTIONID_SHIFT; 4359 gid_info->gl_min_transactionID = gid_info->gl_transactionID; 4360 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1) 4361 << IBDM_GID_TRANSACTIONID_SHIFT; 4362 gid_info->gl_SL = path->SL; 4363 4364 gid_info->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 4365 for (ii = 0; ii < port->pa_npkeys; ii++) { 4366 if (port->pa_pkey_tbl == NULL) 4367 break; 4368 4369 pkey_tbl = &port->pa_pkey_tbl[ii]; 4370 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 4371 (pkey_tbl->pt_qp_hdl != NULL)) { 4372 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 4373 break; 4374 } 4375 } 4376 kmem_free(path, len); 4377 4378 /* 4379 * QP handle for GID not initialized. No matching Pkey 4380 * was found!! ibdm should *not* hit this case. Flag an 4381 * error and drop the GID if ibdm does encounter this. 4382 */ 4383 if (gid_info->gl_qp_hdl == NULL) { 4384 IBTF_DPRINTF_L2(ibdm_string, 4385 "\tcreate_gid_info: No matching Pkey"); 4386 ibdm_delete_gidinfo(gid_info); 4387 return (NULL); 4388 } 4389 4390 ibdm.ibdm_ngids++; 4391 if (ibdm.ibdm_dp_gidlist_head == NULL) { 4392 ibdm.ibdm_dp_gidlist_head = gid_info; 4393 ibdm.ibdm_dp_gidlist_tail = gid_info; 4394 } else { 4395 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 4396 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 4397 ibdm.ibdm_dp_gidlist_tail = gid_info; 4398 } 4399 } 4400 4401 return (gid_info); 4402 } 4403 4404 4405 /* 4406 * ibdm_get_node_records 4407 * Sends a SA query to get the NODE record 4408 * Returns pointer to the sa_node_record_t on success 4409 * and NULL on failure 4410 */ 4411 static sa_node_record_t * 4412 ibdm_get_node_records(ibmf_saa_handle_t sa_hdl, size_t *length, ib_guid_t guid) 4413 { 4414 sa_node_record_t req, *resp = NULL; 4415 ibmf_saa_access_args_t args; 4416 int ret; 4417 4418 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: Begin"); 4419 4420 bzero(&req, sizeof (sa_node_record_t)); 4421 req.NodeInfo.NodeGUID = guid; 4422 4423 args.sq_attr_id = SA_NODERECORD_ATTRID; 4424 args.sq_access_type = IBMF_SAA_RETRIEVE; 4425 args.sq_component_mask = SA_NODEINFO_COMPMASK_NODEGUID; 4426 args.sq_template = &req; 4427 args.sq_callback = NULL; 4428 args.sq_callback_arg = NULL; 4429 4430 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 4431 if (ret != IBMF_SUCCESS) { 4432 IBTF_DPRINTF_L2("ibdm", "\tget_node_records:" 4433 " SA Retrieve Failed: %d", ret); 4434 return (NULL); 4435 } 4436 if ((resp == NULL) || (*length == 0)) { 4437 IBTF_DPRINTF_L2("ibdm", "\tget_node_records: No records"); 4438 return (NULL); 4439 } 4440 4441 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: NodeGuid %llx " 4442 "PortGUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID); 4443 4444 return (resp); 4445 } 4446 4447 4448 /* 4449 * ibdm_get_portinfo() 4450 * Sends a SA query to get the PortInfo record 4451 * Returns pointer to the sa_portinfo_record_t on success 4452 * and NULL on failure 4453 */ 4454 static sa_portinfo_record_t * 4455 ibdm_get_portinfo(ibmf_saa_handle_t sa_hdl, size_t *length, ib_lid_t lid) 4456 { 4457 sa_portinfo_record_t req, *resp = NULL; 
4458 ibmf_saa_access_args_t args; 4459 int ret; 4460 4461 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: Begin"); 4462 4463 bzero(&req, sizeof (sa_portinfo_record_t)); 4464 req.EndportLID = lid; 4465 4466 args.sq_attr_id = SA_PORTINFORECORD_ATTRID; 4467 args.sq_access_type = IBMF_SAA_RETRIEVE; 4468 args.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID; 4469 args.sq_template = &req; 4470 args.sq_callback = NULL; 4471 args.sq_callback_arg = NULL; 4472 4473 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 4474 if (ret != IBMF_SUCCESS) { 4475 IBTF_DPRINTF_L2("ibdm", "\tget_portinfo:" 4476 " SA Retrieve Failed: 0x%X", ret); 4477 return (NULL); 4478 } 4479 if ((*length == 0) || (resp == NULL)) 4480 return (NULL); 4481 4482 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: GidPrefix %llx Cap 0x%x", 4483 resp->PortInfo.GidPrefix, resp->PortInfo.CapabilityMask); 4484 return (resp); 4485 } 4486 4487 4488 /* 4489 * ibdm_ibnex_register_callback 4490 * IB nexus callback routine for HCA attach and detach notification 4491 */ 4492 void 4493 ibdm_ibnex_register_callback(ibdm_callback_t ibnex_dm_callback) 4494 { 4495 IBTF_DPRINTF_L4("ibdm", "\tibnex_register_callbacks"); 4496 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4497 ibdm.ibdm_ibnex_callback = ibnex_dm_callback; 4498 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4499 } 4500 4501 4502 /* 4503 * ibdm_ibnex_unregister_callbacks 4504 */ 4505 void 4506 ibdm_ibnex_unregister_callback() 4507 { 4508 IBTF_DPRINTF_L4("ibdm", "\tibnex_unregister_callbacks"); 4509 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4510 ibdm.ibdm_ibnex_callback = NULL; 4511 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4512 } 4513 4514 /* 4515 * ibdm_get_waittime() 4516 * Calculates the wait time based on the last HCA attach time 4517 */ 4518 static time_t 4519 ibdm_get_waittime(ib_guid_t hca_guid, int dft_wait) 4520 { 4521 int ii; 4522 time_t temp, wait_time = 0; 4523 ibdm_hca_list_t *hca; 4524 4525 IBTF_DPRINTF_L4("ibdm", "\tget_waittime hcaguid:%llx" 4526 "\tport settling time %d", hca_guid, dft_wait); 4527 4528 ASSERT(mutex_owned(&ibdm.ibdm_hl_mutex)); 4529 4530 hca = ibdm.ibdm_hca_list_head; 4531 4532 if (hca_guid) { 4533 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4534 if ((hca_guid == hca->hl_hca_guid) && 4535 (hca->hl_nports != hca->hl_nports_active)) { 4536 wait_time = 4537 ddi_get_time() - hca->hl_attach_time; 4538 wait_time = ((wait_time >= dft_wait) ? 4539 0 : (dft_wait - wait_time)); 4540 break; 4541 } 4542 hca = hca->hl_next; 4543 } 4544 IBTF_DPRINTF_L4("ibdm", "\tget_waittime %llx", wait_time); 4545 return (wait_time); 4546 } 4547 4548 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4549 if (hca->hl_nports != hca->hl_nports_active) { 4550 temp = ddi_get_time() - hca->hl_attach_time; 4551 temp = ((temp >= dft_wait) ? 0 : (dft_wait - temp)); 4552 wait_time = (temp > wait_time) ? 
temp : wait_time; 4553 } 4554 } 4555 IBTF_DPRINTF_L4("ibdm", "\tget_waittime %llx", wait_time); 4556 return (wait_time); 4557 } 4558 4559 void 4560 ibdm_ibnex_port_settle_wait(ib_guid_t hca_guid, int dft_wait) 4561 { 4562 time_t wait_time; 4563 4564 mutex_enter(&ibdm.ibdm_hl_mutex); 4565 4566 while ((wait_time = ibdm_get_waittime(hca_guid, dft_wait)) > 0) { 4567 (void) cv_timedwait(&ibdm.ibdm_port_settle_cv, 4568 &ibdm.ibdm_hl_mutex, 4569 ddi_get_lbolt() + drv_usectohz(wait_time * 1000000)); 4570 } 4571 4572 mutex_exit(&ibdm.ibdm_hl_mutex); 4573 } 4574 4575 4576 /* 4577 * ibdm_ibnex_probe_hcaport 4578 * Probes the presence of HCA port (with HCA dip and port number) 4579 * Returns port attributes structure on SUCCESS 4580 */ 4581 ibdm_port_attr_t * 4582 ibdm_ibnex_probe_hcaport(ib_guid_t hca_guid, uint8_t port_num) 4583 { 4584 int ii, jj; 4585 ibdm_hca_list_t *hca_list; 4586 ibdm_port_attr_t *port_attr; 4587 4588 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_hcaport:"); 4589 4590 mutex_enter(&ibdm.ibdm_hl_mutex); 4591 hca_list = ibdm.ibdm_hca_list_head; 4592 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4593 if (hca_list->hl_hca_guid == hca_guid) { 4594 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4595 if (hca_list->hl_port_attr[jj].pa_port_num == 4596 port_num) { 4597 break; 4598 } 4599 } 4600 if (jj != hca_list->hl_nports) 4601 break; 4602 } 4603 hca_list = hca_list->hl_next; 4604 } 4605 if (ii == ibdm.ibdm_hca_count) { 4606 IBTF_DPRINTF_L2("ibdm", "\tibnex_probe_hcaport: not found"); 4607 mutex_exit(&ibdm.ibdm_hl_mutex); 4608 return (NULL); 4609 } 4610 port_attr = (ibdm_port_attr_t *)kmem_zalloc( 4611 sizeof (ibdm_port_attr_t), KM_SLEEP); 4612 bcopy((char *)&hca_list->hl_port_attr[jj], 4613 port_attr, sizeof (ibdm_port_attr_t)); 4614 ibdm_update_port_attr(port_attr); 4615 4616 mutex_exit(&ibdm.ibdm_hl_mutex); 4617 return (port_attr); 4618 } 4619 4620 4621 /* 4622 * ibdm_ibnex_get_port_attrs 4623 * Scan all HCAs for a matching port_guid. 4624 * Returns "port attributes" structure on success. 
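 * The returned structure is a copy allocated here and is expected to
 * be released with ibdm_ibnex_free_port_attr() (below).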
4625 */ 4626 ibdm_port_attr_t * 4627 ibdm_ibnex_get_port_attrs(ib_guid_t port_guid) 4628 { 4629 int ii, jj; 4630 ibdm_hca_list_t *hca_list; 4631 ibdm_port_attr_t *port_attr; 4632 4633 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_port_attrs:"); 4634 4635 mutex_enter(&ibdm.ibdm_hl_mutex); 4636 hca_list = ibdm.ibdm_hca_list_head; 4637 4638 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4639 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4640 if (hca_list->hl_port_attr[jj].pa_port_guid == 4641 port_guid) { 4642 break; 4643 } 4644 } 4645 if (jj != hca_list->hl_nports) 4646 break; 4647 hca_list = hca_list->hl_next; 4648 } 4649 4650 if (ii == ibdm.ibdm_hca_count) { 4651 IBTF_DPRINTF_L2("ibdm", "\tibnex_get_port_attrs: not found"); 4652 mutex_exit(&ibdm.ibdm_hl_mutex); 4653 return (NULL); 4654 } 4655 4656 port_attr = (ibdm_port_attr_t *)kmem_alloc(sizeof (ibdm_port_attr_t), 4657 KM_SLEEP); 4658 bcopy((char *)&hca_list->hl_port_attr[jj], port_attr, 4659 sizeof (ibdm_port_attr_t)); 4660 ibdm_update_port_attr(port_attr); 4661 4662 mutex_exit(&ibdm.ibdm_hl_mutex); 4663 return (port_attr); 4664 } 4665 4666 4667 /* 4668 * ibdm_ibnex_free_port_attr() 4669 */ 4670 void 4671 ibdm_ibnex_free_port_attr(ibdm_port_attr_t *port_attr) 4672 { 4673 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_port_attr:"); 4674 if (port_attr) { 4675 if (port_attr->pa_pkey_tbl != NULL) { 4676 kmem_free(port_attr->pa_pkey_tbl, 4677 (port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t))); 4678 } 4679 kmem_free(port_attr, sizeof (ibdm_port_attr_t)); 4680 } 4681 } 4682 4683 4684 /* 4685 * ibdm_ibnex_get_hca_list() 4686 * Returns port info for all the ports of all the HCAs 4687 */ 4688 void 4689 ibdm_ibnex_get_hca_list(ibdm_hca_list_t **hca, int *count) 4690 { 4691 ibdm_hca_list_t *head = NULL, *temp, *temp1; 4692 int ii; 4693 4694 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_list:"); 4695 4696 mutex_enter(&ibdm.ibdm_hl_mutex); 4697 temp = ibdm.ibdm_hca_list_head; 4698 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4699 temp1 = ibdm_dup_hca_attr(temp); 4700 temp1->hl_next = head; 4701 head = temp1; 4702 temp = temp->hl_next; 4703 } 4704 *count = ibdm.ibdm_hca_count; 4705 *hca = head; 4706 mutex_exit(&ibdm.ibdm_hl_mutex); 4707 } 4708 4709 4710 /* 4711 * ibdm_ibnex_get_hca_info_by_guid() 4712 */ 4713 ibdm_hca_list_t * 4714 ibdm_ibnex_get_hca_info_by_guid(ib_guid_t hca_guid) 4715 { 4716 ibdm_hca_list_t *head = NULL, *hca = NULL; 4717 4718 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_guid"); 4719 4720 mutex_enter(&ibdm.ibdm_hl_mutex); 4721 head = ibdm.ibdm_hca_list_head; 4722 while (head) { 4723 if (head->hl_hca_guid == hca_guid) { 4724 hca = ibdm_dup_hca_attr(head); 4725 hca->hl_next = NULL; 4726 break; 4727 } 4728 head = head->hl_next; 4729 } 4730 mutex_exit(&ibdm.ibdm_hl_mutex); 4731 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_guid %p", hca); 4732 return (hca); 4733 } 4734 4735 4736 /* 4737 * ibdm_dup_hca_attr() 4738 * Allocates a new HCA attribute structure and initializes it with 4739 * the incoming HCA attributes. 4740 * Returns the allocated copy.
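 * The per-port attribute array is embedded in the same allocation,
 * immediately after the ibdm_hca_list_t header; ibdm_ibnex_free_hca_list()
 * releases it as a single allocation.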
4741 */ 4742 static ibdm_hca_list_t * 4743 ibdm_dup_hca_attr(ibdm_hca_list_t *in_hca) 4744 { 4745 int len; 4746 ibdm_hca_list_t *out_hca; 4747 4748 len = sizeof (ibdm_hca_list_t) + 4749 (in_hca->hl_nports * sizeof (ibdm_port_attr_t)); 4750 IBTF_DPRINTF_L4("ibdm", "\tdup_hca_attr len %d", len); 4751 out_hca = (ibdm_hca_list_t *)kmem_alloc(len, KM_SLEEP); 4752 bcopy((char *)in_hca, 4753 (char *)out_hca, sizeof (ibdm_hca_list_t)); 4754 if (in_hca->hl_nports) { 4755 out_hca->hl_port_attr = (ibdm_port_attr_t *) 4756 ((char *)out_hca + sizeof (ibdm_hca_list_t)); 4757 bcopy((char *)in_hca->hl_port_attr, 4758 (char *)out_hca->hl_port_attr, 4759 (in_hca->hl_nports * sizeof (ibdm_port_attr_t))); 4760 for (len = 0; len < out_hca->hl_nports; len++) 4761 ibdm_update_port_attr(&out_hca->hl_port_attr[len]); 4762 } 4763 return (out_hca); 4764 } 4765 4766 4767 /* 4768 * ibdm_ibnex_free_hca_list() 4769 * Free one/more HCA lists 4770 */ 4771 void 4772 ibdm_ibnex_free_hca_list(ibdm_hca_list_t *hca_list) 4773 { 4774 int ii; 4775 size_t len; 4776 ibdm_hca_list_t *temp; 4777 ibdm_port_attr_t *port; 4778 4779 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_hca_list:"); 4780 ASSERT(hca_list); 4781 while (hca_list) { 4782 temp = hca_list; 4783 hca_list = hca_list->hl_next; 4784 for (ii = 0; ii < temp->hl_nports; ii++) { 4785 port = &temp->hl_port_attr[ii]; 4786 len = (port->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 4787 if (len != 0) 4788 kmem_free(port->pa_pkey_tbl, len); 4789 } 4790 len = sizeof (ibdm_hca_list_t) + (temp->hl_nports * 4791 sizeof (ibdm_port_attr_t)); 4792 kmem_free(temp, len); 4793 } 4794 } 4795 4796 4797 /* 4798 * ibdm_ibnex_probe_iocguid() 4799 * Probes the IOC on the fabric and returns the IOC information 4800 * if present. Otherwise, NULL is returned 4801 */ 4802 /* ARGSUSED */ 4803 ibdm_ioc_info_t * 4804 ibdm_ibnex_probe_ioc(ib_guid_t iou, ib_guid_t ioc_guid, int reprobe_flag) 4805 { 4806 int k; 4807 ibdm_ioc_info_t *ioc_info; 4808 ibdm_dp_gidinfo_t *gid_info; /* used as index and arg */ 4809 timeout_id_t *timeout_id; 4810 4811 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_ioc: (%llX, %llX, %d) Begin", 4812 iou, ioc_guid, reprobe_flag); 4813 /* Check whether we know this already */ 4814 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 4815 if (ioc_info == NULL) { 4816 mutex_enter(&ibdm.ibdm_mutex); 4817 while (ibdm.ibdm_busy & IBDM_BUSY) 4818 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4819 ibdm.ibdm_busy |= IBDM_BUSY; 4820 mutex_exit(&ibdm.ibdm_mutex); 4821 ibdm_probe_ioc(iou, ioc_guid, 0); 4822 mutex_enter(&ibdm.ibdm_mutex); 4823 ibdm.ibdm_busy &= ~IBDM_BUSY; 4824 cv_broadcast(&ibdm.ibdm_busy_cv); 4825 mutex_exit(&ibdm.ibdm_mutex); 4826 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 4827 } else if (reprobe_flag) { /* Handle Reprobe for the IOC */ 4828 ASSERT(gid_info != NULL); 4829 /* Free the ioc_list before reprobe; and cancel any timers */ 4830 mutex_enter(&ibdm.ibdm_mutex); 4831 mutex_enter(&gid_info->gl_mutex); 4832 if (ioc_info->ioc_timeout_id) { 4833 timeout_id = ioc_info->ioc_timeout_id; 4834 ioc_info->ioc_timeout_id = 0; 4835 mutex_exit(&gid_info->gl_mutex); 4836 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4837 "ioc_timeout_id = 0x%x", timeout_id); 4838 if (untimeout(timeout_id) == -1) { 4839 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4840 "untimeout ioc_timeout_id failed"); 4841 } 4842 mutex_enter(&gid_info->gl_mutex); 4843 } 4844 if (ioc_info->ioc_dc_timeout_id) { 4845 timeout_id = ioc_info->ioc_dc_timeout_id; 4846 ioc_info->ioc_dc_timeout_id = 0; 4847 
mutex_exit(&gid_info->gl_mutex); 4848 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4849 "ioc_dc_timeout_id = 0x%x", timeout_id); 4850 if (untimeout(timeout_id) == -1) { 4851 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4852 "untimeout ioc_dc_timeout_id failed"); 4853 } 4854 mutex_enter(&gid_info->gl_mutex); 4855 } 4856 for (k = 0; k < ioc_info->ioc_profile.ioc_service_entries; k++) 4857 if (ioc_info->ioc_serv[k].se_timeout_id) { 4858 timeout_id = ioc_info->ioc_serv[k]. 4859 se_timeout_id; 4860 ioc_info->ioc_serv[k].se_timeout_id = 0; 4861 mutex_exit(&gid_info->gl_mutex); 4862 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4863 "ioc_info->ioc_serv[k].se_timeout_id = %x", 4864 k, timeout_id); 4865 if (untimeout(timeout_id) == -1) { 4866 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4867 "untimeout se_timeout_id %d " 4868 "failed", k); 4869 } 4870 mutex_enter(&gid_info->gl_mutex); 4871 } 4872 mutex_exit(&gid_info->gl_mutex); 4873 mutex_exit(&ibdm.ibdm_mutex); 4874 ibdm_ibnex_free_ioc_list(ioc_info); 4875 4876 mutex_enter(&ibdm.ibdm_mutex); 4877 while (ibdm.ibdm_busy & IBDM_BUSY) 4878 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4879 ibdm.ibdm_busy |= IBDM_BUSY; 4880 mutex_exit(&ibdm.ibdm_mutex); 4881 4882 ibdm_probe_ioc(iou, ioc_guid, 1); 4883 4884 /* 4885 * Skip if gl_reprobe_flag is set, this will be 4886 * a re-inserted / new GID, for which notifications 4887 * have already been send. 4888 */ 4889 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 4890 gid_info = gid_info->gl_next) { 4891 uint8_t ii, niocs; 4892 ibdm_ioc_info_t *ioc; 4893 4894 if (gid_info->gl_iou == NULL) 4895 continue; 4896 4897 if (gid_info->gl_reprobe_flag) { 4898 gid_info->gl_reprobe_flag = 0; 4899 continue; 4900 } 4901 4902 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 4903 for (ii = 0; ii < niocs; ii++) { 4904 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 4905 if (ioc->ioc_profile.ioc_guid == ioc_guid) { 4906 mutex_enter(&ibdm.ibdm_mutex); 4907 ibdm_reprobe_update_port_srv(ioc, 4908 gid_info); 4909 mutex_exit(&ibdm.ibdm_mutex); 4910 } 4911 } 4912 } 4913 mutex_enter(&ibdm.ibdm_mutex); 4914 ibdm.ibdm_busy &= ~IBDM_BUSY; 4915 cv_broadcast(&ibdm.ibdm_busy_cv); 4916 mutex_exit(&ibdm.ibdm_mutex); 4917 4918 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 4919 } 4920 return (ioc_info); 4921 } 4922 4923 4924 /* 4925 * ibdm_get_ioc_info_with_gid() 4926 * Returns pointer to ibdm_ioc_info_t if it finds 4927 * matching record for the ioc_guid. Otherwise NULL is returned. 4928 * The pointer to gid_info is set to the second argument in case that 4929 * the non-NULL value returns (and the second argument is not NULL). 4930 * 4931 * Note. use the same strings as "ibnex_get_ioc_info" in 4932 * IBTF_DPRINTF() to keep compatibility. 
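 *
 * Typical use (sketch, mirroring the reprobe path in
 * ibdm_ibnex_probe_ioc() above):
 *
 *	ibdm_dp_gidinfo_t *gid;
 *	ibdm_ioc_info_t *ioc;
 *
 *	ioc = ibdm_get_ioc_info_with_gid(ioc_guid, &gid);
 *	if (ioc != NULL) {
 *		... gid now points at the GID entry owning the IOC ...
 *		ibdm_ibnex_free_ioc_list(ioc);
 *	}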
4933 */ 4934 static ibdm_ioc_info_t * 4935 ibdm_get_ioc_info_with_gid(ib_guid_t ioc_guid, 4936 ibdm_dp_gidinfo_t **gid_info) 4937 { 4938 int ii; 4939 ibdm_ioc_info_t *ioc = NULL, *tmp = NULL; 4940 ibdm_dp_gidinfo_t *gid_list; 4941 ib_dm_io_unitinfo_t *iou; 4942 4943 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_ioc_info: GUID %llx", ioc_guid); 4944 4945 mutex_enter(&ibdm.ibdm_mutex); 4946 while (ibdm.ibdm_busy & IBDM_BUSY) 4947 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4948 ibdm.ibdm_busy |= IBDM_BUSY; 4949 4950 if (gid_info) 4951 *gid_info = NULL; /* clear the value of gid_info */ 4952 4953 gid_list = ibdm.ibdm_dp_gidlist_head; 4954 while (gid_list) { 4955 mutex_enter(&gid_list->gl_mutex); 4956 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 4957 mutex_exit(&gid_list->gl_mutex); 4958 gid_list = gid_list->gl_next; 4959 continue; 4960 } 4961 if (gid_list->gl_iou == NULL) { 4962 IBTF_DPRINTF_L2("ibdm", 4963 "\tget_ioc_info: No IOU info"); 4964 mutex_exit(&gid_list->gl_mutex); 4965 gid_list = gid_list->gl_next; 4966 continue; 4967 } 4968 iou = &gid_list->gl_iou->iou_info; 4969 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 4970 tmp = IBDM_GIDINFO2IOCINFO(gid_list, ii); 4971 if ((tmp->ioc_profile.ioc_guid == ioc_guid) && 4972 (tmp->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS)) { 4973 ioc = ibdm_dup_ioc_info(tmp, gid_list); 4974 if (gid_info) 4975 *gid_info = gid_list; /* set this ptr */ 4976 mutex_exit(&gid_list->gl_mutex); 4977 ibdm.ibdm_busy &= ~IBDM_BUSY; 4978 cv_broadcast(&ibdm.ibdm_busy_cv); 4979 mutex_exit(&ibdm.ibdm_mutex); 4980 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: End"); 4981 return (ioc); 4982 } 4983 } 4984 if (ii == iou->iou_num_ctrl_slots) 4985 ioc = NULL; 4986 4987 mutex_exit(&gid_list->gl_mutex); 4988 gid_list = gid_list->gl_next; 4989 } 4990 4991 ibdm.ibdm_busy &= ~IBDM_BUSY; 4992 cv_broadcast(&ibdm.ibdm_busy_cv); 4993 mutex_exit(&ibdm.ibdm_mutex); 4994 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: failure End"); 4995 return (ioc); 4996 } 4997 4998 /* 4999 * ibdm_ibnex_get_ioc_info() 5000 * Returns pointer to ibdm_ioc_info_t if it finds 5001 * matching record for the ioc_guid, otherwise NULL 5002 * is returned 5003 * 5004 * Note. this is a wrapper function to ibdm_get_ioc_info_with_gid() now. 
5005 */ 5006 ibdm_ioc_info_t * 5007 ibdm_ibnex_get_ioc_info(ib_guid_t ioc_guid) 5008 { 5009 /* will not use the gid_info pointer, so the second arg is NULL */ 5010 return (ibdm_get_ioc_info_with_gid(ioc_guid, NULL)); 5011 } 5012 5013 /* 5014 * ibdm_ibnex_get_ioc_count() 5015 * Returns number of ibdm_ioc_info_t it finds 5016 */ 5017 int 5018 ibdm_ibnex_get_ioc_count(void) 5019 { 5020 int count = 0, k; 5021 ibdm_ioc_info_t *ioc; 5022 ibdm_dp_gidinfo_t *gid_list; 5023 5024 mutex_enter(&ibdm.ibdm_mutex); 5025 ibdm_sweep_fabric(0); 5026 5027 while (ibdm.ibdm_busy & IBDM_BUSY) 5028 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5029 ibdm.ibdm_busy |= IBDM_BUSY; 5030 5031 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 5032 gid_list = gid_list->gl_next) { 5033 mutex_enter(&gid_list->gl_mutex); 5034 if ((gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) || 5035 (gid_list->gl_iou == NULL)) { 5036 mutex_exit(&gid_list->gl_mutex); 5037 continue; 5038 } 5039 for (k = 0; k < gid_list->gl_iou->iou_info.iou_num_ctrl_slots; 5040 k++) { 5041 ioc = IBDM_GIDINFO2IOCINFO(gid_list, k); 5042 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) 5043 ++count; 5044 } 5045 mutex_exit(&gid_list->gl_mutex); 5046 } 5047 ibdm.ibdm_busy &= ~IBDM_BUSY; 5048 cv_broadcast(&ibdm.ibdm_busy_cv); 5049 mutex_exit(&ibdm.ibdm_mutex); 5050 5051 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_count: count = %d", count); 5052 return (count); 5053 } 5054 5055 5056 /* 5057 * ibdm_ibnex_get_ioc_list() 5058 * Returns information about all the IOCs present on the fabric. 5059 * Reprobes the IOCs and the GID list if list_flag is set to REPROBE_ALL. 5060 * Does not sweep fabric if DONOT_PROBE is set 5061 */ 5062 ibdm_ioc_info_t * 5063 ibdm_ibnex_get_ioc_list(ibdm_ibnex_get_ioclist_mtd_t list_flag) 5064 { 5065 int ii; 5066 ibdm_ioc_info_t *ioc_list = NULL, *tmp, *ioc; 5067 ibdm_dp_gidinfo_t *gid_list; 5068 ib_dm_io_unitinfo_t *iou; 5069 5070 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: Enter"); 5071 5072 mutex_enter(&ibdm.ibdm_mutex); 5073 if (list_flag != IBDM_IBNEX_DONOT_PROBE) 5074 ibdm_sweep_fabric(list_flag == IBDM_IBNEX_REPROBE_ALL); 5075 5076 while (ibdm.ibdm_busy & IBDM_BUSY) 5077 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5078 ibdm.ibdm_busy |= IBDM_BUSY; 5079 5080 gid_list = ibdm.ibdm_dp_gidlist_head; 5081 while (gid_list) { 5082 mutex_enter(&gid_list->gl_mutex); 5083 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 5084 mutex_exit(&gid_list->gl_mutex); 5085 gid_list = gid_list->gl_next; 5086 continue; 5087 } 5088 if (gid_list->gl_iou == NULL) { 5089 IBTF_DPRINTF_L2("ibdm", 5090 "\tget_ioc_list: No IOU info"); 5091 mutex_exit(&gid_list->gl_mutex); 5092 gid_list = gid_list->gl_next; 5093 continue; 5094 } 5095 iou = &gid_list->gl_iou->iou_info; 5096 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 5097 ioc = IBDM_GIDINFO2IOCINFO(gid_list, ii); 5098 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 5099 tmp = ibdm_dup_ioc_info(ioc, gid_list); 5100 tmp->ioc_next = ioc_list; 5101 ioc_list = tmp; 5102 } 5103 } 5104 mutex_exit(&gid_list->gl_mutex); 5105 gid_list = gid_list->gl_next; 5106 } 5107 ibdm.ibdm_busy &= ~IBDM_BUSY; 5108 cv_broadcast(&ibdm.ibdm_busy_cv); 5109 mutex_exit(&ibdm.ibdm_mutex); 5110 5111 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: End"); 5112 return (ioc_list); 5113 } 5114 5115 /* 5116 * ibdm_dup_ioc_info() 5117 * Duplicate the IOC information and return the IOC 5118 * information. 
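 * Besides the flat bcopy, the port GID list and the HCA list are
 * duplicated through ibdm_update_ioc_port_gidlist().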
5119 */ 5120 static ibdm_ioc_info_t * 5121 ibdm_dup_ioc_info(ibdm_ioc_info_t *in_ioc, ibdm_dp_gidinfo_t *gid_list) 5122 { 5123 ibdm_ioc_info_t *out_ioc; 5124 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*out_ioc)); 5125 ASSERT(MUTEX_HELD(&gid_list->gl_mutex)); 5126 5127 out_ioc = kmem_alloc(sizeof (ibdm_ioc_info_t), KM_SLEEP); 5128 bcopy(in_ioc, out_ioc, sizeof (ibdm_ioc_info_t)); 5129 ibdm_update_ioc_port_gidlist(out_ioc, gid_list); 5130 out_ioc->ioc_iou_dc_valid = gid_list->gl_iou->iou_dc_valid; 5131 out_ioc->ioc_iou_diagcode = gid_list->gl_iou->iou_diagcode; 5132 5133 return (out_ioc); 5134 } 5135 5136 5137 /* 5138 * ibdm_ibnex_free_ioc_list() 5139 * Deallocates memory for the IOC list structure 5140 */ 5141 void 5142 ibdm_ibnex_free_ioc_list(ibdm_ioc_info_t *ioc) 5143 { 5144 ibdm_ioc_info_t *temp; 5145 5146 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_ioc_list:"); 5147 while (ioc) { 5148 temp = ioc; 5149 ioc = ioc->ioc_next; 5150 kmem_free(temp->ioc_gid_list, 5151 (sizeof (ibdm_gid_t) * temp->ioc_nportgids)); 5152 if (temp->ioc_hca_list) 5153 ibdm_ibnex_free_hca_list(temp->ioc_hca_list); 5154 kmem_free(temp, sizeof (ibdm_ioc_info_t)); 5155 } 5156 } 5157 5158 5159 /* 5160 * ibdm_ibnex_update_pkey_tbls 5161 * Updates the DM P_Key database. 5162 * NOTE: P_Keys being added, removed, or replaced are all handled here. 5163 * 5164 * Arguments : NONE 5165 * Return Values : NONE 5166 */ 5167 void 5168 ibdm_ibnex_update_pkey_tbls(void) 5169 { 5170 int h, pp, pidx; 5171 uint_t nports; 5172 uint_t size; 5173 ib_pkey_t new_pkey; 5174 ib_pkey_t *orig_pkey; 5175 ibdm_hca_list_t *hca_list; 5176 ibdm_port_attr_t *port; 5177 ibt_hca_portinfo_t *pinfop; 5178 5179 IBTF_DPRINTF_L4("ibdm", "\tibnex_update_pkey_tbls:"); 5180 5181 mutex_enter(&ibdm.ibdm_hl_mutex); 5182 hca_list = ibdm.ibdm_hca_list_head; 5183 5184 for (h = 0; h < ibdm.ibdm_hca_count; h++) { 5185 5186 /* This updates P_Key Tables for all ports of this HCA */ 5187 (void) ibt_query_hca_ports(hca_list->hl_hca_hdl, 0, &pinfop, 5188 &nports, &size); 5189 5190 /* number of ports shouldn't have changed */ 5191 ASSERT(nports == hca_list->hl_nports); 5192 5193 for (pp = 0; pp < hca_list->hl_nports; pp++) { 5194 port = &hca_list->hl_port_attr[pp]; 5195 5196 /* 5197 * First figure out the P_Keys from IBTL. 5198 * Three things could have happened: 5199 * New P_Keys added 5200 * Existing P_Keys removed 5201 * Both of the above two 5202 * 5203 * Loop through the P_Key Indices and check if a 5204 * given P_Key_Ix matches that of the one seen by 5205 * IBDM. If they match no action is needed. 5206 * 5207 * If they don't match: 5208 * 1. if orig_pkey is invalid and new_pkey is valid 5209 * ---> add new_pkey to DM database 5210 * 2. if orig_pkey is valid and new_pkey is invalid 5211 * ---> remove orig_pkey from DM database 5212 * 3. if orig_pkey and new_pkey are both valid: 5213 * ---> remove orig_pkey from DM database 5214 * ---> add new_pkey to DM database 5215 * 4. if orig_pkey and new_pkey are both invalid: 5216 * ---> do nothing; the DM database is already up to date.
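 *
 * For example, if the P_Key at a given index changes from 0x8001 to
 * 0x8002, case 3 applies: the IBMF resources tied to 0x8001 are torn
 * down via ibdm_port_attr_ibmf_fini() and new ones are set up for
 * 0x8002 via ibdm_port_attr_ibmf_init().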
5217 */ 5218 5219 for (pidx = 0; pidx < port->pa_npkeys; pidx++) { 5220 new_pkey = pinfop[pp].p_pkey_tbl[pidx]; 5221 orig_pkey = &port->pa_pkey_tbl[pidx].pt_pkey; 5222 5223 /* keys match - do nothing */ 5224 if (*orig_pkey == new_pkey) 5225 continue; 5226 5227 if (IBDM_INVALID_PKEY(*orig_pkey) && 5228 !IBDM_INVALID_PKEY(new_pkey)) { 5229 /* P_Key was added */ 5230 IBTF_DPRINTF_L5("ibdm", 5231 "\tibnex_update_pkey_tbls: new " 5232 "P_Key added = 0x%x", new_pkey); 5233 *orig_pkey = new_pkey; 5234 ibdm_port_attr_ibmf_init(port, 5235 new_pkey, pp); 5236 } else if (!IBDM_INVALID_PKEY(*orig_pkey) && 5237 IBDM_INVALID_PKEY(new_pkey)) { 5238 /* P_Key was removed */ 5239 IBTF_DPRINTF_L5("ibdm", 5240 "\tibnex_update_pkey_tbls: P_Key " 5241 "removed = 0x%x", *orig_pkey); 5242 *orig_pkey = new_pkey; 5243 (void) ibdm_port_attr_ibmf_fini(port, 5244 pidx); 5245 } else if (!IBDM_INVALID_PKEY(*orig_pkey) && 5246 !IBDM_INVALID_PKEY(new_pkey)) { 5247 /* P_Key were replaced */ 5248 IBTF_DPRINTF_L5("ibdm", 5249 "\tibnex_update_pkey_tbls: P_Key " 5250 "replaced 0x%x with 0x%x", 5251 *orig_pkey, new_pkey); 5252 (void) ibdm_port_attr_ibmf_fini(port, 5253 pidx); 5254 *orig_pkey = new_pkey; 5255 ibdm_port_attr_ibmf_init(port, 5256 new_pkey, pp); 5257 } else { 5258 /* 5259 * P_Keys are invalid 5260 * set anyway to reflect if 5261 * INVALID_FULL was changed to 5262 * INVALID_LIMITED or vice-versa. 5263 */ 5264 *orig_pkey = new_pkey; 5265 } /* end of else */ 5266 5267 } /* loop of p_key index */ 5268 5269 } /* loop of #ports of HCA */ 5270 5271 ibt_free_portinfo(pinfop, size); 5272 hca_list = hca_list->hl_next; 5273 5274 } /* loop for all HCAs in the system */ 5275 5276 mutex_exit(&ibdm.ibdm_hl_mutex); 5277 } 5278 5279 5280 /* 5281 * ibdm_send_ioc_profile() 5282 * Send IOC Controller Profile request. When the request is completed 5283 * IBMF calls ibdm_process_incoming_mad routine to inform about 5284 * the completion. 5285 */ 5286 static int 5287 ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *gid_info, uint8_t ioc_no) 5288 { 5289 ibmf_msg_t *msg; 5290 ib_mad_hdr_t *hdr; 5291 ibdm_ioc_info_t *ioc_info = &(gid_info->gl_iou->iou_ioc_info[ioc_no]); 5292 ibdm_timeout_cb_args_t *cb_args; 5293 5294 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: " 5295 "gid info 0x%p, ioc_no = %d", gid_info, ioc_no); 5296 5297 /* 5298 * Send command to get IOC profile. 5299 * Allocate a IBMF packet and initialize the packet. 
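 * If the target previously requested redirection (gl_redirected),
 * the request is addressed to the redirected LID/QP below.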
5300 */ 5301 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 5302 &msg) != IBMF_SUCCESS) { 5303 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: pkt alloc fail"); 5304 return (IBDM_FAILURE); 5305 } 5306 5307 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 5308 ibdm_alloc_send_buffers(msg); 5309 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 5310 5311 mutex_enter(&gid_info->gl_mutex); 5312 ibdm_bump_transactionID(gid_info); 5313 mutex_exit(&gid_info->gl_mutex); 5314 5315 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 5316 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 5317 if (gid_info->gl_redirected == B_TRUE) { 5318 if (gid_info->gl_redirect_dlid != 0) { 5319 msg->im_local_addr.ia_remote_lid = 5320 gid_info->gl_redirect_dlid; 5321 } 5322 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 5323 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 5324 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 5325 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 5326 } else { 5327 msg->im_local_addr.ia_remote_qno = 1; 5328 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 5329 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 5330 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 5331 } 5332 5333 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 5334 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 5335 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 5336 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 5337 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 5338 hdr->Status = 0; 5339 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 5340 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 5341 hdr->AttributeModifier = h2b32(ioc_no + 1); 5342 5343 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 5344 cb_args = &ioc_info->ioc_cb_args; 5345 cb_args->cb_gid_info = gid_info; 5346 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 5347 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 5348 cb_args->cb_ioc_num = ioc_no; 5349 5350 mutex_enter(&gid_info->gl_mutex); 5351 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 5352 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 5353 mutex_exit(&gid_info->gl_mutex); 5354 5355 IBTF_DPRINTF_L5("ibdm", "\tsend_ioc_profile:" 5356 "timeout %x", ioc_info->ioc_timeout_id); 5357 5358 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 5359 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 5360 IBTF_DPRINTF_L2("ibdm", 5361 "\tsend_ioc_profile: msg transport failed"); 5362 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 5363 } 5364 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 5365 return (IBDM_SUCCESS); 5366 } 5367 5368 5369 /* 5370 * ibdm_port_reachable 5371 * Returns B_TRUE if the port GID is reachable by sending 5372 * a SA query to get the NODE record for this port GUID. 5373 */ 5374 static boolean_t 5375 ibdm_port_reachable(ibmf_saa_handle_t sa_hdl, ib_guid_t guid) 5376 { 5377 sa_node_record_t *resp; 5378 size_t length; 5379 5380 /* 5381 * Verify if it's reachable by getting the node record. 5382 */ 5383 if (ibdm_get_node_record_by_port(sa_hdl, guid, &resp, &length) == 5384 IBDM_SUCCESS) { 5385 kmem_free(resp, length); 5386 return (B_TRUE); 5387 } 5388 return (B_FALSE); 5389 } 5390 5391 /* 5392 * ibdm_get_node_record_by_port 5393 * Sends a SA query to get the NODE record for port GUID 5394 * Returns IBDM_SUCCESS if the port GID is reachable. 5395 * 5396 * Note: the caller must be responsible for freeing the resource 5397 * by calling kmem_free(resp, length) later. 
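 *
 * Example (sketch, following the pattern in ibdm_port_reachable()
 * above):
 *
 *	sa_node_record_t *nr;
 *	size_t len;
 *
 *	if (ibdm_get_node_record_by_port(sa_hdl, guid, &nr, &len) ==
 *	    IBDM_SUCCESS) {
 *		... nr->NodeInfo describes the node owning this port ...
 *		kmem_free(nr, len);
 *	}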
5398 */ 5399 static int 5400 ibdm_get_node_record_by_port(ibmf_saa_handle_t sa_hdl, ib_guid_t guid, 5401 sa_node_record_t **resp, size_t *length) 5402 { 5403 sa_node_record_t req; 5404 ibmf_saa_access_args_t args; 5405 int ret; 5406 ASSERT(resp != NULL && length != NULL); 5407 5408 IBTF_DPRINTF_L4("ibdm", "\tport_reachable: port_guid %llx", 5409 guid); 5410 5411 bzero(&req, sizeof (sa_node_record_t)); 5412 req.NodeInfo.PortGUID = guid; 5413 5414 args.sq_attr_id = SA_NODERECORD_ATTRID; 5415 args.sq_access_type = IBMF_SAA_RETRIEVE; 5416 args.sq_component_mask = SA_NODEINFO_COMPMASK_PORTGUID; 5417 args.sq_template = &req; 5418 args.sq_callback = NULL; 5419 args.sq_callback_arg = NULL; 5420 5421 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) resp); 5422 if (ret != IBMF_SUCCESS) { 5423 IBTF_DPRINTF_L2("ibdm", "\tport_reachable:" 5424 " SA Retrieve Failed: %d", ret); 5425 return (IBDM_FAILURE); 5426 } 5427 if (*resp == NULL || *length == 0) { 5428 IBTF_DPRINTF_L2("ibdm", "\tport_reachable: No records"); 5429 return (IBDM_FAILURE); 5430 } 5431 /* 5432 * There is one NodeRecord on each endport on a subnet. 5433 */ 5434 ASSERT(*length == sizeof (sa_node_record_t)); 5435 5436 return (IBDM_SUCCESS); 5437 } 5438 5439 5440 /* 5441 * Update the gidlist for all affected IOCs when GID becomes 5442 * available/unavailable. 5443 * 5444 * Parameters : 5445 * gidinfo - Incoming / Outgoing GID. 5446 * add_flag - 1 for GID added, 0 for GID removed. 5447 * - (-1) : IOC gid list updated, ioc_list required. 5448 * 5449 * This function gets the GID for the node GUID corresponding to the 5450 * port GID. Gets the IOU info 5451 */ 5452 static ibdm_ioc_info_t * 5453 ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *gid_info, int avail_flag) 5454 { 5455 ibdm_dp_gidinfo_t *node_gid = NULL; 5456 uint8_t niocs, ii; 5457 ibdm_ioc_info_t *ioc, *ioc_list = NULL, *tmp; 5458 5459 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist"); 5460 5461 switch (avail_flag) { 5462 case 1 : 5463 node_gid = ibdm_check_dest_nodeguid(gid_info); 5464 break; 5465 case 0 : 5466 node_gid = ibdm_handle_gid_rm(gid_info); 5467 break; 5468 case -1 : 5469 node_gid = gid_info; 5470 break; 5471 default : 5472 break; 5473 } 5474 5475 if (node_gid == NULL) { 5476 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist: " 5477 "No node GID found, port gid 0x%p, avail_flag %d", 5478 gid_info, avail_flag); 5479 return (NULL); 5480 } 5481 5482 mutex_enter(&node_gid->gl_mutex); 5483 if ((node_gid->gl_state != IBDM_GID_PROBING_COMPLETE && 5484 node_gid->gl_state != IBDM_GID_PROBING_SKIPPED) || 5485 node_gid->gl_iou == NULL) { 5486 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist " 5487 "gl_state %x, gl_iou %p", node_gid->gl_state, 5488 node_gid->gl_iou); 5489 mutex_exit(&node_gid->gl_mutex); 5490 return (NULL); 5491 } 5492 5493 niocs = node_gid->gl_iou->iou_info.iou_num_ctrl_slots; 5494 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : niocs %x", 5495 niocs); 5496 for (ii = 0; ii < niocs; ii++) { 5497 ioc = IBDM_GIDINFO2IOCINFO(node_gid, ii); 5498 /* 5499 * Skip IOCs for which probe is not complete or 5500 * reprobe is progress 5501 */ 5502 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 5503 tmp = ibdm_dup_ioc_info(ioc, node_gid); 5504 tmp->ioc_info_updated.ib_gid_prop_updated = 1; 5505 tmp->ioc_next = ioc_list; 5506 ioc_list = tmp; 5507 } 5508 } 5509 mutex_exit(&node_gid->gl_mutex); 5510 5511 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : return %p", 5512 ioc_list); 5513 return (ioc_list); 5514 } 5515 5516 /* 5517 * ibdm_saa_event_cb : 5518 * Event handling which does 
*not* require ibdm_hl_mutex to be 5519 * held is executed in the same thread. This is to prevent 5520 * deadlocks with HCA port down notifications which hold the 5521 * ibdm_hl_mutex. 5522 * 5523 * GID_AVAILABLE event is handled here. A taskq is spawned to 5524 * handle GID_UNAVAILABLE. 5525 * 5526 * A new mutex ibdm_ibnex_mutex has been introduced to protect 5527 * ibnex_callback. This has been done to prevent any possible 5528 * deadlock (described above) while handling GID_AVAILABLE. 5529 * 5530 * IBMF calls the event callback for a HCA port. The SA handle 5531 * for this port remains valid until the callback returns, so 5532 * IBDM may safely call IBMF using the above SA handle here. 5533 * 5534 * IBDM will additionally check (SA handle != NULL), before 5535 * calling IBMF. 5536 */ 5537 /*ARGSUSED*/ 5538 static void 5539 ibdm_saa_event_cb(ibmf_saa_handle_t ibmf_saa_handle, 5540 ibmf_saa_subnet_event_t ibmf_saa_event, 5541 ibmf_saa_event_details_t *event_details, void *callback_arg) 5542 { 5543 ibdm_saa_event_arg_t *event_arg; 5544 ib_gid_t sgid, dgid; 5545 ibdm_port_attr_t *hca_port; 5546 ibdm_dp_gidinfo_t *gid_info, *node_gid_info = NULL; 5547 sa_node_record_t *nrec; 5548 size_t length; 5549 5550 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 5551 5552 hca_port = (ibdm_port_attr_t *)callback_arg; 5553 5554 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_cb(%x, %x, %x, %x)\n", 5555 ibmf_saa_handle, ibmf_saa_event, event_details, 5556 callback_arg); 5557 #ifdef DEBUG 5558 if (ibdm_ignore_saa_event) 5559 return; 5560 #endif 5561 5562 if (ibmf_saa_event == IBMF_SAA_EVENT_GID_AVAILABLE) { 5563 /* 5564 * Ensure no other probe / sweep fabric is in 5565 * progress. 5566 */ 5567 mutex_enter(&ibdm.ibdm_mutex); 5568 while (ibdm.ibdm_busy & IBDM_BUSY) 5569 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5570 ibdm.ibdm_busy |= IBDM_BUSY; 5571 mutex_exit(&ibdm.ibdm_mutex); 5572 5573 /* 5574 * If we already know about this GID, return. 5575 * GID_AVAILABLE may be reported for multiple HCA 5576 * ports.
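 * ibdm_check_dgid() below looks the reported GUID and prefix up
 * among the GIDs IBDM already knows about, so duplicate reports
 * are dropped here.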
5577 */ 5578 if ((ibdm_check_dgid(event_details->ie_gid.gid_guid, 5579 event_details->ie_gid.gid_prefix)) != NULL) { 5580 mutex_enter(&ibdm.ibdm_mutex); 5581 ibdm.ibdm_busy &= ~IBDM_BUSY; 5582 cv_broadcast(&ibdm.ibdm_busy_cv); 5583 mutex_exit(&ibdm.ibdm_mutex); 5584 return; 5585 } 5586 5587 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 5588 "Insertion notified", 5589 event_details->ie_gid.gid_prefix, 5590 event_details->ie_gid.gid_guid); 5591 5592 /* This is a new gid, insert it to GID list */ 5593 sgid.gid_prefix = hca_port->pa_sn_prefix; 5594 sgid.gid_guid = hca_port->pa_port_guid; 5595 dgid.gid_prefix = event_details->ie_gid.gid_prefix; 5596 dgid.gid_guid = event_details->ie_gid.gid_guid; 5597 gid_info = ibdm_create_gid_info(hca_port, sgid, dgid); 5598 if (gid_info == NULL) { 5599 IBTF_DPRINTF_L4("ibdm", "\tGID_AVAILABLE: " 5600 "create_gid_info returned NULL"); 5601 mutex_enter(&ibdm.ibdm_mutex); 5602 ibdm.ibdm_busy &= ~IBDM_BUSY; 5603 cv_broadcast(&ibdm.ibdm_busy_cv); 5604 mutex_exit(&ibdm.ibdm_mutex); 5605 return; 5606 } 5607 mutex_enter(&gid_info->gl_mutex); 5608 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED; 5609 mutex_exit(&gid_info->gl_mutex); 5610 5611 /* Get the node GUID */ 5612 if (ibdm_get_node_record_by_port(ibmf_saa_handle, dgid.gid_guid, 5613 &nrec, &length) != IBDM_SUCCESS) { 5614 /* 5615 * Set the state to PROBE_NOT_DONE for the 5616 * next sweep to probe it 5617 */ 5618 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_taskq: " 5619 "Skipping GID : port GUID not found"); 5620 mutex_enter(&gid_info->gl_mutex); 5621 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE; 5622 mutex_exit(&gid_info->gl_mutex); 5623 mutex_enter(&ibdm.ibdm_mutex); 5624 ibdm.ibdm_busy &= ~IBDM_BUSY; 5625 cv_broadcast(&ibdm.ibdm_busy_cv); 5626 mutex_exit(&ibdm.ibdm_mutex); 5627 return; 5628 } 5629 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID; 5630 gid_info->gl_devid = nrec->NodeInfo.DeviceID; 5631 kmem_free(nrec, length); 5632 gid_info->gl_portguid = dgid.gid_guid; 5633 5634 /* 5635 * Get the gid info with the same node GUID. 5636 */ 5637 mutex_enter(&ibdm.ibdm_mutex); 5638 node_gid_info = ibdm.ibdm_dp_gidlist_head; 5639 while (node_gid_info) { 5640 if (node_gid_info->gl_nodeguid == 5641 gid_info->gl_nodeguid && 5642 node_gid_info->gl_iou != NULL) { 5643 break; 5644 } 5645 node_gid_info = node_gid_info->gl_next; 5646 } 5647 mutex_exit(&ibdm.ibdm_mutex); 5648 5649 /* 5650 * Handling a new GID requires filling of gl_hca_list. 5651 * This require ibdm hca_list to be parsed and hence 5652 * holding the ibdm_hl_mutex. Spawning a new thread to 5653 * handle this. 
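 * The new thread runs ibdm_saa_handle_new_gid(), dispatched on
 * system_taskq with TQ_NOSLEEP below.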
5654 */ 5655 if (node_gid_info == NULL) { 5656 if (taskq_dispatch(system_taskq, 5657 ibdm_saa_handle_new_gid, (void *)gid_info, 5658 TQ_NOSLEEP) == NULL) { 5659 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5660 "new_gid taskq_dispatch failed"); 5661 return; 5662 } 5663 } 5664 5665 mutex_enter(&ibdm.ibdm_mutex); 5666 ibdm.ibdm_busy &= ~IBDM_BUSY; 5667 cv_broadcast(&ibdm.ibdm_busy_cv); 5668 mutex_exit(&ibdm.ibdm_mutex); 5669 return; 5670 } 5671 5672 if (ibmf_saa_event != IBMF_SAA_EVENT_GID_UNAVAILABLE) 5673 return; 5674 5675 event_arg = (ibdm_saa_event_arg_t *)kmem_alloc( 5676 sizeof (ibdm_saa_event_arg_t), KM_SLEEP); 5677 event_arg->ibmf_saa_handle = ibmf_saa_handle; 5678 event_arg->ibmf_saa_event = ibmf_saa_event; 5679 bcopy(event_details, &event_arg->event_details, 5680 sizeof (ibmf_saa_event_details_t)); 5681 event_arg->callback_arg = callback_arg; 5682 5683 if (taskq_dispatch(system_taskq, ibdm_saa_event_taskq, 5684 (void *)event_arg, TQ_NOSLEEP) == NULL) { 5685 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5686 "taskq_dispatch failed"); 5687 ibdm_free_saa_event_arg(event_arg); 5688 return; 5689 } 5690 } 5691 5692 /* 5693 * Handle a new GID discovered by GID_AVAILABLE saa event. 5694 */ 5695 void 5696 ibdm_saa_handle_new_gid(void *arg) 5697 { 5698 ibdm_dp_gidinfo_t *gid_info; 5699 ibdm_hca_list_t *hca_list = NULL; 5700 ibdm_port_attr_t *port = NULL; 5701 ibdm_ioc_info_t *ioc_list = NULL; 5702 5703 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid(%p)", arg); 5704 5705 gid_info = (ibdm_dp_gidinfo_t *)arg; 5706 5707 /* 5708 * Ensure that no other sweep / probe has completed 5709 * probing this gid. 5710 */ 5711 mutex_enter(&gid_info->gl_mutex); 5712 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) { 5713 mutex_exit(&gid_info->gl_mutex); 5714 return; 5715 } 5716 mutex_exit(&gid_info->gl_mutex); 5717 5718 /* 5719 * Parse HCAs to fill gl_hca_list 5720 */ 5721 mutex_enter(&ibdm.ibdm_hl_mutex); 5722 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5723 ibdm_get_next_port(&hca_list, &port, 1)) { 5724 if (ibdm_port_reachable(port->pa_sa_hdl, 5725 gid_info->gl_portguid) == B_TRUE) { 5726 ibdm_addto_glhcalist(gid_info, hca_list); 5727 } 5728 } 5729 mutex_exit(&ibdm.ibdm_hl_mutex); 5730 5731 /* 5732 * Ensure no other probe / sweep fabric is in 5733 * progress. 
5734 */ 5735 mutex_enter(&ibdm.ibdm_mutex); 5736 while (ibdm.ibdm_busy & IBDM_BUSY) 5737 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5738 ibdm.ibdm_busy |= IBDM_BUSY; 5739 mutex_exit(&ibdm.ibdm_mutex); 5740 5741 /* 5742 * New IOU probe it, to check if new IOCs 5743 */ 5744 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid: " 5745 "new GID : probing"); 5746 mutex_enter(&ibdm.ibdm_mutex); 5747 ibdm.ibdm_ngid_probes_in_progress++; 5748 mutex_exit(&ibdm.ibdm_mutex); 5749 mutex_enter(&gid_info->gl_mutex); 5750 gid_info->gl_reprobe_flag = 0; 5751 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE; 5752 mutex_exit(&gid_info->gl_mutex); 5753 ibdm_probe_gid_thread((void *)gid_info); 5754 5755 mutex_enter(&ibdm.ibdm_mutex); 5756 ibdm_wait_probe_completion(); 5757 mutex_exit(&ibdm.ibdm_mutex); 5758 5759 if (gid_info->gl_iou == NULL) { 5760 mutex_enter(&ibdm.ibdm_mutex); 5761 ibdm.ibdm_busy &= ~IBDM_BUSY; 5762 cv_broadcast(&ibdm.ibdm_busy_cv); 5763 mutex_exit(&ibdm.ibdm_mutex); 5764 return; 5765 } 5766 5767 /* 5768 * Update GID list in all IOCs affected by this 5769 */ 5770 ioc_list = ibdm_update_ioc_gidlist(gid_info, 1); 5771 5772 /* 5773 * Pass on the IOCs with updated GIDs to IBnexus 5774 */ 5775 if (ioc_list) { 5776 mutex_enter(&ibdm.ibdm_ibnex_mutex); 5777 if (ibdm.ibdm_ibnex_callback != NULL) { 5778 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list, 5779 IBDM_EVENT_IOC_PROP_UPDATE); 5780 } 5781 mutex_exit(&ibdm.ibdm_ibnex_mutex); 5782 } 5783 5784 mutex_enter(&ibdm.ibdm_mutex); 5785 ibdm.ibdm_busy &= ~IBDM_BUSY; 5786 cv_broadcast(&ibdm.ibdm_busy_cv); 5787 mutex_exit(&ibdm.ibdm_mutex); 5788 } 5789 5790 /* 5791 * ibdm_saa_event_taskq : 5792 * GID_UNAVAILABLE Event handling requires ibdm_hl_mutex to be 5793 * held. The GID_UNAVAILABLE handling is done in a taskq to 5794 * prevent deadlocks with HCA port down notifications which hold 5795 * ibdm_hl_mutex. 
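 *
 * The event details are copied into an ibdm_saa_event_arg_t
 * (allocated with KM_SLEEP in ibdm_saa_event_cb()) before the
 * dispatch; every exit path of this function releases it via
 * ibdm_free_saa_event_arg().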
5796 */ 5797 void 5798 ibdm_saa_event_taskq(void *arg) 5799 { 5800 ibdm_saa_event_arg_t *event_arg; 5801 ibmf_saa_handle_t ibmf_saa_handle; 5802 ibmf_saa_subnet_event_t ibmf_saa_event; 5803 ibmf_saa_event_details_t *event_details; 5804 void *callback_arg; 5805 5806 ibdm_dp_gidinfo_t *gid_info; 5807 ibdm_port_attr_t *hca_port, *port = NULL; 5808 ibdm_hca_list_t *hca_list = NULL; 5809 int sa_handle_valid = 0; 5810 ibdm_ioc_info_t *ioc_list = NULL; 5811 5812 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 5813 5814 event_arg = (ibdm_saa_event_arg_t *)arg; 5815 ibmf_saa_handle = event_arg->ibmf_saa_handle; 5816 ibmf_saa_event = event_arg->ibmf_saa_event; 5817 event_details = &event_arg->event_details; 5818 callback_arg = event_arg->callback_arg; 5819 5820 ASSERT(callback_arg != NULL); 5821 ASSERT(ibmf_saa_event == IBMF_SAA_EVENT_GID_UNAVAILABLE); 5822 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_taskq(%x, %x, %x, %x)", 5823 ibmf_saa_handle, ibmf_saa_event, event_details, 5824 callback_arg); 5825 5826 hca_port = (ibdm_port_attr_t *)callback_arg; 5827 5828 /* Check if the port_attr is still valid */ 5829 mutex_enter(&ibdm.ibdm_hl_mutex); 5830 for (ibdm_get_next_port(&hca_list, &port, 0); port; 5831 ibdm_get_next_port(&hca_list, &port, 0)) { 5832 if (port == hca_port && port->pa_port_guid == 5833 hca_port->pa_port_guid) { 5834 if (ibmf_saa_handle == hca_port->pa_sa_hdl) 5835 sa_handle_valid = 1; 5836 break; 5837 } 5838 } 5839 mutex_exit(&ibdm.ibdm_hl_mutex); 5840 if (sa_handle_valid == 0) { 5841 ibdm_free_saa_event_arg(event_arg); 5842 return; 5843 } 5844 5845 if (hca_port && (hca_port->pa_sa_hdl == NULL || 5846 ibmf_saa_handle != hca_port->pa_sa_hdl)) { 5847 ibdm_free_saa_event_arg(event_arg); 5848 return; 5849 } 5850 hca_list = NULL; 5851 port = NULL; 5852 5853 /* 5854 * Check if the GID is visible to other HCA ports. 5855 * Return if so. 5856 */ 5857 mutex_enter(&ibdm.ibdm_hl_mutex); 5858 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5859 ibdm_get_next_port(&hca_list, &port, 1)) { 5860 if (ibdm_port_reachable(port->pa_sa_hdl, 5861 event_details->ie_gid.gid_guid) == B_TRUE) { 5862 mutex_exit(&ibdm.ibdm_hl_mutex); 5863 ibdm_free_saa_event_arg(event_arg); 5864 return; 5865 } 5866 } 5867 mutex_exit(&ibdm.ibdm_hl_mutex); 5868 5869 /* 5870 * Ensure no other probe / sweep fabric is in 5871 * progress. 5872 */ 5873 mutex_enter(&ibdm.ibdm_mutex); 5874 while (ibdm.ibdm_busy & IBDM_BUSY) 5875 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5876 ibdm.ibdm_busy |= IBDM_BUSY; 5877 mutex_exit(&ibdm.ibdm_mutex); 5878 5879 /* 5880 * If this GID is no longer in GID list, return 5881 * GID_UNAVAILABLE may be reported for multiple HCA 5882 * ports. 
5883 */ 5884 mutex_enter(&ibdm.ibdm_mutex); 5885 gid_info = ibdm.ibdm_dp_gidlist_head; 5886 while (gid_info) { 5887 if (gid_info->gl_portguid == 5888 event_details->ie_gid.gid_guid) { 5889 break; 5890 } 5891 gid_info = gid_info->gl_next; 5892 } 5893 mutex_exit(&ibdm.ibdm_mutex); 5894 if (gid_info == NULL) { 5895 mutex_enter(&ibdm.ibdm_mutex); 5896 ibdm.ibdm_busy &= ~IBDM_BUSY; 5897 cv_broadcast(&ibdm.ibdm_busy_cv); 5898 mutex_exit(&ibdm.ibdm_mutex); 5899 ibdm_free_saa_event_arg(event_arg); 5900 return; 5901 } 5902 5903 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 5904 "Unavailable notification", 5905 event_details->ie_gid.gid_prefix, 5906 event_details->ie_gid.gid_guid); 5907 5908 /* 5909 * Update GID list in all IOCs affected by this 5910 */ 5911 if (gid_info->gl_state == IBDM_GID_PROBING_SKIPPED || 5912 gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) 5913 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0); 5914 5915 /* 5916 * Remove GID from the global GID list 5917 * Handle the case where all port GIDs for an 5918 * IOU have been hot-removed. Check both gid_info 5919 * & ioc_info for checking ngids. 5920 */ 5921 mutex_enter(&ibdm.ibdm_mutex); 5922 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) { 5923 mutex_enter(&gid_info->gl_mutex); 5924 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou); 5925 mutex_exit(&gid_info->gl_mutex); 5926 } 5927 if (gid_info->gl_prev != NULL) 5928 gid_info->gl_prev->gl_next = gid_info->gl_next; 5929 if (gid_info->gl_next != NULL) 5930 gid_info->gl_next->gl_prev = gid_info->gl_prev; 5931 5932 if (gid_info == ibdm.ibdm_dp_gidlist_head) 5933 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next; 5934 if (gid_info == ibdm.ibdm_dp_gidlist_tail) 5935 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev; 5936 ibdm.ibdm_ngids--; 5937 5938 ibdm.ibdm_busy &= ~IBDM_BUSY; 5939 cv_broadcast(&ibdm.ibdm_busy_cv); 5940 mutex_exit(&ibdm.ibdm_mutex); 5941 5942 /* free the hca_list on this gid_info */ 5943 ibdm_delete_glhca_list(gid_info); 5944 5945 mutex_destroy(&gid_info->gl_mutex); 5946 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t)); 5947 5948 /* 5949 * Pass on the IOCs with updated GIDs to IBnexus 5950 */ 5951 if (ioc_list) { 5952 IBTF_DPRINTF_L4("ibdm", "\tGID_UNAVAILABLE " 5953 "IOC_PROP_UPDATE for %p\n", ioc_list); 5954 mutex_enter(&ibdm.ibdm_ibnex_mutex); 5955 if (ibdm.ibdm_ibnex_callback != NULL) { 5956 (*ibdm.ibdm_ibnex_callback)((void *) 5957 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 5958 } 5959 mutex_exit(&ibdm.ibdm_ibnex_mutex); 5960 } 5961 5962 ibdm_free_saa_event_arg(event_arg); 5963 } 5964 5965 5966 static int 5967 ibdm_cmp_gid_list(ibdm_gid_t *new, ibdm_gid_t *prev) 5968 { 5969 ibdm_gid_t *scan_new, *scan_prev; 5970 int cmp_failed = 0; 5971 5972 ASSERT(new != NULL); 5973 ASSERT(prev != NULL); 5974 5975 /* 5976 * Search for each new gid anywhere in the prev GID list. 5977 * Note that the gid list could have been re-ordered. 
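 * The comparison only verifies that every GID in 'new' is present
 * somewhere in 'prev'; the caller (ibdm_reprobe_update_port_srv(),
 * below) checks the GID counts separately before calling this
 * routine.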
5978 */ 5979 for (scan_new = new; scan_new; scan_new = scan_new->gid_next) { 5980 for (scan_prev = prev, cmp_failed = 1; scan_prev; 5981 scan_prev = scan_prev->gid_next) { 5982 if (scan_prev->gid_dgid_hi == scan_new->gid_dgid_hi && 5983 scan_prev->gid_dgid_lo == scan_new->gid_dgid_lo) { 5984 cmp_failed = 0; 5985 break; 5986 } 5987 } 5988 5989 if (cmp_failed) 5990 return (1); 5991 } 5992 return (0); 5993 } 5994 5995 /* 5996 * This is always called in a single thread 5997 * This function updates the gid_list and serv_list of IOC 5998 * The current gid_list is in ioc_info_t(contains only port 5999 * guids for which probe is done) & gidinfo_t(other port gids) 6000 * The gids in both locations are used for comparision. 6001 */ 6002 static void 6003 ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *ioc, ibdm_dp_gidinfo_t *gidinfo) 6004 { 6005 ibdm_gid_t *cur_gid_list; 6006 uint_t cur_nportgids; 6007 6008 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 6009 6010 ioc->ioc_info_updated.ib_prop_updated = 0; 6011 6012 6013 /* Current GID list in gid_info only */ 6014 cur_gid_list = gidinfo->gl_gid; 6015 cur_nportgids = gidinfo->gl_ngids; 6016 6017 if (ioc->ioc_prev_serv_cnt != 6018 ioc->ioc_profile.ioc_service_entries || 6019 ibdm_serv_cmp(&ioc->ioc_serv[0], &ioc->ioc_prev_serv[0], 6020 ioc->ioc_prev_serv_cnt)) 6021 ioc->ioc_info_updated.ib_srv_prop_updated = 1; 6022 6023 if (ioc->ioc_prev_nportgids != cur_nportgids || 6024 ioc->ioc_prev_gid_list == NULL || cur_gid_list == NULL) { 6025 ioc->ioc_info_updated.ib_gid_prop_updated = 1; 6026 } else if (ibdm_cmp_gid_list(ioc->ioc_prev_gid_list, cur_gid_list)) { 6027 ioc->ioc_info_updated.ib_gid_prop_updated = 1; 6028 } 6029 6030 /* Zero out previous entries */ 6031 ibdm_free_gid_list(ioc->ioc_prev_gid_list); 6032 if (ioc->ioc_prev_serv) 6033 kmem_free(ioc->ioc_prev_serv, ioc->ioc_prev_serv_cnt * 6034 sizeof (ibdm_srvents_info_t)); 6035 ioc->ioc_prev_serv_cnt = 0; 6036 ioc->ioc_prev_nportgids = 0; 6037 ioc->ioc_prev_serv = NULL; 6038 ioc->ioc_prev_gid_list = NULL; 6039 } 6040 6041 /* 6042 * Handle GID removal. This returns gid_info of an GID for the same 6043 * node GUID, if found. For an GID with IOU information, the same 6044 * gid_info is returned if no gid_info with same node_guid is found. 6045 */ 6046 static ibdm_dp_gidinfo_t * 6047 ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *rm_gid) 6048 { 6049 ibdm_dp_gidinfo_t *gid_list; 6050 6051 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm(0x%p)", rm_gid); 6052 6053 if (rm_gid->gl_iou == NULL) { 6054 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm NO iou"); 6055 /* 6056 * Search for a GID with same node_guid and 6057 * gl_iou != NULL 6058 */ 6059 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 6060 gid_list = gid_list->gl_next) { 6061 if (gid_list->gl_iou != NULL && (gid_list->gl_nodeguid 6062 == rm_gid->gl_nodeguid)) 6063 break; 6064 } 6065 6066 if (gid_list) 6067 ibdm_rmfrom_glgid_list(gid_list, rm_gid); 6068 6069 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list); 6070 return (gid_list); 6071 } else { 6072 /* 6073 * Search for a GID with same node_guid and 6074 * gl_iou == NULL 6075 */ 6076 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm with iou"); 6077 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 6078 gid_list = gid_list->gl_next) { 6079 if (gid_list->gl_iou == NULL && (gid_list->gl_nodeguid 6080 == rm_gid->gl_nodeguid)) 6081 break; 6082 } 6083 6084 if (gid_list) { 6085 /* 6086 * Copy the following fields from rm_gid : 6087 * 1. gl_state 6088 * 2. gl_iou 6089 * 3. 
gl_gid & gl_ngids 6090 * 6091 * Note : Function is synchronized by 6092 * ibdm_busy flag. 6093 * 6094 * Note : Redirect info is initialized if 6095 * any MADs for the GID fail 6096 */ 6097 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm " 6098 "copying info to GID with gl_iou != NULl"); 6099 gid_list->gl_state = rm_gid->gl_state; 6100 gid_list->gl_iou = rm_gid->gl_iou; 6101 gid_list->gl_gid = rm_gid->gl_gid; 6102 gid_list->gl_ngids = rm_gid->gl_ngids; 6103 6104 /* Remove the GID from gl_gid list */ 6105 ibdm_rmfrom_glgid_list(gid_list, rm_gid); 6106 } else { 6107 /* 6108 * Handle a case where all GIDs to the IOU have 6109 * been removed. 6110 */ 6111 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm 0 GID " 6112 "to IOU"); 6113 6114 ibdm_rmfrom_glgid_list(rm_gid, rm_gid); 6115 return (rm_gid); 6116 } 6117 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list); 6118 return (gid_list); 6119 } 6120 } 6121 6122 static void 6123 ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *gid_info, 6124 ibdm_dp_gidinfo_t *rm_gid) 6125 { 6126 ibdm_gid_t *tmp, *prev; 6127 6128 IBTF_DPRINTF_L4("ibdm", "\trmfrom_glgid (%p, %p)", 6129 gid_info, rm_gid); 6130 6131 for (tmp = gid_info->gl_gid, prev = NULL; tmp; ) { 6132 if (tmp->gid_dgid_hi == rm_gid->gl_dgid_hi && 6133 tmp->gid_dgid_lo == rm_gid->gl_dgid_lo) { 6134 if (prev == NULL) 6135 gid_info->gl_gid = tmp->gid_next; 6136 else 6137 prev->gid_next = tmp->gid_next; 6138 6139 kmem_free(tmp, sizeof (ibdm_gid_t)); 6140 gid_info->gl_ngids--; 6141 break; 6142 } else { 6143 prev = tmp; 6144 tmp = tmp->gid_next; 6145 } 6146 } 6147 } 6148 6149 static void 6150 ibdm_addto_gidlist(ibdm_gid_t **src_ptr, ibdm_gid_t *dest) 6151 { 6152 ibdm_gid_t *head = NULL, *new, *tail; 6153 6154 /* First copy the destination */ 6155 for (; dest; dest = dest->gid_next) { 6156 new = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP); 6157 new->gid_dgid_hi = dest->gid_dgid_hi; 6158 new->gid_dgid_lo = dest->gid_dgid_lo; 6159 new->gid_next = head; 6160 head = new; 6161 } 6162 6163 /* Insert this to the source */ 6164 if (*src_ptr == NULL) 6165 *src_ptr = head; 6166 else { 6167 for (tail = *src_ptr; tail->gid_next != NULL; 6168 tail = tail->gid_next) 6169 ; 6170 6171 tail->gid_next = head; 6172 } 6173 } 6174 6175 static void 6176 ibdm_free_gid_list(ibdm_gid_t *head) 6177 { 6178 ibdm_gid_t *delete; 6179 6180 for (delete = head; delete; ) { 6181 head = delete->gid_next; 6182 kmem_free(delete, sizeof (ibdm_gid_t)); 6183 delete = head; 6184 } 6185 } 6186 6187 /* 6188 * This function rescans the DM capable GIDs (gl_state is 6189 * GID_PROBE_COMPLETE or IBDM_GID_PROBING_SKIPPED.This 6190 * basically checks if the DM capable GID is reachable. If 6191 * not this is handled the same way as GID_UNAVAILABLE, 6192 * except that notifications are not send to IBnexus. 6193 * 6194 * This function also initializes the ioc_prev_list for 6195 * a particular IOC (when called from probe_ioc, with 6196 * ioc_guidp != NULL) or all IOCs for the gid (called from 6197 * sweep_fabric, ioc_guidp == NULL). 
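 *
 * For each GID that is still reachable, the current gl_gid list is
 * copied into the matching IOC's ioc_prev_gid_list (via
 * ibdm_addto_gidlist()), so that a later reprobe can detect path
 * changes in ibdm_reprobe_update_port_srv().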
6198 */ 6199 static void 6200 ibdm_rescan_gidlist(ib_guid_t *ioc_guidp) 6201 { 6202 ibdm_dp_gidinfo_t *gid_info, *tmp; 6203 int ii, niocs, found; 6204 ibdm_hca_list_t *hca_list = NULL; 6205 ibdm_port_attr_t *port = NULL; 6206 ibdm_ioc_info_t *ioc_list; 6207 6208 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) { 6209 found = 0; 6210 if (gid_info->gl_state != IBDM_GID_PROBING_SKIPPED && 6211 gid_info->gl_state != IBDM_GID_PROBING_COMPLETE) { 6212 gid_info = gid_info->gl_next; 6213 continue; 6214 } 6215 6216 /* 6217 * Check if the GID is visible to any HCA ports. 6218 * Return if so. 6219 */ 6220 mutex_enter(&ibdm.ibdm_hl_mutex); 6221 for (ibdm_get_next_port(&hca_list, &port, 1); port; 6222 ibdm_get_next_port(&hca_list, &port, 1)) { 6223 if (ibdm_port_reachable(port->pa_sa_hdl, 6224 gid_info->gl_dgid_lo) == B_TRUE) { 6225 found = 1; 6226 break; 6227 } 6228 } 6229 mutex_exit(&ibdm.ibdm_hl_mutex); 6230 6231 if (found) { 6232 if (gid_info->gl_iou == NULL) { 6233 gid_info = gid_info->gl_next; 6234 continue; 6235 } 6236 6237 /* Intialize the ioc_prev_gid_list */ 6238 niocs = 6239 gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 6240 for (ii = 0; ii < niocs; ii++) { 6241 ioc_list = IBDM_GIDINFO2IOCINFO(gid_info, ii); 6242 6243 if (ioc_guidp == NULL || (*ioc_guidp == 6244 ioc_list->ioc_profile.ioc_guid)) { 6245 /* Add info of GIDs in gid_info also */ 6246 ibdm_addto_gidlist( 6247 &ioc_list->ioc_prev_gid_list, 6248 gid_info->gl_gid); 6249 ioc_list->ioc_prev_nportgids = 6250 gid_info->gl_ngids; 6251 } 6252 } 6253 gid_info = gid_info->gl_next; 6254 continue; 6255 } 6256 6257 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist " 6258 "deleted port GUID %llx", 6259 gid_info->gl_dgid_lo); 6260 6261 /* 6262 * Update GID list in all IOCs affected by this 6263 */ 6264 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0); 6265 6266 /* 6267 * Remove GID from the global GID list 6268 * Handle the case where all port GIDs for an 6269 * IOU have been hot-removed. 6270 */ 6271 mutex_enter(&ibdm.ibdm_mutex); 6272 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) { 6273 mutex_enter(&gid_info->gl_mutex); 6274 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou); 6275 mutex_exit(&gid_info->gl_mutex); 6276 } 6277 6278 tmp = gid_info->gl_next; 6279 if (gid_info->gl_prev != NULL) 6280 gid_info->gl_prev->gl_next = gid_info->gl_next; 6281 if (gid_info->gl_next != NULL) 6282 gid_info->gl_next->gl_prev = gid_info->gl_prev; 6283 6284 if (gid_info == ibdm.ibdm_dp_gidlist_head) 6285 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next; 6286 if (gid_info == ibdm.ibdm_dp_gidlist_tail) 6287 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev; 6288 ibdm.ibdm_ngids--; 6289 mutex_exit(&ibdm.ibdm_mutex); 6290 6291 /* free the hca_list on this gid_info */ 6292 ibdm_delete_glhca_list(gid_info); 6293 6294 mutex_destroy(&gid_info->gl_mutex); 6295 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t)); 6296 6297 gid_info = tmp; 6298 6299 /* 6300 * Pass on the IOCs with updated GIDs to IBnexus 6301 */ 6302 if (ioc_list) { 6303 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist " 6304 "IOC_PROP_UPDATE for %p\n", ioc_list); 6305 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6306 if (ibdm.ibdm_ibnex_callback != NULL) { 6307 (*ibdm.ibdm_ibnex_callback)((void *) 6308 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6309 } 6310 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6311 } 6312 } 6313 } 6314 6315 /* 6316 * This function notifies IBnex of IOCs on this GID. 6317 * Notification is for GIDs with gl_reprobe_flag set. 
6318 * The flag is set when IOC probe / fabric sweep 6319 * probes a GID starting from CLASS port info. 6320 * 6321 * IBnexus will have information of a reconnected IOC 6322 * if it had probed it before. If this is a new IOC, 6323 * IBnexus ignores the notification. 6324 * 6325 * This function should be called with no locks held. 6326 */ 6327 static void 6328 ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *gid_info) 6329 { 6330 ibdm_ioc_info_t *ioc_list; 6331 6332 if (gid_info->gl_reprobe_flag == 0 || 6333 gid_info->gl_iou == NULL) 6334 return; 6335 6336 ioc_list = ibdm_update_ioc_gidlist(gid_info, -1); 6337 6338 /* 6339 * Pass on the IOCs with updated GIDs to IBnexus 6340 */ 6341 if (ioc_list) { 6342 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6343 if (ibdm.ibdm_ibnex_callback != NULL) { 6344 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list, 6345 IBDM_EVENT_IOC_PROP_UPDATE); 6346 } 6347 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6348 } 6349 } 6350 6351 6352 static void 6353 ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *arg) 6354 { 6355 if (arg != NULL) 6356 kmem_free(arg, sizeof (ibdm_saa_event_arg_t)); 6357 } 6358 6359 /* 6360 * This function parses the list of HCAs and HCA ports 6361 * to return the port_attr of the next HCA port. A port 6362 * connected to IB fabric (port_state active) is returned, 6363 * if connected_flag is set. 6364 */ 6365 static void 6366 ibdm_get_next_port(ibdm_hca_list_t **inp_hcap, 6367 ibdm_port_attr_t **inp_portp, int connect_flag) 6368 { 6369 int ii; 6370 ibdm_port_attr_t *port, *next_port = NULL; 6371 ibdm_port_attr_t *inp_port; 6372 ibdm_hca_list_t *hca_list; 6373 int found = 0; 6374 6375 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 6376 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port(%p, %p, %x)", 6377 inp_hcap, inp_portp, connect_flag); 6378 6379 hca_list = *inp_hcap; 6380 inp_port = *inp_portp; 6381 6382 if (hca_list == NULL) 6383 hca_list = ibdm.ibdm_hca_list_head; 6384 6385 for (; hca_list; hca_list = hca_list->hl_next) { 6386 for (ii = 0; ii < hca_list->hl_nports; ii++) { 6387 port = &hca_list->hl_port_attr[ii]; 6388 6389 /* 6390 * inp_port != NULL; 6391 * Skip till we find the matching port 6392 */ 6393 if (inp_port && !found) { 6394 if (inp_port == port) 6395 found = 1; 6396 continue; 6397 } 6398 6399 if (!connect_flag) { 6400 next_port = port; 6401 break; 6402 } 6403 6404 if (port->pa_sa_hdl == NULL) 6405 ibdm_initialize_port(port); 6406 if (port->pa_sa_hdl == NULL) 6407 (void) ibdm_fini_port(port); 6408 else if (next_port == NULL && 6409 port->pa_sa_hdl != NULL && 6410 port->pa_state == IBT_PORT_ACTIVE) { 6411 next_port = port; 6412 break; 6413 } 6414 } 6415 6416 if (next_port) 6417 break; 6418 } 6419 6420 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port : " 6421 "returns hca_list %p port %p", hca_list, next_port); 6422 *inp_hcap = hca_list; 6423 *inp_portp = next_port; 6424 } 6425 6426 static void 6427 ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *nodegid, ibdm_dp_gidinfo_t *addgid) 6428 { 6429 ibdm_gid_t *tmp; 6430 6431 tmp = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP); 6432 tmp->gid_dgid_hi = addgid->gl_dgid_hi; 6433 tmp->gid_dgid_lo = addgid->gl_dgid_lo; 6434 6435 mutex_enter(&nodegid->gl_mutex); 6436 tmp->gid_next = nodegid->gl_gid; 6437 nodegid->gl_gid = tmp; 6438 nodegid->gl_ngids++; 6439 mutex_exit(&nodegid->gl_mutex); 6440 } 6441 6442 static void 6443 ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *gid_info, 6444 ibdm_hca_list_t *hca) 6445 { 6446 ibdm_hca_list_t *head, *prev = NULL, *temp; 6447 6448 IBTF_DPRINTF_L4(ibdm_string, "\taddto_glhcalist(%p, %p) " 6449 ": gl_hca_list %p", 
gid_info, hca, gid_info->gl_hca_list); 6450 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 6451 6452 mutex_enter(&gid_info->gl_mutex); 6453 head = gid_info->gl_hca_list; 6454 if (head == NULL) { 6455 head = ibdm_dup_hca_attr(hca); 6456 head->hl_next = NULL; 6457 gid_info->gl_hca_list = head; 6458 mutex_exit(&gid_info->gl_mutex); 6459 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 6460 "gid %p, gl_hca_list %p", gid_info, 6461 gid_info->gl_hca_list); 6462 return; 6463 } 6464 6465 /* Check if already in the list */ 6466 while (head) { 6467 if (head->hl_hca_guid == hca->hl_hca_guid) { 6468 mutex_exit(&gid_info->gl_mutex); 6469 IBTF_DPRINTF_L4(ibdm_string, 6470 "\taddto_glhcalist : gid %p hca %p dup", 6471 gid_info, hca); 6472 return; 6473 } 6474 prev = head; 6475 head = head->hl_next; 6476 } 6477 6478 /* Add this HCA to gl_hca_list */ 6479 temp = ibdm_dup_hca_attr(hca); 6480 temp->hl_next = NULL; 6481 prev->hl_next = temp; 6482 mutex_exit(&gid_info->gl_mutex); 6483 6484 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 6485 "gid %p, gl_hca_list %p", gid_info, gid_info->gl_hca_list); 6486 } 6487 6488 static void 6489 ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *gid_info) 6490 { 6491 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 6492 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 6493 6494 mutex_enter(&gid_info->gl_mutex); 6495 if (gid_info->gl_hca_list) 6496 ibdm_ibnex_free_hca_list(gid_info->gl_hca_list); 6497 gid_info->gl_hca_list = NULL; 6498 mutex_exit(&gid_info->gl_mutex); 6499 } 6500 6501 6502 static void 6503 ibdm_reset_all_dgids(ibmf_saa_handle_t port_sa_hdl) 6504 { 6505 IBTF_DPRINTF_L4(ibdm_string, "\treset_all_dgids(%X)", 6506 port_sa_hdl); 6507 6508 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 6509 ASSERT(!MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 6510 6511 /* Check : Not busy in another probe / sweep */ 6512 mutex_enter(&ibdm.ibdm_mutex); 6513 if ((ibdm.ibdm_busy & IBDM_BUSY) == 0) { 6514 ibdm_dp_gidinfo_t *gid_info; 6515 6516 ibdm.ibdm_busy |= IBDM_BUSY; 6517 mutex_exit(&ibdm.ibdm_mutex); 6518 6519 /* 6520 * Check if any GID is using the SA & IBMF handle 6521 * of HCA port going down. Reset ibdm_dp_gidinfo_t 6522 * using another HCA port which can reach the GID. 6523 * This is for DM capable GIDs only, no need to do 6524 * this for others 6525 * 6526 * Delete the GID if no alternate HCA port to reach 6527 * it is found. 6528 */ 6529 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) { 6530 ibdm_dp_gidinfo_t *tmp; 6531 6532 IBTF_DPRINTF_L4(ibdm_string, "\tevent_hdlr " 6533 "checking gidinfo %p", gid_info); 6534 6535 if (gid_info->gl_sa_hdl == port_sa_hdl) { 6536 IBTF_DPRINTF_L3(ibdm_string, 6537 "\tevent_hdlr: down HCA port hdl " 6538 "matches gid %p", gid_info); 6539 6540 /* 6541 * The non-DM GIDs can come back 6542 * with a new subnet prefix, when 6543 * the HCA port commes up again. To 6544 * avoid issues, delete non-DM 6545 * capable GIDs, if the gid was 6546 * discovered using the HCA port 6547 * going down. This is ensured by 6548 * setting gl_disconnected to 1. 
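 * DM capable GIDs are instead re-homed onto another HCA port by
 * ibdm_reset_gidinfo(); only when no alternate port can reach the
 * GID is gl_disconnected set and the entry deleted by
 * ibdm_delete_gidinfo() below.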
6549 */ 6550 if (gid_info->gl_nodeguid == 0) 6551 gid_info->gl_disconnected = 1; 6552 else 6553 ibdm_reset_gidinfo(gid_info); 6554 6555 if (gid_info->gl_disconnected) { 6556 IBTF_DPRINTF_L3(ibdm_string, 6557 "\tevent_hdlr: deleting" 6558 " gid %p", gid_info); 6559 tmp = gid_info; 6560 gid_info = gid_info->gl_next; 6561 ibdm_delete_gidinfo(tmp); 6562 } else 6563 gid_info = gid_info->gl_next; 6564 } else 6565 gid_info = gid_info->gl_next; 6566 } 6567 6568 mutex_enter(&ibdm.ibdm_mutex); 6569 ibdm.ibdm_busy &= ~IBDM_BUSY; 6570 cv_signal(&ibdm.ibdm_busy_cv); 6571 } 6572 mutex_exit(&ibdm.ibdm_mutex); 6573 } 6574 6575 static void 6576 ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 6577 { 6578 ibdm_hca_list_t *hca_list = NULL; 6579 ibdm_port_attr_t *port = NULL; 6580 int gid_reinited = 0; 6581 sa_node_record_t *nr, *tmp; 6582 sa_portinfo_record_t *pi; 6583 size_t nr_len = 0, pi_len = 0; 6584 size_t path_len; 6585 ib_gid_t sgid, dgid; 6586 int ret, ii, nrecords; 6587 sa_path_record_t *path; 6588 uint8_t npaths = 1; 6589 ibdm_pkey_tbl_t *pkey_tbl; 6590 6591 IBTF_DPRINTF_L4(ibdm_string, "\treset_gidinfo(%p)", gidinfo); 6592 6593 /* 6594 * Get list of all the ports reachable from the local known HCA 6595 * ports which are active 6596 */ 6597 mutex_enter(&ibdm.ibdm_hl_mutex); 6598 for (ibdm_get_next_port(&hca_list, &port, 1); port; 6599 ibdm_get_next_port(&hca_list, &port, 1)) { 6600 6601 6602 /* 6603 * Get the path and re-populate the gidinfo. 6604 * Getting the path is the same probe_ioc 6605 * Init the gid info as in ibdm_create_gidinfo() 6606 */ 6607 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, 6608 gidinfo->gl_nodeguid); 6609 if (nr == NULL) { 6610 IBTF_DPRINTF_L4(ibdm_string, 6611 "\treset_gidinfo : no records"); 6612 continue; 6613 } 6614 6615 nrecords = (nr_len / sizeof (sa_node_record_t)); 6616 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) { 6617 if (tmp->NodeInfo.PortGUID == gidinfo->gl_portguid) 6618 break; 6619 } 6620 6621 if (ii == nrecords) { 6622 IBTF_DPRINTF_L4(ibdm_string, 6623 "\treset_gidinfo : no record for portguid"); 6624 kmem_free(nr, nr_len); 6625 continue; 6626 } 6627 6628 pi = ibdm_get_portinfo(port->pa_sa_hdl, &pi_len, tmp->LID); 6629 if (pi == NULL) { 6630 IBTF_DPRINTF_L4(ibdm_string, 6631 "\treset_gidinfo : no portinfo"); 6632 kmem_free(nr, nr_len); 6633 continue; 6634 } 6635 6636 sgid.gid_prefix = port->pa_sn_prefix; 6637 sgid.gid_guid = port->pa_port_guid; 6638 dgid.gid_prefix = pi->PortInfo.GidPrefix; 6639 dgid.gid_guid = tmp->NodeInfo.PortGUID; 6640 6641 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, sgid, dgid, 6642 IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, &path_len, &path); 6643 6644 if ((ret != IBMF_SUCCESS) || path == NULL) { 6645 IBTF_DPRINTF_L4(ibdm_string, 6646 "\treset_gidinfo : no paths"); 6647 kmem_free(pi, pi_len); 6648 kmem_free(nr, nr_len); 6649 continue; 6650 } 6651 6652 gidinfo->gl_dgid_hi = path->DGID.gid_prefix; 6653 gidinfo->gl_dgid_lo = path->DGID.gid_guid; 6654 gidinfo->gl_sgid_hi = path->SGID.gid_prefix; 6655 gidinfo->gl_sgid_lo = path->SGID.gid_guid; 6656 gidinfo->gl_p_key = path->P_Key; 6657 gidinfo->gl_sa_hdl = port->pa_sa_hdl; 6658 gidinfo->gl_ibmf_hdl = port->pa_ibmf_hdl; 6659 gidinfo->gl_slid = path->SLID; 6660 gidinfo->gl_dlid = path->DLID; 6661 /* Reset redirect info, next MAD will set if redirected */ 6662 gidinfo->gl_redirected = 0; 6663 gidinfo->gl_devid = (*tmp).NodeInfo.DeviceID; 6664 gidinfo->gl_SL = path->SL; 6665 6666 gidinfo->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 6667 for (ii = 0; ii < port->pa_npkeys; ii++) { 6668 
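		/*
		 * Find an IBMF QP handle for the path's P_Key: scan the
		 * port's P_Key table for an entry whose pt_pkey matches
		 * gl_p_key and which has a valid pt_qp_hdl.
		 */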
if (port->pa_pkey_tbl == NULL) 6669 break; 6670 6671 pkey_tbl = &port->pa_pkey_tbl[ii]; 6672 if ((gidinfo->gl_p_key == pkey_tbl->pt_pkey) && 6673 (pkey_tbl->pt_qp_hdl != NULL)) { 6674 gidinfo->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 6675 break; 6676 } 6677 } 6678 6679 if (gidinfo->gl_qp_hdl == NULL) 6680 IBTF_DPRINTF_L2(ibdm_string, 6681 "\treset_gid_info: No matching Pkey"); 6682 else 6683 gid_reinited = 1; 6684 6685 kmem_free(path, path_len); 6686 kmem_free(pi, pi_len); 6687 kmem_free(nr, nr_len); 6688 break; 6689 } 6690 mutex_exit(&ibdm.ibdm_hl_mutex); 6691 6692 if (!gid_reinited) 6693 gidinfo->gl_disconnected = 1; 6694 } 6695 6696 static void 6697 ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 6698 { 6699 ibdm_ioc_info_t *ioc_list; 6700 int in_gidlist = 0; 6701 6702 /* 6703 * Check if gidinfo has been inserted into the 6704 * ibdm_dp_gidlist_head list. gl_next or gl_prev 6705 * != NULL, if gidinfo is the list. 6706 */ 6707 if (gidinfo->gl_prev != NULL || 6708 gidinfo->gl_next != NULL || 6709 ibdm.ibdm_dp_gidlist_head == gidinfo) 6710 in_gidlist = 1; 6711 6712 ioc_list = ibdm_update_ioc_gidlist(gidinfo, 0); 6713 6714 /* 6715 * Remove GID from the global GID list 6716 * Handle the case where all port GIDs for an 6717 * IOU have been hot-removed. 6718 */ 6719 mutex_enter(&ibdm.ibdm_mutex); 6720 if (gidinfo->gl_iou != NULL && gidinfo->gl_ngids == 0) { 6721 mutex_enter(&gidinfo->gl_mutex); 6722 (void) ibdm_free_iou_info(gidinfo, &gidinfo->gl_iou); 6723 mutex_exit(&gidinfo->gl_mutex); 6724 } 6725 6726 /* Delete gl_hca_list */ 6727 mutex_exit(&ibdm.ibdm_mutex); 6728 ibdm_delete_glhca_list(gidinfo); 6729 mutex_enter(&ibdm.ibdm_mutex); 6730 6731 if (in_gidlist) { 6732 if (gidinfo->gl_prev != NULL) 6733 gidinfo->gl_prev->gl_next = gidinfo->gl_next; 6734 if (gidinfo->gl_next != NULL) 6735 gidinfo->gl_next->gl_prev = gidinfo->gl_prev; 6736 6737 if (gidinfo == ibdm.ibdm_dp_gidlist_head) 6738 ibdm.ibdm_dp_gidlist_head = gidinfo->gl_next; 6739 if (gidinfo == ibdm.ibdm_dp_gidlist_tail) 6740 ibdm.ibdm_dp_gidlist_tail = gidinfo->gl_prev; 6741 ibdm.ibdm_ngids--; 6742 } 6743 mutex_exit(&ibdm.ibdm_mutex); 6744 6745 mutex_destroy(&gidinfo->gl_mutex); 6746 cv_destroy(&gidinfo->gl_probe_cv); 6747 kmem_free(gidinfo, sizeof (ibdm_dp_gidinfo_t)); 6748 6749 /* 6750 * Pass on the IOCs with updated GIDs to IBnexus 6751 */ 6752 if (ioc_list) { 6753 IBTF_DPRINTF_L4("ibdm", "\tdelete_gidinfo " 6754 "IOC_PROP_UPDATE for %p\n", ioc_list); 6755 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6756 if (ibdm.ibdm_ibnex_callback != NULL) { 6757 (*ibdm.ibdm_ibnex_callback)((void *) 6758 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6759 } 6760 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6761 } 6762 } 6763 6764 6765 static void 6766 ibdm_fill_srv_attr_mod(ib_mad_hdr_t *hdr, ibdm_timeout_cb_args_t *cb_args) 6767 { 6768 uint32_t attr_mod; 6769 6770 attr_mod = (cb_args->cb_ioc_num + 1) << 16; 6771 attr_mod |= cb_args->cb_srvents_start; 6772 attr_mod |= (cb_args->cb_srvents_end) << 8; 6773 hdr->AttributeModifier = h2b32(attr_mod); 6774 } 6775 6776 static void 6777 ibdm_bump_transactionID(ibdm_dp_gidinfo_t *gid_info) 6778 { 6779 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 6780 gid_info->gl_transactionID++; 6781 if (gid_info->gl_transactionID == gid_info->gl_max_transactionID) { 6782 IBTF_DPRINTF_L4(ibdm_string, 6783 "\tbump_transactionID(%p), wrapup", gid_info); 6784 gid_info->gl_transactionID = gid_info->gl_min_transactionID; 6785 } 6786 } 6787 6788 /* 6789 * gl_prev_iou is set for *non-reprobe* sweeep requests, which 6790 * detected that ChangeID in IOU info has 
changed. The service 6791 * entry also may have changed. Check if service entry in IOC 6792 * has changed wrt the prev iou, if so notify to IB Nexus. 6793 */ 6794 static ibdm_ioc_info_t * 6795 ibdm_handle_prev_iou() 6796 { 6797 ibdm_dp_gidinfo_t *gid_info; 6798 ibdm_ioc_info_t *ioc_list_head = NULL, *ioc_list; 6799 ibdm_ioc_info_t *prev_ioc, *ioc; 6800 int ii, jj, niocs, prev_niocs; 6801 6802 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 6803 6804 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou enter"); 6805 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 6806 gid_info = gid_info->gl_next) { 6807 if (gid_info->gl_prev_iou == NULL) 6808 continue; 6809 6810 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou gid %p", 6811 gid_info); 6812 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 6813 prev_niocs = 6814 gid_info->gl_prev_iou->iou_info.iou_num_ctrl_slots; 6815 for (ii = 0; ii < niocs; ii++) { 6816 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 6817 6818 /* Find matching IOC */ 6819 for (jj = 0; jj < prev_niocs; jj++) { 6820 prev_ioc = (ibdm_ioc_info_t *) 6821 &gid_info->gl_prev_iou->iou_ioc_info[jj]; 6822 if (prev_ioc->ioc_profile.ioc_guid == 6823 ioc->ioc_profile.ioc_guid) 6824 break; 6825 } 6826 if (jj == prev_niocs) 6827 prev_ioc = NULL; 6828 if (ioc == NULL || prev_ioc == NULL) 6829 continue; 6830 if ((ioc->ioc_profile.ioc_service_entries != 6831 prev_ioc->ioc_profile.ioc_service_entries) || 6832 ibdm_serv_cmp(&ioc->ioc_serv[0], 6833 &prev_ioc->ioc_serv[0], 6834 ioc->ioc_profile.ioc_service_entries) != 0) { 6835 IBTF_DPRINTF_L4(ibdm_string, 6836 "/thandle_prev_iou modified IOC: " 6837 "current ioc %p, old ioc %p", 6838 ioc, prev_ioc); 6839 mutex_enter(&gid_info->gl_mutex); 6840 ioc_list = ibdm_dup_ioc_info(ioc, gid_info); 6841 mutex_exit(&gid_info->gl_mutex); 6842 ioc_list->ioc_info_updated.ib_prop_updated 6843 = 0; 6844 ioc_list->ioc_info_updated.ib_srv_prop_updated 6845 = 1; 6846 6847 if (ioc_list_head == NULL) 6848 ioc_list_head = ioc_list; 6849 else { 6850 ioc_list_head->ioc_next = ioc_list; 6851 ioc_list_head = ioc_list; 6852 } 6853 } 6854 } 6855 6856 mutex_enter(&gid_info->gl_mutex); 6857 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_prev_iou); 6858 mutex_exit(&gid_info->gl_mutex); 6859 } 6860 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iouret %p", 6861 ioc_list_head); 6862 return (ioc_list_head); 6863 } 6864 6865 /* 6866 * Compares two service entries lists, returns 0 if same, returns 1 6867 * if no match. 
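 * Entries are compared positionally (slot by slot) on srv_id and
 * srv_name, so a re-ordered service list is reported as a mismatch,
 * unlike ibdm_cmp_gid_list() which tolerates re-ordered GIDs.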
6868 */ 6869 static int 6870 ibdm_serv_cmp(ibdm_srvents_info_t *serv1, ibdm_srvents_info_t *serv2, 6871 int nserv) 6872 { 6873 int ii; 6874 6875 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: enter"); 6876 for (ii = 0; ii < nserv; ii++, serv1++, serv2++) { 6877 if (serv1->se_attr.srv_id != serv2->se_attr.srv_id || 6878 bcmp(serv1->se_attr.srv_name, 6879 serv2->se_attr.srv_name, 6880 IB_DM_MAX_SVC_NAME_LEN) != 0) { 6881 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 1"); 6882 return (1); 6883 } 6884 } 6885 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 0"); 6886 return (0); 6887 } 6888 6889 /* For debugging purpose only */ 6890 #ifdef DEBUG 6891 void 6892 ibdm_dump_mad_hdr(ib_mad_hdr_t *mad_hdr) 6893 { 6894 IBTF_DPRINTF_L4("ibdm", "\t\t MAD Header info"); 6895 IBTF_DPRINTF_L4("ibdm", "\t\t ---------------"); 6896 6897 IBTF_DPRINTF_L4("ibdm", "\tBase version : 0x%x" 6898 "\tMgmt Class : 0x%x", mad_hdr->BaseVersion, mad_hdr->MgmtClass); 6899 IBTF_DPRINTF_L4("ibdm", "\tClass version : 0x%x" 6900 "\tR Method : 0x%x", 6901 mad_hdr->ClassVersion, mad_hdr->R_Method); 6902 IBTF_DPRINTF_L4("ibdm", "\tMAD Status : 0x%x" 6903 "\tTransaction ID : 0x%llx", 6904 b2h16(mad_hdr->Status), b2h64(mad_hdr->TransactionID)); 6905 IBTF_DPRINTF_L4("ibdm", "\t Attribute ID : 0x%x" 6906 "\tAttribute Modified : 0x%lx", 6907 b2h16(mad_hdr->AttributeID), b2h32(mad_hdr->AttributeModifier)); 6908 } 6909 6910 6911 void 6912 ibdm_dump_ibmf_msg(ibmf_msg_t *ibmf_msg, int flag) 6913 { 6914 ib_mad_hdr_t *mad_hdr; 6915 6916 IBTF_DPRINTF_L4("ibdm", "\t\t(IBMF_PKT): Local address info"); 6917 IBTF_DPRINTF_L4("ibdm", "\t\t ------------------"); 6918 6919 IBTF_DPRINTF_L4("ibdm", "\tLocal Lid : 0x%x\tRemote Lid : 0x%x" 6920 " Remote Qp : 0x%x", ibmf_msg->im_local_addr.ia_local_lid, 6921 ibmf_msg->im_local_addr.ia_remote_lid, 6922 ibmf_msg->im_local_addr.ia_remote_qno); 6923 IBTF_DPRINTF_L4("ibdm", "\tP_key : 0x%x\tQ_key : 0x%x" 6924 " SL : 0x%x", ibmf_msg->im_local_addr.ia_p_key, 6925 ibmf_msg->im_local_addr.ia_q_key, 6926 ibmf_msg->im_local_addr.ia_service_level); 6927 6928 if (flag) 6929 mad_hdr = (ib_mad_hdr_t *)IBDM_OUT_IBMFMSG_MADHDR(ibmf_msg); 6930 else 6931 mad_hdr = IBDM_IN_IBMFMSG_MADHDR(ibmf_msg); 6932 6933 ibdm_dump_mad_hdr(mad_hdr); 6934 } 6935 6936 6937 void 6938 ibdm_dump_path_info(sa_path_record_t *path) 6939 { 6940 IBTF_DPRINTF_L4("ibdm", "\t\t Path information"); 6941 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------"); 6942 6943 IBTF_DPRINTF_L4("ibdm", "\t DGID hi : %llx\tDGID lo : %llx", 6944 path->DGID.gid_prefix, path->DGID.gid_guid); 6945 IBTF_DPRINTF_L4("ibdm", "\t SGID hi : %llx\tSGID lo : %llx", 6946 path->SGID.gid_prefix, path->SGID.gid_guid); 6947 IBTF_DPRINTF_L4("ibdm", "\t SLID : %x\t\tDlID : %x", 6948 path->SLID, path->DLID); 6949 IBTF_DPRINTF_L4("ibdm", "\t P Key : %x\t\tSL : %x", 6950 path->P_Key, path->SL); 6951 } 6952 6953 6954 void 6955 ibdm_dump_classportinfo(ib_mad_classportinfo_t *classportinfo) 6956 { 6957 IBTF_DPRINTF_L4("ibdm", "\t\t CLASSPORT INFO"); 6958 IBTF_DPRINTF_L4("ibdm", "\t\t --------------"); 6959 6960 IBTF_DPRINTF_L4("ibdm", "\t Response Time Value : 0x%x", 6961 ((b2h32(classportinfo->RespTimeValue)) & 0x1F)); 6962 6963 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID hi : 0x%llx", 6964 b2h64(classportinfo->RedirectGID_hi)); 6965 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID lo : 0x%llx", 6966 b2h64(classportinfo->RedirectGID_lo)); 6967 IBTF_DPRINTF_L4("ibdm", "\t Redirected TC : 0x%x", 6968 classportinfo->RedirectTC); 6969 IBTF_DPRINTF_L4("ibdm", "\t Redirected SL : 0x%x", 6970 
classportinfo->RedirectSL); 6971 IBTF_DPRINTF_L4("ibdm", "\t Redirected FL : 0x%x", 6972 classportinfo->RedirectFL); 6973 IBTF_DPRINTF_L4("ibdm", "\t Redirected LID : 0x%x", 6974 b2h16(classportinfo->RedirectLID)); 6975 IBTF_DPRINTF_L4("ibdm", "\t Redirected P KEY : 0x%x", 6976 b2h16(classportinfo->RedirectP_Key)); 6977 IBTF_DPRINTF_L4("ibdm", "\t Redirected QP : 0x%x", 6978 classportinfo->RedirectQP); 6979 IBTF_DPRINTF_L4("ibdm", "\t Redirected Q KEY : 0x%x", 6980 b2h32(classportinfo->RedirectQ_Key)); 6981 IBTF_DPRINTF_L4("ibdm", "\t Trap GID hi : 0x%llx", 6982 b2h64(classportinfo->TrapGID_hi)); 6983 IBTF_DPRINTF_L4("ibdm", "\t Trap GID lo : 0x%llx", 6984 b2h64(classportinfo->TrapGID_lo)); 6985 IBTF_DPRINTF_L4("ibdm", "\t Trap TC : 0x%x", 6986 classportinfo->TrapTC); 6987 IBTF_DPRINTF_L4("ibdm", "\t Trap SL : 0x%x", 6988 classportinfo->TrapSL); 6989 IBTF_DPRINTF_L4("ibdm", "\t Trap FL : 0x%x", 6990 classportinfo->TrapFL); 6991 IBTF_DPRINTF_L4("ibdm", "\t Trap LID : 0x%x", 6992 b2h16(classportinfo->TrapLID)); 6993 IBTF_DPRINTF_L4("ibdm", "\t Trap P_Key : 0x%x", 6994 b2h16(classportinfo->TrapP_Key)); 6995 IBTF_DPRINTF_L4("ibdm", "\t Trap HL : 0x%x", 6996 classportinfo->TrapHL); 6997 IBTF_DPRINTF_L4("ibdm", "\t Trap QP : 0x%x", 6998 classportinfo->TrapQP); 6999 IBTF_DPRINTF_L4("ibdm", "\t Trap Q_Key : 0x%x", 7000 b2h32(classportinfo->TrapQ_Key)); 7001 } 7002 7003 7004 void 7005 ibdm_dump_iounitinfo(ib_dm_io_unitinfo_t *iou_info) 7006 { 7007 IBTF_DPRINTF_L4("ibdm", "\t\t I/O UnitInfo"); 7008 IBTF_DPRINTF_L4("ibdm", "\t\t ------------"); 7009 7010 IBTF_DPRINTF_L4("ibdm", "\tChange ID : 0x%x", 7011 b2h16(iou_info->iou_changeid)); 7012 IBTF_DPRINTF_L4("ibdm", "\t#of ctrl slots : %d", 7013 iou_info->iou_num_ctrl_slots); 7014 IBTF_DPRINTF_L4("ibdm", "\tIOU flag : 0x%x", 7015 iou_info->iou_flag); 7016 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 0 : 0x%x", 7017 iou_info->iou_ctrl_list[0]); 7018 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 1 : 0x%x", 7019 iou_info->iou_ctrl_list[1]); 7020 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 2 : 0x%x", 7021 iou_info->iou_ctrl_list[2]); 7022 } 7023 7024 7025 void 7026 ibdm_dump_ioc_profile(ib_dm_ioc_ctrl_profile_t *ioc) 7027 { 7028 IBTF_DPRINTF_L4("ibdm", "\t\t IOC Controller Profile"); 7029 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------------"); 7030 7031 IBTF_DPRINTF_L4("ibdm", "\tIOC Guid : %llx", ioc->ioc_guid); 7032 IBTF_DPRINTF_L4("ibdm", "\tVendorID : 0x%x", ioc->ioc_vendorid); 7033 IBTF_DPRINTF_L4("ibdm", "\tDevice Id : 0x%x", ioc->ioc_deviceid); 7034 IBTF_DPRINTF_L4("ibdm", "\tDevice Ver : 0x%x", ioc->ioc_device_ver); 7035 IBTF_DPRINTF_L4("ibdm", "\tSubsys ID : 0x%x", ioc->ioc_subsys_id); 7036 IBTF_DPRINTF_L4("ibdm", "\tIO class : 0x%x", ioc->ioc_io_class); 7037 IBTF_DPRINTF_L4("ibdm", "\tIO subclass : 0x%x", ioc->ioc_io_subclass); 7038 IBTF_DPRINTF_L4("ibdm", "\tProtocol : 0x%x", ioc->ioc_protocol); 7039 IBTF_DPRINTF_L4("ibdm", "\tProtocolV : 0x%x", ioc->ioc_protocol_ver); 7040 IBTF_DPRINTF_L4("ibdm", "\tmsg qdepth : %d", ioc->ioc_send_msg_qdepth); 7041 IBTF_DPRINTF_L4("ibdm", "\trdma qdepth : %d", 7042 ioc->ioc_rdma_read_qdepth); 7043 IBTF_DPRINTF_L4("ibdm", "\tsndmsg sz : %d", ioc->ioc_send_msg_sz); 7044 IBTF_DPRINTF_L4("ibdm", "\trdma xfersz : %d", ioc->ioc_rdma_xfer_sz); 7045 IBTF_DPRINTF_L4("ibdm", "\topcal mask : 0x%x", 7046 ioc->ioc_ctrl_opcap_mask); 7047 IBTF_DPRINTF_L4("ibdm", "\tsrventries : %x", ioc->ioc_service_entries); 7048 } 7049 7050 7051 void 7052 ibdm_dump_service_entries(ib_dm_srv_t *srv_ents) 7053 { 7054 IBTF_DPRINTF_L4("ibdm", 7055 
"\thandle_srventry_mad: service id : %llx", srv_ents->srv_id); 7056 7057 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad: " 7058 "Service Name : %s", srv_ents->srv_name); 7059 } 7060 7061 int ibdm_allow_sweep_fabric_timestamp = 1; 7062 7063 void 7064 ibdm_dump_sweep_fabric_timestamp(int flag) 7065 { 7066 static hrtime_t x; 7067 if (flag) { 7068 if (ibdm_allow_sweep_fabric_timestamp) { 7069 IBTF_DPRINTF_L4("ibdm", "\tTime taken to complete " 7070 "sweep %lld ms", ((gethrtime() - x)/ 1000000)); 7071 } 7072 x = 0; 7073 } else 7074 x = gethrtime(); 7075 } 7076 #endif 7077