/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * ibdm.c
 *
 * This file contains the InfiniBand Device Manager (IBDM) support functions.
 * The IB nexus driver is the only client of the IBDM module.
 *
 * IBDM registers with IBTF for HCA arrival/removal notification.
 * IBDM registers with SA access to send DM MADs to discover the IOCs behind
 * the IOUs.
 *
 * The IB nexus driver registers with IBDM to obtain information about the
 * HCAs and IOCs (behind the IOUs) present on the IB fabric.
 */

#include <sys/systm.h>
#include <sys/taskq.h>
#include <sys/ib/mgt/ibdm/ibdm_impl.h>
#include <sys/ib/mgt/ibmf/ibmf_impl.h>
#include <sys/modctl.h>

/* Function Prototype declarations */
static int	ibdm_free_iou_info(ibdm_dp_gidinfo_t *, ibdm_iou_info_t **);
static int	ibdm_fini(void);
static int	ibdm_init(void);
static int	ibdm_get_reachable_ports(ibdm_port_attr_t *,
		    ibdm_hca_list_t *);
static ibdm_dp_gidinfo_t *ibdm_check_dgid(ib_guid_t, ib_sn_prefix_t);
static ibdm_dp_gidinfo_t *ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *);
static boolean_t ibdm_is_cisco(ib_guid_t);
static boolean_t ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *);
static void	ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *);
static int	ibdm_set_classportinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_send_classportinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *);
static int	ibdm_get_node_port_guids(ibmf_saa_handle_t, ib_lid_t,
		    ib_guid_t *, ib_guid_t *);
static int	ibdm_retry_command(ibdm_timeout_cb_args_t *);
static int	ibdm_get_diagcode(ibdm_dp_gidinfo_t *, int);
static int	ibdm_verify_mad_status(ib_mad_hdr_t *);
static int	ibdm_handle_redirection(ibmf_msg_t *,
		    ibdm_dp_gidinfo_t *, int *);
static void	ibdm_wait_probe_completion(void);
static void	ibdm_sweep_fabric(int);
static void	ibdm_probe_gid_thread(void *);
static void	ibdm_wakeup_probe_gid_cv(void);
static void	ibdm_port_attr_ibmf_init(ibdm_port_attr_t *, ib_pkey_t, int);
static int	ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *, int);
static void	ibdm_update_port_attr(ibdm_port_attr_t *);
static void	ibdm_handle_hca_attach(ib_guid_t);
static void	ibdm_handle_srventry_mad(ibmf_msg_t *,
		    ibdm_dp_gidinfo_t *, int *);
static void	ibdm_ibmf_recv_cb(ibmf_handle_t, ibmf_msg_t *, void *);
static void	ibdm_recv_incoming_mad(void *);
static void	ibdm_process_incoming_mad(ibmf_handle_t, ibmf_msg_t *,
void *); 83 static void ibdm_ibmf_send_cb(ibmf_handle_t, ibmf_msg_t *, void *); 84 static void ibdm_pkt_timeout_hdlr(void *arg); 85 static void ibdm_initialize_port(ibdm_port_attr_t *); 86 static void ibdm_handle_diagcode(ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 87 static void ibdm_probe_gid(ibdm_dp_gidinfo_t *); 88 static void ibdm_alloc_send_buffers(ibmf_msg_t *); 89 static void ibdm_free_send_buffers(ibmf_msg_t *); 90 static void ibdm_handle_hca_detach(ib_guid_t); 91 static int ibdm_fini_port(ibdm_port_attr_t *); 92 static int ibdm_uninit_hca(ibdm_hca_list_t *); 93 static void ibdm_handle_setclassportinfo(ibmf_handle_t, ibmf_msg_t *, 94 ibdm_dp_gidinfo_t *, int *); 95 static void ibdm_handle_iounitinfo(ibmf_handle_t, 96 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 97 static void ibdm_handle_ioc_profile(ibmf_handle_t, 98 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 99 static void ibdm_event_hdlr(void *, ibt_hca_hdl_t, 100 ibt_async_code_t, ibt_async_event_t *); 101 static void ibdm_handle_classportinfo(ibmf_handle_t, 102 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 103 static void ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *, 104 ibdm_dp_gidinfo_t *); 105 106 static ibdm_hca_list_t *ibdm_dup_hca_attr(ibdm_hca_list_t *); 107 static ibdm_ioc_info_t *ibdm_dup_ioc_info(ibdm_ioc_info_t *, 108 ibdm_dp_gidinfo_t *gid_list); 109 static void ibdm_probe_ioc(ib_guid_t, ib_guid_t, int); 110 static ibdm_ioc_info_t *ibdm_is_ioc_present(ib_guid_t, 111 ibdm_dp_gidinfo_t *, int *); 112 static ibdm_port_attr_t *ibdm_get_port_attr(ibt_async_event_t *, 113 ibdm_hca_list_t **); 114 static sa_node_record_t *ibdm_get_node_records(ibmf_saa_handle_t, 115 size_t *, ib_guid_t); 116 static int ibdm_get_node_record_by_port(ibmf_saa_handle_t, 117 ib_guid_t, sa_node_record_t **, size_t *); 118 static sa_portinfo_record_t *ibdm_get_portinfo(ibmf_saa_handle_t, size_t *, 119 ib_lid_t); 120 static ibdm_dp_gidinfo_t *ibdm_create_gid_info(ibdm_port_attr_t *, 121 ib_gid_t, ib_gid_t); 122 static ibdm_dp_gidinfo_t *ibdm_find_gid(ib_guid_t, ib_guid_t); 123 static int ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *, uint8_t); 124 static ibdm_ioc_info_t *ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *, int); 125 static void ibdm_saa_event_cb(ibmf_saa_handle_t, ibmf_saa_subnet_event_t, 126 ibmf_saa_event_details_t *, void *); 127 static void ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *, 128 ibdm_dp_gidinfo_t *); 129 static ibdm_dp_gidinfo_t *ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *); 130 static void ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *, 131 ibdm_dp_gidinfo_t *); 132 static void ibdm_addto_gidlist(ibdm_gid_t **, ibdm_gid_t *); 133 static void ibdm_free_gid_list(ibdm_gid_t *); 134 static void ibdm_rescan_gidlist(ib_guid_t *ioc_guid); 135 static void ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *); 136 static void ibdm_saa_event_taskq(void *); 137 static void ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *); 138 static void ibdm_get_next_port(ibdm_hca_list_t **, 139 ibdm_port_attr_t **, int); 140 static void ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *, 141 ibdm_dp_gidinfo_t *); 142 static void ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *, 143 ibdm_hca_list_t *); 144 static void ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *); 145 static void ibdm_saa_handle_new_gid(void *); 146 static void ibdm_reset_all_dgids(ibmf_saa_handle_t); 147 static void ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *); 148 static void ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *); 149 static void ibdm_fill_srv_attr_mod(ib_mad_hdr_t *, ibdm_timeout_cb_args_t *); 150 static void 
ibdm_bump_transactionID(ibdm_dp_gidinfo_t *); 151 static ibdm_ioc_info_t *ibdm_handle_prev_iou(); 152 static int ibdm_serv_cmp(ibdm_srvents_info_t *, ibdm_srvents_info_t *, 153 int); 154 static ibdm_ioc_info_t *ibdm_get_ioc_info_with_gid(ib_guid_t, 155 ibdm_dp_gidinfo_t **); 156 157 int ibdm_dft_timeout = IBDM_DFT_TIMEOUT; 158 int ibdm_dft_retry_cnt = IBDM_DFT_NRETRIES; 159 #ifdef DEBUG 160 int ibdm_ignore_saa_event = 0; 161 #endif 162 163 /* Modload support */ 164 static struct modlmisc ibdm_modlmisc = { 165 &mod_miscops, 166 "InfiniBand Device Manager %I%", 167 }; 168 169 struct modlinkage ibdm_modlinkage = { 170 MODREV_1, 171 (void *)&ibdm_modlmisc, 172 NULL 173 }; 174 175 static ibt_clnt_modinfo_t ibdm_ibt_modinfo = { 176 IBTI_V2, 177 IBT_DM, 178 ibdm_event_hdlr, 179 NULL, 180 "ibdm" 181 }; 182 183 /* Global variables */ 184 ibdm_t ibdm; 185 int ibdm_taskq_enable = IBDM_ENABLE_TASKQ_HANDLING; 186 char *ibdm_string = "ibdm"; 187 188 _NOTE(SCHEME_PROTECTS_DATA("Serialized access by cv", 189 ibdm.ibdm_dp_gidlist_head)) 190 191 /* 192 * _init 193 * Loadable module init, called before any other module. 194 * Initialize mutex 195 * Register with IBTF 196 */ 197 int 198 _init(void) 199 { 200 int err; 201 202 IBTF_DPRINTF_L4("ibdm", "\t_init: addr of ibdm %p", &ibdm); 203 204 if ((err = ibdm_init()) != IBDM_SUCCESS) { 205 IBTF_DPRINTF_L2("ibdm", "_init: ibdm_init failed 0x%x", err); 206 (void) ibdm_fini(); 207 return (DDI_FAILURE); 208 } 209 210 if ((err = mod_install(&ibdm_modlinkage)) != 0) { 211 IBTF_DPRINTF_L2("ibdm", "_init: mod_install failed 0x%x", err); 212 (void) ibdm_fini(); 213 } 214 return (err); 215 } 216 217 218 int 219 _fini(void) 220 { 221 int err; 222 223 if ((err = ibdm_fini()) != IBDM_SUCCESS) { 224 IBTF_DPRINTF_L2("ibdm", "_fini: ibdm_fini failed 0x%x", err); 225 (void) ibdm_init(); 226 return (EBUSY); 227 } 228 229 if ((err = mod_remove(&ibdm_modlinkage)) != 0) { 230 IBTF_DPRINTF_L2("ibdm", "_fini: mod_remove failed 0x%x", err); 231 (void) ibdm_init(); 232 } 233 return (err); 234 } 235 236 237 int 238 _info(struct modinfo *modinfop) 239 { 240 return (mod_info(&ibdm_modlinkage, modinfop)); 241 } 242 243 244 /* 245 * ibdm_init(): 246 * Register with IBTF 247 * Allocate memory for the HCAs 248 * Allocate minor-nodes for the HCAs 249 */ 250 static int 251 ibdm_init(void) 252 { 253 int i, hca_count; 254 ib_guid_t *hca_guids; 255 ibt_status_t status; 256 257 IBTF_DPRINTF_L4("ibdm", "\tibdm_init:"); 258 if (!(ibdm.ibdm_state & IBDM_LOCKS_ALLOCED)) { 259 mutex_init(&ibdm.ibdm_mutex, NULL, MUTEX_DEFAULT, NULL); 260 mutex_init(&ibdm.ibdm_hl_mutex, NULL, MUTEX_DEFAULT, NULL); 261 mutex_init(&ibdm.ibdm_ibnex_mutex, NULL, MUTEX_DEFAULT, NULL); 262 mutex_enter(&ibdm.ibdm_mutex); 263 ibdm.ibdm_state |= IBDM_LOCKS_ALLOCED; 264 } 265 266 if (!(ibdm.ibdm_state & IBDM_IBT_ATTACHED)) { 267 if ((status = ibt_attach(&ibdm_ibt_modinfo, NULL, NULL, 268 (void *)&ibdm.ibdm_ibt_clnt_hdl)) != IBT_SUCCESS) { 269 IBTF_DPRINTF_L2("ibdm", "ibdm_init: ibt_attach " 270 "failed %x", status); 271 mutex_exit(&ibdm.ibdm_mutex); 272 return (IBDM_FAILURE); 273 } 274 275 ibdm.ibdm_state |= IBDM_IBT_ATTACHED; 276 mutex_exit(&ibdm.ibdm_mutex); 277 } 278 279 280 if (!(ibdm.ibdm_state & IBDM_HCA_ATTACHED)) { 281 hca_count = ibt_get_hca_list(&hca_guids); 282 IBTF_DPRINTF_L4("ibdm", "ibdm_init: num_hcas = %d", hca_count); 283 for (i = 0; i < hca_count; i++) 284 (void) ibdm_handle_hca_attach(hca_guids[i]); 285 if (hca_count) 286 ibt_free_hca_list(hca_guids, hca_count); 287 288 mutex_enter(&ibdm.ibdm_mutex); 289 
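		/*
		 * The initial scan of already-present HCAs is done; mark
		 * IBDM_HCA_ATTACHED under ibdm_mutex so this block is not
		 * re-run, and let ibdm_event_hdlr() pick up any HCAs that
		 * attach later via IBT_HCA_ATTACH_EVENT.
		 */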
ibdm.ibdm_state |= IBDM_HCA_ATTACHED; 290 mutex_exit(&ibdm.ibdm_mutex); 291 } 292 293 if (!(ibdm.ibdm_state & IBDM_CVS_ALLOCED)) { 294 cv_init(&ibdm.ibdm_probe_cv, NULL, CV_DRIVER, NULL); 295 cv_init(&ibdm.ibdm_busy_cv, NULL, CV_DRIVER, NULL); 296 mutex_enter(&ibdm.ibdm_mutex); 297 ibdm.ibdm_state |= IBDM_CVS_ALLOCED; 298 mutex_exit(&ibdm.ibdm_mutex); 299 } 300 return (IBDM_SUCCESS); 301 } 302 303 304 static int 305 ibdm_free_iou_info(ibdm_dp_gidinfo_t *gid_info, ibdm_iou_info_t **ioup) 306 { 307 int ii, k, niocs; 308 size_t size; 309 ibdm_gid_t *delete, *head; 310 timeout_id_t timeout_id; 311 ibdm_ioc_info_t *ioc; 312 ibdm_iou_info_t *gl_iou = *ioup; 313 314 ASSERT(mutex_owned(&gid_info->gl_mutex)); 315 if (gl_iou == NULL) { 316 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: No IOU"); 317 return (0); 318 } 319 320 niocs = gl_iou->iou_info.iou_num_ctrl_slots; 321 IBTF_DPRINTF_L4("ibdm", "\tfree_iou_info: gid_info = %p, niocs %d", 322 gid_info, niocs); 323 324 for (ii = 0; ii < niocs; ii++) { 325 ioc = (ibdm_ioc_info_t *)&gl_iou->iou_ioc_info[ii]; 326 327 /* handle the case where an ioc_timeout_id is scheduled */ 328 if (ioc->ioc_timeout_id) { 329 timeout_id = ioc->ioc_timeout_id; 330 ioc->ioc_timeout_id = 0; 331 mutex_exit(&gid_info->gl_mutex); 332 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 333 "ioc_timeout_id = 0x%x", timeout_id); 334 if (untimeout(timeout_id) == -1) { 335 IBTF_DPRINTF_L2("ibdm", "free_iou_info: " 336 "untimeout ioc_timeout_id failed"); 337 mutex_enter(&gid_info->gl_mutex); 338 return (-1); 339 } 340 mutex_enter(&gid_info->gl_mutex); 341 } 342 343 /* handle the case where an ioc_dc_timeout_id is scheduled */ 344 if (ioc->ioc_dc_timeout_id) { 345 timeout_id = ioc->ioc_dc_timeout_id; 346 ioc->ioc_dc_timeout_id = 0; 347 mutex_exit(&gid_info->gl_mutex); 348 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 349 "ioc_dc_timeout_id = 0x%x", timeout_id); 350 if (untimeout(timeout_id) == -1) { 351 IBTF_DPRINTF_L2("ibdm", "free_iou_info: " 352 "untimeout ioc_dc_timeout_id failed"); 353 mutex_enter(&gid_info->gl_mutex); 354 return (-1); 355 } 356 mutex_enter(&gid_info->gl_mutex); 357 } 358 359 /* handle the case where serv[k].se_timeout_id is scheduled */ 360 for (k = 0; k < ioc->ioc_profile.ioc_service_entries; k++) { 361 if (ioc->ioc_serv[k].se_timeout_id) { 362 timeout_id = ioc->ioc_serv[k].se_timeout_id; 363 ioc->ioc_serv[k].se_timeout_id = 0; 364 mutex_exit(&gid_info->gl_mutex); 365 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 366 "ioc->ioc_serv[%d].se_timeout_id = 0x%x", 367 k, timeout_id); 368 if (untimeout(timeout_id) == -1) { 369 IBTF_DPRINTF_L2("ibdm", "free_iou_info:" 370 " untimeout se_timeout_id failed"); 371 mutex_enter(&gid_info->gl_mutex); 372 return (-1); 373 } 374 mutex_enter(&gid_info->gl_mutex); 375 } 376 } 377 378 /* delete GID list in IOC */ 379 head = ioc->ioc_gid_list; 380 while (head) { 381 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: " 382 "Deleting gid_list struct %p", head); 383 delete = head; 384 head = head->gid_next; 385 kmem_free(delete, sizeof (ibdm_gid_t)); 386 } 387 ioc->ioc_gid_list = NULL; 388 389 /* delete ioc_serv */ 390 size = ioc->ioc_profile.ioc_service_entries * 391 sizeof (ibdm_srvents_info_t); 392 if (ioc->ioc_serv && size) { 393 kmem_free(ioc->ioc_serv, size); 394 ioc->ioc_serv = NULL; 395 } 396 } 397 /* 398 * Clear the IBDM_CISCO_PROBE_DONE flag to get the IO Unit information 399 * via the switch during the probe process. 
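	 * With the flag cleared, ibdm_is_cisco_switch() treats the gateway
	 * as not yet activated, so the next probe re-sends the
	 * SetClassPortInfo activation request to it.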
400 */ 401 gid_info->gl_flag &= ~IBDM_CISCO_PROBE_DONE; 402 403 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: deleting IOU & IOC"); 404 size = sizeof (ibdm_iou_info_t) + niocs * sizeof (ibdm_ioc_info_t); 405 kmem_free(gl_iou, size); 406 *ioup = NULL; 407 return (0); 408 } 409 410 411 /* 412 * ibdm_fini(): 413 * Un-register with IBTF 414 * De allocate memory for the GID info 415 */ 416 static int 417 ibdm_fini() 418 { 419 int ii; 420 ibdm_hca_list_t *hca_list, *temp; 421 ibdm_dp_gidinfo_t *gid_info, *tmp; 422 ibdm_gid_t *head, *delete; 423 424 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini"); 425 426 mutex_enter(&ibdm.ibdm_hl_mutex); 427 if (ibdm.ibdm_state & IBDM_IBT_ATTACHED) { 428 if (ibt_detach(ibdm.ibdm_ibt_clnt_hdl) != IBT_SUCCESS) { 429 IBTF_DPRINTF_L2("ibdm", "\t_fini: ibt_detach failed"); 430 mutex_exit(&ibdm.ibdm_hl_mutex); 431 return (IBDM_FAILURE); 432 } 433 ibdm.ibdm_state &= ~IBDM_IBT_ATTACHED; 434 ibdm.ibdm_ibt_clnt_hdl = NULL; 435 } 436 437 hca_list = ibdm.ibdm_hca_list_head; 438 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: nhcas %d", ibdm.ibdm_hca_count); 439 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 440 temp = hca_list; 441 hca_list = hca_list->hl_next; 442 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: hca %p", temp); 443 if (ibdm_uninit_hca(temp) != IBDM_SUCCESS) { 444 IBTF_DPRINTF_L2("ibdm", "\tibdm_fini: " 445 "uninit_hca %p failed", temp); 446 mutex_exit(&ibdm.ibdm_hl_mutex); 447 return (IBDM_FAILURE); 448 } 449 } 450 mutex_exit(&ibdm.ibdm_hl_mutex); 451 452 mutex_enter(&ibdm.ibdm_mutex); 453 if (ibdm.ibdm_state & IBDM_HCA_ATTACHED) 454 ibdm.ibdm_state &= ~IBDM_HCA_ATTACHED; 455 456 gid_info = ibdm.ibdm_dp_gidlist_head; 457 while (gid_info) { 458 mutex_enter(&gid_info->gl_mutex); 459 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou); 460 mutex_exit(&gid_info->gl_mutex); 461 ibdm_delete_glhca_list(gid_info); 462 463 tmp = gid_info; 464 gid_info = gid_info->gl_next; 465 mutex_destroy(&tmp->gl_mutex); 466 head = tmp->gl_gid; 467 while (head) { 468 IBTF_DPRINTF_L4("ibdm", 469 "\tibdm_fini: Deleting gid structs"); 470 delete = head; 471 head = head->gid_next; 472 kmem_free(delete, sizeof (ibdm_gid_t)); 473 } 474 kmem_free(tmp, sizeof (ibdm_dp_gidinfo_t)); 475 } 476 mutex_exit(&ibdm.ibdm_mutex); 477 478 if (ibdm.ibdm_state & IBDM_LOCKS_ALLOCED) { 479 ibdm.ibdm_state &= ~IBDM_LOCKS_ALLOCED; 480 mutex_destroy(&ibdm.ibdm_mutex); 481 mutex_destroy(&ibdm.ibdm_hl_mutex); 482 mutex_destroy(&ibdm.ibdm_ibnex_mutex); 483 } 484 if (ibdm.ibdm_state & IBDM_CVS_ALLOCED) { 485 ibdm.ibdm_state &= ~IBDM_CVS_ALLOCED; 486 cv_destroy(&ibdm.ibdm_probe_cv); 487 cv_destroy(&ibdm.ibdm_busy_cv); 488 } 489 return (IBDM_SUCCESS); 490 } 491 492 493 /* 494 * ibdm_event_hdlr() 495 * 496 * IBDM registers this asynchronous event handler at the time of 497 * ibt_attach. IBDM support the following async events. For other 498 * event, simply returns success. 499 * IBT_HCA_ATTACH_EVENT: 500 * Retrieves the information about all the port that are 501 * present on this HCA, allocates the port attributes 502 * structure and calls IB nexus callback routine with 503 * the port attributes structure as an input argument. 
504 * IBT_HCA_DETACH_EVENT: 505 * Retrieves the information about all the ports that are 506 * present on this HCA and calls IB nexus callback with 507 * port guid as an argument 508 * IBT_EVENT_PORT_UP: 509 * Register with IBMF and SA access 510 * Setup IBMF receive callback routine 511 * IBT_EVENT_PORT_DOWN: 512 * Un-Register with IBMF and SA access 513 * Teardown IBMF receive callback routine 514 */ 515 /*ARGSUSED*/ 516 static void 517 ibdm_event_hdlr(void *clnt_hdl, 518 ibt_hca_hdl_t hca_hdl, ibt_async_code_t code, ibt_async_event_t *event) 519 { 520 ibdm_hca_list_t *hca_list; 521 ibdm_port_attr_t *port; 522 ibmf_saa_handle_t port_sa_hdl; 523 524 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: async code 0x%x", code); 525 526 switch (code) { 527 case IBT_HCA_ATTACH_EVENT: /* New HCA registered with IBTF */ 528 ibdm_handle_hca_attach(event->ev_hca_guid); 529 break; 530 531 case IBT_HCA_DETACH_EVENT: /* HCA unregistered with IBTF */ 532 ibdm_handle_hca_detach(event->ev_hca_guid); 533 mutex_enter(&ibdm.ibdm_ibnex_mutex); 534 if (ibdm.ibdm_ibnex_callback != NULL) { 535 (*ibdm.ibdm_ibnex_callback)((void *) 536 &event->ev_hca_guid, IBDM_EVENT_HCA_REMOVED); 537 } 538 mutex_exit(&ibdm.ibdm_ibnex_mutex); 539 break; 540 541 case IBT_EVENT_PORT_UP: 542 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_UP"); 543 mutex_enter(&ibdm.ibdm_hl_mutex); 544 port = ibdm_get_port_attr(event, &hca_list); 545 if (port == NULL) { 546 IBTF_DPRINTF_L2("ibdm", 547 "\tevent_hdlr: HCA not present"); 548 mutex_exit(&ibdm.ibdm_hl_mutex); 549 break; 550 } 551 ibdm_initialize_port(port); 552 hca_list->hl_nports_active++; 553 mutex_exit(&ibdm.ibdm_hl_mutex); 554 break; 555 556 case IBT_ERROR_PORT_DOWN: 557 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_DOWN"); 558 mutex_enter(&ibdm.ibdm_hl_mutex); 559 port = ibdm_get_port_attr(event, &hca_list); 560 if (port == NULL) { 561 IBTF_DPRINTF_L2("ibdm", 562 "\tevent_hdlr: HCA not present"); 563 mutex_exit(&ibdm.ibdm_hl_mutex); 564 break; 565 } 566 hca_list->hl_nports_active--; 567 port_sa_hdl = port->pa_sa_hdl; 568 (void) ibdm_fini_port(port); 569 port->pa_state = IBT_PORT_DOWN; 570 mutex_exit(&ibdm.ibdm_hl_mutex); 571 ibdm_reset_all_dgids(port_sa_hdl); 572 break; 573 574 default: /* Ignore all other events/errors */ 575 break; 576 } 577 } 578 579 580 /* 581 * ibdm_initialize_port() 582 * Register with IBMF 583 * Register with SA access 584 * Register a receive callback routine with IBMF. IBMF invokes 585 * this routine whenever a MAD arrives at this port. 
586 * Update the port attributes 587 */ 588 static void 589 ibdm_initialize_port(ibdm_port_attr_t *port) 590 { 591 int ii; 592 uint_t nports, size; 593 uint_t pkey_idx; 594 ib_pkey_t pkey; 595 ibt_hca_portinfo_t *pinfop; 596 ibmf_register_info_t ibmf_reg; 597 ibmf_saa_subnet_event_args_t event_args; 598 599 IBTF_DPRINTF_L4("ibdm", "\tinitialize_port:"); 600 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 601 602 /* Check whether the port is active */ 603 if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL, 604 NULL) != IBT_SUCCESS) 605 return; 606 607 if (port->pa_sa_hdl != NULL) 608 return; 609 610 if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num, 611 &pinfop, &nports, &size) != IBT_SUCCESS) { 612 /* This should not occur */ 613 port->pa_npkeys = 0; 614 port->pa_pkey_tbl = NULL; 615 return; 616 } 617 port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix; 618 619 port->pa_state = pinfop->p_linkstate; 620 port->pa_npkeys = pinfop->p_pkey_tbl_sz; 621 port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc( 622 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP); 623 624 for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) 625 port->pa_pkey_tbl[pkey_idx].pt_pkey = 626 pinfop->p_pkey_tbl[pkey_idx]; 627 628 ibt_free_portinfo(pinfop, size); 629 630 event_args.is_event_callback = ibdm_saa_event_cb; 631 event_args.is_event_callback_arg = port; 632 if (ibmf_sa_session_open(port->pa_port_guid, 0, &event_args, 633 IBMF_VERSION, 0, &port->pa_sa_hdl) != IBMF_SUCCESS) { 634 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 635 "sa access registration failed"); 636 return; 637 } 638 ibmf_reg.ir_ci_guid = port->pa_hca_guid; 639 ibmf_reg.ir_port_num = port->pa_port_num; 640 ibmf_reg.ir_client_class = DEV_MGT_MANAGER; 641 642 if (ibmf_register(&ibmf_reg, IBMF_VERSION, 0, NULL, NULL, 643 &port->pa_ibmf_hdl, &port->pa_ibmf_caps) != IBMF_SUCCESS) { 644 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 645 "IBMF registration failed"); 646 (void) ibdm_fini_port(port); 647 return; 648 } 649 if (ibmf_setup_async_cb(port->pa_ibmf_hdl, IBMF_QP_HANDLE_DEFAULT, 650 ibdm_ibmf_recv_cb, 0, 0) != IBMF_SUCCESS) { 651 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 652 "IBMF setup recv cb failed"); 653 (void) ibdm_fini_port(port); 654 return; 655 } 656 657 for (ii = 0; ii < port->pa_npkeys; ii++) { 658 pkey = port->pa_pkey_tbl[ii].pt_pkey; 659 if (IBDM_INVALID_PKEY(pkey)) { 660 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 661 continue; 662 } 663 ibdm_port_attr_ibmf_init(port, pkey, ii); 664 } 665 } 666 667 668 /* 669 * ibdm_port_attr_ibmf_init: 670 * With IBMF - Alloc QP Handle and Setup Async callback 671 */ 672 static void 673 ibdm_port_attr_ibmf_init(ibdm_port_attr_t *port, ib_pkey_t pkey, int ii) 674 { 675 int ret; 676 677 if ((ret = ibmf_alloc_qp(port->pa_ibmf_hdl, pkey, IB_GSI_QKEY, 678 IBMF_ALT_QP_MAD_NO_RMPP, &port->pa_pkey_tbl[ii].pt_qp_hdl)) != 679 IBMF_SUCCESS) { 680 IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: " 681 "IBMF failed to alloc qp %d", ret); 682 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 683 return; 684 } 685 686 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_init: QP handle is %p", 687 port->pa_ibmf_hdl); 688 689 if ((ret = ibmf_setup_async_cb(port->pa_ibmf_hdl, 690 port->pa_pkey_tbl[ii].pt_qp_hdl, ibdm_ibmf_recv_cb, 0, 0)) != 691 IBMF_SUCCESS) { 692 IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: " 693 "IBMF setup recv cb failed %d", ret); 694 (void) ibmf_free_qp(port->pa_ibmf_hdl, 695 &port->pa_pkey_tbl[ii].pt_qp_hdl, 0); 696 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 697 } 698 } 699 700 701 /* 702 * 
ibdm_get_port_attr() 703 * Get port attributes from HCA guid and port number 704 * Return pointer to ibdm_port_attr_t on Success 705 * and NULL on failure 706 */ 707 static ibdm_port_attr_t * 708 ibdm_get_port_attr(ibt_async_event_t *event, ibdm_hca_list_t **retval) 709 { 710 ibdm_hca_list_t *hca_list; 711 ibdm_port_attr_t *port_attr; 712 int ii; 713 714 IBTF_DPRINTF_L4("ibdm", "\tget_port_attr: port# %d", event->ev_port); 715 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 716 hca_list = ibdm.ibdm_hca_list_head; 717 while (hca_list) { 718 if (hca_list->hl_hca_guid == event->ev_hca_guid) { 719 for (ii = 0; ii < hca_list->hl_nports; ii++) { 720 port_attr = &hca_list->hl_port_attr[ii]; 721 if (port_attr->pa_port_num == event->ev_port) { 722 *retval = hca_list; 723 return (port_attr); 724 } 725 } 726 } 727 hca_list = hca_list->hl_next; 728 } 729 return (NULL); 730 } 731 732 733 /* 734 * ibdm_update_port_attr() 735 * Update the port attributes 736 */ 737 static void 738 ibdm_update_port_attr(ibdm_port_attr_t *port) 739 { 740 uint_t nports, size; 741 uint_t pkey_idx; 742 ibt_hca_portinfo_t *portinfop; 743 744 IBTF_DPRINTF_L4("ibdm", "\tupdate_port_attr: Begin"); 745 if (ibt_query_hca_ports(port->pa_hca_hdl, 746 port->pa_port_num, &portinfop, &nports, &size) != IBT_SUCCESS) { 747 /* This should not occur */ 748 port->pa_npkeys = 0; 749 port->pa_pkey_tbl = NULL; 750 return; 751 } 752 port->pa_sn_prefix = portinfop->p_sgid_tbl[0].gid_prefix; 753 754 port->pa_state = portinfop->p_linkstate; 755 756 /* 757 * PKey information in portinfo valid only if port is 758 * ACTIVE. Bail out if not. 759 */ 760 if (port->pa_state != IBT_PORT_ACTIVE) { 761 port->pa_npkeys = 0; 762 port->pa_pkey_tbl = NULL; 763 ibt_free_portinfo(portinfop, size); 764 return; 765 } 766 767 port->pa_npkeys = portinfop->p_pkey_tbl_sz; 768 port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc( 769 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP); 770 771 for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) { 772 port->pa_pkey_tbl[pkey_idx].pt_pkey = 773 portinfop->p_pkey_tbl[pkey_idx]; 774 } 775 ibt_free_portinfo(portinfop, size); 776 } 777 778 779 /* 780 * ibdm_handle_hca_attach() 781 */ 782 static void 783 ibdm_handle_hca_attach(ib_guid_t hca_guid) 784 { 785 uint_t size; 786 uint_t ii, nports; 787 ibt_status_t status; 788 ibt_hca_hdl_t hca_hdl; 789 ibt_hca_attr_t *hca_attr; 790 ibdm_hca_list_t *hca_list, *temp; 791 ibdm_port_attr_t *port_attr; 792 ibt_hca_portinfo_t *portinfop; 793 794 IBTF_DPRINTF_L4("ibdm", 795 "\thandle_hca_attach: hca_guid = 0x%llX", hca_guid); 796 797 /* open the HCA first */ 798 if ((status = ibt_open_hca(ibdm.ibdm_ibt_clnt_hdl, hca_guid, 799 &hca_hdl)) != IBT_SUCCESS) { 800 IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: " 801 "open_hca failed, status 0x%x", status); 802 return; 803 } 804 805 hca_attr = (ibt_hca_attr_t *) 806 kmem_alloc(sizeof (ibt_hca_attr_t), KM_SLEEP); 807 /* ibt_query_hca always returns IBT_SUCCESS */ 808 (void) ibt_query_hca(hca_hdl, hca_attr); 809 810 IBTF_DPRINTF_L4("ibdm", "\tvid: 0x%x, pid: 0x%x, ver: 0x%x," 811 " #ports: %d", hca_attr->hca_vendor_id, hca_attr->hca_device_id, 812 hca_attr->hca_version_id, hca_attr->hca_nports); 813 814 if ((status = ibt_query_hca_ports(hca_hdl, 0, &portinfop, &nports, 815 &size)) != IBT_SUCCESS) { 816 IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: " 817 "ibt_query_hca_ports failed, status 0x%x", status); 818 kmem_free(hca_attr, sizeof (ibt_hca_attr_t)); 819 (void) ibt_close_hca(hca_hdl); 820 return; 821 } 822 hca_list = (ibdm_hca_list_t *) 823 
kmem_zalloc((sizeof (ibdm_hca_list_t)), KM_SLEEP); 824 hca_list->hl_port_attr = (ibdm_port_attr_t *)kmem_zalloc( 825 (sizeof (ibdm_port_attr_t) * hca_attr->hca_nports), KM_SLEEP); 826 hca_list->hl_hca_guid = hca_attr->hca_node_guid; 827 hca_list->hl_nports = hca_attr->hca_nports; 828 hca_list->hl_attach_time = ddi_get_time(); 829 hca_list->hl_hca_hdl = hca_hdl; 830 831 /* 832 * Init a dummy port attribute for the HCA node 833 * This is for Per-HCA Node. Initialize port_attr : 834 * hca_guid & port_guid -> hca_guid 835 * npkeys, pkey_tbl is NULL 836 * port_num, sn_prefix is 0 837 * vendorid, product_id, dev_version from HCA 838 * pa_state is IBT_PORT_ACTIVE 839 */ 840 hca_list->hl_hca_port_attr = (ibdm_port_attr_t *)kmem_zalloc( 841 sizeof (ibdm_port_attr_t), KM_SLEEP); 842 port_attr = hca_list->hl_hca_port_attr; 843 port_attr->pa_vendorid = hca_attr->hca_vendor_id; 844 port_attr->pa_productid = hca_attr->hca_device_id; 845 port_attr->pa_dev_version = hca_attr->hca_version_id; 846 port_attr->pa_hca_guid = hca_attr->hca_node_guid; 847 port_attr->pa_hca_hdl = hca_list->hl_hca_hdl; 848 port_attr->pa_port_guid = hca_attr->hca_node_guid; 849 port_attr->pa_state = IBT_PORT_ACTIVE; 850 851 852 for (ii = 0; ii < nports; ii++) { 853 port_attr = &hca_list->hl_port_attr[ii]; 854 port_attr->pa_vendorid = hca_attr->hca_vendor_id; 855 port_attr->pa_productid = hca_attr->hca_device_id; 856 port_attr->pa_dev_version = hca_attr->hca_version_id; 857 port_attr->pa_hca_guid = hca_attr->hca_node_guid; 858 port_attr->pa_hca_hdl = hca_list->hl_hca_hdl; 859 port_attr->pa_port_guid = portinfop[ii].p_sgid_tbl->gid_guid; 860 port_attr->pa_sn_prefix = portinfop[ii].p_sgid_tbl->gid_prefix; 861 port_attr->pa_port_num = portinfop[ii].p_port_num; 862 port_attr->pa_state = portinfop[ii].p_linkstate; 863 864 /* 865 * Register with IBMF, SA access when the port is in 866 * ACTIVE state. Also register a callback routine 867 * with IBMF to receive incoming DM MAD's. 868 * The IBDM event handler takes care of registration of 869 * port which are not active. 
870 */ 871 IBTF_DPRINTF_L4("ibdm", 872 "\thandle_hca_attach: port guid %llx Port state 0x%x", 873 port_attr->pa_port_guid, portinfop[ii].p_linkstate); 874 875 if (portinfop[ii].p_linkstate == IBT_PORT_ACTIVE) { 876 mutex_enter(&ibdm.ibdm_hl_mutex); 877 hca_list->hl_nports_active++; 878 ibdm_initialize_port(port_attr); 879 mutex_exit(&ibdm.ibdm_hl_mutex); 880 } 881 } 882 mutex_enter(&ibdm.ibdm_hl_mutex); 883 for (temp = ibdm.ibdm_hca_list_head; temp; temp = temp->hl_next) { 884 if (temp->hl_hca_guid == hca_guid) { 885 IBTF_DPRINTF_L2("ibdm", "hca_attach: HCA %llX " 886 "already seen by IBDM", hca_guid); 887 mutex_exit(&ibdm.ibdm_hl_mutex); 888 (void) ibdm_uninit_hca(hca_list); 889 return; 890 } 891 } 892 ibdm.ibdm_hca_count++; 893 if (ibdm.ibdm_hca_list_head == NULL) { 894 ibdm.ibdm_hca_list_head = hca_list; 895 ibdm.ibdm_hca_list_tail = hca_list; 896 } else { 897 ibdm.ibdm_hca_list_tail->hl_next = hca_list; 898 ibdm.ibdm_hca_list_tail = hca_list; 899 } 900 mutex_exit(&ibdm.ibdm_hl_mutex); 901 mutex_enter(&ibdm.ibdm_ibnex_mutex); 902 if (ibdm.ibdm_ibnex_callback != NULL) { 903 (*ibdm.ibdm_ibnex_callback)((void *) 904 &hca_guid, IBDM_EVENT_HCA_ADDED); 905 } 906 mutex_exit(&ibdm.ibdm_ibnex_mutex); 907 908 kmem_free(hca_attr, sizeof (ibt_hca_attr_t)); 909 ibt_free_portinfo(portinfop, size); 910 } 911 912 913 /* 914 * ibdm_handle_hca_detach() 915 */ 916 static void 917 ibdm_handle_hca_detach(ib_guid_t hca_guid) 918 { 919 ibdm_hca_list_t *head, *prev = NULL; 920 size_t len; 921 ibdm_dp_gidinfo_t *gidinfo; 922 923 IBTF_DPRINTF_L4("ibdm", 924 "\thandle_hca_detach: hca_guid = 0x%llx", hca_guid); 925 926 /* Make sure no probes are running */ 927 mutex_enter(&ibdm.ibdm_mutex); 928 while (ibdm.ibdm_busy & IBDM_BUSY) 929 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 930 ibdm.ibdm_busy |= IBDM_BUSY; 931 mutex_exit(&ibdm.ibdm_mutex); 932 933 mutex_enter(&ibdm.ibdm_hl_mutex); 934 head = ibdm.ibdm_hca_list_head; 935 while (head) { 936 if (head->hl_hca_guid == hca_guid) { 937 if (prev == NULL) 938 ibdm.ibdm_hca_list_head = head->hl_next; 939 else 940 prev->hl_next = head->hl_next; 941 ibdm.ibdm_hca_count--; 942 break; 943 } 944 prev = head; 945 head = head->hl_next; 946 } 947 mutex_exit(&ibdm.ibdm_hl_mutex); 948 if (ibdm_uninit_hca(head) != IBDM_SUCCESS) 949 (void) ibdm_handle_hca_attach(hca_guid); 950 951 /* 952 * Now clean up the HCA lists in the gidlist. 
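	 * Each gl_hca_list entry referencing this HCA was added by
	 * ibdm_addto_glhcalist() during probing; unlink and free the
	 * matching entry from every gid_info on the global list.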
953 */ 954 for (gidinfo = ibdm.ibdm_dp_gidlist_head; gidinfo; gidinfo = 955 gidinfo->gl_next) { 956 prev = NULL; 957 head = gidinfo->gl_hca_list; 958 while (head) { 959 if (head->hl_hca_guid == hca_guid) { 960 if (prev == NULL) 961 gidinfo->gl_hca_list = 962 head->hl_next; 963 else 964 prev->hl_next = head->hl_next; 965 966 len = sizeof (ibdm_hca_list_t) + 967 (head->hl_nports * 968 sizeof (ibdm_port_attr_t)); 969 kmem_free(head, len); 970 971 break; 972 } 973 prev = head; 974 head = head->hl_next; 975 } 976 } 977 978 mutex_enter(&ibdm.ibdm_mutex); 979 ibdm.ibdm_busy &= ~IBDM_BUSY; 980 cv_broadcast(&ibdm.ibdm_busy_cv); 981 mutex_exit(&ibdm.ibdm_mutex); 982 } 983 984 985 static int 986 ibdm_uninit_hca(ibdm_hca_list_t *head) 987 { 988 int ii; 989 ibdm_port_attr_t *port_attr; 990 991 for (ii = 0; ii < head->hl_nports; ii++) { 992 port_attr = &head->hl_port_attr[ii]; 993 if (ibdm_fini_port(port_attr) != IBDM_SUCCESS) { 994 IBTF_DPRINTF_L2("ibdm", "uninit_hca: HCA %p port 0x%x " 995 "ibdm_fini_port() failed", head, ii); 996 return (IBDM_FAILURE); 997 } 998 } 999 if (head->hl_hca_hdl) 1000 if (ibt_close_hca(head->hl_hca_hdl) != IBT_SUCCESS) 1001 return (IBDM_FAILURE); 1002 kmem_free(head->hl_port_attr, 1003 head->hl_nports * sizeof (ibdm_port_attr_t)); 1004 kmem_free(head->hl_hca_port_attr, sizeof (ibdm_port_attr_t)); 1005 kmem_free(head, sizeof (ibdm_hca_list_t)); 1006 return (IBDM_SUCCESS); 1007 } 1008 1009 1010 /* 1011 * For each port on the HCA, 1012 * 1) Teardown IBMF receive callback function 1013 * 2) Unregister with IBMF 1014 * 3) Unregister with SA access 1015 */ 1016 static int 1017 ibdm_fini_port(ibdm_port_attr_t *port_attr) 1018 { 1019 int ii, ibmf_status; 1020 1021 for (ii = 0; ii < port_attr->pa_npkeys; ii++) { 1022 if (port_attr->pa_pkey_tbl == NULL) 1023 break; 1024 if (!port_attr->pa_pkey_tbl[ii].pt_qp_hdl) 1025 continue; 1026 if (ibdm_port_attr_ibmf_fini(port_attr, ii) != IBDM_SUCCESS) { 1027 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1028 "ibdm_port_attr_ibmf_fini failed for " 1029 "port pkey 0x%x", ii); 1030 return (IBDM_FAILURE); 1031 } 1032 } 1033 1034 if (port_attr->pa_ibmf_hdl) { 1035 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl, 1036 IBMF_QP_HANDLE_DEFAULT, 0); 1037 if (ibmf_status != IBMF_SUCCESS) { 1038 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1039 "ibmf_tear_down_async_cb failed %d", ibmf_status); 1040 return (IBDM_FAILURE); 1041 } 1042 1043 ibmf_status = ibmf_unregister(&port_attr->pa_ibmf_hdl, 0); 1044 if (ibmf_status != IBMF_SUCCESS) { 1045 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1046 "ibmf_unregister failed %d", ibmf_status); 1047 return (IBDM_FAILURE); 1048 } 1049 1050 port_attr->pa_ibmf_hdl = NULL; 1051 } 1052 1053 if (port_attr->pa_sa_hdl) { 1054 ibmf_status = ibmf_sa_session_close(&port_attr->pa_sa_hdl, 0); 1055 if (ibmf_status != IBMF_SUCCESS) { 1056 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1057 "ibmf_sa_session_close failed %d", ibmf_status); 1058 return (IBDM_FAILURE); 1059 } 1060 port_attr->pa_sa_hdl = NULL; 1061 } 1062 1063 if (port_attr->pa_pkey_tbl != NULL) { 1064 kmem_free(port_attr->pa_pkey_tbl, 1065 port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 1066 port_attr->pa_pkey_tbl = NULL; 1067 port_attr->pa_npkeys = 0; 1068 } 1069 1070 return (IBDM_SUCCESS); 1071 } 1072 1073 1074 /* 1075 * ibdm_port_attr_ibmf_fini: 1076 * With IBMF - Tear down Async callback and free QP Handle 1077 */ 1078 static int 1079 ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *port_attr, int ii) 1080 { 1081 int ibmf_status; 1082 1083 IBTF_DPRINTF_L5("ibdm", 
"\tport_attr_ibmf_fini:"); 1084 1085 if (port_attr->pa_pkey_tbl[ii].pt_qp_hdl) { 1086 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl, 1087 port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0); 1088 if (ibmf_status != IBMF_SUCCESS) { 1089 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: " 1090 "ibmf_tear_down_async_cb failed %d", ibmf_status); 1091 return (IBDM_FAILURE); 1092 } 1093 ibmf_status = ibmf_free_qp(port_attr->pa_ibmf_hdl, 1094 &port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0); 1095 if (ibmf_status != IBMF_SUCCESS) { 1096 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: " 1097 "ibmf_free_qp failed %d", ibmf_status); 1098 return (IBDM_FAILURE); 1099 } 1100 port_attr->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 1101 } 1102 return (IBDM_SUCCESS); 1103 } 1104 1105 1106 /* 1107 * ibdm_gid_decr_pending: 1108 * decrement gl_pending_cmds. If zero wakeup sleeping threads 1109 */ 1110 static void 1111 ibdm_gid_decr_pending(ibdm_dp_gidinfo_t *gidinfo) 1112 { 1113 mutex_enter(&ibdm.ibdm_mutex); 1114 mutex_enter(&gidinfo->gl_mutex); 1115 if (--gidinfo->gl_pending_cmds == 0) { 1116 /* 1117 * Handle DGID getting removed. 1118 */ 1119 if (gidinfo->gl_disconnected) { 1120 mutex_exit(&gidinfo->gl_mutex); 1121 mutex_exit(&ibdm.ibdm_mutex); 1122 1123 IBTF_DPRINTF_L3(ibdm_string, "\tgid_decr_pending: " 1124 "gidinfo %p hot removal", gidinfo); 1125 ibdm_delete_gidinfo(gidinfo); 1126 1127 mutex_enter(&ibdm.ibdm_mutex); 1128 ibdm.ibdm_ngid_probes_in_progress--; 1129 ibdm_wait_probe_completion(); 1130 mutex_exit(&ibdm.ibdm_mutex); 1131 return; 1132 } 1133 mutex_exit(&gidinfo->gl_mutex); 1134 mutex_exit(&ibdm.ibdm_mutex); 1135 ibdm_notify_newgid_iocs(gidinfo); 1136 mutex_enter(&ibdm.ibdm_mutex); 1137 mutex_enter(&gidinfo->gl_mutex); 1138 1139 ibdm.ibdm_ngid_probes_in_progress--; 1140 ibdm_wait_probe_completion(); 1141 } 1142 mutex_exit(&gidinfo->gl_mutex); 1143 mutex_exit(&ibdm.ibdm_mutex); 1144 } 1145 1146 1147 /* 1148 * ibdm_wait_probe_completion: 1149 * wait for probing to complete 1150 */ 1151 static void 1152 ibdm_wait_probe_completion(void) 1153 { 1154 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 1155 if (ibdm.ibdm_ngid_probes_in_progress) { 1156 IBTF_DPRINTF_L4("ibdm", "\twait for probe complete"); 1157 ibdm.ibdm_busy |= IBDM_PROBE_IN_PROGRESS; 1158 while (ibdm.ibdm_busy & IBDM_PROBE_IN_PROGRESS) 1159 cv_wait(&ibdm.ibdm_probe_cv, &ibdm.ibdm_mutex); 1160 } 1161 } 1162 1163 1164 /* 1165 * ibdm_wait_cisco_probe_completion: 1166 * wait for the reply from the Cisco FC GW switch after a setclassportinfo 1167 * request is sent. This wait can be achieved on each gid. 1168 */ 1169 static void 1170 ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *gidinfo) 1171 { 1172 ASSERT(MUTEX_HELD(&gidinfo->gl_mutex)); 1173 IBTF_DPRINTF_L4("ibdm", "\twait for cisco probe complete"); 1174 gidinfo->gl_flag |= IBDM_CISCO_PROBE; 1175 while (gidinfo->gl_flag & IBDM_CISCO_PROBE) 1176 cv_wait(&gidinfo->gl_probe_cv, &gidinfo->gl_mutex); 1177 } 1178 1179 1180 /* 1181 * ibdm_wakeup_probe_gid_cv: 1182 * wakeup waiting threads (based on ibdm_ngid_probes_in_progress) 1183 */ 1184 static void 1185 ibdm_wakeup_probe_gid_cv(void) 1186 { 1187 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 1188 if (!ibdm.ibdm_ngid_probes_in_progress) { 1189 IBTF_DPRINTF_L4("ibdm", "wakeup_probe_gid_thread: Wakeup"); 1190 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 1191 cv_broadcast(&ibdm.ibdm_probe_cv); 1192 } 1193 1194 } 1195 1196 1197 /* 1198 * ibdm_sweep_fabric(reprobe_flag) 1199 * Find all possible Managed IOU's and their IOC's that are visible 1200 * to the host. 
The algorithm used is as follows 1201 * 1202 * Send a "bus walk" request for each port on the host HCA to SA access 1203 * SA returns complete set of GID's that are reachable from 1204 * source port. This is done in parallel. 1205 * 1206 * Initialize GID state to IBDM_GID_PROBE_NOT_DONE 1207 * 1208 * Sort the GID list and eliminate duplicate GID's 1209 * 1) Use DGID for sorting 1210 * 2) use PortGuid for sorting 1211 * Send SA query to retrieve NodeRecord and 1212 * extract PortGuid from that. 1213 * 1214 * Set GID state to IBDM_GID_PROBE_FAILED to all the ports that dont 1215 * support DM MAD's 1216 * Send a "Portinfo" query to get the port capabilities and 1217 * then check for DM MAD's support 1218 * 1219 * Send "ClassPortInfo" request for all the GID's in parallel, 1220 * set the GID state to IBDM_GET_CLASSPORTINFO and wait on the 1221 * cv_signal to complete. 1222 * 1223 * When DM agent on the remote GID sends back the response, IBMF 1224 * invokes DM callback routine. 1225 * 1226 * If the response is proper, send "IOUnitInfo" request and set 1227 * GID state to IBDM_GET_IOUNITINFO. 1228 * 1229 * If the response is proper, send "IocProfileInfo" request to 1230 * all the IOC simultaneously and set GID state to IBDM_GET_IOC_DETAILS. 1231 * 1232 * Send request to get Service entries simultaneously 1233 * 1234 * Signal the waiting thread when received response for all the commands. 1235 * 1236 * Set the GID state to IBDM_GID_PROBE_FAILED when received a error 1237 * response during the probing period. 1238 * 1239 * Note: 1240 * ibdm.ibdm_ngid_probes_in_progress and ibdm_gid_list_t:gl_pending_cmds 1241 * keep track of number commands in progress at any point of time. 1242 * MAD transaction ID is used to identify a particular GID 1243 * TBD: Consider registering the IBMF receive callback on demand 1244 * 1245 * Note: This routine must be called with ibdm.ibdm_mutex held 1246 * TBD: Re probe the failure GID (for certain failures) when requested 1247 * for fabric sweep next time 1248 * 1249 * Parameters : If reprobe_flag is set, All IOCs will be reprobed. 1250 */ 1251 static void 1252 ibdm_sweep_fabric(int reprobe_flag) 1253 { 1254 int ii; 1255 int new_paths = 0; 1256 uint8_t niocs; 1257 taskqid_t tid; 1258 ibdm_ioc_info_t *ioc; 1259 ibdm_hca_list_t *hca_list = NULL; 1260 ibdm_port_attr_t *port = NULL; 1261 ibdm_dp_gidinfo_t *gid_info; 1262 1263 IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: Enter"); 1264 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 1265 1266 /* 1267 * Check whether a sweep already in progress. If so, just 1268 * wait for the fabric sweep to complete 1269 */ 1270 while (ibdm.ibdm_busy & IBDM_BUSY) 1271 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 1272 ibdm.ibdm_busy |= IBDM_BUSY; 1273 mutex_exit(&ibdm.ibdm_mutex); 1274 1275 ibdm_dump_sweep_fabric_timestamp(0); 1276 1277 /* Rescan the GID list for any removed GIDs for reprobe */ 1278 if (reprobe_flag) 1279 ibdm_rescan_gidlist(NULL); 1280 1281 /* 1282 * Get list of all the ports reachable from the local known HCA 1283 * ports which are active 1284 */ 1285 mutex_enter(&ibdm.ibdm_hl_mutex); 1286 for (ibdm_get_next_port(&hca_list, &port, 1); port; 1287 ibdm_get_next_port(&hca_list, &port, 1)) { 1288 /* 1289 * Get PATHS to all the reachable ports from 1290 * SGID and update the global ibdm structure. 
1291 */ 1292 new_paths = ibdm_get_reachable_ports(port, hca_list); 1293 ibdm.ibdm_ngids += new_paths; 1294 } 1295 mutex_exit(&ibdm.ibdm_hl_mutex); 1296 1297 mutex_enter(&ibdm.ibdm_mutex); 1298 ibdm.ibdm_ngid_probes_in_progress += ibdm.ibdm_ngids; 1299 mutex_exit(&ibdm.ibdm_mutex); 1300 1301 /* Send a request to probe GIDs asynchronously. */ 1302 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 1303 gid_info = gid_info->gl_next) { 1304 mutex_enter(&gid_info->gl_mutex); 1305 gid_info->gl_reprobe_flag = reprobe_flag; 1306 mutex_exit(&gid_info->gl_mutex); 1307 1308 /* process newly encountered GIDs */ 1309 tid = taskq_dispatch(system_taskq, ibdm_probe_gid_thread, 1310 (void *)gid_info, TQ_NOSLEEP); 1311 IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: gid_info = %p" 1312 " taskq_id = %x", gid_info, tid); 1313 /* taskq failed to dispatch call it directly */ 1314 if (tid == NULL) 1315 ibdm_probe_gid_thread((void *)gid_info); 1316 } 1317 1318 mutex_enter(&ibdm.ibdm_mutex); 1319 ibdm_wait_probe_completion(); 1320 1321 /* 1322 * Update the properties, if reprobe_flag is set 1323 * Skip if gl_reprobe_flag is set, this will be 1324 * a re-inserted / new GID, for which notifications 1325 * have already been send. 1326 */ 1327 if (reprobe_flag) { 1328 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 1329 gid_info = gid_info->gl_next) { 1330 if (gid_info->gl_iou == NULL) 1331 continue; 1332 if (gid_info->gl_reprobe_flag) { 1333 gid_info->gl_reprobe_flag = 0; 1334 continue; 1335 } 1336 1337 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 1338 for (ii = 0; ii < niocs; ii++) { 1339 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 1340 if (ioc) 1341 ibdm_reprobe_update_port_srv(ioc, 1342 gid_info); 1343 } 1344 } 1345 } else if (ibdm.ibdm_prev_iou) { 1346 ibdm_ioc_info_t *ioc_list; 1347 1348 /* 1349 * Get the list of IOCs which have changed. 1350 * If any IOCs have changed, Notify IBNexus 1351 */ 1352 ibdm.ibdm_prev_iou = 0; 1353 ioc_list = ibdm_handle_prev_iou(); 1354 if (ioc_list) { 1355 if (ibdm.ibdm_ibnex_callback != NULL) { 1356 (*ibdm.ibdm_ibnex_callback)( 1357 (void *)ioc_list, 1358 IBDM_EVENT_IOC_PROP_UPDATE); 1359 } 1360 } 1361 } 1362 1363 ibdm_dump_sweep_fabric_timestamp(1); 1364 1365 ibdm.ibdm_busy &= ~IBDM_BUSY; 1366 cv_broadcast(&ibdm.ibdm_busy_cv); 1367 IBTF_DPRINTF_L5("ibdm", "\tsweep_fabric: EXIT"); 1368 } 1369 1370 1371 /* 1372 * ibdm_is_cisco: 1373 * Check if this is a Cisco device or not. 1374 */ 1375 static boolean_t 1376 ibdm_is_cisco(ib_guid_t guid) 1377 { 1378 if ((guid >> IBDM_OUI_GUID_SHIFT) == IBDM_CISCO_COMPANY_ID) 1379 return (B_TRUE); 1380 return (B_FALSE); 1381 } 1382 1383 1384 /* 1385 * ibdm_is_cisco_switch: 1386 * Check if this switch is a CISCO switch or not. 1387 * Note that if this switch is already activated, ibdm_is_cisco_switch() 1388 * returns B_FALSE not to re-activate it again. 1389 */ 1390 static boolean_t 1391 ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *gid_info) 1392 { 1393 int company_id, device_id; 1394 ASSERT(gid_info != 0); 1395 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 1396 1397 /* 1398 * If this switch is already activated, don't re-activate it. 1399 */ 1400 if (gid_info->gl_flag & IBDM_CISCO_PROBE_DONE) 1401 return (B_FALSE); 1402 1403 /* 1404 * Check if this switch is a Cisco FC GW or not. 1405 * Use the node guid (the OUI part) instead of the vendor id 1406 * since the vendor id is zero in practice. 
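	 * ibdm_is_cisco() applies the same OUI comparison when only a
	 * port GUID is available.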
1407 */ 1408 company_id = gid_info->gl_nodeguid >> IBDM_OUI_GUID_SHIFT; 1409 device_id = gid_info->gl_devid; 1410 1411 if (company_id == IBDM_CISCO_COMPANY_ID && 1412 device_id == IBDM_CISCO_DEVICE_ID) 1413 return (B_TRUE); 1414 return (B_FALSE); 1415 } 1416 1417 1418 /* 1419 * ibdm_probe_gid_thread: 1420 * thread that does the actual work for sweeping the fabric 1421 * for a given GID 1422 */ 1423 static void 1424 ibdm_probe_gid_thread(void *args) 1425 { 1426 int reprobe_flag; 1427 ib_guid_t node_guid; 1428 ib_guid_t port_guid; 1429 ibdm_dp_gidinfo_t *gid_info; 1430 1431 gid_info = (ibdm_dp_gidinfo_t *)args; 1432 reprobe_flag = gid_info->gl_reprobe_flag; 1433 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: gid_info = %p, flag = %d", 1434 gid_info, reprobe_flag); 1435 ASSERT(gid_info != NULL); 1436 ASSERT(gid_info->gl_pending_cmds == 0); 1437 1438 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE && 1439 reprobe_flag == 0) { 1440 /* 1441 * This GID may have been already probed. Send 1442 * in a CLP to check if IOUnitInfo changed? 1443 * Explicitly set gl_reprobe_flag to 0 so that 1444 * IBnex is not notified on completion 1445 */ 1446 if (gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) { 1447 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: " 1448 "get new IOCs information"); 1449 mutex_enter(&gid_info->gl_mutex); 1450 gid_info->gl_pending_cmds++; 1451 gid_info->gl_state = IBDM_GET_IOUNITINFO; 1452 gid_info->gl_reprobe_flag = 0; 1453 mutex_exit(&gid_info->gl_mutex); 1454 if (ibdm_send_iounitinfo(gid_info) != IBDM_SUCCESS) { 1455 mutex_enter(&gid_info->gl_mutex); 1456 --gid_info->gl_pending_cmds; 1457 mutex_exit(&gid_info->gl_mutex); 1458 mutex_enter(&ibdm.ibdm_mutex); 1459 --ibdm.ibdm_ngid_probes_in_progress; 1460 ibdm_wakeup_probe_gid_cv(); 1461 mutex_exit(&ibdm.ibdm_mutex); 1462 } 1463 } else { 1464 mutex_enter(&ibdm.ibdm_mutex); 1465 --ibdm.ibdm_ngid_probes_in_progress; 1466 ibdm_wakeup_probe_gid_cv(); 1467 mutex_exit(&ibdm.ibdm_mutex); 1468 } 1469 return; 1470 } else if (reprobe_flag && gid_info->gl_state == 1471 IBDM_GID_PROBING_COMPLETE) { 1472 /* 1473 * Reprobe all IOCs for the GID which has completed 1474 * probe. Skip other port GIDs to same IOU. 1475 * Explicitly set gl_reprobe_flag to 0 so that 1476 * IBnex is not notified on completion 1477 */ 1478 ibdm_ioc_info_t *ioc_info; 1479 uint8_t niocs, ii; 1480 1481 ASSERT(gid_info->gl_iou); 1482 mutex_enter(&gid_info->gl_mutex); 1483 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 1484 gid_info->gl_state = IBDM_GET_IOC_DETAILS; 1485 gid_info->gl_pending_cmds += niocs; 1486 gid_info->gl_reprobe_flag = 0; 1487 mutex_exit(&gid_info->gl_mutex); 1488 for (ii = 0; ii < niocs; ii++) { 1489 uchar_t slot_info; 1490 ib_dm_io_unitinfo_t *giou_info; 1491 1492 /* 1493 * Check whether IOC is present in the slot 1494 * Series of nibbles (in the field 1495 * iou_ctrl_list) represents a slot in the 1496 * IOU. 1497 * Byte format: 76543210 1498 * Bits 0-3 of first byte represent Slot 2 1499 * bits 4-7 of first byte represent slot 1, 1500 * bits 0-3 of second byte represent slot 4 1501 * and so on 1502 * Each 4-bit nibble has the following meaning 1503 * 0x0 : IOC not installed 1504 * 0x1 : IOC is present 1505 * 0xf : Slot does not exist 1506 * and all other values are reserved. 
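			 * The decode performed below amounts to:
			 *	slot_info = iou_ctrl_list[ii / 2];
			 *	if ((ii % 2) == 0)
			 *		slot_info >>= 4;
			 *	installed = ((slot_info & 0xf) == 1);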
1507 */ 1508 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii); 1509 giou_info = &gid_info->gl_iou->iou_info; 1510 slot_info = giou_info->iou_ctrl_list[(ii/2)]; 1511 if ((ii % 2) == 0) 1512 slot_info = (slot_info >> 4); 1513 1514 if ((slot_info & 0xf) != 1) { 1515 ioc_info->ioc_state = 1516 IBDM_IOC_STATE_PROBE_FAILED; 1517 ibdm_gid_decr_pending(gid_info); 1518 continue; 1519 } 1520 1521 if (ibdm_send_ioc_profile(gid_info, ii) != 1522 IBDM_SUCCESS) { 1523 ibdm_gid_decr_pending(gid_info); 1524 } 1525 } 1526 1527 return; 1528 } else if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) { 1529 mutex_enter(&ibdm.ibdm_mutex); 1530 --ibdm.ibdm_ngid_probes_in_progress; 1531 ibdm_wakeup_probe_gid_cv(); 1532 mutex_exit(&ibdm.ibdm_mutex); 1533 return; 1534 } 1535 1536 /* 1537 * Check whether the destination GID supports DM agents. If 1538 * not, stop probing the GID and continue with the next GID 1539 * in the list. 1540 */ 1541 if (ibdm_is_dev_mgt_supported(gid_info) != IBDM_SUCCESS) { 1542 mutex_enter(&gid_info->gl_mutex); 1543 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1544 mutex_exit(&gid_info->gl_mutex); 1545 ibdm_delete_glhca_list(gid_info); 1546 mutex_enter(&ibdm.ibdm_mutex); 1547 --ibdm.ibdm_ngid_probes_in_progress; 1548 ibdm_wakeup_probe_gid_cv(); 1549 mutex_exit(&ibdm.ibdm_mutex); 1550 return; 1551 } 1552 1553 /* Get the nodeguid and portguid of the port */ 1554 if (ibdm_get_node_port_guids(gid_info->gl_sa_hdl, gid_info->gl_dlid, 1555 &node_guid, &port_guid) != IBDM_SUCCESS) { 1556 mutex_enter(&gid_info->gl_mutex); 1557 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1558 mutex_exit(&gid_info->gl_mutex); 1559 ibdm_delete_glhca_list(gid_info); 1560 mutex_enter(&ibdm.ibdm_mutex); 1561 --ibdm.ibdm_ngid_probes_in_progress; 1562 ibdm_wakeup_probe_gid_cv(); 1563 mutex_exit(&ibdm.ibdm_mutex); 1564 return; 1565 } 1566 1567 /* 1568 * Check whether we already knew about this NodeGuid 1569 * If so, do not probe the GID and continue with the 1570 * next GID in the gid list. Set the GID state to 1571 * probing done. 1572 */ 1573 mutex_enter(&ibdm.ibdm_mutex); 1574 gid_info->gl_nodeguid = node_guid; 1575 gid_info->gl_portguid = port_guid; 1576 if (ibdm_check_dest_nodeguid(gid_info) != NULL) { 1577 mutex_exit(&ibdm.ibdm_mutex); 1578 mutex_enter(&gid_info->gl_mutex); 1579 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED; 1580 mutex_exit(&gid_info->gl_mutex); 1581 ibdm_delete_glhca_list(gid_info); 1582 mutex_enter(&ibdm.ibdm_mutex); 1583 --ibdm.ibdm_ngid_probes_in_progress; 1584 ibdm_wakeup_probe_gid_cv(); 1585 mutex_exit(&ibdm.ibdm_mutex); 1586 return; 1587 } 1588 ibdm_add_to_gl_gid(gid_info, gid_info); 1589 mutex_exit(&ibdm.ibdm_mutex); 1590 1591 /* 1592 * New or reinserted GID : Enable notification to IBnex 1593 */ 1594 mutex_enter(&gid_info->gl_mutex); 1595 gid_info->gl_reprobe_flag = 1; 1596 1597 /* 1598 * A Cisco FC GW needs the special handling to get IOUnitInfo. 
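	 * The gateway is first activated with a SetClassPortInfo request
	 * (IBDM_SET_CLASSPORTINFO state); only after its reply wakes
	 * ibdm_wait_cisco_probe_completion() does the probe fall through
	 * to the normal GET_CLASSPORTINFO stage below.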
1599 */ 1600 if (ibdm_is_cisco_switch(gid_info)) { 1601 gid_info->gl_pending_cmds++; 1602 gid_info->gl_state = IBDM_SET_CLASSPORTINFO; 1603 mutex_exit(&gid_info->gl_mutex); 1604 1605 if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) { 1606 mutex_enter(&gid_info->gl_mutex); 1607 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1608 --gid_info->gl_pending_cmds; 1609 mutex_exit(&gid_info->gl_mutex); 1610 1611 /* free the hca_list on this gid_info */ 1612 ibdm_delete_glhca_list(gid_info); 1613 1614 mutex_enter(&ibdm.ibdm_mutex); 1615 --ibdm.ibdm_ngid_probes_in_progress; 1616 ibdm_wakeup_probe_gid_cv(); 1617 mutex_exit(&ibdm.ibdm_mutex); 1618 1619 return; 1620 } 1621 1622 mutex_enter(&gid_info->gl_mutex); 1623 ibdm_wait_cisco_probe_completion(gid_info); 1624 1625 IBTF_DPRINTF_L4("ibdm", "\tibdm_probe_gid_thread: " 1626 "CISCO Wakeup signal received"); 1627 } 1628 1629 /* move on to the 'GET_CLASSPORTINFO' stage */ 1630 gid_info->gl_pending_cmds++; 1631 gid_info->gl_state = IBDM_GET_CLASSPORTINFO; 1632 mutex_exit(&gid_info->gl_mutex); 1633 1634 IBTF_DPRINTF_L3(ibdm_string, "\tibdm_probe_gid_thread: " 1635 "%d: gid_info %p gl_state %d pending_cmds %d", 1636 __LINE__, gid_info, gid_info->gl_state, 1637 gid_info->gl_pending_cmds); 1638 1639 /* 1640 * Send ClassPortInfo request to the GID asynchronously. 1641 */ 1642 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) { 1643 1644 mutex_enter(&gid_info->gl_mutex); 1645 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1646 --gid_info->gl_pending_cmds; 1647 mutex_exit(&gid_info->gl_mutex); 1648 1649 /* free the hca_list on this gid_info */ 1650 ibdm_delete_glhca_list(gid_info); 1651 1652 mutex_enter(&ibdm.ibdm_mutex); 1653 --ibdm.ibdm_ngid_probes_in_progress; 1654 ibdm_wakeup_probe_gid_cv(); 1655 mutex_exit(&ibdm.ibdm_mutex); 1656 1657 return; 1658 } 1659 } 1660 1661 1662 /* 1663 * ibdm_check_dest_nodeguid 1664 * Searches for the NodeGuid in the GID list 1665 * Returns matching gid_info if found and otherwise NULL 1666 * 1667 * This function is called to handle new GIDs discovered 1668 * during device sweep / probe or for GID_AVAILABLE event. 1669 * 1670 * Parameter : 1671 * gid_info GID to check 1672 */ 1673 static ibdm_dp_gidinfo_t * 1674 ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *gid_info) 1675 { 1676 ibdm_dp_gidinfo_t *gid_list; 1677 ibdm_gid_t *tmp; 1678 1679 IBTF_DPRINTF_L4("ibdm", "\tcheck_dest_nodeguid"); 1680 1681 gid_list = ibdm.ibdm_dp_gidlist_head; 1682 while (gid_list) { 1683 if ((gid_list != gid_info) && 1684 (gid_info->gl_nodeguid == gid_list->gl_nodeguid)) { 1685 IBTF_DPRINTF_L4("ibdm", 1686 "\tcheck_dest_nodeguid: NodeGuid is present"); 1687 1688 /* Add to gid_list */ 1689 tmp = kmem_zalloc(sizeof (ibdm_gid_t), 1690 KM_SLEEP); 1691 tmp->gid_dgid_hi = gid_info->gl_dgid_hi; 1692 tmp->gid_dgid_lo = gid_info->gl_dgid_lo; 1693 tmp->gid_next = gid_list->gl_gid; 1694 gid_list->gl_gid = tmp; 1695 gid_list->gl_ngids++; 1696 return (gid_list); 1697 } 1698 1699 gid_list = gid_list->gl_next; 1700 } 1701 1702 return (NULL); 1703 } 1704 1705 1706 /* 1707 * ibdm_is_dev_mgt_supported 1708 * Get the PortInfo attribute (SA Query) 1709 * Check "CompatabilityMask" field in the Portinfo. 
1710 * Return IBDM_SUCCESS if DM MAD's supported (if bit 19 set) 1711 * by the port, otherwise IBDM_FAILURE 1712 */ 1713 static int 1714 ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *gid_info) 1715 { 1716 int ret; 1717 size_t length = 0; 1718 sa_portinfo_record_t req, *resp = NULL; 1719 ibmf_saa_access_args_t qargs; 1720 1721 bzero(&req, sizeof (sa_portinfo_record_t)); 1722 req.EndportLID = gid_info->gl_dlid; 1723 1724 qargs.sq_attr_id = SA_PORTINFORECORD_ATTRID; 1725 qargs.sq_access_type = IBMF_SAA_RETRIEVE; 1726 qargs.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID; 1727 qargs.sq_template = &req; 1728 qargs.sq_callback = NULL; 1729 qargs.sq_callback_arg = NULL; 1730 1731 ret = ibmf_sa_access(gid_info->gl_sa_hdl, 1732 &qargs, 0, &length, (void **)&resp); 1733 1734 if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) { 1735 IBTF_DPRINTF_L2("ibdm", "\tis_dev_mgt_supported:" 1736 "failed to get PORTINFO attribute %d", ret); 1737 return (IBDM_FAILURE); 1738 } 1739 1740 if (resp->PortInfo.CapabilityMask & SM_CAP_MASK_IS_DM_SUPPD) { 1741 IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: SUPPD !!"); 1742 ret = IBDM_SUCCESS; 1743 } else { 1744 IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: " 1745 "Not SUPPD !!, cap 0x%x", resp->PortInfo.CapabilityMask); 1746 ret = IBDM_FAILURE; 1747 } 1748 kmem_free(resp, length); 1749 return (ret); 1750 } 1751 1752 1753 /* 1754 * ibdm_get_node_port_guids() 1755 * Get the NodeInfoRecord of the port 1756 * Save NodeGuid and PortGUID values in the GID list structure. 1757 * Return IBDM_SUCCESS/IBDM_FAILURE 1758 */ 1759 static int 1760 ibdm_get_node_port_guids(ibmf_saa_handle_t sa_hdl, ib_lid_t dlid, 1761 ib_guid_t *node_guid, ib_guid_t *port_guid) 1762 { 1763 int ret; 1764 size_t length = 0; 1765 sa_node_record_t req, *resp = NULL; 1766 ibmf_saa_access_args_t qargs; 1767 1768 IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids"); 1769 1770 bzero(&req, sizeof (sa_node_record_t)); 1771 req.LID = dlid; 1772 1773 qargs.sq_attr_id = SA_NODERECORD_ATTRID; 1774 qargs.sq_access_type = IBMF_SAA_RETRIEVE; 1775 qargs.sq_component_mask = SA_NODEINFO_COMPMASK_NODELID; 1776 qargs.sq_template = &req; 1777 qargs.sq_callback = NULL; 1778 qargs.sq_callback_arg = NULL; 1779 1780 ret = ibmf_sa_access(sa_hdl, &qargs, 0, &length, (void **)&resp); 1781 if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) { 1782 IBTF_DPRINTF_L2("ibdm", "\tget_node_port_guids:" 1783 " SA Retrieve Failed: %d", ret); 1784 return (IBDM_FAILURE); 1785 } 1786 IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids: NodeGuid %llx Port" 1787 "GUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.NodeGUID); 1788 1789 *node_guid = resp->NodeInfo.NodeGUID; 1790 *port_guid = resp->NodeInfo.PortGUID; 1791 kmem_free(resp, length); 1792 return (IBDM_SUCCESS); 1793 } 1794 1795 1796 /* 1797 * ibdm_get_reachable_ports() 1798 * Get list of the destination GID (and its path records) by 1799 * querying the SA access. 
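 * Reversible paths are fetched with ibmf_saa_paths_from_gid(); a DGID
 * that is already on the global gid list is only linked to this HCA
 * (ibdm_addto_glhcalist()) and is not counted again.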
1800 * 1801 * Returns Number paths 1802 */ 1803 static int 1804 ibdm_get_reachable_ports(ibdm_port_attr_t *portinfo, ibdm_hca_list_t *hca) 1805 { 1806 uint_t ii, jj, nrecs; 1807 uint_t npaths = 0; 1808 size_t length; 1809 ib_gid_t sgid; 1810 ibdm_pkey_tbl_t *pkey_tbl; 1811 sa_path_record_t *result; 1812 sa_path_record_t *precp; 1813 ibdm_dp_gidinfo_t *gid_info; 1814 1815 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 1816 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: portinfo %p", portinfo); 1817 1818 sgid.gid_prefix = portinfo->pa_sn_prefix; 1819 sgid.gid_guid = portinfo->pa_port_guid; 1820 1821 /* get reversible paths */ 1822 if (portinfo->pa_sa_hdl && ibmf_saa_paths_from_gid(portinfo->pa_sa_hdl, 1823 sgid, IBMF_SAA_PKEY_WC, B_TRUE, 0, &nrecs, &length, &result) 1824 != IBMF_SUCCESS) { 1825 IBTF_DPRINTF_L2("ibdm", 1826 "\tget_reachable_ports: Getting path records failed"); 1827 return (0); 1828 } 1829 1830 for (ii = 0; ii < nrecs; ii++) { 1831 sa_node_record_t *nrec; 1832 size_t length; 1833 1834 precp = &result[ii]; 1835 if ((gid_info = ibdm_check_dgid(precp->DGID.gid_guid, 1836 precp->DGID.gid_prefix)) != NULL) { 1837 IBTF_DPRINTF_L5("ibdm", "\tget_reachable_ports: " 1838 "Already exists nrecs %d, ii %d", nrecs, ii); 1839 ibdm_addto_glhcalist(gid_info, hca); 1840 continue; 1841 } 1842 /* 1843 * This is a new GID. Allocate a GID structure and 1844 * initialize the structure 1845 * gl_state is initialized to IBDM_GID_PROBE_NOT_DONE (0) 1846 * by kmem_zalloc call 1847 */ 1848 gid_info = kmem_zalloc(sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 1849 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 1850 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, NULL); 1851 gid_info->gl_dgid_hi = precp->DGID.gid_prefix; 1852 gid_info->gl_dgid_lo = precp->DGID.gid_guid; 1853 gid_info->gl_sgid_hi = precp->SGID.gid_prefix; 1854 gid_info->gl_sgid_lo = precp->SGID.gid_guid; 1855 gid_info->gl_p_key = precp->P_Key; 1856 gid_info->gl_sa_hdl = portinfo->pa_sa_hdl; 1857 gid_info->gl_ibmf_hdl = portinfo->pa_ibmf_hdl; 1858 gid_info->gl_slid = precp->SLID; 1859 gid_info->gl_dlid = precp->DLID; 1860 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID) 1861 << IBDM_GID_TRANSACTIONID_SHIFT; 1862 gid_info->gl_min_transactionID = gid_info->gl_transactionID; 1863 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1) 1864 << IBDM_GID_TRANSACTIONID_SHIFT; 1865 gid_info->gl_SL = precp->SL; 1866 1867 /* 1868 * get the node record with this guid if the destination 1869 * device is a Cisco one. 1870 */ 1871 if (ibdm_is_cisco(precp->DGID.gid_guid) && 1872 (gid_info->gl_nodeguid == 0 || gid_info->gl_devid == 0) && 1873 ibdm_get_node_record_by_port(portinfo->pa_sa_hdl, 1874 precp->DGID.gid_guid, &nrec, &length) == IBDM_SUCCESS) { 1875 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID; 1876 gid_info->gl_devid = nrec->NodeInfo.DeviceID; 1877 kmem_free(nrec, length); 1878 } 1879 1880 ibdm_addto_glhcalist(gid_info, hca); 1881 1882 ibdm_dump_path_info(precp); 1883 1884 gid_info->gl_qp_hdl = NULL; 1885 ASSERT(portinfo->pa_pkey_tbl != NULL && 1886 portinfo->pa_npkeys != 0); 1887 1888 for (jj = 0; jj < portinfo->pa_npkeys; jj++) { 1889 pkey_tbl = &portinfo->pa_pkey_tbl[jj]; 1890 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 1891 (pkey_tbl->pt_qp_hdl != NULL)) { 1892 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 1893 break; 1894 } 1895 } 1896 1897 /* 1898 * QP handle for GID not initialized. No matching Pkey 1899 * was found!! ibdm should *not* hit this case. Flag an 1900 * error and drop the GID if ibdm does encounter this. 
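 *
 * The loop above amounts to this lookup (sketch only, with a
 * hypothetical helper name; pt_pkey and pt_qp_hdl are the fields it
 * scans, and the handle type is assumed to match pt_qp_hdl):
 *
 *	static ibmf_qp_handle_t
 *	example_find_qp_for_pkey(ibdm_port_attr_t *port, ib_pkey_t pkey)
 *	{
 *		uint_t	k;
 *
 *		for (k = 0; k < port->pa_npkeys; k++) {
 *			ibdm_pkey_tbl_t	*tbl = &port->pa_pkey_tbl[k];
 *
 *			if (tbl->pt_pkey == pkey && tbl->pt_qp_hdl != NULL)
 *				return (tbl->pt_qp_hdl);
 *		}
 *		return (NULL);
 *	}
 *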
1901 */ 1902 if (gid_info->gl_qp_hdl == NULL) { 1903 IBTF_DPRINTF_L2(ibdm_string, 1904 "\tget_reachable_ports: No matching Pkey"); 1905 ibdm_delete_gidinfo(gid_info); 1906 continue; 1907 } 1908 if (ibdm.ibdm_dp_gidlist_head == NULL) { 1909 ibdm.ibdm_dp_gidlist_head = gid_info; 1910 ibdm.ibdm_dp_gidlist_tail = gid_info; 1911 } else { 1912 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 1913 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 1914 ibdm.ibdm_dp_gidlist_tail = gid_info; 1915 } 1916 npaths++; 1917 } 1918 kmem_free(result, length); 1919 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: npaths = %d", npaths); 1920 return (npaths); 1921 } 1922 1923 1924 /* 1925 * ibdm_check_dgid() 1926 * Look in the global list to check whether we know this DGID already 1927 * Return IBDM_GID_PRESENT/IBDM_GID_NOT_PRESENT 1928 */ 1929 static ibdm_dp_gidinfo_t * 1930 ibdm_check_dgid(ib_guid_t guid, ib_sn_prefix_t prefix) 1931 { 1932 ibdm_dp_gidinfo_t *gid_list; 1933 1934 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 1935 gid_list = gid_list->gl_next) { 1936 if ((guid == gid_list->gl_dgid_lo) && 1937 (prefix == gid_list->gl_dgid_hi)) { 1938 break; 1939 } 1940 } 1941 return (gid_list); 1942 } 1943 1944 1945 /* 1946 * ibdm_find_gid() 1947 * Look in the global list to find a GID entry with matching 1948 * port & node GUID. 1949 * Return pointer to gidinfo if found, else return NULL 1950 */ 1951 static ibdm_dp_gidinfo_t * 1952 ibdm_find_gid(ib_guid_t nodeguid, ib_guid_t portguid) 1953 { 1954 ibdm_dp_gidinfo_t *gid_list; 1955 1956 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid(%llx, %llx)\n", 1957 nodeguid, portguid); 1958 1959 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 1960 gid_list = gid_list->gl_next) { 1961 if ((portguid == gid_list->gl_portguid) && 1962 (nodeguid == gid_list->gl_nodeguid)) { 1963 break; 1964 } 1965 } 1966 1967 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid : returned %p\n", 1968 gid_list); 1969 return (gid_list); 1970 } 1971 1972 1973 /* 1974 * ibdm_set_classportinfo() 1975 * ibdm_set_classportinfo() is a function to activate a Cisco FC GW 1976 * by sending the setClassPortInfo request with the trapLID, trapGID 1977 * and etc. to the gateway since the gateway doesn't provide the IO 1978 * Unit Information othewise. This behavior is the Cisco specific one, 1979 * and this function is called to a Cisco FC GW only. 1980 * Returns IBDM_SUCCESS/IBDM_FAILURE 1981 */ 1982 static int 1983 ibdm_set_classportinfo(ibdm_dp_gidinfo_t *gid_info) 1984 { 1985 ibmf_msg_t *msg; 1986 ib_mad_hdr_t *hdr; 1987 ibdm_timeout_cb_args_t *cb_args; 1988 void *data; 1989 ib_mad_classportinfo_t *cpi; 1990 1991 IBTF_DPRINTF_L4("ibdm", 1992 "\tset_classportinfo: gid info 0x%p", gid_info); 1993 1994 /* 1995 * Send command to set classportinfo attribute. Allocate a IBMF 1996 * packet and initialize the packet. 
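 *
 * All of the DM requests built in this file fill the common MAD header
 * the same way; only the method, attribute ID and attribute modifier
 * differ.  A condensed sketch of that pattern (for illustration, not a
 * substitute for the code below):
 *
 *	hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
 *	hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
 *	hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
 *	hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
 *	hdr->R_Method = method;
 *	hdr->Status = 0;
 *	hdr->TransactionID = h2b64(gid_info->gl_transactionID);
 *	hdr->AttributeID = h2b16(attr_id);
 *	hdr->AttributeModifier = h2b32(attr_mod);
 *
 * where "method", "attr_id" and "attr_mod" stand for the per-request
 * values (here IB_DM_DEVMGT_METHOD_SET and IB_DM_ATTR_CLASSPORTINFO).
 *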
1997 */ 1998 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 1999 &msg) != IBMF_SUCCESS) { 2000 IBTF_DPRINTF_L4("ibdm", "\tset_classportinfo: pkt alloc fail"); 2001 return (IBDM_FAILURE); 2002 } 2003 2004 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2005 ibdm_alloc_send_buffers(msg); 2006 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2007 2008 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2009 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2010 msg->im_local_addr.ia_remote_qno = 1; 2011 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2012 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2013 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2014 2015 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2016 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2017 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2018 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2019 hdr->R_Method = IB_DM_DEVMGT_METHOD_SET; 2020 hdr->Status = 0; 2021 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2022 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 2023 hdr->AttributeModifier = 0; 2024 2025 data = msg->im_msgbufs_send.im_bufs_cl_data; 2026 cpi = (ib_mad_classportinfo_t *)data; 2027 2028 /* 2029 * Set the classportinfo values to activate this Cisco FC GW. 2030 */ 2031 cpi->TrapGID_hi = h2b64(gid_info->gl_sgid_hi); 2032 cpi->TrapGID_lo = h2b64(gid_info->gl_sgid_lo); 2033 cpi->TrapLID = h2b16(gid_info->gl_slid); 2034 cpi->TrapSL = gid_info->gl_SL; 2035 cpi->TrapP_Key = h2b16(gid_info->gl_p_key); 2036 cpi->TrapQP = h2b32((((ibmf_alt_qp_t *)gid_info->gl_qp_hdl)->isq_qpn)); 2037 cpi->TrapQ_Key = h2b32((((ibmf_alt_qp_t *) 2038 gid_info->gl_qp_hdl)->isq_qkey)); 2039 2040 cb_args = &gid_info->gl_cpi_cb_args; 2041 cb_args->cb_gid_info = gid_info; 2042 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2043 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO; 2044 2045 mutex_enter(&gid_info->gl_mutex); 2046 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2047 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2048 mutex_exit(&gid_info->gl_mutex); 2049 2050 IBTF_DPRINTF_L5("ibdm", "\tset_classportinfo: " 2051 "timeout id %x", gid_info->gl_timeout_id); 2052 2053 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 2054 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2055 IBTF_DPRINTF_L2("ibdm", 2056 "\tset_classportinfo: ibmf send failed"); 2057 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 2058 } 2059 2060 return (IBDM_SUCCESS); 2061 } 2062 2063 2064 /* 2065 * ibdm_send_classportinfo() 2066 * Send classportinfo request. When the request is completed 2067 * IBMF calls ibdm_classportinfo_cb routine to inform about 2068 * the completion. 2069 * Returns IBDM_SUCCESS/IBDM_FAILURE 2070 */ 2071 static int 2072 ibdm_send_classportinfo(ibdm_dp_gidinfo_t *gid_info) 2073 { 2074 ibmf_msg_t *msg; 2075 ib_mad_hdr_t *hdr; 2076 ibdm_timeout_cb_args_t *cb_args; 2077 2078 IBTF_DPRINTF_L4("ibdm", 2079 "\tsend_classportinfo: gid info 0x%p", gid_info); 2080 2081 /* 2082 * Send command to get classportinfo attribute. Allocate a IBMF 2083 * packet and initialize the packet. 
2084 */ 2085 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2086 &msg) != IBMF_SUCCESS) { 2087 IBTF_DPRINTF_L4("ibdm", "\tsend_classportinfo: pkt alloc fail"); 2088 return (IBDM_FAILURE); 2089 } 2090 2091 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2092 ibdm_alloc_send_buffers(msg); 2093 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2094 2095 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2096 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2097 msg->im_local_addr.ia_remote_qno = 1; 2098 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2099 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2100 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2101 2102 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2103 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2104 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2105 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2106 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2107 hdr->Status = 0; 2108 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2109 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 2110 hdr->AttributeModifier = 0; 2111 2112 cb_args = &gid_info->gl_cpi_cb_args; 2113 cb_args->cb_gid_info = gid_info; 2114 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2115 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO; 2116 2117 mutex_enter(&gid_info->gl_mutex); 2118 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2119 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2120 mutex_exit(&gid_info->gl_mutex); 2121 2122 IBTF_DPRINTF_L5("ibdm", "\tsend_classportinfo: " 2123 "timeout id %x", gid_info->gl_timeout_id); 2124 2125 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 2126 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2127 IBTF_DPRINTF_L2("ibdm", 2128 "\tsend_classportinfo: ibmf send failed"); 2129 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 2130 } 2131 2132 return (IBDM_SUCCESS); 2133 } 2134 2135 2136 /* 2137 * ibdm_handle_setclassportinfo() 2138 * Invoked by the IBMF when setClassPortInfo request is completed. 2139 */ 2140 static void 2141 ibdm_handle_setclassportinfo(ibmf_handle_t ibmf_hdl, 2142 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2143 { 2144 void *data; 2145 timeout_id_t timeout_id; 2146 ib_mad_classportinfo_t *cpi; 2147 2148 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo:ibmf hdl " 2149 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2150 2151 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) { 2152 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo: " 2153 "Not a ClassPortInfo resp"); 2154 *flag |= IBDM_IBMF_PKT_UNEXP_RESP; 2155 return; 2156 } 2157 2158 /* 2159 * Verify whether timeout handler is created/active. 
2160 * If created/ active, cancel the timeout handler 2161 */ 2162 mutex_enter(&gid_info->gl_mutex); 2163 if (gid_info->gl_state != IBDM_SET_CLASSPORTINFO) { 2164 IBTF_DPRINTF_L2("ibdm", "\thandle_setclassportinfo:DUP resp"); 2165 *flag |= IBDM_IBMF_PKT_DUP_RESP; 2166 mutex_exit(&gid_info->gl_mutex); 2167 return; 2168 } 2169 ibdm_bump_transactionID(gid_info); 2170 2171 gid_info->gl_iou_cb_args.cb_req_type = 0; 2172 if (gid_info->gl_timeout_id) { 2173 timeout_id = gid_info->gl_timeout_id; 2174 mutex_exit(&gid_info->gl_mutex); 2175 IBTF_DPRINTF_L5("ibdm", "handle_setlassportinfo: " 2176 "gl_timeout_id = 0x%x", timeout_id); 2177 if (untimeout(timeout_id) == -1) { 2178 IBTF_DPRINTF_L2("ibdm", "handle_setclassportinfo: " 2179 "untimeout gl_timeout_id failed"); 2180 } 2181 mutex_enter(&gid_info->gl_mutex); 2182 gid_info->gl_timeout_id = 0; 2183 } 2184 mutex_exit(&gid_info->gl_mutex); 2185 2186 data = msg->im_msgbufs_recv.im_bufs_cl_data; 2187 cpi = (ib_mad_classportinfo_t *)data; 2188 2189 ibdm_dump_classportinfo(cpi); 2190 } 2191 2192 2193 /* 2194 * ibdm_handle_classportinfo() 2195 * Invoked by the IBMF when the classportinfo request is completed. 2196 */ 2197 static void 2198 ibdm_handle_classportinfo(ibmf_handle_t ibmf_hdl, 2199 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2200 { 2201 void *data; 2202 timeout_id_t timeout_id; 2203 ib_mad_hdr_t *hdr; 2204 ib_mad_classportinfo_t *cpi; 2205 2206 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo:ibmf hdl " 2207 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2208 2209 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) { 2210 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo: " 2211 "Not a ClassPortInfo resp"); 2212 *flag |= IBDM_IBMF_PKT_UNEXP_RESP; 2213 return; 2214 } 2215 2216 /* 2217 * Verify whether timeout handler is created/active. 2218 * If created/ active, cancel the timeout handler 2219 */ 2220 mutex_enter(&gid_info->gl_mutex); 2221 ibdm_bump_transactionID(gid_info); 2222 if (gid_info->gl_state != IBDM_GET_CLASSPORTINFO) { 2223 IBTF_DPRINTF_L2("ibdm", "\thandle_classportinfo:DUP resp"); 2224 *flag |= IBDM_IBMF_PKT_DUP_RESP; 2225 mutex_exit(&gid_info->gl_mutex); 2226 return; 2227 } 2228 gid_info->gl_iou_cb_args.cb_req_type = 0; 2229 if (gid_info->gl_timeout_id) { 2230 timeout_id = gid_info->gl_timeout_id; 2231 mutex_exit(&gid_info->gl_mutex); 2232 IBTF_DPRINTF_L5("ibdm", "handle_ioclassportinfo: " 2233 "gl_timeout_id = 0x%x", timeout_id); 2234 if (untimeout(timeout_id) == -1) { 2235 IBTF_DPRINTF_L2("ibdm", "handle_classportinfo: " 2236 "untimeout gl_timeout_id failed"); 2237 } 2238 mutex_enter(&gid_info->gl_mutex); 2239 gid_info->gl_timeout_id = 0; 2240 } 2241 gid_info->gl_state = IBDM_GET_IOUNITINFO; 2242 gid_info->gl_pending_cmds++; 2243 mutex_exit(&gid_info->gl_mutex); 2244 2245 data = msg->im_msgbufs_recv.im_bufs_cl_data; 2246 cpi = (ib_mad_classportinfo_t *)data; 2247 2248 /* 2249 * Cache the "RespTimeValue" and redirection information in the 2250 * global gid list data structure. This cached information will 2251 * be used to send any further requests to the GID. 2252 */ 2253 gid_info->gl_resp_timeout = 2254 (b2h32(cpi->RespTimeValue) & 0x1F); 2255 2256 gid_info->gl_redirected = ((IBDM_IN_IBMFMSG_STATUS(msg) & 2257 MAD_STATUS_REDIRECT_REQUIRED) ? 
B_TRUE: B_FALSE); 2258 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 2259 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 2260 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 2261 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 2262 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 2263 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 2264 gid_info->gl_redirectSL = cpi->RedirectSL; 2265 2266 ibdm_dump_classportinfo(cpi); 2267 2268 /* 2269 * Send IOUnitInfo request 2270 * Reuse previously allocated IBMF packet for sending ClassPortInfo 2271 * Check whether DM agent on the remote node requested redirection 2272 * If so, send the request to the redirect DGID/DLID/PKEY/QP. 2273 */ 2274 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2275 ibdm_alloc_send_buffers(msg); 2276 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2277 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2278 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2279 2280 if (gid_info->gl_redirected == B_TRUE) { 2281 if (gid_info->gl_redirect_dlid != 0) { 2282 msg->im_local_addr.ia_remote_lid = 2283 gid_info->gl_redirect_dlid; 2284 } 2285 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 2286 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 2287 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 2288 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 2289 } else { 2290 msg->im_local_addr.ia_remote_qno = 1; 2291 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2292 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2293 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2294 } 2295 2296 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2297 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2298 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2299 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2300 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2301 hdr->Status = 0; 2302 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2303 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 2304 hdr->AttributeModifier = 0; 2305 2306 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 2307 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 2308 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 2309 2310 mutex_enter(&gid_info->gl_mutex); 2311 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2312 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2313 mutex_exit(&gid_info->gl_mutex); 2314 2315 IBTF_DPRINTF_L5("ibdm", "handle_classportinfo:" 2316 "timeout %x", gid_info->gl_timeout_id); 2317 2318 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, NULL, 2319 ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != IBMF_SUCCESS) { 2320 IBTF_DPRINTF_L2("ibdm", 2321 "\thandle_classportinfo: msg transport failed"); 2322 ibdm_ibmf_send_cb(ibmf_hdl, msg, &gid_info->gl_iou_cb_args); 2323 } 2324 (*flag) |= IBDM_IBMF_PKT_REUSED; 2325 } 2326 2327 2328 /* 2329 * ibdm_send_iounitinfo: 2330 * Sends a DM request to get IOU unitinfo. 2331 */ 2332 static int 2333 ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *gid_info) 2334 { 2335 ibmf_msg_t *msg; 2336 ib_mad_hdr_t *hdr; 2337 2338 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: gid info 0x%p", gid_info); 2339 2340 /* 2341 * Send command to get iounitinfo attribute. Allocate a IBMF 2342 * packet and initialize the packet. 
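 *
 * This request is addressed directly to the GSI (remote QP 1 with
 * IB_GSI_QKEY).  Requests issued after the remote DM agent has asked
 * for redirection are re-addressed instead; that recurring step, used
 * by ibdm_handle_classportinfo() above and the IOC/ServiceEntries and
 * DiagCode request paths below, looks roughly like this (sketch only):
 *
 *	if (gid_info->gl_redirected == B_TRUE) {
 *		if (gid_info->gl_redirect_dlid != 0)
 *			msg->im_local_addr.ia_remote_lid =
 *			    gid_info->gl_redirect_dlid;
 *		msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
 *		msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
 *		msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
 *		msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;
 *	} else {
 *		msg->im_local_addr.ia_remote_qno = 1;
 *		msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
 *		msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
 *		msg->im_local_addr.ia_service_level = gid_info->gl_SL;
 *	}
 *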
2343 */ 2344 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, &msg) != 2345 IBMF_SUCCESS) { 2346 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: pkt alloc fail"); 2347 return (IBDM_FAILURE); 2348 } 2349 2350 mutex_enter(&gid_info->gl_mutex); 2351 ibdm_bump_transactionID(gid_info); 2352 mutex_exit(&gid_info->gl_mutex); 2353 2354 2355 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2356 ibdm_alloc_send_buffers(msg); 2357 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2358 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2359 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2360 msg->im_local_addr.ia_remote_qno = 1; 2361 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2362 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2363 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2364 2365 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2366 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2367 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2368 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2369 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2370 hdr->Status = 0; 2371 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2372 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 2373 hdr->AttributeModifier = 0; 2374 2375 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 2376 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 2377 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 2378 2379 mutex_enter(&gid_info->gl_mutex); 2380 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2381 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2382 mutex_exit(&gid_info->gl_mutex); 2383 2384 IBTF_DPRINTF_L5("ibdm", "send_iouunitinfo:" 2385 "timeout %x", gid_info->gl_timeout_id); 2386 2387 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 2388 NULL, ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != 2389 IBMF_SUCCESS) { 2390 IBTF_DPRINTF_L2("ibdm", "\tsend_iounitinfo: ibmf send failed"); 2391 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, 2392 msg, &gid_info->gl_iou_cb_args); 2393 } 2394 return (IBDM_SUCCESS); 2395 } 2396 2397 /* 2398 * ibdm_handle_iounitinfo() 2399 * Invoked by the IBMF when IO Unitinfo request is completed. 
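 *
 * The returned IOU info packs one IOC slot per nibble of iou_ctrl_list[]
 * (see the in-line comment ahead of the probe loop below).  The decode
 * performed there is equivalent to this sketch, where "slot" counts
 * from 0 and the helper name is hypothetical:
 *
 *	static boolean_t
 *	example_ioc_slot_present(const uint8_t *ctrl_list, uint_t slot)
 *	{
 *		uint8_t	nibble = ctrl_list[slot / 2];
 *
 *		if ((slot % 2) == 0)
 *			nibble >>= 4;
 *		return (((nibble & 0xf) == 0x1) ? B_TRUE : B_FALSE);
 *	}
 *
 * The even-numbered index lives in the high nibble of each byte, the
 * odd-numbered index in the low nibble; 0x1 means an IOC is installed.
 *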
2400 */ 2401 static void 2402 ibdm_handle_iounitinfo(ibmf_handle_t ibmf_hdl, 2403 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2404 { 2405 int ii, first = B_TRUE; 2406 int num_iocs; 2407 size_t size; 2408 uchar_t slot_info; 2409 timeout_id_t timeout_id; 2410 ib_mad_hdr_t *hdr; 2411 ibdm_ioc_info_t *ioc_info; 2412 ib_dm_io_unitinfo_t *iou_info; 2413 ib_dm_io_unitinfo_t *giou_info; 2414 ibdm_timeout_cb_args_t *cb_args; 2415 2416 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo:" 2417 " ibmf hdl %p pkt %p gid info %p", ibmf_hdl, msg, gid_info); 2418 2419 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_IO_UNITINFO) { 2420 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: " 2421 "Unexpected response"); 2422 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2423 return; 2424 } 2425 2426 mutex_enter(&gid_info->gl_mutex); 2427 if (gid_info->gl_state != IBDM_GET_IOUNITINFO) { 2428 IBTF_DPRINTF_L4("ibdm", 2429 "\thandle_iounitinfo: DUP resp"); 2430 mutex_exit(&gid_info->gl_mutex); 2431 (*flag) = IBDM_IBMF_PKT_DUP_RESP; 2432 return; 2433 } 2434 gid_info->gl_iou_cb_args.cb_req_type = 0; 2435 if (gid_info->gl_timeout_id) { 2436 timeout_id = gid_info->gl_timeout_id; 2437 mutex_exit(&gid_info->gl_mutex); 2438 IBTF_DPRINTF_L5("ibdm", "handle_iounitinfo: " 2439 "gl_timeout_id = 0x%x", timeout_id); 2440 if (untimeout(timeout_id) == -1) { 2441 IBTF_DPRINTF_L2("ibdm", "handle_iounitinfo: " 2442 "untimeout gl_timeout_id failed"); 2443 } 2444 mutex_enter(&gid_info->gl_mutex); 2445 gid_info->gl_timeout_id = 0; 2446 } 2447 gid_info->gl_state = IBDM_GET_IOC_DETAILS; 2448 2449 iou_info = IBDM_IN_IBMFMSG2IOU(msg); 2450 ibdm_dump_iounitinfo(iou_info); 2451 num_iocs = iou_info->iou_num_ctrl_slots; 2452 /* 2453 * check if number of IOCs reported is zero? if yes, return. 2454 * when num_iocs are reported zero internal IOC database needs 2455 * to be updated. To ensure that save the number of IOCs in 2456 * the new field "gl_num_iocs". Use a new field instead of 2457 * "giou_info->iou_num_ctrl_slots" as that would prevent 2458 * an unnecessary kmem_alloc/kmem_free when num_iocs is 0. 2459 */ 2460 if (num_iocs == 0 && gid_info->gl_num_iocs == 0) { 2461 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: no IOC's"); 2462 mutex_exit(&gid_info->gl_mutex); 2463 return; 2464 } 2465 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: num_iocs = %d", num_iocs); 2466 2467 /* 2468 * if there is an existing gl_iou (IOU has been probed before) 2469 * check if the "iou_changeid" is same as saved entry in 2470 * "giou_info->iou_changeid". 2471 * (note: this logic can prevent IOC enumeration if a given 2472 * vendor doesn't support setting iou_changeid field for its IOU) 2473 * 2474 * if there is an existing gl_iou and iou_changeid has changed : 2475 * free up existing gl_iou info and its related structures. 2476 * reallocate gl_iou info all over again. 2477 * if we donot free this up; then this leads to memory leaks 2478 */ 2479 if (gid_info->gl_iou) { 2480 giou_info = &gid_info->gl_iou->iou_info; 2481 if (b2h16(iou_info->iou_changeid) == 2482 giou_info->iou_changeid) { 2483 IBTF_DPRINTF_L3("ibdm", 2484 "\thandle_iounitinfo: no IOCs changed"); 2485 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 2486 mutex_exit(&gid_info->gl_mutex); 2487 return; 2488 } 2489 2490 /* 2491 * Store the iou info as prev_iou to be used after 2492 * sweep is done. 
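 *
 * The replacement IOU is carved out of a single allocation: the
 * ibdm_iou_info_t header is immediately followed by the array of
 * num_iocs ibdm_ioc_info_t entries, and iou_ioc_info is pointed just
 * past the header.  In sketch form, this is what the kmem_zalloc()
 * and pointer arithmetic below do:
 *
 *	size = sizeof (ibdm_iou_info_t) +
 *	    num_iocs * sizeof (ibdm_ioc_info_t);
 *	iou = kmem_zalloc(size, KM_SLEEP);
 *	iou->iou_ioc_info =
 *	    (ibdm_ioc_info_t *)((char *)iou + sizeof (ibdm_iou_info_t));
 *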
2493 */ 2494 ASSERT(gid_info->gl_prev_iou == NULL); 2495 IBTF_DPRINTF_L4(ibdm_string, 2496 "\thandle_iounitinfo: setting gl_prev_iou %p", 2497 gid_info->gl_prev_iou); 2498 gid_info->gl_prev_iou = gid_info->gl_iou; 2499 ibdm.ibdm_prev_iou = 1; 2500 gid_info->gl_iou = NULL; 2501 } 2502 2503 size = sizeof (ibdm_iou_info_t) + num_iocs * sizeof (ibdm_ioc_info_t); 2504 gid_info->gl_iou = (ibdm_iou_info_t *)kmem_zalloc(size, KM_SLEEP); 2505 giou_info = &gid_info->gl_iou->iou_info; 2506 gid_info->gl_iou->iou_ioc_info = (ibdm_ioc_info_t *) 2507 ((char *)gid_info->gl_iou + sizeof (ibdm_iou_info_t)); 2508 2509 giou_info->iou_num_ctrl_slots = gid_info->gl_num_iocs = num_iocs; 2510 giou_info->iou_flag = iou_info->iou_flag; 2511 bcopy(iou_info->iou_ctrl_list, giou_info->iou_ctrl_list, 128); 2512 giou_info->iou_changeid = b2h16(iou_info->iou_changeid); 2513 gid_info->gl_pending_cmds++; /* for diag code */ 2514 mutex_exit(&gid_info->gl_mutex); 2515 2516 if (ibdm_get_diagcode(gid_info, 0) != IBDM_SUCCESS) { 2517 mutex_enter(&gid_info->gl_mutex); 2518 gid_info->gl_pending_cmds--; 2519 mutex_exit(&gid_info->gl_mutex); 2520 } 2521 /* 2522 * Parallelize getting IOC controller profiles from here. 2523 * Allocate IBMF packets and send commands to get IOC profile for 2524 * each IOC present on the IOU. 2525 */ 2526 for (ii = 0; ii < num_iocs; ii++) { 2527 /* 2528 * Check whether IOC is present in the slot 2529 * Series of nibbles (in the field iou_ctrl_list) represents 2530 * a slot in the IOU. 2531 * Byte format: 76543210 2532 * Bits 0-3 of first byte represent Slot 2 2533 * bits 4-7 of first byte represent slot 1, 2534 * bits 0-3 of second byte represent slot 4 and so on 2535 * Each 4-bit nibble has the following meaning 2536 * 0x0 : IOC not installed 2537 * 0x1 : IOC is present 2538 * 0xf : Slot does not exist 2539 * and all other values are reserved. 2540 */ 2541 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii); 2542 slot_info = giou_info->iou_ctrl_list[(ii/2)]; 2543 if ((ii % 2) == 0) 2544 slot_info = (slot_info >> 4); 2545 2546 if ((slot_info & 0xf) != 1) { 2547 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2548 "No IOC is present in the slot = %d", ii); 2549 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 2550 continue; 2551 } 2552 2553 mutex_enter(&gid_info->gl_mutex); 2554 ibdm_bump_transactionID(gid_info); 2555 mutex_exit(&gid_info->gl_mutex); 2556 2557 /* 2558 * Re use the already allocated packet (for IOUnitinfo) to 2559 * send the first IOC controller attribute. 
Allocate new 2560 * IBMF packets for the rest of the IOC's 2561 */ 2562 if (first != B_TRUE) { 2563 msg = NULL; 2564 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2565 &msg) != IBMF_SUCCESS) { 2566 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2567 "IBMF packet allocation failed"); 2568 continue; 2569 } 2570 2571 } 2572 2573 /* allocate send buffers for all messages */ 2574 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2575 ibdm_alloc_send_buffers(msg); 2576 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2577 2578 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2579 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2580 if (gid_info->gl_redirected == B_TRUE) { 2581 if (gid_info->gl_redirect_dlid != 0) { 2582 msg->im_local_addr.ia_remote_lid = 2583 gid_info->gl_redirect_dlid; 2584 } 2585 msg->im_local_addr.ia_remote_qno = 2586 gid_info->gl_redirect_QP; 2587 msg->im_local_addr.ia_p_key = 2588 gid_info->gl_redirect_pkey; 2589 msg->im_local_addr.ia_q_key = 2590 gid_info->gl_redirect_qkey; 2591 msg->im_local_addr.ia_service_level = 2592 gid_info->gl_redirectSL; 2593 } else { 2594 msg->im_local_addr.ia_remote_qno = 1; 2595 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2596 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2597 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2598 } 2599 2600 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2601 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2602 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2603 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2604 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2605 hdr->Status = 0; 2606 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2607 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 2608 hdr->AttributeModifier = h2b32(ii + 1); 2609 2610 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_INVALID; 2611 cb_args = &ioc_info->ioc_cb_args; 2612 cb_args->cb_gid_info = gid_info; 2613 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2614 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 2615 cb_args->cb_ioc_num = ii; 2616 2617 mutex_enter(&gid_info->gl_mutex); 2618 gid_info->gl_pending_cmds++; /* for diag code */ 2619 2620 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2621 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2622 mutex_exit(&gid_info->gl_mutex); 2623 2624 IBTF_DPRINTF_L5("ibdm", "\thandle_iounitinfo:" 2625 "timeout 0x%x, ioc_num %d", ioc_info->ioc_timeout_id, ii); 2626 2627 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 2628 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2629 IBTF_DPRINTF_L2("ibdm", 2630 "\thandle_iounitinfo: msg transport failed"); 2631 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 2632 } 2633 (*flag) |= IBDM_IBMF_PKT_REUSED; 2634 first = B_FALSE; 2635 gid_info->gl_iou->iou_niocs_probe_in_progress++; 2636 } 2637 } 2638 2639 2640 /* 2641 * ibdm_handle_ioc_profile() 2642 * Invoked by the IBMF when the IOCControllerProfile request 2643 * gets completed 2644 */ 2645 static void 2646 ibdm_handle_ioc_profile(ibmf_handle_t ibmf_hdl, 2647 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2648 { 2649 int first = B_TRUE, reprobe = 0; 2650 uint_t ii, ioc_no, srv_start; 2651 uint_t nserv_entries; 2652 timeout_id_t timeout_id; 2653 ib_mad_hdr_t *hdr; 2654 ibdm_ioc_info_t *ioc_info; 2655 ibdm_timeout_cb_args_t *cb_args; 2656 ib_dm_ioc_ctrl_profile_t *ioc, *gioc; 2657 2658 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:" 2659 " ibmf hdl %p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2660 2661 ioc = IBDM_IN_IBMFMSG2IOC(msg); 2662 /* 2663 * Check whether we know this IOC already 2664 * This 
will return NULL if a reprobe is in progress;
 * IBDM_IOC_STATE_REPROBE_PROGRESS will be set in that case.
 * Do not hold mutexes here.
 */
	if (ibdm_is_ioc_present(ioc->ioc_guid, gid_info, flag) != NULL) {
		IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile: "
		    "IOC guid %llx is present", ioc->ioc_guid);
		return;
	}
	ioc_no = IBDM_IN_IBMFMSG_ATTRMOD(msg);
	IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile: ioc_no = %d", ioc_no-1);

	/* Make sure that the IOC index is within the valid range */
	if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) {
		IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: "
		    "IOC index Out of range, index %d", ioc_no);
		(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
		return;
	}
	ioc_info = &gid_info->gl_iou->iou_ioc_info[ioc_no - 1];
	ioc_info->ioc_iou_info = gid_info->gl_iou;

	mutex_enter(&gid_info->gl_mutex);
	if (ioc_info->ioc_state == IBDM_IOC_STATE_REPROBE_PROGRESS) {
		reprobe = 1;
		ioc_info->ioc_prev_serv = ioc_info->ioc_serv;
		ioc_info->ioc_serv = NULL;
		ioc_info->ioc_prev_serv_cnt =
		    ioc_info->ioc_profile.ioc_service_entries;
	} else if (ioc_info->ioc_state != IBDM_IOC_STATE_PROBE_INVALID) {
		IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: DUP response "
		    "ioc %d, ioc_state %x", ioc_no - 1, ioc_info->ioc_state);
		mutex_exit(&gid_info->gl_mutex);
		(*flag) |= IBDM_IBMF_PKT_DUP_RESP;
		return;
	}
	ioc_info->ioc_cb_args.cb_req_type = 0;
	if (ioc_info->ioc_timeout_id) {
		timeout_id = ioc_info->ioc_timeout_id;
		ioc_info->ioc_timeout_id = 0;
		mutex_exit(&gid_info->gl_mutex);
		IBTF_DPRINTF_L5("ibdm", "handle_ioc_profile: "
		    "ioc_timeout_id = 0x%x", timeout_id);
		if (untimeout(timeout_id) == -1) {
			IBTF_DPRINTF_L2("ibdm", "handle_ioc_profile: "
			    "untimeout ioc_timeout_id failed");
		}
		mutex_enter(&gid_info->gl_mutex);
	}

	ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_SUCCESS;
	if (reprobe == 0) {
		ioc_info->ioc_iou_guid = gid_info->gl_nodeguid;
		ioc_info->ioc_nodeguid = gid_info->gl_nodeguid;
	}

	/*
	 * Save all the IOC information in the global structures.
	 * Note that the wire format is big endian and SPARC processors are
	 * also big endian, so no conversion of the data fields is needed
	 * there.  The conversion routines used below are therefore no-ops
	 * on SPARC machines, whereas they do real byte swapping on little
	 * endian machines such as Intel processors.
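	 *
	 * For illustration, a b2h16()-style helper is conceptually just a
	 * conditional byte swap (sketch only; the driver uses the common
	 * byte-order macros, not this code):
	 *
	 *	static uint16_t
	 *	example_b2h16(uint16_t wire)
	 *	{
	 *	#ifdef _LITTLE_ENDIAN
	 *		return ((uint16_t)((wire << 8) | (wire >> 8)));
	 *	#else
	 *		return (wire);
	 *	#endif
	 *	}
	 *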
2727 */ 2728 gioc = (ib_dm_ioc_ctrl_profile_t *)&ioc_info->ioc_profile; 2729 2730 /* 2731 * Restrict updates to onlyport GIDs and service entries during reprobe 2732 */ 2733 if (reprobe == 0) { 2734 gioc->ioc_guid = b2h64(ioc->ioc_guid); 2735 gioc->ioc_vendorid = 2736 ((b2h32(ioc->ioc_vendorid) & IB_DM_VENDORID_MASK) 2737 >> IB_DM_VENDORID_SHIFT); 2738 gioc->ioc_deviceid = b2h32(ioc->ioc_deviceid); 2739 gioc->ioc_device_ver = b2h16(ioc->ioc_device_ver); 2740 gioc->ioc_subsys_vendorid = 2741 ((b2h32(ioc->ioc_subsys_vendorid) & IB_DM_VENDORID_MASK) 2742 >> IB_DM_VENDORID_SHIFT); 2743 gioc->ioc_subsys_id = b2h32(ioc->ioc_subsys_id); 2744 gioc->ioc_io_class = b2h16(ioc->ioc_io_class); 2745 gioc->ioc_io_subclass = b2h16(ioc->ioc_io_subclass); 2746 gioc->ioc_protocol = b2h16(ioc->ioc_protocol); 2747 gioc->ioc_protocol_ver = b2h16(ioc->ioc_protocol_ver); 2748 gioc->ioc_send_msg_qdepth = 2749 b2h16(ioc->ioc_send_msg_qdepth); 2750 gioc->ioc_rdma_read_qdepth = 2751 b2h16(ioc->ioc_rdma_read_qdepth); 2752 gioc->ioc_send_msg_sz = b2h32(ioc->ioc_send_msg_sz); 2753 gioc->ioc_rdma_xfer_sz = b2h32(ioc->ioc_rdma_xfer_sz); 2754 gioc->ioc_ctrl_opcap_mask = ioc->ioc_ctrl_opcap_mask; 2755 bcopy(ioc->ioc_id_string, gioc->ioc_id_string, 2756 IB_DM_IOC_ID_STRING_LEN); 2757 2758 ioc_info->ioc_iou_diagcode = gid_info->gl_iou->iou_diagcode; 2759 ioc_info->ioc_iou_dc_valid = gid_info->gl_iou->iou_dc_valid; 2760 ioc_info->ioc_diagdeviceid = (IB_DM_IOU_DEVICEID_MASK & 2761 gid_info->gl_iou->iou_info.iou_flag) ? B_TRUE : B_FALSE; 2762 2763 if (ioc_info->ioc_diagdeviceid == B_TRUE) { 2764 gid_info->gl_pending_cmds++; 2765 IBTF_DPRINTF_L3(ibdm_string, 2766 "\tibdm_handle_ioc_profile: " 2767 "%d: gid_info %p gl_state %d pending_cmds %d", 2768 __LINE__, gid_info, gid_info->gl_state, 2769 gid_info->gl_pending_cmds); 2770 } 2771 } 2772 gioc->ioc_service_entries = ioc->ioc_service_entries; 2773 mutex_exit(&gid_info->gl_mutex); 2774 2775 ibdm_dump_ioc_profile(gioc); 2776 2777 if ((ioc_info->ioc_diagdeviceid == B_TRUE) && (reprobe == 0)) { 2778 if (ibdm_get_diagcode(gid_info, ioc_no) != IBDM_SUCCESS) { 2779 mutex_enter(&gid_info->gl_mutex); 2780 gid_info->gl_pending_cmds--; 2781 mutex_exit(&gid_info->gl_mutex); 2782 } 2783 } 2784 ioc_info->ioc_serv = (ibdm_srvents_info_t *)kmem_zalloc( 2785 (gioc->ioc_service_entries * sizeof (ibdm_srvents_info_t)), 2786 KM_SLEEP); 2787 2788 /* 2789 * In one single request, maximum number of requests that can be 2790 * obtained is 4. If number of service entries are more than four, 2791 * calculate number requests needed and send them parallelly. 
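 *
 * Each request carries its target range in the attribute modifier:
 * the upper 16 bits hold the (1-based) IOC number, bits 8-15 the last
 * service entry index and bits 0-7 the first one, mirroring the decode
 * in ibdm_handle_srventry_mad().  A sketch of that encoding, with a
 * hypothetical helper name:
 *
 *	static uint32_t
 *	example_srvent_attrmod(uint_t ioc_no, uint_t start, uint_t end)
 *	{
 *		return (((uint32_t)ioc_no << 16) |
 *		    ((uint32_t)end << 8) | (uint32_t)start);
 *	}
 *
 * so ceil(nentries / IBDM_MAX_SERV_ENTRIES_PER_REQ) requests cover all
 * of an IOC's service entries, four entries at a time.
 *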
2792 */ 2793 nserv_entries = ioc->ioc_service_entries; 2794 ii = 0; 2795 while (nserv_entries) { 2796 mutex_enter(&gid_info->gl_mutex); 2797 gid_info->gl_pending_cmds++; 2798 ibdm_bump_transactionID(gid_info); 2799 mutex_exit(&gid_info->gl_mutex); 2800 2801 if (first != B_TRUE) { 2802 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2803 &msg) != IBMF_SUCCESS) { 2804 continue; 2805 } 2806 2807 } 2808 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2809 ibdm_alloc_send_buffers(msg); 2810 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2811 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2812 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2813 if (gid_info->gl_redirected == B_TRUE) { 2814 if (gid_info->gl_redirect_dlid != 0) { 2815 msg->im_local_addr.ia_remote_lid = 2816 gid_info->gl_redirect_dlid; 2817 } 2818 msg->im_local_addr.ia_remote_qno = 2819 gid_info->gl_redirect_QP; 2820 msg->im_local_addr.ia_p_key = 2821 gid_info->gl_redirect_pkey; 2822 msg->im_local_addr.ia_q_key = 2823 gid_info->gl_redirect_qkey; 2824 msg->im_local_addr.ia_service_level = 2825 gid_info->gl_redirectSL; 2826 } else { 2827 msg->im_local_addr.ia_remote_qno = 1; 2828 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2829 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2830 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2831 } 2832 2833 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2834 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2835 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2836 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2837 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2838 hdr->Status = 0; 2839 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2840 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 2841 2842 srv_start = ii * 4; 2843 cb_args = &ioc_info->ioc_serv[srv_start].se_cb_args; 2844 cb_args->cb_gid_info = gid_info; 2845 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2846 cb_args->cb_req_type = IBDM_REQ_TYPE_SRVENTS; 2847 cb_args->cb_srvents_start = srv_start; 2848 cb_args->cb_ioc_num = ioc_no - 1; 2849 2850 if (nserv_entries >= IBDM_MAX_SERV_ENTRIES_PER_REQ) { 2851 nserv_entries -= IBDM_MAX_SERV_ENTRIES_PER_REQ; 2852 cb_args->cb_srvents_end = (cb_args->cb_srvents_start + 2853 IBDM_MAX_SERV_ENTRIES_PER_REQ - 1); 2854 } else { 2855 cb_args->cb_srvents_end = 2856 (cb_args->cb_srvents_start + nserv_entries - 1); 2857 nserv_entries = 0; 2858 } 2859 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 2860 ibdm_fill_srv_attr_mod(hdr, cb_args); 2861 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 2862 2863 mutex_enter(&gid_info->gl_mutex); 2864 ioc_info->ioc_serv[srv_start].se_timeout_id = timeout( 2865 ibdm_pkt_timeout_hdlr, cb_args, 2866 IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2867 mutex_exit(&gid_info->gl_mutex); 2868 2869 IBTF_DPRINTF_L5("ibdm", "\thandle_ioc_profile:" 2870 "timeout %x, ioc %d srv %d", 2871 ioc_info->ioc_serv[srv_start].se_timeout_id, 2872 ioc_no - 1, srv_start); 2873 2874 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 2875 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2876 IBTF_DPRINTF_L2("ibdm", 2877 "\thandle_ioc_profile: msg send failed"); 2878 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 2879 } 2880 (*flag) |= IBDM_IBMF_PKT_REUSED; 2881 first = B_FALSE; 2882 ii++; 2883 } 2884 } 2885 2886 2887 /* 2888 * ibdm_handle_srventry_mad() 2889 */ 2890 static void 2891 ibdm_handle_srventry_mad(ibmf_msg_t *msg, 2892 ibdm_dp_gidinfo_t *gid_info, int *flag) 2893 { 2894 uint_t ii, ioc_no, attrmod; 2895 uint_t nentries, start, end; 2896 timeout_id_t timeout_id; 2897 ib_dm_srv_t *srv_ents; 2898 
ibdm_ioc_info_t *ioc_info; 2899 ibdm_srvents_info_t *gsrv_ents; 2900 2901 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad:" 2902 " IBMF msg %p gid info %p", msg, gid_info); 2903 2904 srv_ents = IBDM_IN_IBMFMSG2SRVENT(msg); 2905 /* 2906 * Get the start and end index of the service entries 2907 * Upper 16 bits identify the IOC 2908 * Lower 16 bits specify the range of service entries 2909 * LSB specifies (Big endian) end of the range 2910 * MSB specifies (Big endian) start of the range 2911 */ 2912 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 2913 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 2914 end = ((attrmod >> 8) & IBDM_8_BIT_MASK); 2915 start = (attrmod & IBDM_8_BIT_MASK); 2916 2917 /* Make sure that IOC index is with the valid range */ 2918 if ((ioc_no < 1) | 2919 (ioc_no > gid_info->gl_iou->iou_info.iou_num_ctrl_slots)) { 2920 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2921 "IOC index Out of range, index %d", ioc_no); 2922 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2923 return; 2924 } 2925 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 2926 2927 /* 2928 * Make sure that the "start" and "end" service indexes are 2929 * with in the valid range 2930 */ 2931 nentries = ioc_info->ioc_profile.ioc_service_entries; 2932 if ((start > end) | (start >= nentries) | (end >= nentries)) { 2933 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2934 "Attr modifier 0x%x, #Serv entries %d", attrmod, nentries); 2935 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2936 return; 2937 } 2938 gsrv_ents = &ioc_info->ioc_serv[start]; 2939 mutex_enter(&gid_info->gl_mutex); 2940 if (gsrv_ents->se_state != IBDM_SE_INVALID) { 2941 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2942 "already known, ioc %d, srv %d, se_state %x", 2943 ioc_no - 1, start, gsrv_ents->se_state); 2944 mutex_exit(&gid_info->gl_mutex); 2945 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2946 return; 2947 } 2948 ioc_info->ioc_serv[start].se_cb_args.cb_req_type = 0; 2949 if (ioc_info->ioc_serv[start].se_timeout_id) { 2950 IBTF_DPRINTF_L2("ibdm", 2951 "\thandle_srventry_mad: ioc %d start %d", ioc_no, start); 2952 timeout_id = ioc_info->ioc_serv[start].se_timeout_id; 2953 ioc_info->ioc_serv[start].se_timeout_id = 0; 2954 mutex_exit(&gid_info->gl_mutex); 2955 IBTF_DPRINTF_L5("ibdm", "handle_srverntry_mad: " 2956 "se_timeout_id = 0x%x", timeout_id); 2957 if (untimeout(timeout_id) == -1) { 2958 IBTF_DPRINTF_L2("ibdm", "handle_srventry_mad: " 2959 "untimeout se_timeout_id failed"); 2960 } 2961 mutex_enter(&gid_info->gl_mutex); 2962 } 2963 2964 gsrv_ents->se_state = IBDM_SE_VALID; 2965 mutex_exit(&gid_info->gl_mutex); 2966 for (ii = start; ii <= end; ii++, srv_ents++, gsrv_ents++) { 2967 gsrv_ents->se_attr.srv_id = b2h64(srv_ents->srv_id); 2968 bcopy(srv_ents->srv_name, 2969 gsrv_ents->se_attr.srv_name, IB_DM_MAX_SVC_NAME_LEN); 2970 ibdm_dump_service_entries(&gsrv_ents->se_attr); 2971 } 2972 } 2973 2974 2975 /* 2976 * ibdm_get_diagcode: 2977 * Send request to get IOU/IOC diag code 2978 * Returns IBDM_SUCCESS/IBDM_FAILURE 2979 */ 2980 static int 2981 ibdm_get_diagcode(ibdm_dp_gidinfo_t *gid_info, int attr) 2982 { 2983 ibmf_msg_t *msg; 2984 ib_mad_hdr_t *hdr; 2985 ibdm_ioc_info_t *ioc; 2986 ibdm_timeout_cb_args_t *cb_args; 2987 timeout_id_t *timeout_id; 2988 2989 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: gid info %p, attr = %d", 2990 gid_info, attr); 2991 2992 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2993 &msg) != IBMF_SUCCESS) { 2994 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: pkt alloc fail"); 2995 return (IBDM_FAILURE); 2996 } 2997 2998 
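	/*
	 * The AttributeModifier of a DiagCode request selects its target:
	 * 0 asks for the IOU diag code, i (1-based) asks for the diag code
	 * of IOC i.  The if/else on "attr" further below picks the matching
	 * callback args and timeout id; as an illustration only (with a
	 * hypothetical helper name):
	 *
	 *	static ibdm_timeout_cb_args_t *
	 *	example_diag_cb_args(ibdm_dp_gidinfo_t *gl, uint32_t attr)
	 *	{
	 *		if (attr == 0)
	 *			return (&gl->gl_iou_cb_args);
	 *		return (&IBDM_GIDINFO2IOCINFO(gl,
	 *		    attr - 1)->ioc_dc_cb_args);
	 *	}
	 */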
_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2999 ibdm_alloc_send_buffers(msg); 3000 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 3001 3002 mutex_enter(&gid_info->gl_mutex); 3003 ibdm_bump_transactionID(gid_info); 3004 mutex_exit(&gid_info->gl_mutex); 3005 3006 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 3007 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 3008 if (gid_info->gl_redirected == B_TRUE) { 3009 if (gid_info->gl_redirect_dlid != 0) { 3010 msg->im_local_addr.ia_remote_lid = 3011 gid_info->gl_redirect_dlid; 3012 } 3013 3014 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3015 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3016 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3017 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 3018 } else { 3019 msg->im_local_addr.ia_remote_qno = 1; 3020 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 3021 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 3022 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 3023 } 3024 3025 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3026 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3027 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3028 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3029 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3030 hdr->Status = 0; 3031 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3032 3033 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3034 hdr->AttributeModifier = h2b32(attr); 3035 3036 if (attr == 0) { 3037 cb_args = &gid_info->gl_iou_cb_args; 3038 gid_info->gl_iou->iou_dc_valid = B_FALSE; 3039 cb_args->cb_ioc_num = 0; 3040 cb_args->cb_req_type = IBDM_REQ_TYPE_IOU_DIAGCODE; 3041 timeout_id = &gid_info->gl_timeout_id; 3042 } else { 3043 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attr - 1)); 3044 ioc->ioc_dc_valid = B_FALSE; 3045 cb_args = &ioc->ioc_dc_cb_args; 3046 cb_args->cb_ioc_num = attr - 1; 3047 cb_args->cb_req_type = IBDM_REQ_TYPE_IOC_DIAGCODE; 3048 timeout_id = &ioc->ioc_dc_timeout_id; 3049 } 3050 cb_args->cb_gid_info = gid_info; 3051 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 3052 cb_args->cb_srvents_start = 0; 3053 3054 mutex_enter(&gid_info->gl_mutex); 3055 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3056 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3057 mutex_exit(&gid_info->gl_mutex); 3058 3059 IBTF_DPRINTF_L5("ibdm", "\tget_diagcode:" 3060 "timeout %x, ioc %d", *timeout_id, cb_args->cb_ioc_num); 3061 3062 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 3063 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3064 IBTF_DPRINTF_L2("ibdm", "\tget_diagcode: ibmf send failed"); 3065 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3066 } 3067 return (IBDM_SUCCESS); 3068 } 3069 3070 /* 3071 * ibdm_handle_diagcode: 3072 * Process the DiagCode MAD response and update local DM 3073 * data structure. 
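 *
 * Like the other response handlers, this routine cancels any pending
 * packet timeout with gl_mutex dropped across untimeout(), since
 * untimeout(9F) can block until a concurrently running timeout handler
 * finishes.  The recurring pattern, shown here only as a sketch:
 *
 *	mutex_enter(&gid_info->gl_mutex);
 *	if (gid_info->gl_timeout_id) {
 *		timeout_id = gid_info->gl_timeout_id;
 *		mutex_exit(&gid_info->gl_mutex);
 *		(void) untimeout(timeout_id);
 *		mutex_enter(&gid_info->gl_mutex);
 *		gid_info->gl_timeout_id = 0;
 *	}
 *	mutex_exit(&gid_info->gl_mutex);
 *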
3074 */ 3075 static void 3076 ibdm_handle_diagcode(ibmf_msg_t *ibmf_msg, 3077 ibdm_dp_gidinfo_t *gid_info, int *flag) 3078 { 3079 uint16_t attrmod, *diagcode; 3080 ibdm_iou_info_t *iou; 3081 ibdm_ioc_info_t *ioc; 3082 timeout_id_t timeout_id; 3083 ibdm_timeout_cb_args_t *cb_args; 3084 3085 diagcode = (uint16_t *)ibmf_msg->im_msgbufs_recv.im_bufs_cl_data; 3086 3087 mutex_enter(&gid_info->gl_mutex); 3088 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(ibmf_msg); 3089 iou = gid_info->gl_iou; 3090 if (attrmod == 0) { 3091 if (iou->iou_dc_valid != B_FALSE) { 3092 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 3093 IBTF_DPRINTF_L4("ibdm", 3094 "\thandle_diagcode: Duplicate IOU DiagCode"); 3095 mutex_exit(&gid_info->gl_mutex); 3096 return; 3097 } 3098 cb_args = &gid_info->gl_iou_cb_args; 3099 cb_args->cb_req_type = 0; 3100 iou->iou_diagcode = b2h16(*diagcode); 3101 iou->iou_dc_valid = B_TRUE; 3102 if (gid_info->gl_timeout_id) { 3103 timeout_id = gid_info->gl_timeout_id; 3104 mutex_exit(&gid_info->gl_mutex); 3105 IBTF_DPRINTF_L5("ibdm", "\thandle_diagcode: " 3106 "gl_timeout_id = 0x%x", timeout_id); 3107 if (untimeout(timeout_id) == -1) { 3108 IBTF_DPRINTF_L2("ibdm", "handle_diagcode: " 3109 "untimeout gl_timeout_id failed"); 3110 } 3111 mutex_enter(&gid_info->gl_mutex); 3112 gid_info->gl_timeout_id = 0; 3113 } 3114 } else { 3115 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod - 1)); 3116 if (ioc->ioc_dc_valid != B_FALSE) { 3117 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 3118 IBTF_DPRINTF_L4("ibdm", 3119 "\thandle_diagcode: Duplicate IOC DiagCode"); 3120 mutex_exit(&gid_info->gl_mutex); 3121 return; 3122 } 3123 cb_args = &ioc->ioc_dc_cb_args; 3124 cb_args->cb_req_type = 0; 3125 ioc->ioc_diagcode = b2h16(*diagcode); 3126 ioc->ioc_dc_valid = B_TRUE; 3127 timeout_id = iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id; 3128 if (timeout_id) { 3129 iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id = 0; 3130 mutex_exit(&gid_info->gl_mutex); 3131 IBTF_DPRINTF_L5("ibdm", "handle_diagcode: " 3132 "timeout_id = 0x%x", timeout_id); 3133 if (untimeout(timeout_id) == -1) { 3134 IBTF_DPRINTF_L2("ibdm", "\thandle_diagcode: " 3135 "untimeout ioc_dc_timeout_id failed"); 3136 } 3137 mutex_enter(&gid_info->gl_mutex); 3138 } 3139 } 3140 mutex_exit(&gid_info->gl_mutex); 3141 3142 IBTF_DPRINTF_L4("ibdm", "\thandle_diagcode: DiagCode : 0x%x" 3143 "attrmod : 0x%x", b2h16(*diagcode), attrmod); 3144 } 3145 3146 3147 /* 3148 * ibdm_is_ioc_present() 3149 * Return ibdm_ioc_info_t if IOC guid is found in the global gid list 3150 */ 3151 static ibdm_ioc_info_t * 3152 ibdm_is_ioc_present(ib_guid_t ioc_guid, 3153 ibdm_dp_gidinfo_t *gid_info, int *flag) 3154 { 3155 int ii; 3156 ibdm_ioc_info_t *ioc; 3157 ibdm_dp_gidinfo_t *head; 3158 ib_dm_io_unitinfo_t *iou; 3159 3160 mutex_enter(&ibdm.ibdm_mutex); 3161 head = ibdm.ibdm_dp_gidlist_head; 3162 while (head) { 3163 mutex_enter(&head->gl_mutex); 3164 if (head->gl_iou == NULL) { 3165 mutex_exit(&head->gl_mutex); 3166 head = head->gl_next; 3167 continue; 3168 } 3169 iou = &head->gl_iou->iou_info; 3170 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 3171 ioc = IBDM_GIDINFO2IOCINFO(head, ii); 3172 if ((ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) && 3173 (ioc->ioc_profile.ioc_guid == ioc_guid)) { 3174 if (gid_info == head) { 3175 *flag |= IBDM_IBMF_PKT_DUP_RESP; 3176 } else if (ibdm_check_dgid(head->gl_dgid_lo, 3177 head->gl_dgid_hi) != NULL) { 3178 IBTF_DPRINTF_L4("ibdm", "\tis_ioc_" 3179 "present: gid not present"); 3180 ibdm_add_to_gl_gid(gid_info, head); 3181 } 3182 mutex_exit(&head->gl_mutex); 3183 
mutex_exit(&ibdm.ibdm_mutex); 3184 return (ioc); 3185 } 3186 } 3187 mutex_exit(&head->gl_mutex); 3188 head = head->gl_next; 3189 } 3190 mutex_exit(&ibdm.ibdm_mutex); 3191 return (NULL); 3192 } 3193 3194 3195 /* 3196 * ibdm_ibmf_send_cb() 3197 * IBMF invokes this callback routine after posting the DM MAD to 3198 * the HCA. 3199 */ 3200 /*ARGSUSED*/ 3201 static void 3202 ibdm_ibmf_send_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *ibmf_msg, void *arg) 3203 { 3204 ibdm_dump_ibmf_msg(ibmf_msg, 1); 3205 ibdm_free_send_buffers(ibmf_msg); 3206 if (ibmf_free_msg(ibmf_hdl, &ibmf_msg) != IBMF_SUCCESS) { 3207 IBTF_DPRINTF_L4("ibdm", 3208 "\tibmf_send_cb: IBMF free msg failed"); 3209 } 3210 } 3211 3212 3213 /* 3214 * ibdm_ibmf_recv_cb() 3215 * Invoked by the IBMF when a response to the one of the DM requests 3216 * is received. 3217 */ 3218 /*ARGSUSED*/ 3219 static void 3220 ibdm_ibmf_recv_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 3221 { 3222 ibdm_taskq_args_t *taskq_args; 3223 3224 /* 3225 * If the taskq enable is set then dispatch a taskq to process 3226 * the MAD, otherwise just process it on this thread 3227 */ 3228 if (ibdm_taskq_enable != IBDM_ENABLE_TASKQ_HANDLING) { 3229 ibdm_process_incoming_mad(ibmf_hdl, msg, arg); 3230 return; 3231 } 3232 3233 /* 3234 * create a taskq and dispatch it to process the incoming MAD 3235 */ 3236 taskq_args = kmem_alloc(sizeof (ibdm_taskq_args_t), KM_NOSLEEP); 3237 if (taskq_args == NULL) { 3238 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: kmem_alloc failed for" 3239 "taskq_args"); 3240 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3241 IBTF_DPRINTF_L4("ibmf_recv_cb", 3242 "\tibmf_recv_cb: IBMF free msg failed"); 3243 } 3244 return; 3245 } 3246 taskq_args->tq_ibmf_handle = ibmf_hdl; 3247 taskq_args->tq_ibmf_msg = msg; 3248 taskq_args->tq_args = arg; 3249 3250 if (taskq_dispatch(system_taskq, ibdm_recv_incoming_mad, taskq_args, 3251 TQ_NOSLEEP) == 0) { 3252 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: taskq_dispatch failed"); 3253 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3254 IBTF_DPRINTF_L4("ibmf_recv_cb", 3255 "\tibmf_recv_cb: IBMF free msg failed"); 3256 } 3257 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 3258 return; 3259 } 3260 3261 /* taskq_args are deleted in ibdm_recv_incoming_mad() */ 3262 } 3263 3264 3265 void 3266 ibdm_recv_incoming_mad(void *args) 3267 { 3268 ibdm_taskq_args_t *taskq_args; 3269 3270 taskq_args = (ibdm_taskq_args_t *)args; 3271 3272 IBTF_DPRINTF_L4("ibdm", "\tibdm_recv_incoming_mad: " 3273 "Processing incoming MAD via taskq"); 3274 3275 ibdm_process_incoming_mad(taskq_args->tq_ibmf_handle, 3276 taskq_args->tq_ibmf_msg, taskq_args->tq_args); 3277 3278 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 3279 } 3280 3281 3282 /* 3283 * Calls ibdm_process_incoming_mad with all function arguments extracted 3284 * from args 3285 */ 3286 /*ARGSUSED*/ 3287 static void 3288 ibdm_process_incoming_mad(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 3289 { 3290 int flag = 0; 3291 int ret; 3292 uint64_t transaction_id; 3293 ib_mad_hdr_t *hdr; 3294 ibdm_dp_gidinfo_t *gid_info = NULL; 3295 3296 IBTF_DPRINTF_L4("ibdm", 3297 "\tprocess_incoming_mad: ibmf hdl %p pkt %p", ibmf_hdl, msg); 3298 ibdm_dump_ibmf_msg(msg, 0); 3299 3300 /* 3301 * IBMF calls this routine for every DM MAD that arrives at this port. 3302 * But we handle only the responses for requests we sent. We drop all 3303 * the DM packets that does not have response bit set in the MAD 3304 * header(this eliminates all the requests sent to this port). 
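 *
 * A response is matched back to the GID entry that issued it by masking
 * the MAD TransactionID down to the per-GID window assigned in
 * ibdm_get_reachable_ports(); conceptually, this is the loop further
 * below:
 *
 *	tid = b2h64(hdr->TransactionID);
 *	for (gid = ibdm.ibdm_dp_gidlist_head; gid; gid = gid->gl_next) {
 *		if ((gid->gl_transactionID & IBDM_GID_TRANSACTIONID_MASK) ==
 *		    (tid & IBDM_GID_TRANSACTIONID_MASK))
 *			break;
 *	}
 *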
3305 * We handle only DM class version 1 MAD's 3306 */ 3307 hdr = IBDM_IN_IBMFMSG_MADHDR(msg); 3308 if (ibdm_verify_mad_status(hdr) != IBDM_SUCCESS) { 3309 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3310 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 3311 "IBMF free msg failed DM request drop it"); 3312 } 3313 return; 3314 } 3315 3316 transaction_id = b2h64(hdr->TransactionID); 3317 3318 mutex_enter(&ibdm.ibdm_mutex); 3319 gid_info = ibdm.ibdm_dp_gidlist_head; 3320 while (gid_info) { 3321 if ((gid_info->gl_transactionID & 3322 IBDM_GID_TRANSACTIONID_MASK) == 3323 (transaction_id & IBDM_GID_TRANSACTIONID_MASK)) 3324 break; 3325 gid_info = gid_info->gl_next; 3326 } 3327 mutex_exit(&ibdm.ibdm_mutex); 3328 3329 if (gid_info == NULL) { 3330 /* Drop the packet */ 3331 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: transaction ID" 3332 " does not match: 0x%llx", transaction_id); 3333 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3334 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3335 "IBMF free msg failed DM request drop it"); 3336 } 3337 return; 3338 } 3339 3340 /* Handle redirection for all the MAD's, except ClassPortInfo */ 3341 if (((IBDM_IN_IBMFMSG_STATUS(msg) & MAD_STATUS_REDIRECT_REQUIRED)) && 3342 (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO)) { 3343 ret = ibdm_handle_redirection(msg, gid_info, &flag); 3344 if (ret == IBDM_SUCCESS) { 3345 return; 3346 } 3347 } else { 3348 uint_t gl_state; 3349 3350 mutex_enter(&gid_info->gl_mutex); 3351 gl_state = gid_info->gl_state; 3352 mutex_exit(&gid_info->gl_mutex); 3353 3354 switch (gl_state) { 3355 3356 case IBDM_SET_CLASSPORTINFO: 3357 ibdm_handle_setclassportinfo( 3358 ibmf_hdl, msg, gid_info, &flag); 3359 break; 3360 3361 case IBDM_GET_CLASSPORTINFO: 3362 ibdm_handle_classportinfo( 3363 ibmf_hdl, msg, gid_info, &flag); 3364 break; 3365 3366 case IBDM_GET_IOUNITINFO: 3367 ibdm_handle_iounitinfo(ibmf_hdl, msg, gid_info, &flag); 3368 break; 3369 3370 case IBDM_GET_IOC_DETAILS: 3371 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 3372 3373 case IB_DM_ATTR_SERVICE_ENTRIES: 3374 ibdm_handle_srventry_mad(msg, gid_info, &flag); 3375 break; 3376 3377 case IB_DM_ATTR_IOC_CTRL_PROFILE: 3378 ibdm_handle_ioc_profile( 3379 ibmf_hdl, msg, gid_info, &flag); 3380 break; 3381 3382 case IB_DM_ATTR_DIAG_CODE: 3383 ibdm_handle_diagcode(msg, gid_info, &flag); 3384 break; 3385 3386 default: 3387 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3388 "Error state, wrong attribute :-("); 3389 (void) ibmf_free_msg(ibmf_hdl, &msg); 3390 return; 3391 } 3392 break; 3393 default: 3394 IBTF_DPRINTF_L2("ibdm", 3395 "process_incoming_mad: Dropping the packet" 3396 " gl_state %x", gl_state); 3397 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3398 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3399 "IBMF free msg failed DM request drop it"); 3400 } 3401 return; 3402 } 3403 } 3404 3405 if ((flag & IBDM_IBMF_PKT_DUP_RESP) || 3406 (flag & IBDM_IBMF_PKT_UNEXP_RESP)) { 3407 IBTF_DPRINTF_L2("ibdm", 3408 "\tprocess_incoming_mad:Dup/unexp resp : 0x%x", flag); 3409 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3410 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3411 "IBMF free msg failed DM request drop it"); 3412 } 3413 return; 3414 } 3415 3416 mutex_enter(&gid_info->gl_mutex); 3417 if (gid_info->gl_pending_cmds < 1) { 3418 IBTF_DPRINTF_L2("ibdm", 3419 "\tprocess_incoming_mad: pending commands negative"); 3420 } 3421 if (--gid_info->gl_pending_cmds) { 3422 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: " 3423 "gid_info %p pending cmds %d", 3424 
gid_info, gid_info->gl_pending_cmds); 3425 mutex_exit(&gid_info->gl_mutex); 3426 } else { 3427 uint_t prev_state; 3428 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: Probing DONE"); 3429 prev_state = gid_info->gl_state; 3430 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 3431 if (prev_state == IBDM_SET_CLASSPORTINFO) { 3432 IBTF_DPRINTF_L4("ibdm", 3433 "\tprocess_incoming_mad: " 3434 "Setclassportinfo for Cisco FC GW is done."); 3435 gid_info->gl_flag &= ~IBDM_CISCO_PROBE; 3436 gid_info->gl_flag |= IBDM_CISCO_PROBE_DONE; 3437 mutex_exit(&gid_info->gl_mutex); 3438 cv_broadcast(&gid_info->gl_probe_cv); 3439 } else { 3440 mutex_exit(&gid_info->gl_mutex); 3441 ibdm_notify_newgid_iocs(gid_info); 3442 mutex_enter(&ibdm.ibdm_mutex); 3443 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3444 IBTF_DPRINTF_L4("ibdm", 3445 "\tprocess_incoming_mad: Wakeup"); 3446 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3447 cv_broadcast(&ibdm.ibdm_probe_cv); 3448 } 3449 mutex_exit(&ibdm.ibdm_mutex); 3450 } 3451 } 3452 3453 /* 3454 * Do not deallocate the IBMF packet if atleast one request 3455 * is posted. IBMF packet is reused. 3456 */ 3457 if (!(flag & IBDM_IBMF_PKT_REUSED)) { 3458 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3459 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 3460 "IBMF free msg failed DM request drop it"); 3461 } 3462 } 3463 } 3464 3465 3466 /* 3467 * ibdm_verify_mad_status() 3468 * Verifies the MAD status 3469 * Returns IBDM_SUCCESS if status is correct 3470 * Returns IBDM_FAILURE for bogus MAD status 3471 */ 3472 static int 3473 ibdm_verify_mad_status(ib_mad_hdr_t *hdr) 3474 { 3475 int ret = 0; 3476 3477 if ((hdr->R_Method != IB_DM_DEVMGT_METHOD_GET_RESP) || 3478 (hdr->ClassVersion != IB_DM_CLASS_VERSION_1)) { 3479 return (IBDM_FAILURE); 3480 } 3481 3482 if (b2h16(hdr->Status) == 0) 3483 ret = IBDM_SUCCESS; 3484 else if ((b2h16(hdr->Status) & 0x1f) == MAD_STATUS_REDIRECT_REQUIRED) 3485 ret = IBDM_SUCCESS; 3486 else { 3487 IBTF_DPRINTF_L2("ibdm", 3488 "\tverify_mad_status: Status : 0x%x", b2h16(hdr->Status)); 3489 ret = IBDM_FAILURE; 3490 } 3491 return (ret); 3492 } 3493 3494 3495 3496 /* 3497 * ibdm_handle_redirection() 3498 * Returns IBDM_SUCCESS/IBDM_FAILURE 3499 */ 3500 static int 3501 ibdm_handle_redirection(ibmf_msg_t *msg, 3502 ibdm_dp_gidinfo_t *gid_info, int *flag) 3503 { 3504 int attrmod, ioc_no, start; 3505 void *data; 3506 timeout_id_t *timeout_id; 3507 ib_mad_hdr_t *hdr; 3508 ibdm_ioc_info_t *ioc = NULL; 3509 ibdm_timeout_cb_args_t *cb_args; 3510 ib_mad_classportinfo_t *cpi; 3511 3512 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Enter"); 3513 mutex_enter(&gid_info->gl_mutex); 3514 switch (gid_info->gl_state) { 3515 case IBDM_GET_IOUNITINFO: 3516 cb_args = &gid_info->gl_iou_cb_args; 3517 timeout_id = &gid_info->gl_timeout_id; 3518 break; 3519 3520 case IBDM_GET_IOC_DETAILS: 3521 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 3522 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 3523 3524 case IB_DM_ATTR_DIAG_CODE: 3525 if (attrmod == 0) { 3526 cb_args = &gid_info->gl_iou_cb_args; 3527 timeout_id = &gid_info->gl_timeout_id; 3528 break; 3529 } 3530 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3531 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3532 "IOC# Out of range %d", attrmod); 3533 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3534 mutex_exit(&gid_info->gl_mutex); 3535 return (IBDM_FAILURE); 3536 } 3537 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3538 cb_args = &ioc->ioc_dc_cb_args; 3539 timeout_id = &ioc->ioc_dc_timeout_id; 3540 break; 3541 3542 case 
IB_DM_ATTR_IOC_CTRL_PROFILE: 3543 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3544 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3545 "IOC# Out of range %d", attrmod); 3546 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3547 mutex_exit(&gid_info->gl_mutex); 3548 return (IBDM_FAILURE); 3549 } 3550 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3551 cb_args = &ioc->ioc_cb_args; 3552 timeout_id = &ioc->ioc_timeout_id; 3553 break; 3554 3555 case IB_DM_ATTR_SERVICE_ENTRIES: 3556 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 3557 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) { 3558 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3559 "IOC# Out of range %d", ioc_no); 3560 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3561 mutex_exit(&gid_info->gl_mutex); 3562 return (IBDM_FAILURE); 3563 } 3564 start = (attrmod & IBDM_8_BIT_MASK); 3565 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 3566 if (start > ioc->ioc_profile.ioc_service_entries) { 3567 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3568 " SE index Out of range %d", start); 3569 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3570 mutex_exit(&gid_info->gl_mutex); 3571 return (IBDM_FAILURE); 3572 } 3573 cb_args = &ioc->ioc_serv[start].se_cb_args; 3574 timeout_id = &ioc->ioc_serv[start].se_timeout_id; 3575 break; 3576 3577 default: 3578 /* ERROR State */ 3579 IBTF_DPRINTF_L2("ibdm", 3580 "\thandle_redirection: wrong attribute :-("); 3581 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3582 mutex_exit(&gid_info->gl_mutex); 3583 return (IBDM_FAILURE); 3584 } 3585 break; 3586 default: 3587 /* ERROR State */ 3588 IBTF_DPRINTF_L2("ibdm", 3589 "\thandle_redirection: Error state :-("); 3590 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3591 mutex_exit(&gid_info->gl_mutex); 3592 return (IBDM_FAILURE); 3593 } 3594 if ((*timeout_id) != 0) { 3595 mutex_exit(&gid_info->gl_mutex); 3596 if (untimeout(*timeout_id) == -1) { 3597 IBTF_DPRINTF_L2("ibdm", "\thandle_redirection: " 3598 "untimeout failed %x", *timeout_id); 3599 } else { 3600 IBTF_DPRINTF_L5("ibdm", 3601 "\thandle_redirection: timeout %x", *timeout_id); 3602 } 3603 mutex_enter(&gid_info->gl_mutex); 3604 *timeout_id = 0; 3605 } 3606 3607 data = msg->im_msgbufs_recv.im_bufs_cl_data; 3608 cpi = (ib_mad_classportinfo_t *)data; 3609 3610 gid_info->gl_resp_timeout = 3611 (b2h32(cpi->RespTimeValue) & 0x1F); 3612 3613 gid_info->gl_redirected = B_TRUE; 3614 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 3615 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 3616 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 3617 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 3618 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 3619 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 3620 gid_info->gl_redirectSL = cpi->RedirectSL; 3621 3622 if (gid_info->gl_redirect_dlid != 0) { 3623 msg->im_local_addr.ia_remote_lid = 3624 gid_info->gl_redirect_dlid; 3625 } 3626 ibdm_bump_transactionID(gid_info); 3627 mutex_exit(&gid_info->gl_mutex); 3628 3629 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg, *hdr)) 3630 ibdm_alloc_send_buffers(msg); 3631 3632 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3633 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3634 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3635 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3636 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3637 hdr->Status = 0; 3638 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3639 hdr->AttributeID = 3640 msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeID; 3641 hdr->AttributeModifier = 3642 
msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier; 3643 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg, *hdr)) 3644 3645 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3646 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3647 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3648 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 3649 3650 mutex_enter(&gid_info->gl_mutex); 3651 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3652 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3653 mutex_exit(&gid_info->gl_mutex); 3654 3655 IBTF_DPRINTF_L5("ibdm", "\thandle_redirect:" 3656 "timeout %x", *timeout_id); 3657 3658 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 3659 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3660 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection:" 3661 "message transport failed"); 3662 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3663 } 3664 (*flag) |= IBDM_IBMF_PKT_REUSED; 3665 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Exit"); 3666 return (IBDM_SUCCESS); 3667 } 3668 3669 3670 /* 3671 * ibdm_pkt_timeout_hdlr 3672 * This timeout handler is registered for every IBMF packet that is 3673 * sent through the IBMF. It gets called when no response is received 3674 * within the specified time for the packet. The command is retried while 3675 * retries remain; once they are exhausted, the failed IBMF packet is 3676 * dropped and the pending command count is updated. 3677 */ 3678 static void 3679 ibdm_pkt_timeout_hdlr(void *arg) 3680 { 3681 ibdm_iou_info_t *iou; 3682 ibdm_ioc_info_t *ioc; 3683 ibdm_timeout_cb_args_t *cb_args = arg; 3684 ibdm_dp_gidinfo_t *gid_info; 3685 int srv_ent; 3686 uint_t new_gl_state; 3687 3688 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: gid_info: %p " 3689 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3690 cb_args->cb_req_type, cb_args->cb_ioc_num, 3691 cb_args->cb_srvents_start); 3692 3693 gid_info = cb_args->cb_gid_info; 3694 mutex_enter(&gid_info->gl_mutex); 3695 3696 if ((gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) || 3697 (cb_args->cb_req_type == 0)) { 3698 3699 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: req completed" 3700 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_req_type, 3701 cb_args->cb_ioc_num, cb_args->cb_srvents_start); 3702 3703 if (gid_info->gl_timeout_id) 3704 gid_info->gl_timeout_id = 0; 3705 mutex_exit(&gid_info->gl_mutex); 3706 return; 3707 } 3708 if (cb_args->cb_retry_count) { 3709 cb_args->cb_retry_count--; 3710 /* 3711 * A new timeout_id is set inside ibdm_retry_command(). 3712 * When the function returns an error, the timeout_id 3713 * is reset (to zero) in the switch statement below.
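 * When the retry is accepted, this handler simply returns and waits
 * for the next response or timeout on the re-sent request.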
3714 */ 3715 if (ibdm_retry_command(cb_args) == IBDM_SUCCESS) { 3716 mutex_exit(&gid_info->gl_mutex); 3717 return; 3718 } 3719 cb_args->cb_retry_count = 0; 3720 } 3721 3722 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: command failed: gid %p" 3723 " rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3724 cb_args->cb_req_type, cb_args->cb_ioc_num, 3725 cb_args->cb_srvents_start); 3726 3727 switch (cb_args->cb_req_type) { 3728 3729 case IBDM_REQ_TYPE_CLASSPORTINFO: 3730 case IBDM_REQ_TYPE_IOUINFO: 3731 new_gl_state = IBDM_GID_PROBING_FAILED; 3732 if (gid_info->gl_timeout_id) 3733 gid_info->gl_timeout_id = 0; 3734 break; 3735 3736 case IBDM_REQ_TYPE_IOCINFO: 3737 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3738 iou = gid_info->gl_iou; 3739 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3740 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 3741 if (ioc->ioc_timeout_id) 3742 ioc->ioc_timeout_id = 0; 3743 break; 3744 3745 case IBDM_REQ_TYPE_SRVENTS: 3746 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3747 iou = gid_info->gl_iou; 3748 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3749 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 3750 srv_ent = cb_args->cb_srvents_start; 3751 if (ioc->ioc_serv[srv_ent].se_timeout_id) 3752 ioc->ioc_serv[srv_ent].se_timeout_id = 0; 3753 break; 3754 3755 case IBDM_REQ_TYPE_IOU_DIAGCODE: 3756 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3757 iou = gid_info->gl_iou; 3758 iou->iou_dc_valid = B_FALSE; 3759 if (gid_info->gl_timeout_id) 3760 gid_info->gl_timeout_id = 0; 3761 break; 3762 3763 case IBDM_REQ_TYPE_IOC_DIAGCODE: 3764 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3765 iou = gid_info->gl_iou; 3766 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3767 ioc->ioc_dc_valid = B_FALSE; 3768 if (ioc->ioc_dc_timeout_id) 3769 ioc->ioc_dc_timeout_id = 0; 3770 break; 3771 3772 default: /* ERROR State */ 3773 new_gl_state = IBDM_GID_PROBING_FAILED; 3774 if (gid_info->gl_timeout_id) 3775 gid_info->gl_timeout_id = 0; 3776 IBTF_DPRINTF_L2("ibdm", 3777 "\tpkt_timeout_hdlr: wrong request type."); 3778 break; 3779 } 3780 3781 --gid_info->gl_pending_cmds; /* decrease the counter */ 3782 3783 if (gid_info->gl_pending_cmds == 0) { 3784 gid_info->gl_state = new_gl_state; 3785 mutex_exit(&gid_info->gl_mutex); 3786 /* 3787 * Delete this gid_info if the gid probe fails. 3788 */ 3789 if (new_gl_state == IBDM_GID_PROBING_FAILED) { 3790 ibdm_delete_glhca_list(gid_info); 3791 } 3792 ibdm_notify_newgid_iocs(gid_info); 3793 mutex_enter(&ibdm.ibdm_mutex); 3794 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3795 IBTF_DPRINTF_L4("ibdm", "\tpkt_timeout_hdlr: Wakeup"); 3796 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3797 cv_broadcast(&ibdm.ibdm_probe_cv); 3798 } 3799 mutex_exit(&ibdm.ibdm_mutex); 3800 } else { 3801 /* 3802 * Reset gl_pending_cmd if the extra timeout happens since 3803 * gl_pending_cmd becomes negative as a result. 3804 */ 3805 if (gid_info->gl_pending_cmds < 0) { 3806 gid_info->gl_pending_cmds = 0; 3807 IBTF_DPRINTF_L2("ibdm", 3808 "\tpkt_timeout_hdlr: extra timeout request." 3809 " reset gl_pending_cmds"); 3810 } 3811 mutex_exit(&gid_info->gl_mutex); 3812 /* 3813 * Delete this gid_info if the gid probe fails. 3814 */ 3815 if (new_gl_state == IBDM_GID_PROBING_FAILED) { 3816 ibdm_delete_glhca_list(gid_info); 3817 } 3818 } 3819 } 3820 3821 3822 /* 3823 * ibdm_retry_command() 3824 * Retries the failed command. 
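 * The caller must hold gid_info->gl_mutex.  The retry re-allocates the
 * IBMF message (resetting the GID info first if the cached IBMF handle
 * has gone bad), rebuilds the DM MAD for the original request type with
 * a fresh transaction ID, restarts the per-request timeout and re-sends
 * the request through ibmf_msg_transport().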
3825 * Returns IBDM_FAILURE/IBDM_SUCCESS 3826 */ 3827 static int 3828 ibdm_retry_command(ibdm_timeout_cb_args_t *cb_args) 3829 { 3830 int ret; 3831 ibmf_msg_t *msg; 3832 ib_mad_hdr_t *hdr; 3833 ibdm_dp_gidinfo_t *gid_info = cb_args->cb_gid_info; 3834 timeout_id_t *timeout_id; 3835 ibdm_ioc_info_t *ioc; 3836 int ioc_no; 3837 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 3838 3839 IBTF_DPRINTF_L2("ibdm", "\tretry_command: gid_info: %p " 3840 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3841 cb_args->cb_req_type, cb_args->cb_ioc_num, 3842 cb_args->cb_srvents_start); 3843 3844 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, &msg); 3845 3846 3847 /* 3848 * Reset the gid if alloc_msg failed with BAD_HANDLE 3849 * ibdm_reset_gidinfo reinits the gid_info 3850 */ 3851 if (ret == IBMF_BAD_HANDLE) { 3852 IBTF_DPRINTF_L3(ibdm_string, "\tretry_command: gid %p hdl bad", 3853 gid_info); 3854 3855 mutex_exit(&gid_info->gl_mutex); 3856 ibdm_reset_gidinfo(gid_info); 3857 mutex_enter(&gid_info->gl_mutex); 3858 3859 /* Retry alloc */ 3860 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, 3861 &msg); 3862 } 3863 3864 if (ret != IBDM_SUCCESS) { 3865 IBTF_DPRINTF_L2("ibdm", "\tretry_command: alloc failed: %p " 3866 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3867 cb_args->cb_req_type, cb_args->cb_ioc_num, 3868 cb_args->cb_srvents_start); 3869 return (IBDM_FAILURE); 3870 } 3871 3872 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 3873 ibdm_alloc_send_buffers(msg); 3874 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 3875 3876 ibdm_bump_transactionID(gid_info); 3877 3878 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 3879 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 3880 if (gid_info->gl_redirected == B_TRUE) { 3881 if (gid_info->gl_redirect_dlid != 0) { 3882 msg->im_local_addr.ia_remote_lid = 3883 gid_info->gl_redirect_dlid; 3884 } 3885 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3886 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3887 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3888 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 3889 } else { 3890 msg->im_local_addr.ia_remote_qno = 1; 3891 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 3892 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 3893 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 3894 } 3895 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3896 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 3897 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3898 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3899 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3900 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3901 hdr->Status = 0; 3902 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3903 3904 switch (cb_args->cb_req_type) { 3905 case IBDM_REQ_TYPE_CLASSPORTINFO: 3906 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 3907 hdr->AttributeModifier = 0; 3908 timeout_id = &gid_info->gl_timeout_id; 3909 break; 3910 case IBDM_REQ_TYPE_IOUINFO: 3911 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 3912 hdr->AttributeModifier = 0; 3913 timeout_id = &gid_info->gl_timeout_id; 3914 break; 3915 case IBDM_REQ_TYPE_IOCINFO: 3916 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 3917 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 3918 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 3919 timeout_id = &ioc->ioc_timeout_id; 3920 break; 3921 case IBDM_REQ_TYPE_SRVENTS: 3922 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 3923 ibdm_fill_srv_attr_mod(hdr, cb_args); 
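	/*
	 * The ServiceEntries AttributeModifier encodes the IOC slot number
	 * in its upper 16 bits and the starting service entry index in its
	 * low-order bits; ibdm_handle_redirection() above decodes the
	 * response the same way.
	 */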
3924 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 3925 timeout_id = 3926 &ioc->ioc_serv[cb_args->cb_srvents_start].se_timeout_id; 3927 break; 3928 case IBDM_REQ_TYPE_IOU_DIAGCODE: 3929 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3930 hdr->AttributeModifier = 0; 3931 timeout_id = &gid_info->gl_timeout_id; 3932 break; 3933 case IBDM_REQ_TYPE_IOC_DIAGCODE: 3934 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3935 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 3936 ioc_no = cb_args->cb_ioc_num; 3937 ioc = &gid_info->gl_iou->iou_ioc_info[ioc_no]; 3938 timeout_id = &ioc->ioc_dc_timeout_id; 3939 break; 3940 } 3941 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*hdr)) 3942 3943 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3944 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3945 3946 mutex_exit(&gid_info->gl_mutex); 3947 3948 IBTF_DPRINTF_L5("ibdm", "\tretry_command: %p,%x,%d,%d:" 3949 "timeout %x", cb_args->cb_req_type, cb_args->cb_ioc_num, 3950 cb_args->cb_srvents_start, *timeout_id); 3951 3952 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, 3953 gid_info->gl_qp_hdl, msg, NULL, ibdm_ibmf_send_cb, 3954 cb_args, 0) != IBMF_SUCCESS) { 3955 IBTF_DPRINTF_L2("ibdm", "\tretry_command: send failed: %p " 3956 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3957 cb_args->cb_req_type, cb_args->cb_ioc_num, 3958 cb_args->cb_srvents_start); 3959 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3960 } 3961 mutex_enter(&gid_info->gl_mutex); 3962 return (IBDM_SUCCESS); 3963 } 3964 3965 3966 /* 3967 * ibdm_update_ioc_port_gidlist() 3968 */ 3969 static void 3970 ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *dest, 3971 ibdm_dp_gidinfo_t *gid_info) 3972 { 3973 int ii, ngid_ents; 3974 ibdm_gid_t *tmp; 3975 ibdm_hca_list_t *gid_hca_head, *temp; 3976 ibdm_hca_list_t *ioc_head = NULL; 3977 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 3978 3979 IBTF_DPRINTF_L5("ibdm", "\tupdate_ioc_port_gidlist: Enter"); 3980 3981 ngid_ents = gid_info->gl_ngids; 3982 dest->ioc_nportgids = ngid_ents; 3983 dest->ioc_gid_list = kmem_zalloc(sizeof (ibdm_gid_t) * 3984 ngid_ents, KM_SLEEP); 3985 tmp = gid_info->gl_gid; 3986 for (ii = 0; (ii < ngid_ents) && (tmp); ii++) { 3987 dest->ioc_gid_list[ii].gid_dgid_hi = tmp->gid_dgid_hi; 3988 dest->ioc_gid_list[ii].gid_dgid_lo = tmp->gid_dgid_lo; 3989 tmp = tmp->gid_next; 3990 } 3991 3992 gid_hca_head = gid_info->gl_hca_list; 3993 while (gid_hca_head) { 3994 temp = ibdm_dup_hca_attr(gid_hca_head); 3995 temp->hl_next = ioc_head; 3996 ioc_head = temp; 3997 gid_hca_head = gid_hca_head->hl_next; 3998 } 3999 dest->ioc_hca_list = ioc_head; 4000 } 4001 4002 4003 /* 4004 * ibdm_alloc_send_buffers() 4005 * Allocates memory for the IBMF send buffer to send and/or receive 4006 * the Device Management MAD packet. 
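 *
 * The send buffer is a single IBDM_MAD_SIZE allocation laid out as the
 * ib_mad_hdr_t, followed by the DM class header of IBDM_DM_MAD_HDR_SZ
 * bytes, followed by the class data area that occupies the remainder
 * of the MAD.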
4007 */ 4008 static void 4009 ibdm_alloc_send_buffers(ibmf_msg_t *msgp) 4010 { 4011 msgp->im_msgbufs_send.im_bufs_mad_hdr = 4012 kmem_zalloc(IBDM_MAD_SIZE, KM_SLEEP); 4013 4014 msgp->im_msgbufs_send.im_bufs_cl_hdr = (uchar_t *) 4015 msgp->im_msgbufs_send.im_bufs_mad_hdr + sizeof (ib_mad_hdr_t); 4016 msgp->im_msgbufs_send.im_bufs_cl_hdr_len = IBDM_DM_MAD_HDR_SZ; 4017 4018 msgp->im_msgbufs_send.im_bufs_cl_data = 4019 ((char *)msgp->im_msgbufs_send.im_bufs_cl_hdr + IBDM_DM_MAD_HDR_SZ); 4020 msgp->im_msgbufs_send.im_bufs_cl_data_len = 4021 IBDM_MAD_SIZE - sizeof (ib_mad_hdr_t) - IBDM_DM_MAD_HDR_SZ; 4022 } 4023 4024 4025 /* 4026 * ibdm_free_send_buffers() 4027 * De-allocates memory for the IBMF send buffer 4028 */ 4029 static void 4030 ibdm_free_send_buffers(ibmf_msg_t *msgp) 4031 { 4032 if (msgp->im_msgbufs_send.im_bufs_mad_hdr != NULL) 4033 kmem_free(msgp->im_msgbufs_send.im_bufs_mad_hdr, IBDM_MAD_SIZE); 4034 } 4035 4036 /* 4037 * ibdm_probe_ioc() 4038 * 1. Gets the node records for the node GUID. This detects all the ports 4039 * of the IOU. 4040 * 2. Selectively probes all the IOCs, given the node GUID 4041 * 3. In case of reprobe, only the IOC to be reprobed is sent the IOC 4042 * Controller Profile request asynchronously 4043 */ 4044 /*ARGSUSED*/ 4045 static void 4046 ibdm_probe_ioc(ib_guid_t nodeguid, ib_guid_t ioc_guid, int reprobe_flag) 4047 { 4048 int ii, nrecords; 4049 size_t nr_len = 0, pi_len = 0; 4050 ib_gid_t sgid, dgid; 4051 ibdm_hca_list_t *hca_list = NULL; 4052 sa_node_record_t *nr, *tmp; 4053 ibdm_port_attr_t *port = NULL; 4054 ibdm_dp_gidinfo_t *reprobe_gid, *new_gid, *node_gid; 4055 ibdm_dp_gidinfo_t *temp_gidinfo; 4056 ibdm_gid_t *temp_gid; 4057 sa_portinfo_record_t *pi; 4058 4059 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc(%llx, %llx, %x): Begin", 4060 nodeguid, ioc_guid, reprobe_flag); 4061 4062 /* Rescan the GID list for any removed GIDs for reprobe */ 4063 if (reprobe_flag) 4064 ibdm_rescan_gidlist(&ioc_guid); 4065 4066 mutex_enter(&ibdm.ibdm_hl_mutex); 4067 for (ibdm_get_next_port(&hca_list, &port, 1); port; 4068 ibdm_get_next_port(&hca_list, &port, 1)) { 4069 reprobe_gid = new_gid = node_gid = NULL; 4070 4071 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, nodeguid); 4072 if (nr == NULL) { 4073 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc: no records"); 4074 continue; 4075 } 4076 nrecords = (nr_len / sizeof (sa_node_record_t)); 4077 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) { 4078 if ((pi = ibdm_get_portinfo( 4079 port->pa_sa_hdl, &pi_len, tmp->LID)) == NULL) { 4080 IBTF_DPRINTF_L4("ibdm", 4081 "\tibdm_get_portinfo: no portinfo recs"); 4082 continue; 4083 } 4084 4085 /* 4086 * If Device Management is not supported on 4087 * this port, skip the rest. 4088 */ 4089 if (!(pi->PortInfo.CapabilityMask & 4090 SM_CAP_MASK_IS_DM_SUPPD)) { 4091 kmem_free(pi, pi_len); 4092 continue; 4093 } 4094 4095 /* 4096 * For reprobes: Check if the GID is already in 4097 * the list.
If so, set the state to SKIPPED 4098 */ 4099 if (((temp_gidinfo = ibdm_find_gid(nodeguid, 4100 tmp->NodeInfo.PortGUID)) != NULL) && 4101 temp_gidinfo->gl_state == 4102 IBDM_GID_PROBING_COMPLETE) { 4103 ASSERT(reprobe_gid == NULL); 4104 ibdm_addto_glhcalist(temp_gidinfo, 4105 hca_list); 4106 reprobe_gid = temp_gidinfo; 4107 kmem_free(pi, pi_len); 4108 continue; 4109 } else if (temp_gidinfo != NULL) { 4110 kmem_free(pi, pi_len); 4111 ibdm_addto_glhcalist(temp_gidinfo, 4112 hca_list); 4113 continue; 4114 } 4115 4116 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : " 4117 "create_gid : prefix %llx, guid %llx\n", 4118 pi->PortInfo.GidPrefix, 4119 tmp->NodeInfo.PortGUID); 4120 4121 sgid.gid_prefix = port->pa_sn_prefix; 4122 sgid.gid_guid = port->pa_port_guid; 4123 dgid.gid_prefix = pi->PortInfo.GidPrefix; 4124 dgid.gid_guid = tmp->NodeInfo.PortGUID; 4125 new_gid = ibdm_create_gid_info(port, sgid, 4126 dgid); 4127 if (new_gid == NULL) { 4128 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4129 "create_gid_info failed\n"); 4130 kmem_free(pi, pi_len); 4131 continue; 4132 } 4133 if (node_gid == NULL) { 4134 node_gid = new_gid; 4135 ibdm_add_to_gl_gid(node_gid, node_gid); 4136 } else { 4137 IBTF_DPRINTF_L4("ibdm", 4138 "\tprobe_ioc: new gid"); 4139 temp_gid = kmem_zalloc( 4140 sizeof (ibdm_gid_t), KM_SLEEP); 4141 temp_gid->gid_dgid_hi = 4142 new_gid->gl_dgid_hi; 4143 temp_gid->gid_dgid_lo = 4144 new_gid->gl_dgid_lo; 4145 temp_gid->gid_next = node_gid->gl_gid; 4146 node_gid->gl_gid = temp_gid; 4147 node_gid->gl_ngids++; 4148 } 4149 new_gid->gl_nodeguid = nodeguid; 4150 new_gid->gl_portguid = dgid.gid_guid; 4151 ibdm_addto_glhcalist(new_gid, hca_list); 4152 4153 /* 4154 * Set the state to skipped as all these 4155 * gids point to the same node. 4156 * We (re)probe only one GID below and reset 4157 * state appropriately 4158 */ 4159 new_gid->gl_state = IBDM_GID_PROBING_SKIPPED; 4160 new_gid->gl_devid = (*tmp).NodeInfo.DeviceID; 4161 kmem_free(pi, pi_len); 4162 } 4163 kmem_free(nr, nr_len); 4164 4165 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : reprobe_flag %d " 4166 "reprobe_gid %p new_gid %p node_gid %p", 4167 reprobe_flag, reprobe_gid, new_gid, node_gid); 4168 4169 if (reprobe_flag != 0 && reprobe_gid != NULL) { 4170 int niocs, jj; 4171 ibdm_ioc_info_t *tmp_ioc; 4172 int ioc_matched = 0; 4173 4174 mutex_exit(&ibdm.ibdm_hl_mutex); 4175 mutex_enter(&reprobe_gid->gl_mutex); 4176 reprobe_gid->gl_state = IBDM_GET_IOC_DETAILS; 4177 niocs = 4178 reprobe_gid->gl_iou->iou_info.iou_num_ctrl_slots; 4179 reprobe_gid->gl_pending_cmds++; 4180 mutex_exit(&reprobe_gid->gl_mutex); 4181 4182 for (jj = 0; jj < niocs; jj++) { 4183 tmp_ioc = 4184 IBDM_GIDINFO2IOCINFO(reprobe_gid, jj); 4185 if (tmp_ioc->ioc_profile.ioc_guid != ioc_guid) 4186 continue; 4187 4188 ioc_matched = 1; 4189 4190 /* 4191 * Explicitly set gl_reprobe_flag to 0 so that 4192 * IBnex is not notified on completion 4193 */ 4194 mutex_enter(&reprobe_gid->gl_mutex); 4195 reprobe_gid->gl_reprobe_flag = 0; 4196 mutex_exit(&reprobe_gid->gl_mutex); 4197 4198 mutex_enter(&ibdm.ibdm_mutex); 4199 ibdm.ibdm_ngid_probes_in_progress++; 4200 mutex_exit(&ibdm.ibdm_mutex); 4201 if (ibdm_send_ioc_profile(reprobe_gid, jj) != 4202 IBDM_SUCCESS) { 4203 IBTF_DPRINTF_L4("ibdm", 4204 "\tprobe_ioc: " 4205 "send_ioc_profile failed " 4206 "for ioc %d", jj); 4207 ibdm_gid_decr_pending(reprobe_gid); 4208 break; 4209 } 4210 mutex_enter(&ibdm.ibdm_mutex); 4211 ibdm_wait_probe_completion(); 4212 mutex_exit(&ibdm.ibdm_mutex); 4213 break; 4214 } 4215 if (ioc_matched == 0) 4216 
ibdm_gid_decr_pending(reprobe_gid); 4217 else { 4218 mutex_enter(&ibdm.ibdm_hl_mutex); 4219 break; 4220 } 4221 } else if (new_gid != NULL) { 4222 mutex_exit(&ibdm.ibdm_hl_mutex); 4223 node_gid = node_gid ? node_gid : new_gid; 4224 4225 /* 4226 * New or reinserted GID : Enable notification 4227 * to IBnex 4228 */ 4229 mutex_enter(&node_gid->gl_mutex); 4230 node_gid->gl_reprobe_flag = 1; 4231 mutex_exit(&node_gid->gl_mutex); 4232 4233 ibdm_probe_gid(node_gid); 4234 4235 mutex_enter(&ibdm.ibdm_hl_mutex); 4236 } 4237 } 4238 mutex_exit(&ibdm.ibdm_hl_mutex); 4239 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : End\n"); 4240 } 4241 4242 4243 /* 4244 * ibdm_probe_gid() 4245 * Selectively probes the GID 4246 */ 4247 static void 4248 ibdm_probe_gid(ibdm_dp_gidinfo_t *gid_info) 4249 { 4250 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid:"); 4251 4252 /* 4253 * A Cisco FC GW needs the special handling to get IOUnitInfo. 4254 */ 4255 mutex_enter(&gid_info->gl_mutex); 4256 if (ibdm_is_cisco_switch(gid_info)) { 4257 gid_info->gl_pending_cmds++; 4258 gid_info->gl_state = IBDM_SET_CLASSPORTINFO; 4259 mutex_exit(&gid_info->gl_mutex); 4260 4261 if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) { 4262 4263 mutex_enter(&gid_info->gl_mutex); 4264 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 4265 --gid_info->gl_pending_cmds; 4266 mutex_exit(&gid_info->gl_mutex); 4267 4268 /* free the hca_list on this gid_info */ 4269 ibdm_delete_glhca_list(gid_info); 4270 gid_info = gid_info->gl_next; 4271 return; 4272 } 4273 4274 mutex_enter(&gid_info->gl_mutex); 4275 ibdm_wait_cisco_probe_completion(gid_info); 4276 4277 IBTF_DPRINTF_L4("ibdm", 4278 "\tprobe_gid: CISCO Wakeup signal received"); 4279 } 4280 4281 /* move on to the 'GET_CLASSPORTINFO' stage */ 4282 gid_info->gl_pending_cmds++; 4283 gid_info->gl_state = IBDM_GET_CLASSPORTINFO; 4284 mutex_exit(&gid_info->gl_mutex); 4285 4286 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) { 4287 4288 mutex_enter(&gid_info->gl_mutex); 4289 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 4290 --gid_info->gl_pending_cmds; 4291 mutex_exit(&gid_info->gl_mutex); 4292 4293 /* free the hca_list on this gid_info */ 4294 ibdm_delete_glhca_list(gid_info); 4295 gid_info = gid_info->gl_next; 4296 return; 4297 } 4298 4299 mutex_enter(&ibdm.ibdm_mutex); 4300 ibdm.ibdm_ngid_probes_in_progress++; 4301 gid_info = gid_info->gl_next; 4302 ibdm_wait_probe_completion(); 4303 mutex_exit(&ibdm.ibdm_mutex); 4304 4305 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid: Wakeup signal received"); 4306 } 4307 4308 4309 /* 4310 * ibdm_create_gid_info() 4311 * Allocates a gid_info structure and initializes 4312 * Returns pointer to the structure on success 4313 * and NULL on failure 4314 */ 4315 static ibdm_dp_gidinfo_t * 4316 ibdm_create_gid_info(ibdm_port_attr_t *port, ib_gid_t sgid, ib_gid_t dgid) 4317 { 4318 uint8_t ii, npaths; 4319 sa_path_record_t *path; 4320 size_t len; 4321 ibdm_pkey_tbl_t *pkey_tbl; 4322 ibdm_dp_gidinfo_t *gid_info = NULL; 4323 int ret; 4324 4325 IBTF_DPRINTF_L4("ibdm", "\tcreate_gid_info: Begin"); 4326 npaths = 1; 4327 4328 /* query for reversible paths */ 4329 if (port->pa_sa_hdl) 4330 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, 4331 sgid, dgid, IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, 4332 &len, &path); 4333 else 4334 return (NULL); 4335 4336 if (ret == IBMF_SUCCESS && path) { 4337 ibdm_dump_path_info(path); 4338 4339 gid_info = kmem_zalloc( 4340 sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 4341 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 4342 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, 
NULL); 4343 gid_info->gl_dgid_hi = path->DGID.gid_prefix; 4344 gid_info->gl_dgid_lo = path->DGID.gid_guid; 4345 gid_info->gl_sgid_hi = path->SGID.gid_prefix; 4346 gid_info->gl_sgid_lo = path->SGID.gid_guid; 4347 gid_info->gl_p_key = path->P_Key; 4348 gid_info->gl_sa_hdl = port->pa_sa_hdl; 4349 gid_info->gl_ibmf_hdl = port->pa_ibmf_hdl; 4350 gid_info->gl_slid = path->SLID; 4351 gid_info->gl_dlid = path->DLID; 4352 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID) 4353 << IBDM_GID_TRANSACTIONID_SHIFT; 4354 gid_info->gl_min_transactionID = gid_info->gl_transactionID; 4355 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1) 4356 << IBDM_GID_TRANSACTIONID_SHIFT; 4357 gid_info->gl_SL = path->SL; 4358 4359 gid_info->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 4360 for (ii = 0; ii < port->pa_npkeys; ii++) { 4361 if (port->pa_pkey_tbl == NULL) 4362 break; 4363 4364 pkey_tbl = &port->pa_pkey_tbl[ii]; 4365 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 4366 (pkey_tbl->pt_qp_hdl != NULL)) { 4367 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 4368 break; 4369 } 4370 } 4371 kmem_free(path, len); 4372 4373 /* 4374 * QP handle for GID not initialized. No matching Pkey 4375 * was found!! ibdm should *not* hit this case. Flag an 4376 * error and drop the GID if ibdm does encounter this. 4377 */ 4378 if (gid_info->gl_qp_hdl == NULL) { 4379 IBTF_DPRINTF_L2(ibdm_string, 4380 "\tcreate_gid_info: No matching Pkey"); 4381 ibdm_delete_gidinfo(gid_info); 4382 return (NULL); 4383 } 4384 4385 ibdm.ibdm_ngids++; 4386 if (ibdm.ibdm_dp_gidlist_head == NULL) { 4387 ibdm.ibdm_dp_gidlist_head = gid_info; 4388 ibdm.ibdm_dp_gidlist_tail = gid_info; 4389 } else { 4390 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 4391 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 4392 ibdm.ibdm_dp_gidlist_tail = gid_info; 4393 } 4394 } 4395 4396 return (gid_info); 4397 } 4398 4399 4400 /* 4401 * ibdm_get_node_records 4402 * Sends a SA query to get the NODE record 4403 * Returns pointer to the sa_node_record_t on success 4404 * and NULL on failure 4405 */ 4406 static sa_node_record_t * 4407 ibdm_get_node_records(ibmf_saa_handle_t sa_hdl, size_t *length, ib_guid_t guid) 4408 { 4409 sa_node_record_t req, *resp = NULL; 4410 ibmf_saa_access_args_t args; 4411 int ret; 4412 4413 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: Begin"); 4414 4415 bzero(&req, sizeof (sa_node_record_t)); 4416 req.NodeInfo.NodeGUID = guid; 4417 4418 args.sq_attr_id = SA_NODERECORD_ATTRID; 4419 args.sq_access_type = IBMF_SAA_RETRIEVE; 4420 args.sq_component_mask = SA_NODEINFO_COMPMASK_NODEGUID; 4421 args.sq_template = &req; 4422 args.sq_callback = NULL; 4423 args.sq_callback_arg = NULL; 4424 4425 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 4426 if (ret != IBMF_SUCCESS) { 4427 IBTF_DPRINTF_L2("ibdm", "\tget_node_records:" 4428 " SA Retrieve Failed: %d", ret); 4429 return (NULL); 4430 } 4431 if ((resp == NULL) || (*length == 0)) { 4432 IBTF_DPRINTF_L2("ibdm", "\tget_node_records: No records"); 4433 return (NULL); 4434 } 4435 4436 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: NodeGuid %llx " 4437 "PortGUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID); 4438 4439 return (resp); 4440 } 4441 4442 4443 /* 4444 * ibdm_get_portinfo() 4445 * Sends a SA query to get the PortInfo record 4446 * Returns pointer to the sa_portinfo_record_t on success 4447 * and NULL on failure 4448 */ 4449 static sa_portinfo_record_t * 4450 ibdm_get_portinfo(ibmf_saa_handle_t sa_hdl, size_t *length, ib_lid_t lid) 4451 { 4452 sa_portinfo_record_t req, *resp = NULL; 
4453 ibmf_saa_access_args_t args; 4454 int ret; 4455 4456 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: Begin"); 4457 4458 bzero(&req, sizeof (sa_portinfo_record_t)); 4459 req.EndportLID = lid; 4460 4461 args.sq_attr_id = SA_PORTINFORECORD_ATTRID; 4462 args.sq_access_type = IBMF_SAA_RETRIEVE; 4463 args.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID; 4464 args.sq_template = &req; 4465 args.sq_callback = NULL; 4466 args.sq_callback_arg = NULL; 4467 4468 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 4469 if (ret != IBMF_SUCCESS) { 4470 IBTF_DPRINTF_L2("ibdm", "\tget_portinfo:" 4471 " SA Retrieve Failed: 0x%X", ret); 4472 return (NULL); 4473 } 4474 if ((*length == 0) || (resp == NULL)) 4475 return (NULL); 4476 4477 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: GidPrefix %llx Cap 0x%x", 4478 resp->PortInfo.GidPrefix, resp->PortInfo.CapabilityMask); 4479 return (resp); 4480 } 4481 4482 4483 /* 4484 * ibdm_ibnex_register_callback 4485 * IB nexus callback routine for HCA attach and detach notification 4486 */ 4487 void 4488 ibdm_ibnex_register_callback(ibdm_callback_t ibnex_dm_callback) 4489 { 4490 IBTF_DPRINTF_L4("ibdm", "\tibnex_register_callbacks"); 4491 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4492 ibdm.ibdm_ibnex_callback = ibnex_dm_callback; 4493 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4494 } 4495 4496 4497 /* 4498 * ibdm_ibnex_unregister_callbacks 4499 */ 4500 void 4501 ibdm_ibnex_unregister_callback() 4502 { 4503 IBTF_DPRINTF_L4("ibdm", "\tibnex_unregister_callbacks"); 4504 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4505 ibdm.ibdm_ibnex_callback = NULL; 4506 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4507 } 4508 4509 4510 /* 4511 * ibdm_ibnex_get_waittime() 4512 * Calculates the wait time based on the last HCA attach time 4513 */ 4514 time_t 4515 ibdm_ibnex_get_waittime(ib_guid_t hca_guid, int *dft_wait) 4516 { 4517 int ii; 4518 time_t temp, wait_time = 0; 4519 ibdm_hca_list_t *hca; 4520 4521 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime hcaguid:%llx" 4522 "\tport settling time %d", hca_guid, *dft_wait); 4523 4524 mutex_enter(&ibdm.ibdm_hl_mutex); 4525 hca = ibdm.ibdm_hca_list_head; 4526 4527 if (hca_guid) { 4528 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4529 if ((hca_guid == hca->hl_hca_guid) && 4530 (hca->hl_nports != hca->hl_nports_active)) { 4531 wait_time = 4532 ddi_get_time() - hca->hl_attach_time; 4533 wait_time = ((wait_time >= *dft_wait) ? 4534 0 : (*dft_wait - wait_time)); 4535 break; 4536 } 4537 hca = hca->hl_next; 4538 } 4539 mutex_exit(&ibdm.ibdm_hl_mutex); 4540 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime %llx", wait_time); 4541 return (wait_time); 4542 } 4543 4544 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4545 if (hca->hl_nports != hca->hl_nports_active) { 4546 temp = ddi_get_time() - hca->hl_attach_time; 4547 temp = ((temp >= *dft_wait) ? 0 : (*dft_wait - temp)); 4548 wait_time = (temp > wait_time) ? 
temp : wait_time; 4549 } 4550 } 4551 mutex_exit(&ibdm.ibdm_hl_mutex); 4552 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime %llx", wait_time); 4553 return (wait_time); 4554 } 4555 4556 4557 /* 4558 * ibdm_ibnex_probe_hcaport 4559 * Probes the presence of HCA port (with HCA dip and port number) 4560 * Returns port attributes structure on SUCCESS 4561 */ 4562 ibdm_port_attr_t * 4563 ibdm_ibnex_probe_hcaport(ib_guid_t hca_guid, uint8_t port_num) 4564 { 4565 int ii, jj; 4566 ibdm_hca_list_t *hca_list; 4567 ibdm_port_attr_t *port_attr; 4568 4569 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_hcaport:"); 4570 4571 mutex_enter(&ibdm.ibdm_hl_mutex); 4572 hca_list = ibdm.ibdm_hca_list_head; 4573 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4574 if (hca_list->hl_hca_guid == hca_guid) { 4575 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4576 if (hca_list->hl_port_attr[jj].pa_port_num == 4577 port_num) { 4578 break; 4579 } 4580 } 4581 if (jj != hca_list->hl_nports) 4582 break; 4583 } 4584 hca_list = hca_list->hl_next; 4585 } 4586 if (ii == ibdm.ibdm_hca_count) { 4587 IBTF_DPRINTF_L2("ibdm", "\tibnex_probe_hcaport: not found"); 4588 mutex_exit(&ibdm.ibdm_hl_mutex); 4589 return (NULL); 4590 } 4591 port_attr = (ibdm_port_attr_t *)kmem_zalloc( 4592 sizeof (ibdm_port_attr_t), KM_SLEEP); 4593 bcopy((char *)&hca_list->hl_port_attr[jj], 4594 port_attr, sizeof (ibdm_port_attr_t)); 4595 ibdm_update_port_attr(port_attr); 4596 4597 mutex_exit(&ibdm.ibdm_hl_mutex); 4598 return (port_attr); 4599 } 4600 4601 4602 /* 4603 * ibdm_ibnex_get_port_attrs 4604 * Scan all HCAs for a matching port_guid. 4605 * Returns "port attributes" structure on success. 4606 */ 4607 ibdm_port_attr_t * 4608 ibdm_ibnex_get_port_attrs(ib_guid_t port_guid) 4609 { 4610 int ii, jj; 4611 ibdm_hca_list_t *hca_list; 4612 ibdm_port_attr_t *port_attr; 4613 4614 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_port_attrs:"); 4615 4616 mutex_enter(&ibdm.ibdm_hl_mutex); 4617 hca_list = ibdm.ibdm_hca_list_head; 4618 4619 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4620 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4621 if (hca_list->hl_port_attr[jj].pa_port_guid == 4622 port_guid) { 4623 break; 4624 } 4625 } 4626 if (jj != hca_list->hl_nports) 4627 break; 4628 hca_list = hca_list->hl_next; 4629 } 4630 4631 if (ii == ibdm.ibdm_hca_count) { 4632 IBTF_DPRINTF_L2("ibdm", "\tibnex_get_port_attrs: not found"); 4633 mutex_exit(&ibdm.ibdm_hl_mutex); 4634 return (NULL); 4635 } 4636 4637 port_attr = (ibdm_port_attr_t *)kmem_alloc(sizeof (ibdm_port_attr_t), 4638 KM_SLEEP); 4639 bcopy((char *)&hca_list->hl_port_attr[jj], port_attr, 4640 sizeof (ibdm_port_attr_t)); 4641 ibdm_update_port_attr(port_attr); 4642 4643 mutex_exit(&ibdm.ibdm_hl_mutex); 4644 return (port_attr); 4645 } 4646 4647 4648 /* 4649 * ibdm_ibnex_free_port_attr() 4650 */ 4651 void 4652 ibdm_ibnex_free_port_attr(ibdm_port_attr_t *port_attr) 4653 { 4654 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_port_attr:"); 4655 if (port_attr) { 4656 if (port_attr->pa_pkey_tbl != NULL) { 4657 kmem_free(port_attr->pa_pkey_tbl, 4658 (port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t))); 4659 } 4660 kmem_free(port_attr, sizeof (ibdm_port_attr_t)); 4661 } 4662 } 4663 4664 4665 /* 4666 * ibdm_ibnex_get_hca_list() 4667 * Returns portinfo for all the port for all the HCA's 4668 */ 4669 void 4670 ibdm_ibnex_get_hca_list(ibdm_hca_list_t **hca, int *count) 4671 { 4672 ibdm_hca_list_t *head = NULL, *temp, *temp1; 4673 int ii; 4674 4675 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_list:"); 4676 4677 mutex_enter(&ibdm.ibdm_hl_mutex); 4678 temp = 
ibdm.ibdm_hca_list_head; 4679 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4680 temp1 = ibdm_dup_hca_attr(temp); 4681 temp1->hl_next = head; 4682 head = temp1; 4683 temp = temp->hl_next; 4684 } 4685 *count = ibdm.ibdm_hca_count; 4686 *hca = head; 4687 mutex_exit(&ibdm.ibdm_hl_mutex); 4688 } 4689 4690 4691 /* 4692 * ibdm_ibnex_get_hca_info_by_guid() 4693 */ 4694 ibdm_hca_list_t * 4695 ibdm_ibnex_get_hca_info_by_guid(ib_guid_t hca_guid) 4696 { 4697 ibdm_hca_list_t *head = NULL, *hca = NULL; 4698 4699 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip"); 4700 4701 mutex_enter(&ibdm.ibdm_hl_mutex); 4702 head = ibdm.ibdm_hca_list_head; 4703 while (head) { 4704 if (head->hl_hca_guid == hca_guid) { 4705 hca = ibdm_dup_hca_attr(head); 4706 hca->hl_next = NULL; 4707 break; 4708 } 4709 head = head->hl_next; 4710 } 4711 mutex_exit(&ibdm.ibdm_hl_mutex); 4712 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip %p", hca); 4713 return (hca); 4714 } 4715 4716 4717 /* 4718 * ibdm_dup_hca_attr() 4719 * Allocate a new HCA attribute strucuture and initialize 4720 * hca attribute structure with the incoming HCA attributes 4721 * returned the allocated hca attributes. 4722 */ 4723 static ibdm_hca_list_t * 4724 ibdm_dup_hca_attr(ibdm_hca_list_t *in_hca) 4725 { 4726 int len; 4727 ibdm_hca_list_t *out_hca; 4728 4729 len = sizeof (ibdm_hca_list_t) + 4730 (in_hca->hl_nports * sizeof (ibdm_port_attr_t)); 4731 IBTF_DPRINTF_L4("ibdm", "\tdup_hca_attr len %d", len); 4732 out_hca = (ibdm_hca_list_t *)kmem_alloc(len, KM_SLEEP); 4733 bcopy((char *)in_hca, 4734 (char *)out_hca, sizeof (ibdm_hca_list_t)); 4735 if (in_hca->hl_nports) { 4736 out_hca->hl_port_attr = (ibdm_port_attr_t *) 4737 ((char *)out_hca + sizeof (ibdm_hca_list_t)); 4738 bcopy((char *)in_hca->hl_port_attr, 4739 (char *)out_hca->hl_port_attr, 4740 (in_hca->hl_nports * sizeof (ibdm_port_attr_t))); 4741 for (len = 0; len < out_hca->hl_nports; len++) 4742 ibdm_update_port_attr(&out_hca->hl_port_attr[len]); 4743 } 4744 return (out_hca); 4745 } 4746 4747 4748 /* 4749 * ibdm_ibnex_free_hca_list() 4750 * Free one/more HCA lists 4751 */ 4752 void 4753 ibdm_ibnex_free_hca_list(ibdm_hca_list_t *hca_list) 4754 { 4755 int ii; 4756 size_t len; 4757 ibdm_hca_list_t *temp; 4758 ibdm_port_attr_t *port; 4759 4760 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_hca_list:"); 4761 ASSERT(hca_list); 4762 while (hca_list) { 4763 temp = hca_list; 4764 hca_list = hca_list->hl_next; 4765 for (ii = 0; ii < temp->hl_nports; ii++) { 4766 port = &temp->hl_port_attr[ii]; 4767 len = (port->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 4768 if (len != 0) 4769 kmem_free(port->pa_pkey_tbl, len); 4770 } 4771 len = sizeof (ibdm_hca_list_t) + (temp->hl_nports * 4772 sizeof (ibdm_port_attr_t)); 4773 kmem_free(temp, len); 4774 } 4775 } 4776 4777 4778 /* 4779 * ibdm_ibnex_probe_iocguid() 4780 * Probes the IOC on the fabric and returns the IOC information 4781 * if present. 
Otherwise, NULL is returned 4782 */ 4783 /* ARGSUSED */ 4784 ibdm_ioc_info_t * 4785 ibdm_ibnex_probe_ioc(ib_guid_t iou, ib_guid_t ioc_guid, int reprobe_flag) 4786 { 4787 int k; 4788 ibdm_ioc_info_t *ioc_info; 4789 ibdm_dp_gidinfo_t *gid_info; /* used as index and arg */ 4790 timeout_id_t *timeout_id; 4791 4792 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_ioc: (%llX, %llX, %d) Begin", 4793 iou, ioc_guid, reprobe_flag); 4794 /* Check whether we know this already */ 4795 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 4796 if (ioc_info == NULL) { 4797 mutex_enter(&ibdm.ibdm_mutex); 4798 while (ibdm.ibdm_busy & IBDM_BUSY) 4799 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4800 ibdm.ibdm_busy |= IBDM_BUSY; 4801 mutex_exit(&ibdm.ibdm_mutex); 4802 ibdm_probe_ioc(iou, ioc_guid, 0); 4803 mutex_enter(&ibdm.ibdm_mutex); 4804 ibdm.ibdm_busy &= ~IBDM_BUSY; 4805 cv_broadcast(&ibdm.ibdm_busy_cv); 4806 mutex_exit(&ibdm.ibdm_mutex); 4807 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 4808 } else if (reprobe_flag) { /* Handle Reprobe for the IOC */ 4809 ASSERT(gid_info != NULL); 4810 /* Free the ioc_list before reprobe; and cancel any timers */ 4811 mutex_enter(&ibdm.ibdm_mutex); 4812 mutex_enter(&gid_info->gl_mutex); 4813 if (ioc_info->ioc_timeout_id) { 4814 timeout_id = ioc_info->ioc_timeout_id; 4815 ioc_info->ioc_timeout_id = 0; 4816 mutex_exit(&gid_info->gl_mutex); 4817 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4818 "ioc_timeout_id = 0x%x", timeout_id); 4819 if (untimeout(timeout_id) == -1) { 4820 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4821 "untimeout ioc_timeout_id failed"); 4822 } 4823 mutex_enter(&gid_info->gl_mutex); 4824 } 4825 if (ioc_info->ioc_dc_timeout_id) { 4826 timeout_id = ioc_info->ioc_dc_timeout_id; 4827 ioc_info->ioc_dc_timeout_id = 0; 4828 mutex_exit(&gid_info->gl_mutex); 4829 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4830 "ioc_dc_timeout_id = 0x%x", timeout_id); 4831 if (untimeout(timeout_id) == -1) { 4832 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4833 "untimeout ioc_dc_timeout_id failed"); 4834 } 4835 mutex_enter(&gid_info->gl_mutex); 4836 } 4837 for (k = 0; k < ioc_info->ioc_profile.ioc_service_entries; k++) 4838 if (ioc_info->ioc_serv[k].se_timeout_id) { 4839 timeout_id = ioc_info->ioc_serv[k]. 4840 se_timeout_id; 4841 ioc_info->ioc_serv[k].se_timeout_id = 0; 4842 mutex_exit(&gid_info->gl_mutex); 4843 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4844 "ioc_info->ioc_serv[k].se_timeout_id = %x", 4845 k, timeout_id); 4846 if (untimeout(timeout_id) == -1) { 4847 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4848 "untimeout se_timeout_id %d " 4849 "failed", k); 4850 } 4851 mutex_enter(&gid_info->gl_mutex); 4852 } 4853 mutex_exit(&gid_info->gl_mutex); 4854 mutex_exit(&ibdm.ibdm_mutex); 4855 ibdm_ibnex_free_ioc_list(ioc_info); 4856 4857 mutex_enter(&ibdm.ibdm_mutex); 4858 while (ibdm.ibdm_busy & IBDM_BUSY) 4859 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4860 ibdm.ibdm_busy |= IBDM_BUSY; 4861 mutex_exit(&ibdm.ibdm_mutex); 4862 4863 ibdm_probe_ioc(iou, ioc_guid, 1); 4864 4865 /* 4866 * Skip if gl_reprobe_flag is set, this will be 4867 * a re-inserted / new GID, for which notifications 4868 * have already been send. 
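 * All other GIDs that expose an IOC with the matching GUID have their
 * port and service entries refreshed via ibdm_reprobe_update_port_srv().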
4869 */ 4870 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 4871 gid_info = gid_info->gl_next) { 4872 uint8_t ii, niocs; 4873 ibdm_ioc_info_t *ioc; 4874 4875 if (gid_info->gl_iou == NULL) 4876 continue; 4877 4878 if (gid_info->gl_reprobe_flag) { 4879 gid_info->gl_reprobe_flag = 0; 4880 continue; 4881 } 4882 4883 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 4884 for (ii = 0; ii < niocs; ii++) { 4885 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 4886 if (ioc->ioc_profile.ioc_guid == ioc_guid) { 4887 mutex_enter(&ibdm.ibdm_mutex); 4888 ibdm_reprobe_update_port_srv(ioc, 4889 gid_info); 4890 mutex_exit(&ibdm.ibdm_mutex); 4891 } 4892 } 4893 } 4894 mutex_enter(&ibdm.ibdm_mutex); 4895 ibdm.ibdm_busy &= ~IBDM_BUSY; 4896 cv_broadcast(&ibdm.ibdm_busy_cv); 4897 mutex_exit(&ibdm.ibdm_mutex); 4898 4899 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 4900 } 4901 return (ioc_info); 4902 } 4903 4904 4905 /* 4906 * ibdm_get_ioc_info_with_gid() 4907 * Returns pointer to ibdm_ioc_info_t if it finds 4908 * matching record for the ioc_guid. Otherwise NULL is returned. 4909 * The pointer to gid_info is set to the second argument in case that 4910 * the non-NULL value returns (and the second argument is not NULL). 4911 * 4912 * Note. use the same strings as "ibnex_get_ioc_info" in 4913 * IBTF_DPRINTF() to keep compatibility. 4914 */ 4915 static ibdm_ioc_info_t * 4916 ibdm_get_ioc_info_with_gid(ib_guid_t ioc_guid, 4917 ibdm_dp_gidinfo_t **gid_info) 4918 { 4919 int ii; 4920 ibdm_ioc_info_t *ioc = NULL, *tmp = NULL; 4921 ibdm_dp_gidinfo_t *gid_list; 4922 ib_dm_io_unitinfo_t *iou; 4923 4924 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_ioc_info: GUID %llx", ioc_guid); 4925 4926 mutex_enter(&ibdm.ibdm_mutex); 4927 while (ibdm.ibdm_busy & IBDM_BUSY) 4928 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4929 ibdm.ibdm_busy |= IBDM_BUSY; 4930 4931 if (gid_info) 4932 *gid_info = NULL; /* clear the value of gid_info */ 4933 4934 gid_list = ibdm.ibdm_dp_gidlist_head; 4935 while (gid_list) { 4936 mutex_enter(&gid_list->gl_mutex); 4937 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 4938 mutex_exit(&gid_list->gl_mutex); 4939 gid_list = gid_list->gl_next; 4940 continue; 4941 } 4942 if (gid_list->gl_iou == NULL) { 4943 IBTF_DPRINTF_L2("ibdm", 4944 "\tget_ioc_info: No IOU info"); 4945 mutex_exit(&gid_list->gl_mutex); 4946 gid_list = gid_list->gl_next; 4947 continue; 4948 } 4949 iou = &gid_list->gl_iou->iou_info; 4950 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 4951 tmp = IBDM_GIDINFO2IOCINFO(gid_list, ii); 4952 if ((tmp->ioc_profile.ioc_guid == ioc_guid) && 4953 (tmp->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS)) { 4954 ioc = ibdm_dup_ioc_info(tmp, gid_list); 4955 if (gid_info) 4956 *gid_info = gid_list; /* set this ptr */ 4957 mutex_exit(&gid_list->gl_mutex); 4958 ibdm.ibdm_busy &= ~IBDM_BUSY; 4959 cv_broadcast(&ibdm.ibdm_busy_cv); 4960 mutex_exit(&ibdm.ibdm_mutex); 4961 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: End"); 4962 return (ioc); 4963 } 4964 } 4965 if (ii == iou->iou_num_ctrl_slots) 4966 ioc = NULL; 4967 4968 mutex_exit(&gid_list->gl_mutex); 4969 gid_list = gid_list->gl_next; 4970 } 4971 4972 ibdm.ibdm_busy &= ~IBDM_BUSY; 4973 cv_broadcast(&ibdm.ibdm_busy_cv); 4974 mutex_exit(&ibdm.ibdm_mutex); 4975 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: failure End"); 4976 return (ioc); 4977 } 4978 4979 /* 4980 * ibdm_ibnex_get_ioc_info() 4981 * Returns pointer to ibdm_ioc_info_t if it finds 4982 * matching record for the ioc_guid, otherwise NULL 4983 * is returned 4984 * 4985 * Note. 
this is a wrapper function to ibdm_get_ioc_info_with_gid() now. 4986 */ 4987 ibdm_ioc_info_t * 4988 ibdm_ibnex_get_ioc_info(ib_guid_t ioc_guid) 4989 { 4990 /* will not use the gid_info pointer, so the second arg is NULL */ 4991 return (ibdm_get_ioc_info_with_gid(ioc_guid, NULL)); 4992 } 4993 4994 /* 4995 * ibdm_ibnex_get_ioc_count() 4996 * Returns number of ibdm_ioc_info_t it finds 4997 */ 4998 int 4999 ibdm_ibnex_get_ioc_count(void) 5000 { 5001 int count = 0, k; 5002 ibdm_ioc_info_t *ioc; 5003 ibdm_dp_gidinfo_t *gid_list; 5004 5005 mutex_enter(&ibdm.ibdm_mutex); 5006 ibdm_sweep_fabric(0); 5007 5008 while (ibdm.ibdm_busy & IBDM_BUSY) 5009 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5010 ibdm.ibdm_busy |= IBDM_BUSY; 5011 5012 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 5013 gid_list = gid_list->gl_next) { 5014 mutex_enter(&gid_list->gl_mutex); 5015 if ((gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) || 5016 (gid_list->gl_iou == NULL)) { 5017 mutex_exit(&gid_list->gl_mutex); 5018 continue; 5019 } 5020 for (k = 0; k < gid_list->gl_iou->iou_info.iou_num_ctrl_slots; 5021 k++) { 5022 ioc = IBDM_GIDINFO2IOCINFO(gid_list, k); 5023 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) 5024 ++count; 5025 } 5026 mutex_exit(&gid_list->gl_mutex); 5027 } 5028 ibdm.ibdm_busy &= ~IBDM_BUSY; 5029 cv_broadcast(&ibdm.ibdm_busy_cv); 5030 mutex_exit(&ibdm.ibdm_mutex); 5031 5032 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_count: count = %d", count); 5033 return (count); 5034 } 5035 5036 5037 /* 5038 * ibdm_ibnex_get_ioc_list() 5039 * Returns information about all the IOCs present on the fabric. 5040 * Reprobes the IOCs and the GID list if list_flag is set to REPROBE_ALL. 5041 * Does not sweep fabric if DONOT_PROBE is set 5042 */ 5043 ibdm_ioc_info_t * 5044 ibdm_ibnex_get_ioc_list(ibdm_ibnex_get_ioclist_mtd_t list_flag) 5045 { 5046 int ii; 5047 ibdm_ioc_info_t *ioc_list = NULL, *tmp, *ioc; 5048 ibdm_dp_gidinfo_t *gid_list; 5049 ib_dm_io_unitinfo_t *iou; 5050 5051 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: Enter"); 5052 5053 mutex_enter(&ibdm.ibdm_mutex); 5054 if (list_flag != IBDM_IBNEX_DONOT_PROBE) 5055 ibdm_sweep_fabric(list_flag == IBDM_IBNEX_REPROBE_ALL); 5056 5057 while (ibdm.ibdm_busy & IBDM_BUSY) 5058 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5059 ibdm.ibdm_busy |= IBDM_BUSY; 5060 5061 gid_list = ibdm.ibdm_dp_gidlist_head; 5062 while (gid_list) { 5063 mutex_enter(&gid_list->gl_mutex); 5064 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 5065 mutex_exit(&gid_list->gl_mutex); 5066 gid_list = gid_list->gl_next; 5067 continue; 5068 } 5069 if (gid_list->gl_iou == NULL) { 5070 IBTF_DPRINTF_L2("ibdm", 5071 "\tget_ioc_list: No IOU info"); 5072 mutex_exit(&gid_list->gl_mutex); 5073 gid_list = gid_list->gl_next; 5074 continue; 5075 } 5076 iou = &gid_list->gl_iou->iou_info; 5077 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 5078 ioc = IBDM_GIDINFO2IOCINFO(gid_list, ii); 5079 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 5080 tmp = ibdm_dup_ioc_info(ioc, gid_list); 5081 tmp->ioc_next = ioc_list; 5082 ioc_list = tmp; 5083 } 5084 } 5085 mutex_exit(&gid_list->gl_mutex); 5086 gid_list = gid_list->gl_next; 5087 } 5088 ibdm.ibdm_busy &= ~IBDM_BUSY; 5089 cv_broadcast(&ibdm.ibdm_busy_cv); 5090 mutex_exit(&ibdm.ibdm_mutex); 5091 5092 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: End"); 5093 return (ioc_list); 5094 } 5095 5096 /* 5097 * ibdm_dup_ioc_info() 5098 * Duplicate the IOC information and return the IOC 5099 * information. 
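 * The caller must hold gid_list->gl_mutex.  The copy, including the
 * duplicated port GID and HCA lists, is released later with
 * ibdm_ibnex_free_ioc_list().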
5100 */ 5101 static ibdm_ioc_info_t * 5102 ibdm_dup_ioc_info(ibdm_ioc_info_t *in_ioc, ibdm_dp_gidinfo_t *gid_list) 5103 { 5104 ibdm_ioc_info_t *out_ioc; 5105 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*out_ioc)); 5106 ASSERT(MUTEX_HELD(&gid_list->gl_mutex)); 5107 5108 out_ioc = kmem_alloc(sizeof (ibdm_ioc_info_t), KM_SLEEP); 5109 bcopy(in_ioc, out_ioc, sizeof (ibdm_ioc_info_t)); 5110 ibdm_update_ioc_port_gidlist(out_ioc, gid_list); 5111 out_ioc->ioc_iou_dc_valid = gid_list->gl_iou->iou_dc_valid; 5112 out_ioc->ioc_iou_diagcode = gid_list->gl_iou->iou_diagcode; 5113 5114 return (out_ioc); 5115 } 5116 5117 5118 /* 5119 * ibdm_free_ioc_list() 5120 * Deallocate memory for IOC list structure 5121 */ 5122 void 5123 ibdm_ibnex_free_ioc_list(ibdm_ioc_info_t *ioc) 5124 { 5125 ibdm_ioc_info_t *temp; 5126 5127 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_ioc_list:"); 5128 while (ioc) { 5129 temp = ioc; 5130 ioc = ioc->ioc_next; 5131 kmem_free(temp->ioc_gid_list, 5132 (sizeof (ibdm_gid_t) * temp->ioc_nportgids)); 5133 if (temp->ioc_hca_list) 5134 ibdm_ibnex_free_hca_list(temp->ioc_hca_list); 5135 kmem_free(temp, sizeof (ibdm_ioc_info_t)); 5136 } 5137 } 5138 5139 5140 /* 5141 * ibdm_ibnex_update_pkey_tbls 5142 * Updates the DM P_Key database. 5143 * NOTE: Two cases are handled here: P_Key being added or removed. 5144 * 5145 * Arguments : NONE 5146 * Return Values : NONE 5147 */ 5148 void 5149 ibdm_ibnex_update_pkey_tbls(void) 5150 { 5151 int h, pp, pidx; 5152 uint_t nports; 5153 uint_t size; 5154 ib_pkey_t new_pkey; 5155 ib_pkey_t *orig_pkey; 5156 ibdm_hca_list_t *hca_list; 5157 ibdm_port_attr_t *port; 5158 ibt_hca_portinfo_t *pinfop; 5159 5160 IBTF_DPRINTF_L4("ibdm", "\tibnex_update_pkey_tbls:"); 5161 5162 mutex_enter(&ibdm.ibdm_hl_mutex); 5163 hca_list = ibdm.ibdm_hca_list_head; 5164 5165 for (h = 0; h < ibdm.ibdm_hca_count; h++) { 5166 5167 /* This updates P_Key Tables for all ports of this HCA */ 5168 (void) ibt_query_hca_ports(hca_list->hl_hca_hdl, 0, &pinfop, 5169 &nports, &size); 5170 5171 /* number of ports shouldn't have changed */ 5172 ASSERT(nports == hca_list->hl_nports); 5173 5174 for (pp = 0; pp < hca_list->hl_nports; pp++) { 5175 port = &hca_list->hl_port_attr[pp]; 5176 5177 /* 5178 * First figure out the P_Keys from IBTL. 5179 * Three things could have happened: 5180 * New P_Keys added 5181 * Existing P_Keys removed 5182 * Both of the above two 5183 * 5184 * Loop through the P_Key Indices and check if a 5185 * give P_Key_Ix matches that of the one seen by 5186 * IBDM. If they match no action is needed. 5187 * 5188 * If they don't match: 5189 * 1. if orig_pkey is invalid and new_pkey is valid 5190 * ---> add new_pkey to DM database 5191 * 2. if orig_pkey is valid and new_pkey is invalid 5192 * ---> remove orig_pkey from DM database 5193 * 3. if orig_pkey and new_pkey are both valid: 5194 * ---> remov orig_pkey from DM database 5195 * ---> add new_pkey to DM database 5196 * 4. if orig_pkey and new_pkey are both invalid: 5197 * ---> do nothing. Updated DM database. 
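 * (In case 4 the table entry is still overwritten, so a change between
 * INVALID_FULL and INVALID_LIMITED is reflected in the DM database.)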
5198 */ 5199 5200 for (pidx = 0; pidx < port->pa_npkeys; pidx++) { 5201 new_pkey = pinfop[pp].p_pkey_tbl[pidx]; 5202 orig_pkey = &port->pa_pkey_tbl[pidx].pt_pkey; 5203 5204 /* keys match - do nothing */ 5205 if (*orig_pkey == new_pkey) 5206 continue; 5207 5208 if (IBDM_INVALID_PKEY(*orig_pkey) && 5209 !IBDM_INVALID_PKEY(new_pkey)) { 5210 /* P_Key was added */ 5211 IBTF_DPRINTF_L5("ibdm", 5212 "\tibnex_update_pkey_tbls: new " 5213 "P_Key added = 0x%x", new_pkey); 5214 *orig_pkey = new_pkey; 5215 ibdm_port_attr_ibmf_init(port, 5216 new_pkey, pp); 5217 } else if (!IBDM_INVALID_PKEY(*orig_pkey) && 5218 IBDM_INVALID_PKEY(new_pkey)) { 5219 /* P_Key was removed */ 5220 IBTF_DPRINTF_L5("ibdm", 5221 "\tibnex_update_pkey_tbls: P_Key " 5222 "removed = 0x%x", *orig_pkey); 5223 *orig_pkey = new_pkey; 5224 (void) ibdm_port_attr_ibmf_fini(port, 5225 pidx); 5226 } else if (!IBDM_INVALID_PKEY(*orig_pkey) && 5227 !IBDM_INVALID_PKEY(new_pkey)) { 5228 /* P_Key were replaced */ 5229 IBTF_DPRINTF_L5("ibdm", 5230 "\tibnex_update_pkey_tbls: P_Key " 5231 "replaced 0x%x with 0x%x", 5232 *orig_pkey, new_pkey); 5233 (void) ibdm_port_attr_ibmf_fini(port, 5234 pidx); 5235 *orig_pkey = new_pkey; 5236 ibdm_port_attr_ibmf_init(port, 5237 new_pkey, pp); 5238 } else { 5239 /* 5240 * P_Keys are invalid 5241 * set anyway to reflect if 5242 * INVALID_FULL was changed to 5243 * INVALID_LIMITED or vice-versa. 5244 */ 5245 *orig_pkey = new_pkey; 5246 } /* end of else */ 5247 5248 } /* loop of p_key index */ 5249 5250 } /* loop of #ports of HCA */ 5251 5252 ibt_free_portinfo(pinfop, size); 5253 hca_list = hca_list->hl_next; 5254 5255 } /* loop for all HCAs in the system */ 5256 5257 mutex_exit(&ibdm.ibdm_hl_mutex); 5258 } 5259 5260 5261 /* 5262 * ibdm_send_ioc_profile() 5263 * Send IOC Controller Profile request. When the request is completed 5264 * IBMF calls ibdm_process_incoming_mad routine to inform about 5265 * the completion. 5266 */ 5267 static int 5268 ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *gid_info, uint8_t ioc_no) 5269 { 5270 ibmf_msg_t *msg; 5271 ib_mad_hdr_t *hdr; 5272 ibdm_ioc_info_t *ioc_info = &(gid_info->gl_iou->iou_ioc_info[ioc_no]); 5273 ibdm_timeout_cb_args_t *cb_args; 5274 5275 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: " 5276 "gid info 0x%p, ioc_no = %d", gid_info, ioc_no); 5277 5278 /* 5279 * Send command to get IOC profile. 5280 * Allocate a IBMF packet and initialize the packet. 
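 * The AttributeModifier of the GET request is the one-based IOC slot
 * number (ioc_no + 1); IBMF hands the response back to
 * ibdm_process_incoming_mad().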
5281 */ 5282 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 5283 &msg) != IBMF_SUCCESS) { 5284 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: pkt alloc fail"); 5285 return (IBDM_FAILURE); 5286 } 5287 5288 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 5289 ibdm_alloc_send_buffers(msg); 5290 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 5291 5292 mutex_enter(&gid_info->gl_mutex); 5293 ibdm_bump_transactionID(gid_info); 5294 mutex_exit(&gid_info->gl_mutex); 5295 5296 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 5297 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 5298 if (gid_info->gl_redirected == B_TRUE) { 5299 if (gid_info->gl_redirect_dlid != 0) { 5300 msg->im_local_addr.ia_remote_lid = 5301 gid_info->gl_redirect_dlid; 5302 } 5303 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 5304 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 5305 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 5306 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 5307 } else { 5308 msg->im_local_addr.ia_remote_qno = 1; 5309 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 5310 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 5311 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 5312 } 5313 5314 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 5315 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 5316 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 5317 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 5318 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 5319 hdr->Status = 0; 5320 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 5321 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 5322 hdr->AttributeModifier = h2b32(ioc_no + 1); 5323 5324 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 5325 cb_args = &ioc_info->ioc_cb_args; 5326 cb_args->cb_gid_info = gid_info; 5327 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 5328 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 5329 cb_args->cb_ioc_num = ioc_no; 5330 5331 mutex_enter(&gid_info->gl_mutex); 5332 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 5333 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 5334 mutex_exit(&gid_info->gl_mutex); 5335 5336 IBTF_DPRINTF_L5("ibdm", "\tsend_ioc_profile:" 5337 "timeout %x", ioc_info->ioc_timeout_id); 5338 5339 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 5340 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 5341 IBTF_DPRINTF_L2("ibdm", 5342 "\tsend_ioc_profile: msg transport failed"); 5343 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 5344 } 5345 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 5346 return (IBDM_SUCCESS); 5347 } 5348 5349 5350 /* 5351 * ibdm_port_reachable 5352 * Returns B_TRUE if the port GID is reachable by sending 5353 * a SA query to get the NODE record for this port GUID. 5354 */ 5355 static boolean_t 5356 ibdm_port_reachable(ibmf_saa_handle_t sa_hdl, ib_guid_t guid) 5357 { 5358 sa_node_record_t *resp; 5359 size_t length; 5360 5361 /* 5362 * Verify if it's reachable by getting the node record. 5363 */ 5364 if (ibdm_get_node_record_by_port(sa_hdl, guid, &resp, &length) == 5365 IBDM_SUCCESS) { 5366 kmem_free(resp, length); 5367 return (B_TRUE); 5368 } 5369 return (B_FALSE); 5370 } 5371 5372 /* 5373 * ibdm_get_node_record_by_port 5374 * Sends a SA query to get the NODE record for port GUID 5375 * Returns IBDM_SUCCESS if the port GID is reachable. 5376 * 5377 * Note: the caller must be responsible for freeing the resource 5378 * by calling kmem_free(resp, length) later. 
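 *
 * A minimal usage sketch (mirroring ibdm_port_reachable() above):
 *
 *	sa_node_record_t *nr;
 *	size_t len;
 *
 *	if (ibdm_get_node_record_by_port(sa_hdl, guid, &nr, &len) ==
 *	    IBDM_SUCCESS) {
 *		... use nr->NodeInfo ...
 *		kmem_free(nr, len);
 *	}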
5379 */
5380 static int
5381 ibdm_get_node_record_by_port(ibmf_saa_handle_t sa_hdl, ib_guid_t guid,
5382 sa_node_record_t **resp, size_t *length)
5383 {
5384 sa_node_record_t req;
5385 ibmf_saa_access_args_t args;
5386 int ret;
5387 ASSERT(resp != NULL && length != NULL);
5388
5389 IBTF_DPRINTF_L4("ibdm", "\tport_reachable: port_guid %llx",
5390 guid);
5391
5392 bzero(&req, sizeof (sa_node_record_t));
5393 req.NodeInfo.PortGUID = guid;
5394
5395 args.sq_attr_id = SA_NODERECORD_ATTRID;
5396 args.sq_access_type = IBMF_SAA_RETRIEVE;
5397 args.sq_component_mask = SA_NODEINFO_COMPMASK_PORTGUID;
5398 args.sq_template = &req;
5399 args.sq_callback = NULL;
5400 args.sq_callback_arg = NULL;
5401
5402 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) resp);
5403 if (ret != IBMF_SUCCESS) {
5404 IBTF_DPRINTF_L2("ibdm", "\tport_reachable:"
5405 " SA Retrieve Failed: %d", ret);
5406 return (IBDM_FAILURE);
5407 }
5408 if (*resp == NULL || *length == 0) {
5409 IBTF_DPRINTF_L2("ibdm", "\tport_reachable: No records");
5410 return (IBDM_FAILURE);
5411 }
5412 /*
5413 * There is one NodeRecord on each endport on a subnet.
5414 */
5415 ASSERT(*length == sizeof (sa_node_record_t));
5416
5417 return (IBDM_SUCCESS);
5418 }
5419
5420
5421 /*
5422 * Update the gidlist for all affected IOCs when GID becomes
5423 * available/unavailable.
5424 *
5425 * Parameters :
5426 * gidinfo - Incoming / Outgoing GID.
5427 * avail_flag - 1 for GID added, 0 for GID removed.
5428 * - (-1) : IOC gid list updated, ioc_list required.
5429 *
5430 * This function finds the gid_info of the node GUID corresponding to
5431 * the port GID and walks its IOU info to return the list of affected IOCs.
5432 */
5433 static ibdm_ioc_info_t *
5434 ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *gid_info, int avail_flag)
5435 {
5436 ibdm_dp_gidinfo_t *node_gid = NULL;
5437 uint8_t niocs, ii;
5438 ibdm_ioc_info_t *ioc, *ioc_list = NULL, *tmp;
5439
5440 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist");
5441
5442 switch (avail_flag) {
5443 case 1 :
5444 node_gid = ibdm_check_dest_nodeguid(gid_info);
5445 break;
5446 case 0 :
5447 node_gid = ibdm_handle_gid_rm(gid_info);
5448 break;
5449 case -1 :
5450 node_gid = gid_info;
5451 break;
5452 default :
5453 break;
5454 }
5455
5456 if (node_gid == NULL) {
5457 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist: "
5458 "No node GID found, port gid 0x%p, avail_flag %d",
5459 gid_info, avail_flag);
5460 return (NULL);
5461 }
5462
5463 mutex_enter(&node_gid->gl_mutex);
5464 if ((node_gid->gl_state != IBDM_GID_PROBING_COMPLETE &&
5465 node_gid->gl_state != IBDM_GID_PROBING_SKIPPED) ||
5466 node_gid->gl_iou == NULL) {
5467 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist "
5468 "gl_state %x, gl_iou %p", node_gid->gl_state,
5469 node_gid->gl_iou);
5470 mutex_exit(&node_gid->gl_mutex);
5471 return (NULL);
5472 }
5473
5474 niocs = node_gid->gl_iou->iou_info.iou_num_ctrl_slots;
5475 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : niocs %x",
5476 niocs);
5477 for (ii = 0; ii < niocs; ii++) {
5478 ioc = IBDM_GIDINFO2IOCINFO(node_gid, ii);
5479 /*
5480 * Skip IOCs for which probe is not complete or
5481 * reprobe is in progress
5482 */
5483 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) {
5484 tmp = ibdm_dup_ioc_info(ioc, node_gid);
5485 tmp->ioc_info_updated.ib_gid_prop_updated = 1;
5486 tmp->ioc_next = ioc_list;
5487 ioc_list = tmp;
5488 }
5489 }
5490 mutex_exit(&node_gid->gl_mutex);
5491
5492 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : return %p",
5493 ioc_list);
5494 return (ioc_list);
5495 }
5496
5497 /*
5498 * ibdm_saa_event_cb :
5499 * Event handling which does *not* require ibdm_hl_mutex to be
5500 * held is executed in the same thread. This is to prevent
5501 * deadlocks with HCA port down notifications which hold the
5502 * ibdm_hl_mutex.
5503 *
5504 * GID_AVAILABLE event is handled here. A taskq is spawned to
5505 * handle GID_UNAVAILABLE.
5506 *
5507 * A new mutex ibdm_ibnex_mutex has been introduced to protect
5508 * ibnex_callback. This has been done to prevent any possible
5509 * deadlock (described above) while handling GID_AVAILABLE.
5510 *
5511 * IBMF calls the event callback for a HCA port. The SA handle
5512 * for this port remains valid until the callback returns, so
5513 * calls that IBDM makes into IBMF using this SA handle are valid.
5514 *
5515 * IBDM will additionally check (SA handle != NULL), before
5516 * calling IBMF.
5517 */
5518 /*ARGSUSED*/
5519 static void
5520 ibdm_saa_event_cb(ibmf_saa_handle_t ibmf_saa_handle,
5521 ibmf_saa_subnet_event_t ibmf_saa_event,
5522 ibmf_saa_event_details_t *event_details, void *callback_arg)
5523 {
5524 ibdm_saa_event_arg_t *event_arg;
5525 ib_gid_t sgid, dgid;
5526 ibdm_port_attr_t *hca_port;
5527 ibdm_dp_gidinfo_t *gid_info, *node_gid_info = NULL;
5528 sa_node_record_t *nrec;
5529 size_t length;
5530
5531 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg));
5532
5533 hca_port = (ibdm_port_attr_t *)callback_arg;
5534
5535 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_cb(%x, %x, %x, %x)\n",
5536 ibmf_saa_handle, ibmf_saa_event, event_details,
5537 callback_arg);
5538 #ifdef DEBUG
5539 if (ibdm_ignore_saa_event)
5540 return;
5541 #endif
5542
5543 if (ibmf_saa_event == IBMF_SAA_EVENT_GID_AVAILABLE) {
5544 /*
5545 * Ensure no other probe / sweep fabric is in
5546 * progress.
5547 */
5548 mutex_enter(&ibdm.ibdm_mutex);
5549 while (ibdm.ibdm_busy & IBDM_BUSY)
5550 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
5551 ibdm.ibdm_busy |= IBDM_BUSY;
5552 mutex_exit(&ibdm.ibdm_mutex);
5553
5554 /*
5555 * If we already know about this GID, return.
5556 * GID_AVAILABLE may be reported for multiple HCA
5557 * ports.
5558 */ 5559 if ((ibdm_check_dgid(event_details->ie_gid.gid_guid, 5560 event_details->ie_gid.gid_prefix)) != NULL) { 5561 mutex_enter(&ibdm.ibdm_mutex); 5562 ibdm.ibdm_busy &= ~IBDM_BUSY; 5563 cv_broadcast(&ibdm.ibdm_busy_cv); 5564 mutex_exit(&ibdm.ibdm_mutex); 5565 return; 5566 } 5567 5568 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 5569 "Insertion notified", 5570 event_details->ie_gid.gid_prefix, 5571 event_details->ie_gid.gid_guid); 5572 5573 /* This is a new gid, insert it to GID list */ 5574 sgid.gid_prefix = hca_port->pa_sn_prefix; 5575 sgid.gid_guid = hca_port->pa_port_guid; 5576 dgid.gid_prefix = event_details->ie_gid.gid_prefix; 5577 dgid.gid_guid = event_details->ie_gid.gid_guid; 5578 gid_info = ibdm_create_gid_info(hca_port, sgid, dgid); 5579 if (gid_info == NULL) { 5580 IBTF_DPRINTF_L4("ibdm", "\tGID_AVAILABLE: " 5581 "create_gid_info returned NULL"); 5582 mutex_enter(&ibdm.ibdm_mutex); 5583 ibdm.ibdm_busy &= ~IBDM_BUSY; 5584 cv_broadcast(&ibdm.ibdm_busy_cv); 5585 mutex_exit(&ibdm.ibdm_mutex); 5586 return; 5587 } 5588 mutex_enter(&gid_info->gl_mutex); 5589 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED; 5590 mutex_exit(&gid_info->gl_mutex); 5591 5592 /* Get the node GUID */ 5593 if (ibdm_get_node_record_by_port(ibmf_saa_handle, dgid.gid_guid, 5594 &nrec, &length) != IBDM_SUCCESS) { 5595 /* 5596 * Set the state to PROBE_NOT_DONE for the 5597 * next sweep to probe it 5598 */ 5599 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_taskq: " 5600 "Skipping GID : port GUID not found"); 5601 mutex_enter(&gid_info->gl_mutex); 5602 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE; 5603 mutex_exit(&gid_info->gl_mutex); 5604 mutex_enter(&ibdm.ibdm_mutex); 5605 ibdm.ibdm_busy &= ~IBDM_BUSY; 5606 cv_broadcast(&ibdm.ibdm_busy_cv); 5607 mutex_exit(&ibdm.ibdm_mutex); 5608 return; 5609 } 5610 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID; 5611 gid_info->gl_devid = nrec->NodeInfo.DeviceID; 5612 kmem_free(nrec, length); 5613 gid_info->gl_portguid = dgid.gid_guid; 5614 5615 /* 5616 * Get the gid info with the same node GUID. 5617 */ 5618 mutex_enter(&ibdm.ibdm_mutex); 5619 node_gid_info = ibdm.ibdm_dp_gidlist_head; 5620 while (node_gid_info) { 5621 if (node_gid_info->gl_nodeguid == 5622 gid_info->gl_nodeguid && 5623 node_gid_info->gl_iou != NULL) { 5624 break; 5625 } 5626 node_gid_info = node_gid_info->gl_next; 5627 } 5628 mutex_exit(&ibdm.ibdm_mutex); 5629 5630 /* 5631 * Handling a new GID requires filling of gl_hca_list. 5632 * This require ibdm hca_list to be parsed and hence 5633 * holding the ibdm_hl_mutex. Spawning a new thread to 5634 * handle this. 
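 *
 * The taskq entry point is ibdm_saa_handle_new_gid() below; it
 * fills gl_hca_list under ibdm_hl_mutex and then re-acquires
 * IBDM_BUSY on its own before probing the new GID.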
5635 */ 5636 if (node_gid_info == NULL) { 5637 if (taskq_dispatch(system_taskq, 5638 ibdm_saa_handle_new_gid, (void *)gid_info, 5639 TQ_NOSLEEP) == NULL) { 5640 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5641 "new_gid taskq_dispatch failed"); 5642 return; 5643 } 5644 } 5645 5646 mutex_enter(&ibdm.ibdm_mutex); 5647 ibdm.ibdm_busy &= ~IBDM_BUSY; 5648 cv_broadcast(&ibdm.ibdm_busy_cv); 5649 mutex_exit(&ibdm.ibdm_mutex); 5650 return; 5651 } 5652 5653 if (ibmf_saa_event != IBMF_SAA_EVENT_GID_UNAVAILABLE) 5654 return; 5655 5656 event_arg = (ibdm_saa_event_arg_t *)kmem_alloc( 5657 sizeof (ibdm_saa_event_arg_t), KM_SLEEP); 5658 event_arg->ibmf_saa_handle = ibmf_saa_handle; 5659 event_arg->ibmf_saa_event = ibmf_saa_event; 5660 bcopy(event_details, &event_arg->event_details, 5661 sizeof (ibmf_saa_event_details_t)); 5662 event_arg->callback_arg = callback_arg; 5663 5664 if (taskq_dispatch(system_taskq, ibdm_saa_event_taskq, 5665 (void *)event_arg, TQ_NOSLEEP) == NULL) { 5666 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5667 "taskq_dispatch failed"); 5668 ibdm_free_saa_event_arg(event_arg); 5669 return; 5670 } 5671 } 5672 5673 /* 5674 * Handle a new GID discovered by GID_AVAILABLE saa event. 5675 */ 5676 void 5677 ibdm_saa_handle_new_gid(void *arg) 5678 { 5679 ibdm_dp_gidinfo_t *gid_info; 5680 ibdm_hca_list_t *hca_list = NULL; 5681 ibdm_port_attr_t *port = NULL; 5682 ibdm_ioc_info_t *ioc_list = NULL; 5683 5684 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid(%p)", arg); 5685 5686 gid_info = (ibdm_dp_gidinfo_t *)arg; 5687 5688 /* 5689 * Ensure that no other sweep / probe has completed 5690 * probing this gid. 5691 */ 5692 mutex_enter(&gid_info->gl_mutex); 5693 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) { 5694 mutex_exit(&gid_info->gl_mutex); 5695 return; 5696 } 5697 mutex_exit(&gid_info->gl_mutex); 5698 5699 /* 5700 * Parse HCAs to fill gl_hca_list 5701 */ 5702 mutex_enter(&ibdm.ibdm_hl_mutex); 5703 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5704 ibdm_get_next_port(&hca_list, &port, 1)) { 5705 if (ibdm_port_reachable(port->pa_sa_hdl, 5706 gid_info->gl_portguid) == B_TRUE) { 5707 ibdm_addto_glhcalist(gid_info, hca_list); 5708 } 5709 } 5710 mutex_exit(&ibdm.ibdm_hl_mutex); 5711 5712 /* 5713 * Ensure no other probe / sweep fabric is in 5714 * progress. 
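 *
 * The IBDM_BUSY handshake below mirrors the one used by the
 * sweep / probe paths: wait on ibdm_busy_cv while IBDM_BUSY is
 * set, hold it for the duration of this probe, and release it
 * with a cv_broadcast() once the probe (or an early bail-out)
 * is done.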
5715 */ 5716 mutex_enter(&ibdm.ibdm_mutex); 5717 while (ibdm.ibdm_busy & IBDM_BUSY) 5718 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5719 ibdm.ibdm_busy |= IBDM_BUSY; 5720 mutex_exit(&ibdm.ibdm_mutex); 5721 5722 /* 5723 * New IOU probe it, to check if new IOCs 5724 */ 5725 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid: " 5726 "new GID : probing"); 5727 mutex_enter(&ibdm.ibdm_mutex); 5728 ibdm.ibdm_ngid_probes_in_progress++; 5729 mutex_exit(&ibdm.ibdm_mutex); 5730 mutex_enter(&gid_info->gl_mutex); 5731 gid_info->gl_reprobe_flag = 0; 5732 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE; 5733 mutex_exit(&gid_info->gl_mutex); 5734 ibdm_probe_gid_thread((void *)gid_info); 5735 5736 mutex_enter(&ibdm.ibdm_mutex); 5737 ibdm_wait_probe_completion(); 5738 mutex_exit(&ibdm.ibdm_mutex); 5739 5740 if (gid_info->gl_iou == NULL) { 5741 mutex_enter(&ibdm.ibdm_mutex); 5742 ibdm.ibdm_busy &= ~IBDM_BUSY; 5743 cv_broadcast(&ibdm.ibdm_busy_cv); 5744 mutex_exit(&ibdm.ibdm_mutex); 5745 return; 5746 } 5747 5748 /* 5749 * Update GID list in all IOCs affected by this 5750 */ 5751 ioc_list = ibdm_update_ioc_gidlist(gid_info, 1); 5752 5753 /* 5754 * Pass on the IOCs with updated GIDs to IBnexus 5755 */ 5756 if (ioc_list) { 5757 mutex_enter(&ibdm.ibdm_ibnex_mutex); 5758 if (ibdm.ibdm_ibnex_callback != NULL) { 5759 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list, 5760 IBDM_EVENT_IOC_PROP_UPDATE); 5761 } 5762 mutex_exit(&ibdm.ibdm_ibnex_mutex); 5763 } 5764 5765 mutex_enter(&ibdm.ibdm_mutex); 5766 ibdm.ibdm_busy &= ~IBDM_BUSY; 5767 cv_broadcast(&ibdm.ibdm_busy_cv); 5768 mutex_exit(&ibdm.ibdm_mutex); 5769 } 5770 5771 /* 5772 * ibdm_saa_event_taskq : 5773 * GID_UNAVAILABLE Event handling requires ibdm_hl_mutex to be 5774 * held. The GID_UNAVAILABLE handling is done in a taskq to 5775 * prevent deadlocks with HCA port down notifications which hold 5776 * ibdm_hl_mutex. 
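 *
 * In outline, the taskq: verifies that the port_attr / SA handle
 * passed in the event argument is still valid, checks whether any
 * other HCA port can still reach the GID (in which case nothing
 * needs to be done), and otherwise removes the gid_info from the
 * global GID list and hands any affected IOCs to IB nexus via
 * IBDM_EVENT_IOC_PROP_UPDATE.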
5777 */ 5778 void 5779 ibdm_saa_event_taskq(void *arg) 5780 { 5781 ibdm_saa_event_arg_t *event_arg; 5782 ibmf_saa_handle_t ibmf_saa_handle; 5783 ibmf_saa_subnet_event_t ibmf_saa_event; 5784 ibmf_saa_event_details_t *event_details; 5785 void *callback_arg; 5786 5787 ibdm_dp_gidinfo_t *gid_info; 5788 ibdm_port_attr_t *hca_port, *port = NULL; 5789 ibdm_hca_list_t *hca_list = NULL; 5790 int sa_handle_valid = 0; 5791 ibdm_ioc_info_t *ioc_list = NULL; 5792 5793 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 5794 5795 event_arg = (ibdm_saa_event_arg_t *)arg; 5796 ibmf_saa_handle = event_arg->ibmf_saa_handle; 5797 ibmf_saa_event = event_arg->ibmf_saa_event; 5798 event_details = &event_arg->event_details; 5799 callback_arg = event_arg->callback_arg; 5800 5801 ASSERT(callback_arg != NULL); 5802 ASSERT(ibmf_saa_event == IBMF_SAA_EVENT_GID_UNAVAILABLE); 5803 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_taskq(%x, %x, %x, %x)", 5804 ibmf_saa_handle, ibmf_saa_event, event_details, 5805 callback_arg); 5806 5807 hca_port = (ibdm_port_attr_t *)callback_arg; 5808 5809 /* Check if the port_attr is still valid */ 5810 mutex_enter(&ibdm.ibdm_hl_mutex); 5811 for (ibdm_get_next_port(&hca_list, &port, 0); port; 5812 ibdm_get_next_port(&hca_list, &port, 0)) { 5813 if (port == hca_port && port->pa_port_guid == 5814 hca_port->pa_port_guid) { 5815 if (ibmf_saa_handle == hca_port->pa_sa_hdl) 5816 sa_handle_valid = 1; 5817 break; 5818 } 5819 } 5820 mutex_exit(&ibdm.ibdm_hl_mutex); 5821 if (sa_handle_valid == 0) { 5822 ibdm_free_saa_event_arg(event_arg); 5823 return; 5824 } 5825 5826 if (hca_port && (hca_port->pa_sa_hdl == NULL || 5827 ibmf_saa_handle != hca_port->pa_sa_hdl)) { 5828 ibdm_free_saa_event_arg(event_arg); 5829 return; 5830 } 5831 hca_list = NULL; 5832 port = NULL; 5833 5834 /* 5835 * Check if the GID is visible to other HCA ports. 5836 * Return if so. 5837 */ 5838 mutex_enter(&ibdm.ibdm_hl_mutex); 5839 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5840 ibdm_get_next_port(&hca_list, &port, 1)) { 5841 if (ibdm_port_reachable(port->pa_sa_hdl, 5842 event_details->ie_gid.gid_guid) == B_TRUE) { 5843 mutex_exit(&ibdm.ibdm_hl_mutex); 5844 ibdm_free_saa_event_arg(event_arg); 5845 return; 5846 } 5847 } 5848 mutex_exit(&ibdm.ibdm_hl_mutex); 5849 5850 /* 5851 * Ensure no other probe / sweep fabric is in 5852 * progress. 5853 */ 5854 mutex_enter(&ibdm.ibdm_mutex); 5855 while (ibdm.ibdm_busy & IBDM_BUSY) 5856 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5857 ibdm.ibdm_busy |= IBDM_BUSY; 5858 mutex_exit(&ibdm.ibdm_mutex); 5859 5860 /* 5861 * If this GID is no longer in GID list, return 5862 * GID_UNAVAILABLE may be reported for multiple HCA 5863 * ports. 
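 *
 * Only the first taskq instance that still finds the GID on the
 * list proceeds with the teardown below; later duplicates simply
 * drop IBDM_BUSY and return.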
5864 */ 5865 mutex_enter(&ibdm.ibdm_mutex); 5866 gid_info = ibdm.ibdm_dp_gidlist_head; 5867 while (gid_info) { 5868 if (gid_info->gl_portguid == 5869 event_details->ie_gid.gid_guid) { 5870 break; 5871 } 5872 gid_info = gid_info->gl_next; 5873 } 5874 mutex_exit(&ibdm.ibdm_mutex); 5875 if (gid_info == NULL) { 5876 mutex_enter(&ibdm.ibdm_mutex); 5877 ibdm.ibdm_busy &= ~IBDM_BUSY; 5878 cv_broadcast(&ibdm.ibdm_busy_cv); 5879 mutex_exit(&ibdm.ibdm_mutex); 5880 ibdm_free_saa_event_arg(event_arg); 5881 return; 5882 } 5883 5884 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 5885 "Unavailable notification", 5886 event_details->ie_gid.gid_prefix, 5887 event_details->ie_gid.gid_guid); 5888 5889 /* 5890 * Update GID list in all IOCs affected by this 5891 */ 5892 if (gid_info->gl_state == IBDM_GID_PROBING_SKIPPED || 5893 gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) 5894 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0); 5895 5896 /* 5897 * Remove GID from the global GID list 5898 * Handle the case where all port GIDs for an 5899 * IOU have been hot-removed. Check both gid_info 5900 * & ioc_info for checking ngids. 5901 */ 5902 mutex_enter(&ibdm.ibdm_mutex); 5903 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) { 5904 mutex_enter(&gid_info->gl_mutex); 5905 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou); 5906 mutex_exit(&gid_info->gl_mutex); 5907 } 5908 if (gid_info->gl_prev != NULL) 5909 gid_info->gl_prev->gl_next = gid_info->gl_next; 5910 if (gid_info->gl_next != NULL) 5911 gid_info->gl_next->gl_prev = gid_info->gl_prev; 5912 5913 if (gid_info == ibdm.ibdm_dp_gidlist_head) 5914 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next; 5915 if (gid_info == ibdm.ibdm_dp_gidlist_tail) 5916 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev; 5917 ibdm.ibdm_ngids--; 5918 5919 ibdm.ibdm_busy &= ~IBDM_BUSY; 5920 cv_broadcast(&ibdm.ibdm_busy_cv); 5921 mutex_exit(&ibdm.ibdm_mutex); 5922 5923 /* free the hca_list on this gid_info */ 5924 ibdm_delete_glhca_list(gid_info); 5925 5926 mutex_destroy(&gid_info->gl_mutex); 5927 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t)); 5928 5929 /* 5930 * Pass on the IOCs with updated GIDs to IBnexus 5931 */ 5932 if (ioc_list) { 5933 IBTF_DPRINTF_L4("ibdm", "\tGID_UNAVAILABLE " 5934 "IOC_PROP_UPDATE for %p\n", ioc_list); 5935 mutex_enter(&ibdm.ibdm_ibnex_mutex); 5936 if (ibdm.ibdm_ibnex_callback != NULL) { 5937 (*ibdm.ibdm_ibnex_callback)((void *) 5938 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 5939 } 5940 mutex_exit(&ibdm.ibdm_ibnex_mutex); 5941 } 5942 5943 ibdm_free_saa_event_arg(event_arg); 5944 } 5945 5946 5947 static int 5948 ibdm_cmp_gid_list(ibdm_gid_t *new, ibdm_gid_t *prev) 5949 { 5950 ibdm_gid_t *scan_new, *scan_prev; 5951 int cmp_failed = 0; 5952 5953 ASSERT(new != NULL); 5954 ASSERT(prev != NULL); 5955 5956 /* 5957 * Search for each new gid anywhere in the prev GID list. 5958 * Note that the gid list could have been re-ordered. 
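 *
 * Each GID in the new list is looked up in the previous list with
 * a nested scan; the function returns 1 as soon as one GID is not
 * found and 0 if every GID is present. Callers treat a non-zero
 * return as "the GID list has changed".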
5959 */
5960 for (scan_new = new; scan_new; scan_new = scan_new->gid_next) {
5961 for (scan_prev = prev, cmp_failed = 1; scan_prev;
5962 scan_prev = scan_prev->gid_next) {
5963 if (scan_prev->gid_dgid_hi == scan_new->gid_dgid_hi &&
5964 scan_prev->gid_dgid_lo == scan_new->gid_dgid_lo) {
5965 cmp_failed = 0;
5966 break;
5967 }
5968 }
5969
5970 if (cmp_failed)
5971 return (1);
5972 }
5973 return (0);
5974 }
5975
5976 /*
5977 * This is always called in a single thread.
5978 * This function updates the gid_list and serv_list of the IOC.
5979 * The current gid_list is in ioc_info_t (contains only port
5980 * guids for which probe is done) & gidinfo_t (other port gids).
5981 * The gids in both locations are used for comparison.
5982 */
5983 static void
5984 ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *ioc, ibdm_dp_gidinfo_t *gidinfo)
5985 {
5986 ibdm_gid_t *cur_gid_list;
5987 uint_t cur_nportgids;
5988
5989 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
5990
5991 ioc->ioc_info_updated.ib_prop_updated = 0;
5992
5993
5994 /* Current GID list in gid_info only */
5995 cur_gid_list = gidinfo->gl_gid;
5996 cur_nportgids = gidinfo->gl_ngids;
5997
5998 if (ioc->ioc_prev_serv_cnt !=
5999 ioc->ioc_profile.ioc_service_entries ||
6000 ibdm_serv_cmp(&ioc->ioc_serv[0], &ioc->ioc_prev_serv[0],
6001 ioc->ioc_prev_serv_cnt))
6002 ioc->ioc_info_updated.ib_srv_prop_updated = 1;
6003
6004 if (ioc->ioc_prev_nportgids != cur_nportgids ||
6005 ioc->ioc_prev_gid_list == NULL || cur_gid_list == NULL) {
6006 ioc->ioc_info_updated.ib_gid_prop_updated = 1;
6007 } else if (ibdm_cmp_gid_list(ioc->ioc_prev_gid_list, cur_gid_list)) {
6008 ioc->ioc_info_updated.ib_gid_prop_updated = 1;
6009 }
6010
6011 /* Zero out previous entries */
6012 ibdm_free_gid_list(ioc->ioc_prev_gid_list);
6013 if (ioc->ioc_prev_serv)
6014 kmem_free(ioc->ioc_prev_serv, ioc->ioc_prev_serv_cnt *
6015 sizeof (ibdm_srvents_info_t));
6016 ioc->ioc_prev_serv_cnt = 0;
6017 ioc->ioc_prev_nportgids = 0;
6018 ioc->ioc_prev_serv = NULL;
6019 ioc->ioc_prev_gid_list = NULL;
6020 }
6021
6022 /*
6023 * Handle GID removal. This returns the gid_info of a GID for the same
6024 * node GUID, if found. For a GID with IOU information, the same
6025 * gid_info is returned if no gid_info with the same node_guid is found.
6026 */
6027 static ibdm_dp_gidinfo_t *
6028 ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *rm_gid)
6029 {
6030 ibdm_dp_gidinfo_t *gid_list;
6031
6032 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm(0x%p)", rm_gid);
6033
6034 if (rm_gid->gl_iou == NULL) {
6035 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm NO iou");
6036 /*
6037 * Search for a GID with same node_guid and
6038 * gl_iou != NULL
6039 */
6040 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
6041 gid_list = gid_list->gl_next) {
6042 if (gid_list->gl_iou != NULL && (gid_list->gl_nodeguid
6043 == rm_gid->gl_nodeguid))
6044 break;
6045 }
6046
6047 if (gid_list)
6048 ibdm_rmfrom_glgid_list(gid_list, rm_gid);
6049
6050 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list);
6051 return (gid_list);
6052 } else {
6053 /*
6054 * Search for a GID with same node_guid and
6055 * gl_iou == NULL
6056 */
6057 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm with iou");
6058 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
6059 gid_list = gid_list->gl_next) {
6060 if (gid_list->gl_iou == NULL && (gid_list->gl_nodeguid
6061 == rm_gid->gl_nodeguid))
6062 break;
6063 }
6064
6065 if (gid_list) {
6066 /*
6067 * Copy the following fields from rm_gid :
6068 * 1. gl_state
6069 * 2. gl_iou
6070 * 3. gl_gid & gl_ngids
6071 *
6072 * Note : Function is synchronized by
6073 * ibdm_busy flag.
6074 *
6075 * Note : Redirect info is initialized if
6076 * any MADs for the GID fail
6077 */
6078 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm "
6079 "copying info to GID with gl_iou != NULL");
6080 gid_list->gl_state = rm_gid->gl_state;
6081 gid_list->gl_iou = rm_gid->gl_iou;
6082 gid_list->gl_gid = rm_gid->gl_gid;
6083 gid_list->gl_ngids = rm_gid->gl_ngids;
6084
6085 /* Remove the GID from gl_gid list */
6086 ibdm_rmfrom_glgid_list(gid_list, rm_gid);
6087 } else {
6088 /*
6089 * Handle a case where all GIDs to the IOU have
6090 * been removed.
6091 */
6092 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm 0 GID "
6093 "to IOU");
6094
6095 ibdm_rmfrom_glgid_list(rm_gid, rm_gid);
6096 return (rm_gid);
6097 }
6098 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list);
6099 return (gid_list);
6100 }
6101 }
6102
6103 static void
6104 ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *gid_info,
6105 ibdm_dp_gidinfo_t *rm_gid)
6106 {
6107 ibdm_gid_t *tmp, *prev;
6108
6109 IBTF_DPRINTF_L4("ibdm", "\trmfrom_glgid (%p, %p)",
6110 gid_info, rm_gid);
6111
6112 for (tmp = gid_info->gl_gid, prev = NULL; tmp; ) {
6113 if (tmp->gid_dgid_hi == rm_gid->gl_dgid_hi &&
6114 tmp->gid_dgid_lo == rm_gid->gl_dgid_lo) {
6115 if (prev == NULL)
6116 gid_info->gl_gid = tmp->gid_next;
6117 else
6118 prev->gid_next = tmp->gid_next;
6119
6120 kmem_free(tmp, sizeof (ibdm_gid_t));
6121 gid_info->gl_ngids--;
6122 break;
6123 } else {
6124 prev = tmp;
6125 tmp = tmp->gid_next;
6126 }
6127 }
6128 }
6129
6130 static void
6131 ibdm_addto_gidlist(ibdm_gid_t **src_ptr, ibdm_gid_t *dest)
6132 {
6133 ibdm_gid_t *head = NULL, *new, *tail;
6134
6135 /* First copy the destination */
6136 for (; dest; dest = dest->gid_next) {
6137 new = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP);
6138 new->gid_dgid_hi = dest->gid_dgid_hi;
6139 new->gid_dgid_lo = dest->gid_dgid_lo;
6140 new->gid_next = head;
6141 head = new;
6142 }
6143
6144 /* Insert this to the source */
6145 if (*src_ptr == NULL)
6146 *src_ptr = head;
6147 else {
6148 for (tail = *src_ptr; tail->gid_next != NULL;
6149 tail = tail->gid_next)
6150 ;
6151
6152 tail->gid_next = head;
6153 }
6154 }
6155
6156 static void
6157 ibdm_free_gid_list(ibdm_gid_t *head)
6158 {
6159 ibdm_gid_t *delete;
6160
6161 for (delete = head; delete; ) {
6162 head = delete->gid_next;
6163 kmem_free(delete, sizeof (ibdm_gid_t));
6164 delete = head;
6165 }
6166 }
6167
6168 /*
6169 * This function rescans the DM capable GIDs (gl_state is
6170 * IBDM_GID_PROBING_COMPLETE or IBDM_GID_PROBING_SKIPPED). This
6171 * basically checks if the DM capable GID is reachable. If
6172 * not, this is handled the same way as GID_UNAVAILABLE,
6173 * except that notifications are not sent to IBnexus.
6174 *
6175 * This function also initializes the ioc_prev_list for
6176 * a particular IOC (when called from probe_ioc, with
6177 * ioc_guidp != NULL) or all IOCs for the gid (called from
6178 * sweep_fabric, ioc_guidp == NULL).
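 *
 * The ioc_prev_gid_list seeded here is later consumed by
 * ibdm_reprobe_update_port_srv(), which compares it against the
 * current gl_gid list to decide whether ib_gid_prop_updated needs
 * to be set for the IOC.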
6179 */ 6180 static void 6181 ibdm_rescan_gidlist(ib_guid_t *ioc_guidp) 6182 { 6183 ibdm_dp_gidinfo_t *gid_info, *tmp; 6184 int ii, niocs, found; 6185 ibdm_hca_list_t *hca_list = NULL; 6186 ibdm_port_attr_t *port = NULL; 6187 ibdm_ioc_info_t *ioc_list; 6188 6189 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) { 6190 found = 0; 6191 if (gid_info->gl_state != IBDM_GID_PROBING_SKIPPED && 6192 gid_info->gl_state != IBDM_GID_PROBING_COMPLETE) { 6193 gid_info = gid_info->gl_next; 6194 continue; 6195 } 6196 6197 /* 6198 * Check if the GID is visible to any HCA ports. 6199 * Return if so. 6200 */ 6201 mutex_enter(&ibdm.ibdm_hl_mutex); 6202 for (ibdm_get_next_port(&hca_list, &port, 1); port; 6203 ibdm_get_next_port(&hca_list, &port, 1)) { 6204 if (ibdm_port_reachable(port->pa_sa_hdl, 6205 gid_info->gl_dgid_lo) == B_TRUE) { 6206 found = 1; 6207 break; 6208 } 6209 } 6210 mutex_exit(&ibdm.ibdm_hl_mutex); 6211 6212 if (found) { 6213 if (gid_info->gl_iou == NULL) { 6214 gid_info = gid_info->gl_next; 6215 continue; 6216 } 6217 6218 /* Intialize the ioc_prev_gid_list */ 6219 niocs = 6220 gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 6221 for (ii = 0; ii < niocs; ii++) { 6222 ioc_list = IBDM_GIDINFO2IOCINFO(gid_info, ii); 6223 6224 if (ioc_guidp == NULL || (*ioc_guidp == 6225 ioc_list->ioc_profile.ioc_guid)) { 6226 /* Add info of GIDs in gid_info also */ 6227 ibdm_addto_gidlist( 6228 &ioc_list->ioc_prev_gid_list, 6229 gid_info->gl_gid); 6230 ioc_list->ioc_prev_nportgids = 6231 gid_info->gl_ngids; 6232 } 6233 } 6234 gid_info = gid_info->gl_next; 6235 continue; 6236 } 6237 6238 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist " 6239 "deleted port GUID %llx", 6240 gid_info->gl_dgid_lo); 6241 6242 /* 6243 * Update GID list in all IOCs affected by this 6244 */ 6245 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0); 6246 6247 /* 6248 * Remove GID from the global GID list 6249 * Handle the case where all port GIDs for an 6250 * IOU have been hot-removed. 6251 */ 6252 mutex_enter(&ibdm.ibdm_mutex); 6253 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) { 6254 mutex_enter(&gid_info->gl_mutex); 6255 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou); 6256 mutex_exit(&gid_info->gl_mutex); 6257 } 6258 6259 tmp = gid_info->gl_next; 6260 if (gid_info->gl_prev != NULL) 6261 gid_info->gl_prev->gl_next = gid_info->gl_next; 6262 if (gid_info->gl_next != NULL) 6263 gid_info->gl_next->gl_prev = gid_info->gl_prev; 6264 6265 if (gid_info == ibdm.ibdm_dp_gidlist_head) 6266 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next; 6267 if (gid_info == ibdm.ibdm_dp_gidlist_tail) 6268 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev; 6269 ibdm.ibdm_ngids--; 6270 mutex_exit(&ibdm.ibdm_mutex); 6271 6272 /* free the hca_list on this gid_info */ 6273 ibdm_delete_glhca_list(gid_info); 6274 6275 mutex_destroy(&gid_info->gl_mutex); 6276 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t)); 6277 6278 gid_info = tmp; 6279 6280 /* 6281 * Pass on the IOCs with updated GIDs to IBnexus 6282 */ 6283 if (ioc_list) { 6284 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist " 6285 "IOC_PROP_UPDATE for %p\n", ioc_list); 6286 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6287 if (ibdm.ibdm_ibnex_callback != NULL) { 6288 (*ibdm.ibdm_ibnex_callback)((void *) 6289 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6290 } 6291 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6292 } 6293 } 6294 } 6295 6296 /* 6297 * This function notifies IBnex of IOCs on this GID. 6298 * Notification is for GIDs with gl_reprobe_flag set. 
6299 * The flag is set when IOC probe / fabric sweep 6300 * probes a GID starting from CLASS port info. 6301 * 6302 * IBnexus will have information of a reconnected IOC 6303 * if it had probed it before. If this is a new IOC, 6304 * IBnexus ignores the notification. 6305 * 6306 * This function should be called with no locks held. 6307 */ 6308 static void 6309 ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *gid_info) 6310 { 6311 ibdm_ioc_info_t *ioc_list; 6312 6313 if (gid_info->gl_reprobe_flag == 0 || 6314 gid_info->gl_iou == NULL) 6315 return; 6316 6317 ioc_list = ibdm_update_ioc_gidlist(gid_info, -1); 6318 6319 /* 6320 * Pass on the IOCs with updated GIDs to IBnexus 6321 */ 6322 if (ioc_list) { 6323 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6324 if (ibdm.ibdm_ibnex_callback != NULL) { 6325 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list, 6326 IBDM_EVENT_IOC_PROP_UPDATE); 6327 } 6328 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6329 } 6330 } 6331 6332 6333 static void 6334 ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *arg) 6335 { 6336 if (arg != NULL) 6337 kmem_free(arg, sizeof (ibdm_saa_event_arg_t)); 6338 } 6339 6340 /* 6341 * This function parses the list of HCAs and HCA ports 6342 * to return the port_attr of the next HCA port. A port 6343 * connected to IB fabric (port_state active) is returned, 6344 * if connected_flag is set. 6345 */ 6346 static void 6347 ibdm_get_next_port(ibdm_hca_list_t **inp_hcap, 6348 ibdm_port_attr_t **inp_portp, int connect_flag) 6349 { 6350 int ii; 6351 ibdm_port_attr_t *port, *next_port = NULL; 6352 ibdm_port_attr_t *inp_port; 6353 ibdm_hca_list_t *hca_list; 6354 int found = 0; 6355 6356 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 6357 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port(%p, %p, %x)", 6358 inp_hcap, inp_portp, connect_flag); 6359 6360 hca_list = *inp_hcap; 6361 inp_port = *inp_portp; 6362 6363 if (hca_list == NULL) 6364 hca_list = ibdm.ibdm_hca_list_head; 6365 6366 for (; hca_list; hca_list = hca_list->hl_next) { 6367 for (ii = 0; ii < hca_list->hl_nports; ii++) { 6368 port = &hca_list->hl_port_attr[ii]; 6369 6370 /* 6371 * inp_port != NULL; 6372 * Skip till we find the matching port 6373 */ 6374 if (inp_port && !found) { 6375 if (inp_port == port) 6376 found = 1; 6377 continue; 6378 } 6379 6380 if (!connect_flag) { 6381 next_port = port; 6382 break; 6383 } 6384 6385 if (port->pa_sa_hdl == NULL) 6386 ibdm_initialize_port(port); 6387 if (port->pa_sa_hdl == NULL) 6388 (void) ibdm_fini_port(port); 6389 else if (next_port == NULL && 6390 port->pa_sa_hdl != NULL && 6391 port->pa_state == IBT_PORT_ACTIVE) { 6392 next_port = port; 6393 break; 6394 } 6395 } 6396 6397 if (next_port) 6398 break; 6399 } 6400 6401 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port : " 6402 "returns hca_list %p port %p", hca_list, next_port); 6403 *inp_hcap = hca_list; 6404 *inp_portp = next_port; 6405 } 6406 6407 static void 6408 ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *nodegid, ibdm_dp_gidinfo_t *addgid) 6409 { 6410 ibdm_gid_t *tmp; 6411 6412 tmp = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP); 6413 tmp->gid_dgid_hi = addgid->gl_dgid_hi; 6414 tmp->gid_dgid_lo = addgid->gl_dgid_lo; 6415 6416 mutex_enter(&nodegid->gl_mutex); 6417 tmp->gid_next = nodegid->gl_gid; 6418 nodegid->gl_gid = tmp; 6419 nodegid->gl_ngids++; 6420 mutex_exit(&nodegid->gl_mutex); 6421 } 6422 6423 static void 6424 ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *gid_info, 6425 ibdm_hca_list_t *hca) 6426 { 6427 ibdm_hca_list_t *head, *prev = NULL, *temp; 6428 6429 IBTF_DPRINTF_L4(ibdm_string, "\taddto_glhcalist(%p, %p) " 6430 ": gl_hca_list %p", 
gid_info, hca, gid_info->gl_hca_list); 6431 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 6432 6433 mutex_enter(&gid_info->gl_mutex); 6434 head = gid_info->gl_hca_list; 6435 if (head == NULL) { 6436 head = ibdm_dup_hca_attr(hca); 6437 head->hl_next = NULL; 6438 gid_info->gl_hca_list = head; 6439 mutex_exit(&gid_info->gl_mutex); 6440 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 6441 "gid %p, gl_hca_list %p", gid_info, 6442 gid_info->gl_hca_list); 6443 return; 6444 } 6445 6446 /* Check if already in the list */ 6447 while (head) { 6448 if (head->hl_hca_guid == hca->hl_hca_guid) { 6449 mutex_exit(&gid_info->gl_mutex); 6450 IBTF_DPRINTF_L4(ibdm_string, 6451 "\taddto_glhcalist : gid %p hca %p dup", 6452 gid_info, hca); 6453 return; 6454 } 6455 prev = head; 6456 head = head->hl_next; 6457 } 6458 6459 /* Add this HCA to gl_hca_list */ 6460 temp = ibdm_dup_hca_attr(hca); 6461 temp->hl_next = NULL; 6462 prev->hl_next = temp; 6463 mutex_exit(&gid_info->gl_mutex); 6464 6465 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 6466 "gid %p, gl_hca_list %p", gid_info, gid_info->gl_hca_list); 6467 } 6468 6469 static void 6470 ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *gid_info) 6471 { 6472 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 6473 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 6474 6475 mutex_enter(&gid_info->gl_mutex); 6476 if (gid_info->gl_hca_list) 6477 ibdm_ibnex_free_hca_list(gid_info->gl_hca_list); 6478 gid_info->gl_hca_list = NULL; 6479 mutex_exit(&gid_info->gl_mutex); 6480 } 6481 6482 6483 static void 6484 ibdm_reset_all_dgids(ibmf_saa_handle_t port_sa_hdl) 6485 { 6486 IBTF_DPRINTF_L4(ibdm_string, "\treset_all_dgids(%X)", 6487 port_sa_hdl); 6488 6489 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 6490 ASSERT(!MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 6491 6492 /* Check : Not busy in another probe / sweep */ 6493 mutex_enter(&ibdm.ibdm_mutex); 6494 if ((ibdm.ibdm_busy & IBDM_BUSY) == 0) { 6495 ibdm_dp_gidinfo_t *gid_info; 6496 6497 ibdm.ibdm_busy |= IBDM_BUSY; 6498 mutex_exit(&ibdm.ibdm_mutex); 6499 6500 /* 6501 * Check if any GID is using the SA & IBMF handle 6502 * of HCA port going down. Reset ibdm_dp_gidinfo_t 6503 * using another HCA port which can reach the GID. 6504 * This is for DM capable GIDs only, no need to do 6505 * this for others 6506 * 6507 * Delete the GID if no alternate HCA port to reach 6508 * it is found. 6509 */ 6510 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) { 6511 ibdm_dp_gidinfo_t *tmp; 6512 6513 IBTF_DPRINTF_L4(ibdm_string, "\tevent_hdlr " 6514 "checking gidinfo %p", gid_info); 6515 6516 if (gid_info->gl_sa_hdl == port_sa_hdl) { 6517 IBTF_DPRINTF_L3(ibdm_string, 6518 "\tevent_hdlr: down HCA port hdl " 6519 "matches gid %p", gid_info); 6520 6521 /* 6522 * The non-DM GIDs can come back 6523 * with a new subnet prefix, when 6524 * the HCA port commes up again. To 6525 * avoid issues, delete non-DM 6526 * capable GIDs, if the gid was 6527 * discovered using the HCA port 6528 * going down. This is ensured by 6529 * setting gl_disconnected to 1. 
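 *
 * DM capable GIDs (gl_nodeguid != 0) are instead passed to
 * ibdm_reset_gidinfo() below, which tries to re-establish a path
 * through another active HCA port; only if that fails is
 * gl_disconnected set and the GID deleted.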
6530 */ 6531 if (gid_info->gl_nodeguid == 0) 6532 gid_info->gl_disconnected = 1; 6533 else 6534 ibdm_reset_gidinfo(gid_info); 6535 6536 if (gid_info->gl_disconnected) { 6537 IBTF_DPRINTF_L3(ibdm_string, 6538 "\tevent_hdlr: deleting" 6539 " gid %p", gid_info); 6540 tmp = gid_info; 6541 gid_info = gid_info->gl_next; 6542 ibdm_delete_gidinfo(tmp); 6543 } else 6544 gid_info = gid_info->gl_next; 6545 } else 6546 gid_info = gid_info->gl_next; 6547 } 6548 6549 mutex_enter(&ibdm.ibdm_mutex); 6550 ibdm.ibdm_busy &= ~IBDM_BUSY; 6551 cv_signal(&ibdm.ibdm_busy_cv); 6552 } 6553 mutex_exit(&ibdm.ibdm_mutex); 6554 } 6555 6556 static void 6557 ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 6558 { 6559 ibdm_hca_list_t *hca_list = NULL; 6560 ibdm_port_attr_t *port = NULL; 6561 int gid_reinited = 0; 6562 sa_node_record_t *nr, *tmp; 6563 sa_portinfo_record_t *pi; 6564 size_t nr_len = 0, pi_len = 0; 6565 size_t path_len; 6566 ib_gid_t sgid, dgid; 6567 int ret, ii, nrecords; 6568 sa_path_record_t *path; 6569 uint8_t npaths = 1; 6570 ibdm_pkey_tbl_t *pkey_tbl; 6571 6572 IBTF_DPRINTF_L4(ibdm_string, "\treset_gidinfo(%p)", gidinfo); 6573 6574 /* 6575 * Get list of all the ports reachable from the local known HCA 6576 * ports which are active 6577 */ 6578 mutex_enter(&ibdm.ibdm_hl_mutex); 6579 for (ibdm_get_next_port(&hca_list, &port, 1); port; 6580 ibdm_get_next_port(&hca_list, &port, 1)) { 6581 6582 6583 /* 6584 * Get the path and re-populate the gidinfo. 6585 * Getting the path is the same probe_ioc 6586 * Init the gid info as in ibdm_create_gidinfo() 6587 */ 6588 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, 6589 gidinfo->gl_nodeguid); 6590 if (nr == NULL) { 6591 IBTF_DPRINTF_L4(ibdm_string, 6592 "\treset_gidinfo : no records"); 6593 continue; 6594 } 6595 6596 nrecords = (nr_len / sizeof (sa_node_record_t)); 6597 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) { 6598 if (tmp->NodeInfo.PortGUID == gidinfo->gl_portguid) 6599 break; 6600 } 6601 6602 if (ii == nrecords) { 6603 IBTF_DPRINTF_L4(ibdm_string, 6604 "\treset_gidinfo : no record for portguid"); 6605 kmem_free(nr, nr_len); 6606 continue; 6607 } 6608 6609 pi = ibdm_get_portinfo(port->pa_sa_hdl, &pi_len, tmp->LID); 6610 if (pi == NULL) { 6611 IBTF_DPRINTF_L4(ibdm_string, 6612 "\treset_gidinfo : no portinfo"); 6613 kmem_free(nr, nr_len); 6614 continue; 6615 } 6616 6617 sgid.gid_prefix = port->pa_sn_prefix; 6618 sgid.gid_guid = port->pa_port_guid; 6619 dgid.gid_prefix = pi->PortInfo.GidPrefix; 6620 dgid.gid_guid = tmp->NodeInfo.PortGUID; 6621 6622 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, sgid, dgid, 6623 IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, &path_len, &path); 6624 6625 if ((ret != IBMF_SUCCESS) || path == NULL) { 6626 IBTF_DPRINTF_L4(ibdm_string, 6627 "\treset_gidinfo : no paths"); 6628 kmem_free(pi, pi_len); 6629 kmem_free(nr, nr_len); 6630 continue; 6631 } 6632 6633 gidinfo->gl_dgid_hi = path->DGID.gid_prefix; 6634 gidinfo->gl_dgid_lo = path->DGID.gid_guid; 6635 gidinfo->gl_sgid_hi = path->SGID.gid_prefix; 6636 gidinfo->gl_sgid_lo = path->SGID.gid_guid; 6637 gidinfo->gl_p_key = path->P_Key; 6638 gidinfo->gl_sa_hdl = port->pa_sa_hdl; 6639 gidinfo->gl_ibmf_hdl = port->pa_ibmf_hdl; 6640 gidinfo->gl_slid = path->SLID; 6641 gidinfo->gl_dlid = path->DLID; 6642 /* Reset redirect info, next MAD will set if redirected */ 6643 gidinfo->gl_redirected = 0; 6644 gidinfo->gl_devid = (*tmp).NodeInfo.DeviceID; 6645 gidinfo->gl_SL = path->SL; 6646 6647 gidinfo->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 6648 for (ii = 0; ii < port->pa_npkeys; ii++) { 6649 
if (port->pa_pkey_tbl == NULL) 6650 break; 6651 6652 pkey_tbl = &port->pa_pkey_tbl[ii]; 6653 if ((gidinfo->gl_p_key == pkey_tbl->pt_pkey) && 6654 (pkey_tbl->pt_qp_hdl != NULL)) { 6655 gidinfo->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 6656 break; 6657 } 6658 } 6659 6660 if (gidinfo->gl_qp_hdl == NULL) 6661 IBTF_DPRINTF_L2(ibdm_string, 6662 "\treset_gid_info: No matching Pkey"); 6663 else 6664 gid_reinited = 1; 6665 6666 kmem_free(path, path_len); 6667 kmem_free(pi, pi_len); 6668 kmem_free(nr, nr_len); 6669 break; 6670 } 6671 mutex_exit(&ibdm.ibdm_hl_mutex); 6672 6673 if (!gid_reinited) 6674 gidinfo->gl_disconnected = 1; 6675 } 6676 6677 static void 6678 ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 6679 { 6680 ibdm_ioc_info_t *ioc_list; 6681 int in_gidlist = 0; 6682 6683 /* 6684 * Check if gidinfo has been inserted into the 6685 * ibdm_dp_gidlist_head list. gl_next or gl_prev 6686 * != NULL, if gidinfo is the list. 6687 */ 6688 if (gidinfo->gl_prev != NULL || 6689 gidinfo->gl_next != NULL || 6690 ibdm.ibdm_dp_gidlist_head == gidinfo) 6691 in_gidlist = 1; 6692 6693 ioc_list = ibdm_update_ioc_gidlist(gidinfo, 0); 6694 6695 /* 6696 * Remove GID from the global GID list 6697 * Handle the case where all port GIDs for an 6698 * IOU have been hot-removed. 6699 */ 6700 mutex_enter(&ibdm.ibdm_mutex); 6701 if (gidinfo->gl_iou != NULL && gidinfo->gl_ngids == 0) { 6702 mutex_enter(&gidinfo->gl_mutex); 6703 (void) ibdm_free_iou_info(gidinfo, &gidinfo->gl_iou); 6704 mutex_exit(&gidinfo->gl_mutex); 6705 } 6706 6707 /* Delete gl_hca_list */ 6708 mutex_exit(&ibdm.ibdm_mutex); 6709 ibdm_delete_glhca_list(gidinfo); 6710 mutex_enter(&ibdm.ibdm_mutex); 6711 6712 if (in_gidlist) { 6713 if (gidinfo->gl_prev != NULL) 6714 gidinfo->gl_prev->gl_next = gidinfo->gl_next; 6715 if (gidinfo->gl_next != NULL) 6716 gidinfo->gl_next->gl_prev = gidinfo->gl_prev; 6717 6718 if (gidinfo == ibdm.ibdm_dp_gidlist_head) 6719 ibdm.ibdm_dp_gidlist_head = gidinfo->gl_next; 6720 if (gidinfo == ibdm.ibdm_dp_gidlist_tail) 6721 ibdm.ibdm_dp_gidlist_tail = gidinfo->gl_prev; 6722 ibdm.ibdm_ngids--; 6723 } 6724 mutex_exit(&ibdm.ibdm_mutex); 6725 6726 mutex_destroy(&gidinfo->gl_mutex); 6727 cv_destroy(&gidinfo->gl_probe_cv); 6728 kmem_free(gidinfo, sizeof (ibdm_dp_gidinfo_t)); 6729 6730 /* 6731 * Pass on the IOCs with updated GIDs to IBnexus 6732 */ 6733 if (ioc_list) { 6734 IBTF_DPRINTF_L4("ibdm", "\tdelete_gidinfo " 6735 "IOC_PROP_UPDATE for %p\n", ioc_list); 6736 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6737 if (ibdm.ibdm_ibnex_callback != NULL) { 6738 (*ibdm.ibdm_ibnex_callback)((void *) 6739 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6740 } 6741 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6742 } 6743 } 6744 6745 6746 static void 6747 ibdm_fill_srv_attr_mod(ib_mad_hdr_t *hdr, ibdm_timeout_cb_args_t *cb_args) 6748 { 6749 uint32_t attr_mod; 6750 6751 attr_mod = (cb_args->cb_ioc_num + 1) << 16; 6752 attr_mod |= cb_args->cb_srvents_start; 6753 attr_mod |= (cb_args->cb_srvents_end) << 8; 6754 hdr->AttributeModifier = h2b32(attr_mod); 6755 } 6756 6757 static void 6758 ibdm_bump_transactionID(ibdm_dp_gidinfo_t *gid_info) 6759 { 6760 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 6761 gid_info->gl_transactionID++; 6762 if (gid_info->gl_transactionID == gid_info->gl_max_transactionID) { 6763 IBTF_DPRINTF_L4(ibdm_string, 6764 "\tbump_transactionID(%p), wrapup", gid_info); 6765 gid_info->gl_transactionID = gid_info->gl_min_transactionID; 6766 } 6767 } 6768 6769 /* 6770 * gl_prev_iou is set for *non-reprobe* sweeep requests, which 6771 * detected that ChangeID in IOU info has 
changed. The service
6772 * entries may also have changed. Check if the service entries in the IOC
6773 * have changed wrt the prev iou; if so, notify IB nexus.
6774 */
6775 static ibdm_ioc_info_t *
6776 ibdm_handle_prev_iou()
6777 {
6778 ibdm_dp_gidinfo_t *gid_info;
6779 ibdm_ioc_info_t *ioc_list_head = NULL, *ioc_list;
6780 ibdm_ioc_info_t *prev_ioc, *ioc;
6781 int ii, jj, niocs, prev_niocs;
6782
6783 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
6784
6785 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou enter");
6786 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
6787 gid_info = gid_info->gl_next) {
6788 if (gid_info->gl_prev_iou == NULL)
6789 continue;
6790
6791 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou gid %p",
6792 gid_info);
6793 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
6794 prev_niocs =
6795 gid_info->gl_prev_iou->iou_info.iou_num_ctrl_slots;
6796 for (ii = 0; ii < niocs; ii++) {
6797 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii);
6798
6799 /* Find matching IOC */
6800 for (jj = 0; jj < prev_niocs; jj++) {
6801 prev_ioc = (ibdm_ioc_info_t *)
6802 &gid_info->gl_prev_iou->iou_ioc_info[jj];
6803 if (prev_ioc->ioc_profile.ioc_guid ==
6804 ioc->ioc_profile.ioc_guid)
6805 break;
6806 }
6807 if (jj == prev_niocs)
6808 prev_ioc = NULL;
6809 if (ioc == NULL || prev_ioc == NULL)
6810 continue;
6811 if ((ioc->ioc_profile.ioc_service_entries !=
6812 prev_ioc->ioc_profile.ioc_service_entries) ||
6813 ibdm_serv_cmp(&ioc->ioc_serv[0],
6814 &prev_ioc->ioc_serv[0],
6815 ioc->ioc_profile.ioc_service_entries) != 0) {
6816 IBTF_DPRINTF_L4(ibdm_string,
6817 "\thandle_prev_iou modified IOC: "
6818 "current ioc %p, old ioc %p",
6819 ioc, prev_ioc);
6820 mutex_enter(&gid_info->gl_mutex);
6821 ioc_list = ibdm_dup_ioc_info(ioc, gid_info);
6822 mutex_exit(&gid_info->gl_mutex);
6823 ioc_list->ioc_info_updated.ib_prop_updated
6824 = 0;
6825 ioc_list->ioc_info_updated.ib_srv_prop_updated
6826 = 1;
6827
6828 if (ioc_list_head == NULL)
6829 ioc_list_head = ioc_list;
6830 else {
6831 ioc_list_head->ioc_next = ioc_list;
6832 ioc_list_head = ioc_list;
6833 }
6834 }
6835 }
6836
6837 mutex_enter(&gid_info->gl_mutex);
6838 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_prev_iou);
6839 mutex_exit(&gid_info->gl_mutex);
6840 }
6841 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou ret %p",
6842 ioc_list_head);
6843 return (ioc_list_head);
6844 }
6845
6846 /*
6847 * Compares two service entry lists; returns 0 if they match, returns 1
6848 * if they do not.
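 *
 * Entries are compared positionally on se_attr.srv_id and on the
 * first IB_DM_MAX_SVC_NAME_LEN bytes of se_attr.srv_name. The
 * callers (ibdm_reprobe_update_port_srv() and
 * ibdm_handle_prev_iou()) treat a non-zero return as "the
 * service entries of this IOC have changed".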
6849 */ 6850 static int 6851 ibdm_serv_cmp(ibdm_srvents_info_t *serv1, ibdm_srvents_info_t *serv2, 6852 int nserv) 6853 { 6854 int ii; 6855 6856 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: enter"); 6857 for (ii = 0; ii < nserv; ii++, serv1++, serv2++) { 6858 if (serv1->se_attr.srv_id != serv2->se_attr.srv_id || 6859 bcmp(serv1->se_attr.srv_name, 6860 serv2->se_attr.srv_name, 6861 IB_DM_MAX_SVC_NAME_LEN) != 0) { 6862 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 1"); 6863 return (1); 6864 } 6865 } 6866 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 0"); 6867 return (0); 6868 } 6869 6870 /* For debugging purpose only */ 6871 #ifdef DEBUG 6872 void 6873 ibdm_dump_mad_hdr(ib_mad_hdr_t *mad_hdr) 6874 { 6875 IBTF_DPRINTF_L4("ibdm", "\t\t MAD Header info"); 6876 IBTF_DPRINTF_L4("ibdm", "\t\t ---------------"); 6877 6878 IBTF_DPRINTF_L4("ibdm", "\tBase version : 0x%x" 6879 "\tMgmt Class : 0x%x", mad_hdr->BaseVersion, mad_hdr->MgmtClass); 6880 IBTF_DPRINTF_L4("ibdm", "\tClass version : 0x%x" 6881 "\tR Method : 0x%x", 6882 mad_hdr->ClassVersion, mad_hdr->R_Method); 6883 IBTF_DPRINTF_L4("ibdm", "\tMAD Status : 0x%x" 6884 "\tTransaction ID : 0x%llx", 6885 b2h16(mad_hdr->Status), b2h64(mad_hdr->TransactionID)); 6886 IBTF_DPRINTF_L4("ibdm", "\t Attribute ID : 0x%x" 6887 "\tAttribute Modified : 0x%lx", 6888 b2h16(mad_hdr->AttributeID), b2h32(mad_hdr->AttributeModifier)); 6889 } 6890 6891 6892 void 6893 ibdm_dump_ibmf_msg(ibmf_msg_t *ibmf_msg, int flag) 6894 { 6895 ib_mad_hdr_t *mad_hdr; 6896 6897 IBTF_DPRINTF_L4("ibdm", "\t\t(IBMF_PKT): Local address info"); 6898 IBTF_DPRINTF_L4("ibdm", "\t\t ------------------"); 6899 6900 IBTF_DPRINTF_L4("ibdm", "\tLocal Lid : 0x%x\tRemote Lid : 0x%x" 6901 " Remote Qp : 0x%x", ibmf_msg->im_local_addr.ia_local_lid, 6902 ibmf_msg->im_local_addr.ia_remote_lid, 6903 ibmf_msg->im_local_addr.ia_remote_qno); 6904 IBTF_DPRINTF_L4("ibdm", "\tP_key : 0x%x\tQ_key : 0x%x" 6905 " SL : 0x%x", ibmf_msg->im_local_addr.ia_p_key, 6906 ibmf_msg->im_local_addr.ia_q_key, 6907 ibmf_msg->im_local_addr.ia_service_level); 6908 6909 if (flag) 6910 mad_hdr = (ib_mad_hdr_t *)IBDM_OUT_IBMFMSG_MADHDR(ibmf_msg); 6911 else 6912 mad_hdr = IBDM_IN_IBMFMSG_MADHDR(ibmf_msg); 6913 6914 ibdm_dump_mad_hdr(mad_hdr); 6915 } 6916 6917 6918 void 6919 ibdm_dump_path_info(sa_path_record_t *path) 6920 { 6921 IBTF_DPRINTF_L4("ibdm", "\t\t Path information"); 6922 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------"); 6923 6924 IBTF_DPRINTF_L4("ibdm", "\t DGID hi : %llx\tDGID lo : %llx", 6925 path->DGID.gid_prefix, path->DGID.gid_guid); 6926 IBTF_DPRINTF_L4("ibdm", "\t SGID hi : %llx\tSGID lo : %llx", 6927 path->SGID.gid_prefix, path->SGID.gid_guid); 6928 IBTF_DPRINTF_L4("ibdm", "\t SLID : %x\t\tDlID : %x", 6929 path->SLID, path->DLID); 6930 IBTF_DPRINTF_L4("ibdm", "\t P Key : %x\t\tSL : %x", 6931 path->P_Key, path->SL); 6932 } 6933 6934 6935 void 6936 ibdm_dump_classportinfo(ib_mad_classportinfo_t *classportinfo) 6937 { 6938 IBTF_DPRINTF_L4("ibdm", "\t\t CLASSPORT INFO"); 6939 IBTF_DPRINTF_L4("ibdm", "\t\t --------------"); 6940 6941 IBTF_DPRINTF_L4("ibdm", "\t Response Time Value : 0x%x", 6942 ((b2h32(classportinfo->RespTimeValue)) & 0x1F)); 6943 6944 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID hi : 0x%llx", 6945 b2h64(classportinfo->RedirectGID_hi)); 6946 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID lo : 0x%llx", 6947 b2h64(classportinfo->RedirectGID_lo)); 6948 IBTF_DPRINTF_L4("ibdm", "\t Redirected TC : 0x%x", 6949 classportinfo->RedirectTC); 6950 IBTF_DPRINTF_L4("ibdm", "\t Redirected SL : 0x%x", 6951 
classportinfo->RedirectSL); 6952 IBTF_DPRINTF_L4("ibdm", "\t Redirected FL : 0x%x", 6953 classportinfo->RedirectFL); 6954 IBTF_DPRINTF_L4("ibdm", "\t Redirected LID : 0x%x", 6955 b2h16(classportinfo->RedirectLID)); 6956 IBTF_DPRINTF_L4("ibdm", "\t Redirected P KEY : 0x%x", 6957 b2h16(classportinfo->RedirectP_Key)); 6958 IBTF_DPRINTF_L4("ibdm", "\t Redirected QP : 0x%x", 6959 classportinfo->RedirectQP); 6960 IBTF_DPRINTF_L4("ibdm", "\t Redirected Q KEY : 0x%x", 6961 b2h32(classportinfo->RedirectQ_Key)); 6962 IBTF_DPRINTF_L4("ibdm", "\t Trap GID hi : 0x%llx", 6963 b2h64(classportinfo->TrapGID_hi)); 6964 IBTF_DPRINTF_L4("ibdm", "\t Trap GID lo : 0x%llx", 6965 b2h64(classportinfo->TrapGID_lo)); 6966 IBTF_DPRINTF_L4("ibdm", "\t Trap TC : 0x%x", 6967 classportinfo->TrapTC); 6968 IBTF_DPRINTF_L4("ibdm", "\t Trap SL : 0x%x", 6969 classportinfo->TrapSL); 6970 IBTF_DPRINTF_L4("ibdm", "\t Trap FL : 0x%x", 6971 classportinfo->TrapFL); 6972 IBTF_DPRINTF_L4("ibdm", "\t Trap LID : 0x%x", 6973 b2h16(classportinfo->TrapLID)); 6974 IBTF_DPRINTF_L4("ibdm", "\t Trap P_Key : 0x%x", 6975 b2h16(classportinfo->TrapP_Key)); 6976 IBTF_DPRINTF_L4("ibdm", "\t Trap HL : 0x%x", 6977 classportinfo->TrapHL); 6978 IBTF_DPRINTF_L4("ibdm", "\t Trap QP : 0x%x", 6979 classportinfo->TrapQP); 6980 IBTF_DPRINTF_L4("ibdm", "\t Trap Q_Key : 0x%x", 6981 b2h32(classportinfo->TrapQ_Key)); 6982 } 6983 6984 6985 void 6986 ibdm_dump_iounitinfo(ib_dm_io_unitinfo_t *iou_info) 6987 { 6988 IBTF_DPRINTF_L4("ibdm", "\t\t I/O UnitInfo"); 6989 IBTF_DPRINTF_L4("ibdm", "\t\t ------------"); 6990 6991 IBTF_DPRINTF_L4("ibdm", "\tChange ID : 0x%x", 6992 b2h16(iou_info->iou_changeid)); 6993 IBTF_DPRINTF_L4("ibdm", "\t#of ctrl slots : %d", 6994 iou_info->iou_num_ctrl_slots); 6995 IBTF_DPRINTF_L4("ibdm", "\tIOU flag : 0x%x", 6996 iou_info->iou_flag); 6997 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 0 : 0x%x", 6998 iou_info->iou_ctrl_list[0]); 6999 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 1 : 0x%x", 7000 iou_info->iou_ctrl_list[1]); 7001 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 2 : 0x%x", 7002 iou_info->iou_ctrl_list[2]); 7003 } 7004 7005 7006 void 7007 ibdm_dump_ioc_profile(ib_dm_ioc_ctrl_profile_t *ioc) 7008 { 7009 IBTF_DPRINTF_L4("ibdm", "\t\t IOC Controller Profile"); 7010 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------------"); 7011 7012 IBTF_DPRINTF_L4("ibdm", "\tIOC Guid : %llx", ioc->ioc_guid); 7013 IBTF_DPRINTF_L4("ibdm", "\tVendorID : 0x%x", ioc->ioc_vendorid); 7014 IBTF_DPRINTF_L4("ibdm", "\tDevice Id : 0x%x", ioc->ioc_deviceid); 7015 IBTF_DPRINTF_L4("ibdm", "\tDevice Ver : 0x%x", ioc->ioc_device_ver); 7016 IBTF_DPRINTF_L4("ibdm", "\tSubsys ID : 0x%x", ioc->ioc_subsys_id); 7017 IBTF_DPRINTF_L4("ibdm", "\tIO class : 0x%x", ioc->ioc_io_class); 7018 IBTF_DPRINTF_L4("ibdm", "\tIO subclass : 0x%x", ioc->ioc_io_subclass); 7019 IBTF_DPRINTF_L4("ibdm", "\tProtocol : 0x%x", ioc->ioc_protocol); 7020 IBTF_DPRINTF_L4("ibdm", "\tProtocolV : 0x%x", ioc->ioc_protocol_ver); 7021 IBTF_DPRINTF_L4("ibdm", "\tmsg qdepth : %d", ioc->ioc_send_msg_qdepth); 7022 IBTF_DPRINTF_L4("ibdm", "\trdma qdepth : %d", 7023 ioc->ioc_rdma_read_qdepth); 7024 IBTF_DPRINTF_L4("ibdm", "\tsndmsg sz : %d", ioc->ioc_send_msg_sz); 7025 IBTF_DPRINTF_L4("ibdm", "\trdma xfersz : %d", ioc->ioc_rdma_xfer_sz); 7026 IBTF_DPRINTF_L4("ibdm", "\topcal mask : 0x%x", 7027 ioc->ioc_ctrl_opcap_mask); 7028 IBTF_DPRINTF_L4("ibdm", "\tsrventries : %x", ioc->ioc_service_entries); 7029 } 7030 7031 7032 void 7033 ibdm_dump_service_entries(ib_dm_srv_t *srv_ents) 7034 { 7035 IBTF_DPRINTF_L4("ibdm", 7036 
"\thandle_srventry_mad: service id : %llx", srv_ents->srv_id); 7037 7038 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad: " 7039 "Service Name : %s", srv_ents->srv_name); 7040 } 7041 7042 int ibdm_allow_sweep_fabric_timestamp = 1; 7043 7044 void 7045 ibdm_dump_sweep_fabric_timestamp(int flag) 7046 { 7047 static hrtime_t x; 7048 if (flag) { 7049 if (ibdm_allow_sweep_fabric_timestamp) { 7050 IBTF_DPRINTF_L4("ibdm", "\tTime taken to complete " 7051 "sweep %lld ms", ((gethrtime() - x)/ 1000000)); 7052 } 7053 x = 0; 7054 } else 7055 x = gethrtime(); 7056 } 7057 #endif 7058