/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/ib/ibtl/impl/ibtl.h>
#include <sys/ib/ibtl/impl/ibtl_cm.h>

/*
 * ibtl_qp.c
 *      These routines implement (most of) the verbs related to
 *      Queue Pairs.
 */

/* Globals. */
static char ibtf_qp[] = "ibtl";

/* This table indirectly initializes the ibt_cep_next_state[] table. */
typedef struct ibt_cep_next_state_s {
        ibt_cep_state_t         next_state;
        ibt_cep_modify_flags_t  modify_flags;
} ibt_cep_next_state_t;

struct {
        ibt_cep_state_t         current_state;
        ibt_cep_state_t         next_state;
        ibt_cep_modify_flags_t  modify_flags;
} ibt_cep_next_state_inits[] = {
        { IBT_STATE_RESET, IBT_STATE_INIT, IBT_CEP_SET_RESET_INIT},
        { IBT_STATE_INIT, IBT_STATE_RTR, IBT_CEP_SET_INIT_RTR},
        { IBT_STATE_RTR, IBT_STATE_RTS, IBT_CEP_SET_RTR_RTS}
};

ibt_cep_next_state_t ibt_cep_next_state[IBT_STATE_NUM];

_NOTE(SCHEME_PROTECTS_DATA("unique", ibt_cep_next_state))

/* The following data and functions can increase system stability. */

int ibtl_qp_calls_curr;
int ibtl_qp_calls_max = 128;    /* limit on # of simultaneous QP verb calls */
kmutex_t ibtl_qp_mutex;
kcondvar_t ibtl_qp_cv;

void
ibtl_qp_flow_control_enter(void)
{
        mutex_enter(&ibtl_qp_mutex);
        while (ibtl_qp_calls_curr >= ibtl_qp_calls_max) {
                cv_wait(&ibtl_qp_cv, &ibtl_qp_mutex);
        }
        ++ibtl_qp_calls_curr;
        mutex_exit(&ibtl_qp_mutex);
}

void
ibtl_qp_flow_control_exit(void)
{
        mutex_enter(&ibtl_qp_mutex);
        cv_signal(&ibtl_qp_cv);
        --ibtl_qp_calls_curr;
        mutex_exit(&ibtl_qp_mutex);
}

/*
 * Function:
 *      ibt_alloc_qp
 * Input:
 *      hca_hdl         HCA Handle.
 *      type            Specifies the type of QP to alloc in ibt_alloc_qp().
 *      qp_attrp        Specifies the ibt_qp_alloc_attr_t that are needed to
 *                      allocate a QP and transition it to the RTS state for
 *                      UDs and INIT state for all other QPs.
 * Output:
 *      queue_sizes_p   Returned sizes for SQ, RQ, SQ WR SGL elements & RQ
 *                      WR SGL elements.
 *      qpn_p           Returned QP Number of the allocated QP.
 *      ibt_qp_p        The ibt_qp_hdl_t of the allocated QP.
 * Returns:
 *      IBT_SUCCESS
 * Description:
 *      Allocate a QP with specified attributes.
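 *
 *      Illustrative client-side sketch (added here for clarity; it is not
 *      part of the original comment). The CQ and PD handles (my_scq,
 *      my_rcq, my_pd) and hca_hdl are assumed to have been obtained
 *      earlier through the usual IBTF allocation verbs.
 *
 *              ibt_qp_alloc_attr_t     attrs;
 *              ibt_chan_sizes_t        sizes;
 *              ib_qpn_t                qpn;
 *              ibt_qp_hdl_t            qp_hdl;
 *
 *              bzero(&attrs, sizeof (attrs));
 *              attrs.qp_scq_hdl = my_scq;      (assumed send CQ handle)
 *              attrs.qp_rcq_hdl = my_rcq;      (assumed recv CQ handle)
 *              attrs.qp_pd_hdl = my_pd;        (assumed PD handle)
 *              if (ibt_alloc_qp(hca_hdl, IBT_RC_RQP, &attrs, &sizes,
 *                  &qpn, &qp_hdl) != IBT_SUCCESS)
 *                      (handle the failure)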
 */
ibt_status_t
ibt_alloc_qp(ibt_hca_hdl_t hca_hdl, ibt_qp_type_t type,
    ibt_qp_alloc_attr_t *qp_attrp, ibt_chan_sizes_t *queue_sizes_p,
    ib_qpn_t *qpn_p, ibt_qp_hdl_t *ibt_qp_p)
{
        ibt_status_t            retval;
        ibtl_channel_t          *chanp;
        ibt_tran_srv_t          qp_type;

        IBTF_DPRINTF_L3(ibtf_qp, "ibt_alloc_qp(%p, %d, %p, %p, %p, %p) ",
            hca_hdl, type, qp_attrp, queue_sizes_p, qpn_p, ibt_qp_p);

        switch (type) {
        case IBT_UD_RQP:
                qp_type = IBT_UD_SRV;
                break;
        case IBT_RC_RQP:
                qp_type = IBT_RC_SRV;
                break;
        case IBT_UC_RQP:
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: Unreliable Connected "
                    "Transport Type is not supported.");
                *ibt_qp_p = NULL;
                return (IBT_NOT_SUPPORTED);
        case IBT_RD_RQP:
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: Reliable Datagram "
                    "Transport Type is not supported.");
                *ibt_qp_p = NULL;
                return (IBT_NOT_SUPPORTED);
        default:
                /* shouldn't happen ILLEGAL Type */
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: Illegal Transport Type "
                    "%d", type);
                *ibt_qp_p = NULL;
                return (IBT_QP_SRV_TYPE_INVALID);
        }

        /* Get CI CQ handles */
        if ((qp_attrp->qp_scq_hdl == NULL) || (qp_attrp->qp_rcq_hdl == NULL)) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: Invalid CQ Handle");
                *ibt_qp_p = NULL;
                return (IBT_CQ_HDL_INVALID);
        }
        qp_attrp->qp_ibc_scq_hdl = qp_attrp->qp_scq_hdl->cq_ibc_cq_hdl;
        qp_attrp->qp_ibc_rcq_hdl = qp_attrp->qp_rcq_hdl->cq_ibc_cq_hdl;

        if ((qp_attrp->qp_alloc_flags & IBT_QP_USES_SRQ) &&
            (qp_attrp->qp_srq_hdl != NULL))
                qp_attrp->qp_ibc_srq_hdl =
                    qp_attrp->qp_srq_hdl->srq_ibc_srq_hdl;
        else
                qp_attrp->qp_ibc_srq_hdl = NULL;

        /* Allocate Channel structure */
        chanp = kmem_zalloc(sizeof (*chanp), KM_SLEEP);

        ibtl_qp_flow_control_enter();
        retval = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_qp)(
            IBTL_HCA2CIHCA(hca_hdl), &chanp->ch_qp, type, qp_attrp,
            queue_sizes_p, qpn_p, &chanp->ch_qp.qp_ibc_qp_hdl);
        ibtl_qp_flow_control_exit();
        if (retval != IBT_SUCCESS) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: "
                    "Failed to allocate QP: %d", retval);
                kmem_free(chanp, sizeof (*chanp));
                *ibt_qp_p = NULL;
                return (retval);
        }

        /* Initialize the internal QP struct. */
        chanp->ch_qp.qp_type = qp_type;
        chanp->ch_qp.qp_hca = hca_hdl;
        chanp->ch_qp.qp_send_cq = qp_attrp->qp_scq_hdl;
        chanp->ch_qp.qp_recv_cq = qp_attrp->qp_rcq_hdl;
        chanp->ch_current_state = IBT_STATE_RESET;
        /*
         * The IBTA spec does not include the signal type or PD on a QP
         * query operation. In order to implement the "CLONE" feature
         * we need to cache these values. Mostly used by TI client.
         */
        chanp->ch_qp.qp_flags = qp_attrp->qp_flags;
        chanp->ch_qp.qp_pd_hdl = qp_attrp->qp_pd_hdl;
        mutex_init(&chanp->ch_cm_mutex, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&chanp->ch_cm_cv, NULL, CV_DEFAULT, NULL);

        mutex_enter(&hca_hdl->ha_mutex);
        hca_hdl->ha_qp_cnt++;
        mutex_exit(&hca_hdl->ha_mutex);

        IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: SUCCESS: qp %p owned by '%s'",
            chanp, hca_hdl->ha_clnt_devp->clnt_name);

        *ibt_qp_p = chanp;

        return (retval);
}


/*
 * Function:
 *      ibt_initialize_qp
 * Input:
 *      ibt_qp          The previously allocated IBT QP Handle.
 *      modify_attrp    Specifies the QP Modify attributes needed to transition
 *                      the QP to the RTS state for UDs (including special QPs)
 *                      and INIT state for all other QPs.
 * Output:
 *      none.
 * Returns:
 *      IBT_SUCCESS
 * Description:
 *      Transition the QP to the RTS state for UDs (including special QPs)
 *      and INIT state for all other QPs.
 */
ibt_status_t
ibt_initialize_qp(ibt_qp_hdl_t ibt_qp, ibt_qp_info_t *modify_attrp)
{
        ibt_status_t            status;
        ibt_cep_state_t         state;
        ibc_hca_hdl_t           ibc_hca_hdl = IBTL_CHAN2CIHCA(ibt_qp);
        ibc_qp_hdl_t            ibc_qp_hdl = IBTL_CHAN2CIQP(ibt_qp);
        ibc_operations_t        *hca_ops_p = IBTL_CHAN2CIHCAOPS_P(ibt_qp);
        ibt_cep_modify_flags_t  modify_flags;

        IBTF_DPRINTF_L3(ibtf_qp, "ibt_initialize_qp(%p, %p)",
            ibt_qp, modify_attrp);

        /*
         * Validate the QP Type from the channel with QP Type from the
         * modify attribute struct.
         */
        if (ibt_qp->ch_qp.qp_type != modify_attrp->qp_trans) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_initialize_qp: "
                    "QP Type mismatch: Chan QP Type<%d>, Modify QP Type<%d>",
                    ibt_qp->ch_qp.qp_type, modify_attrp->qp_trans);
                return (IBT_QP_SRV_TYPE_INVALID);
        }
        if (ibt_qp->ch_current_state != IBT_STATE_RESET) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_initialize_qp: "
                    "QP needs to be in RESET state: Chan QP State<%d>",
                    ibt_qp->ch_current_state);
                return (IBT_CHAN_STATE_INVALID);
        }

        /*
         * Initialize the QP to the RTS state for UDs
         * and INIT state for all other QPs.
         */
        switch (modify_attrp->qp_trans) {
        case IBT_UD_SRV:

                /*
                 * Bring the QP to the RTS state.
                 */
                state = IBT_STATE_RESET;
                ibtl_qp_flow_control_enter();
                do {
                        modify_attrp->qp_current_state = state;
                        modify_flags = ibt_cep_next_state[state].modify_flags;
                        modify_attrp->qp_state = state =
                            ibt_cep_next_state[state].next_state;

                        IBTF_DPRINTF_L3(ibtf_qp, "ibt_initialize_qp: "
                            "modifying qp state to 0x%x", state);
                        status = (hca_ops_p->ibc_modify_qp)(ibc_hca_hdl,
                            ibc_qp_hdl, modify_flags, modify_attrp, NULL);
                } while ((state != IBT_STATE_RTS) && (status == IBT_SUCCESS));
                ibtl_qp_flow_control_exit();

                if (status == IBT_SUCCESS) {
                        ibt_qp->ch_current_state = state;
                        ibt_qp->ch_transport.ud.ud_port_num =
                            modify_attrp->qp_transport.ud.ud_port;
                        ibt_qp->ch_transport.ud.ud_qkey =
                            modify_attrp->qp_transport.ud.ud_qkey;
                }
                break;
        case IBT_UC_SRV:
        case IBT_RD_SRV:
        case IBT_RC_SRV:

                /*
                 * Bring the QP to the INIT state.
                 */
                modify_attrp->qp_state = IBT_STATE_INIT;

                ibtl_qp_flow_control_enter();
                status = (hca_ops_p->ibc_modify_qp)(ibc_hca_hdl, ibc_qp_hdl,
                    IBT_CEP_SET_RESET_INIT, modify_attrp, NULL);
                ibtl_qp_flow_control_exit();
                if (status == IBT_SUCCESS)
                        ibt_qp->ch_current_state = IBT_STATE_INIT;
                break;
        default:
                /* shouldn't happen ILLEGAL Type */
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_initialize_qp: Illegal Type %d",
                    modify_attrp->qp_trans);
                return (IBT_QP_SRV_TYPE_INVALID);
        }       /* End switch */

        return (status);
}


/*
 * Function:
 *      ibt_alloc_special_qp
 * Input:
 *      hca_hdl         HCA Handle.
 *      port            HCA port on which the special QP is to be allocated.
 *      type            Specifies the type of Special QP to be allocated.
 *      qp_attrp        Specifies the ibt_qp_alloc_attr_t that are needed to
 *                      allocate a special QP.
 * Output:
 *      queue_sizes_p   Returned sizes for SQ, RQ, SQ WR SGL elements & RQ
 *                      WR SGL elements.
 *      ibt_qp_p        The ibt_qp_hdl_t of the allocated QP.
 * Returns:
 *      IBT_SUCCESS
 * Description:
 *      Allocate a special QP with specified attributes.
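 *
 *      Illustrative sketch (added; not part of the original comment), with
 *      hca_hdl and the CQ handles (my_scq, my_rcq) assumed to exist
 *      already: allocate the GSI QP of port 1.
 *
 *              ibt_qp_alloc_attr_t     attrs;
 *              ibt_chan_sizes_t        sizes;
 *              ibt_qp_hdl_t            gsi_qp;
 *
 *              bzero(&attrs, sizeof (attrs));
 *              attrs.qp_scq_hdl = my_scq;
 *              attrs.qp_rcq_hdl = my_rcq;
 *              status = ibt_alloc_special_qp(hca_hdl, 1, IBT_GSI_SQP,
 *                  &attrs, &sizes, &gsi_qp);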
 */
ibt_status_t
ibt_alloc_special_qp(ibt_hca_hdl_t hca_hdl, uint8_t port, ibt_sqp_type_t type,
    ibt_qp_alloc_attr_t *qp_attrp, ibt_chan_sizes_t *queue_sizes_p,
    ibt_qp_hdl_t *ibt_qp_p)
{
        ibt_qp_hdl_t            chanp;
        ibt_status_t            retval;
        ibt_tran_srv_t          sqp_type;

        IBTF_DPRINTF_L3(ibtf_qp, "ibt_alloc_special_qp(%p, %d, %x, %p, %p, %p)",
            hca_hdl, port, type, qp_attrp, queue_sizes_p, ibt_qp_p);

        switch (type) {
        case IBT_SMI_SQP:
        case IBT_GSI_SQP:
                sqp_type = IBT_UD_SRV;
                break;

        case IBT_RAWIP_SQP:
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_special_qp: Raw IP "
                    "Transport Type is not supported.");
                *ibt_qp_p = NULL;
                return (IBT_NOT_SUPPORTED);

        case IBT_RAWETHER_SQP:
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_special_qp: Raw Ethernet "
                    "Transport Type is not supported.");
                *ibt_qp_p = NULL;
                return (IBT_NOT_SUPPORTED);

        default:
                /* Shouldn't happen */
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_special_qp: "
                    "Illegal Type 0x%x", type);
                *ibt_qp_p = NULL;
                return (IBT_QP_SPECIAL_TYPE_INVALID);
        }

        /* convert the CQ handles for the CI */
        qp_attrp->qp_ibc_scq_hdl = qp_attrp->qp_scq_hdl->cq_ibc_cq_hdl;
        qp_attrp->qp_ibc_rcq_hdl = qp_attrp->qp_rcq_hdl->cq_ibc_cq_hdl;

        /* Allocate Channel structure */
        chanp = kmem_zalloc(sizeof (*chanp), KM_SLEEP);

        ibtl_qp_flow_control_enter();
        retval = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_special_qp)(
            IBTL_HCA2CIHCA(hca_hdl), port, &chanp->ch_qp, type, qp_attrp,
            queue_sizes_p, &chanp->ch_qp.qp_ibc_qp_hdl);
        ibtl_qp_flow_control_exit();
        if (retval != IBT_SUCCESS) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_special_qp: "
                    "Failed to allocate Special QP: %d", retval);
                kmem_free(chanp, sizeof (*chanp));
                *ibt_qp_p = NULL;
                return (retval);
        }

        /* Initialize the internal QP struct. */
        chanp->ch_qp.qp_type = sqp_type;
        chanp->ch_qp.qp_hca = hca_hdl;
        chanp->ch_qp.qp_send_cq = qp_attrp->qp_scq_hdl;
        chanp->ch_qp.qp_recv_cq = qp_attrp->qp_rcq_hdl;
        chanp->ch_current_state = IBT_STATE_RESET;
        mutex_init(&chanp->ch_cm_mutex, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&chanp->ch_cm_cv, NULL, CV_DEFAULT, NULL);

        /* Update these variables so that the debugger shows correct values. */
        chanp->ch_qp.qp_flags = qp_attrp->qp_flags;
        chanp->ch_qp.qp_pd_hdl = qp_attrp->qp_pd_hdl;

        mutex_enter(&hca_hdl->ha_mutex);
        hca_hdl->ha_qp_cnt++;
        mutex_exit(&hca_hdl->ha_mutex);

        *ibt_qp_p = chanp;

        return (retval);
}


/*
 * Function:
 *      ibt_flush_qp
 * Input:
 *      ibt_qp          Handle for QP that needs to be flushed.
 * Output:
 *      none.
 * Returns:
 *      IBT_SUCCESS
 *      IBT_QP_HDL_INVALID
 * Description:
 *      Put the QP into error state to flush out work requests.
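 *
 *      Typical use (added sketch; qp_hdl is an assumed client handle):
 *      flush an unconnected QP so that any outstanding WRs complete with
 *      a flush error on the CQs before the QP is freed.
 *
 *              if (ibt_flush_qp(qp_hdl) == IBT_SUCCESS)
 *                      (reap the flushed completions, then free the QP)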
 */
ibt_status_t
ibt_flush_qp(ibt_qp_hdl_t ibt_qp)
{
        ibt_qp_info_t           modify_attr;
        ibt_status_t            retval;

        IBTF_DPRINTF_L3(ibtf_qp, "ibt_flush_qp(%p)", ibt_qp);

        if (ibt_qp->ch_qp.qp_type == IBT_RC_SRV) {
                mutex_enter(&ibtl_free_qp_mutex);
                if ((ibt_qp->ch_transport.rc.rc_free_flags &
                    (IBTL_RC_QP_CONNECTED | IBTL_RC_QP_CLOSING)) ==
                    IBTL_RC_QP_CONNECTED) {
                        mutex_exit(&ibtl_free_qp_mutex);
                        IBTF_DPRINTF_L2(ibtf_qp, "ibt_flush_qp(%p): "
                            "called with a connected RC QP", ibt_qp);
                        return (IBT_CHAN_STATE_INVALID);
                }
                mutex_exit(&ibtl_free_qp_mutex);
        }

        bzero(&modify_attr, sizeof (ibt_qp_info_t));

        /*
         * Set the QP state to error to flush any uncompleted WRs.
         */
        modify_attr.qp_state = IBT_STATE_ERROR;
        modify_attr.qp_trans = ibt_qp->ch_qp.qp_type;

        retval = ibt_modify_qp(ibt_qp, IBT_CEP_SET_STATE, &modify_attr, NULL);

        if (retval != IBT_SUCCESS) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_flush_qp: "
                    "failed on chan %p: %d", ibt_qp, retval);
        }
        return (retval);
}


/*
 * ibtl_cm_chan_is_open()
 *
 *      Inform IBTL that the connection has been established on this
 *      channel so that a later call to ibtl_cm_chan_is_closed()
 *      will be required to free the QPN used by this channel.
 *
 *      chan    Channel Handle
 */
void
ibtl_cm_chan_is_open(ibt_channel_hdl_t chan)
{
        IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_open(%p)", chan);
        ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
        mutex_enter(&ibtl_free_qp_mutex);
        ASSERT(chan->ch_transport.rc.rc_free_flags == 0);
        chan->ch_transport.rc.rc_free_flags |= IBTL_RC_QP_CONNECTED;
        mutex_exit(&ibtl_free_qp_mutex);
}

/*
 * ibtl_cm_is_chan_closing()
 *
 *      Returns 1 if the connection that has been started for this
 *      channel has moved to TIMEWAIT; otherwise returns 0.
 *
 *      chan    Channel Handle
 */
int
ibtl_cm_is_chan_closing(ibt_channel_hdl_t chan)
{
        IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_is_chan_closing(%p)", chan);
        ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
        mutex_enter(&ibtl_free_qp_mutex);
        if (chan->ch_transport.rc.rc_free_flags & IBTL_RC_QP_CLOSING) {
                mutex_exit(&ibtl_free_qp_mutex);
                return (1);
        }
        mutex_exit(&ibtl_free_qp_mutex);
        return (0);
}

/*
 * ibtl_cm_is_chan_closed()
 *
 *      Returns 1 if the connection that has been started for this
 *      channel has completed TIMEWAIT; otherwise returns 0.
 *
 *      chan    Channel Handle
 */
int
ibtl_cm_is_chan_closed(ibt_channel_hdl_t chan)
{
        IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_is_chan_closed(%p)", chan);
        ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
        mutex_enter(&ibtl_free_qp_mutex);
        if (chan->ch_transport.rc.rc_free_flags & IBTL_RC_QP_CLOSED) {
                mutex_exit(&ibtl_free_qp_mutex);
                return (1);
        }
        mutex_exit(&ibtl_free_qp_mutex);
        return (0);
}
/*
 * ibtl_cm_chan_is_closing()
 *
 *      Inform IBTL that the TIMEWAIT delay for the connection has been
 *      started for this channel so that the QP can be freed.
 *
 *      chan    Channel Handle
 */
void
ibtl_cm_chan_is_closing(ibt_channel_hdl_t chan)
{
        IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_closing(%p)", chan);
        ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
        mutex_enter(&ibtl_free_qp_mutex);
        ASSERT(chan->ch_transport.rc.rc_free_flags == IBTL_RC_QP_CONNECTED);
        chan->ch_transport.rc.rc_free_flags |= IBTL_RC_QP_CLOSING;
        mutex_exit(&ibtl_free_qp_mutex);
}
/*
 * ibtl_cm_chan_is_closed()
 *
 *      Inform IBTL that the TIMEWAIT delay for the connection has
 *      elapsed for this channel so that the QPN can be reused.
 *
 *      chan    Channel Handle
 */
void
ibtl_cm_chan_is_closed(ibt_channel_hdl_t chan)
{
        ibt_status_t status;
        ibtl_hca_t *ibtl_hca = chan->ch_qp.qp_hca;

        IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_closed(%p)", chan);
        ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
        mutex_enter(&ibtl_free_qp_mutex);
        ASSERT((chan->ch_transport.rc.rc_free_flags &
            (IBTL_RC_QP_CONNECTED | IBTL_RC_QP_CLOSING)) ==
            (IBTL_RC_QP_CONNECTED | IBTL_RC_QP_CLOSING));

        chan->ch_transport.rc.rc_free_flags &= ~IBTL_RC_QP_CONNECTED;
        chan->ch_transport.rc.rc_free_flags &= ~IBTL_RC_QP_CLOSING;
        chan->ch_transport.rc.rc_free_flags |= IBTL_RC_QP_CLOSED;

        ibtl_cm_set_chan_private(chan, NULL);

        if ((chan->ch_transport.rc.rc_free_flags & IBTL_RC_QP_FREED) == 0) {
                mutex_exit(&ibtl_free_qp_mutex);
                return;
        }
        mutex_exit(&ibtl_free_qp_mutex);
        ibtl_qp_flow_control_enter();
        if ((status = (IBTL_CHAN2CIHCAOPS_P(chan)->ibc_release_qpn)
            (IBTL_CHAN2CIHCA(chan), chan->ch_transport.rc.rc_qpn_hdl)) ==
            IBT_SUCCESS) {
                /* effectively, this is kmem_free(chan); */
                ibtl_free_qp_async_check(&chan->ch_qp);

                /* decrement ha_qpn_cnt and check for close in progress */
                ibtl_close_hca_check(ibtl_hca);
        } else
                IBTF_DPRINTF_L2(ibtf_qp, "ibtl_cm_chan_is_closed: "
                    "ibc_release_qpn failed: status = %d\n", status);
        ibtl_qp_flow_control_exit();
}

/*
 * ibtl_cm_chan_is_reused()
 *
 *      Inform IBTL that the channel is going to be re-used.
 *      chan    Channel Handle
 */
void
ibtl_cm_chan_is_reused(ibt_channel_hdl_t chan)
{
        IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_reused(%p)", chan);
        ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
        mutex_enter(&ibtl_free_qp_mutex);
        ASSERT(((chan->ch_transport.rc.rc_free_flags & IBTL_RC_QP_CONNECTED) !=
            IBTL_RC_QP_CONNECTED));

        /* channel is no longer in closed state, shall be re-used */
        chan->ch_transport.rc.rc_free_flags = 0;

        mutex_exit(&ibtl_free_qp_mutex);

}

/*
 * Function:    ibt_free_qp()
 *
 * Input:       ibt_qp  Handle for Channel(QP) that needs to be freed.
 *
 * Output:      NONE.
 *
 * Returns:     IBT_SUCCESS
 *              IBT_QP_STATE_INVALID
 *              IBT_QP_HDL_INVALID
 *
 * Description:
 *      Free a previously allocated QP.
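 *
 *      Usage note (added; hedged): for a connected RC channel, the
 *      connection must be torn down first via ibt_close_rc_channel(),
 *      as the IBTL_RC_QP_CONNECTED/IBTL_RC_QP_CLOSING checks below
 *      enforce; e.g.
 *
 *              (close the RC channel with ibt_close_rc_channel() first)
 *              status = ibt_free_qp(qp_hdl);   (qp_hdl is an assumed handle)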
 */
ibt_status_t
ibt_free_qp(ibt_qp_hdl_t ibt_qp)
{
        ibt_status_t            status;
        ibtl_hca_t              *ibtl_hca = ibt_qp->ch_qp.qp_hca;

        IBTF_DPRINTF_L3(ibtf_qp, "ibt_free_qp(%p)", ibt_qp);

        if (ibt_qp->ch_qp.qp_type == IBT_RC_SRV) {
                ibtl_qp_flow_control_enter();
                mutex_enter(&ibtl_free_qp_mutex);
                if (ibt_qp->ch_transport.rc.rc_free_flags &
                    IBTL_RC_QP_CONNECTED) {
                        if ((ibt_qp->ch_transport.rc.rc_free_flags &
                            IBTL_RC_QP_CLOSING) == 0) {
                                IBTF_DPRINTF_L2(ibtf_qp, "ibt_free_qp: ERROR - "
                                    "need to call ibt_close_rc_channel");
                                mutex_exit(&ibtl_free_qp_mutex);
                                ibtl_qp_flow_control_exit();
                                return (IBT_CHAN_STATE_INVALID);
                        }
                        ibt_qp->ch_transport.rc.rc_free_flags |=
                            IBTL_RC_QP_FREED;
                        status = (IBTL_CHAN2CIHCAOPS_P(ibt_qp)->ibc_free_qp)
                            (IBTL_CHAN2CIHCA(ibt_qp), IBTL_CHAN2CIQP(ibt_qp),
                            IBC_FREE_QP_ONLY,
                            &ibt_qp->ch_transport.rc.rc_qpn_hdl);
                        mutex_exit(&ibtl_free_qp_mutex);
                        ibtl_qp_flow_control_exit();

                        if (status == IBT_SUCCESS) {
                                mutex_enter(&ibtl_clnt_list_mutex);
                                ibtl_hca->ha_qpn_cnt++;
                                mutex_exit(&ibtl_clnt_list_mutex);
                                mutex_enter(&ibtl_hca->ha_mutex);
                                ibtl_hca->ha_qp_cnt--;
                                mutex_exit(&ibtl_hca->ha_mutex);
                                IBTF_DPRINTF_L3(ibtf_qp, "ibt_free_qp(%p) - "
                                    "SUCCESS", ibt_qp);
                        } else
                                IBTF_DPRINTF_L2(ibtf_qp, "ibt_free_qp: "
                                    "ibc_free_qp failed: status = %d", status);
                        return (status);
                }
                mutex_exit(&ibtl_free_qp_mutex);
        } else
                ibtl_qp_flow_control_enter();

        status = (IBTL_CHAN2CIHCAOPS_P(ibt_qp)->ibc_free_qp)
            (IBTL_CHAN2CIHCA(ibt_qp), IBTL_CHAN2CIQP(ibt_qp),
            IBC_FREE_QP_AND_QPN, NULL);
        ibtl_qp_flow_control_exit();

        if (status == IBT_SUCCESS) {
                /* effectively, this is kmem_free(ibt_qp); */
                ibtl_free_qp_async_check(&ibt_qp->ch_qp);

                mutex_enter(&ibtl_hca->ha_mutex);
                ibtl_hca->ha_qp_cnt--;
                mutex_exit(&ibtl_hca->ha_mutex);
                IBTF_DPRINTF_L3(ibtf_qp, "ibt_free_qp(%p) - SUCCESS", ibt_qp);
        } else {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_free_qp: "
                    "ibc_free_qp failed with error %d", status);
        }

        return (status);
}


/* helper function for ibt_query_qp */
static void
ibtl_fillin_sgid(ibt_cep_path_t *pathp, ibtl_hca_devinfo_t *hca_devp)
{
        uint8_t port;
        uint32_t sgid_ix;
        ib_gid_t *sgidp;

        port = pathp->cep_hca_port_num;
        sgid_ix = pathp->cep_adds_vect.av_sgid_ix;
        if (port == 0 || port > hca_devp->hd_hca_attr->hca_nports ||
            sgid_ix >= IBTL_HDIP2SGIDTBLSZ(hca_devp)) {
                pathp->cep_adds_vect.av_sgid.gid_prefix = 0;
                pathp->cep_adds_vect.av_sgid.gid_guid = 0;
        } else {
                mutex_enter(&ibtl_clnt_list_mutex);
                sgidp = hca_devp->hd_portinfop[port-1].p_sgid_tbl;
                pathp->cep_adds_vect.av_sgid = sgidp[sgid_ix];
                mutex_exit(&ibtl_clnt_list_mutex);
        }
}


/*
 * Function:    ibt_query_qp
 *
 * Input:       ibt_qp          - The IBT QP Handle.
 *
 * Output:      qp_query_attrp  - Points to an ibt_qp_query_attr_t
 *                                that on return contains all the
 *                                attributes of the specified QP.
 *
 * Returns:     IBT_SUCCESS
 *              IBT_QP_HDL_INVALID
 *
 * Description:
 *      Query QP attributes.
 *
 */
ibt_status_t
ibt_query_qp(ibt_qp_hdl_t ibt_qp, ibt_qp_query_attr_t *qp_query_attrp)
{
        ibt_status_t            retval;
        ibtl_hca_devinfo_t      *hca_devp;
        ibt_qp_info_t           *qp_infop;

        IBTF_DPRINTF_L3(ibtf_qp, "ibt_query_qp(%p, %p)",
            ibt_qp, qp_query_attrp);

        ibtl_qp_flow_control_enter();
        retval = (IBTL_CHAN2CIHCAOPS_P(ibt_qp)->ibc_query_qp(
            IBTL_CHAN2CIHCA(ibt_qp), IBTL_CHAN2CIQP(ibt_qp), qp_query_attrp));
        ibtl_qp_flow_control_exit();
        if (retval == IBT_SUCCESS) {
                ibt_qp->ch_current_state = qp_query_attrp->qp_info.qp_state;

                /* need to fill in sgid from port and sgid_ix for RC and UC */
                hca_devp = ibt_qp->ch_qp.qp_hca->ha_hca_devp;
                qp_infop = &qp_query_attrp->qp_info;

                switch (qp_infop->qp_trans) {
                case IBT_RC_SRV:
                        ibtl_fillin_sgid(&qp_infop->qp_transport.rc.rc_path,
                            hca_devp);
                        ibtl_fillin_sgid(&qp_infop->qp_transport.rc.rc_alt_path,
                            hca_devp);
                        break;
                case IBT_UC_SRV:
                        ibtl_fillin_sgid(&qp_infop->qp_transport.uc.uc_path,
                            hca_devp);
                        ibtl_fillin_sgid(&qp_infop->qp_transport.uc.uc_alt_path,
                            hca_devp);
                        break;
                }
        } else {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_query_qp: "
                    "failed on chan %p: %d", ibt_qp, retval);
        }

        return (retval);
}


/*
 * Function:
 *      ibt_modify_qp
 * Input:
 *      ibt_qp          The IBT QP Handle.
 *      flags           Specifies which attributes in ibt_qp_info_t
 *                      are to be modified.
 *      modify_attrp    Points to an ibt_qp_info_t struct that contains all
 *                      the attributes of the specified QP that a client is
 *                      allowed to modify after a QP has been allocated.
 * Output:
 *      actual_sz       Returned actual queue sizes.
 * Returns:
 *      IBT_SUCCESS
 * Description:
 *      Modify the attributes of an existing QP.
 */
ibt_status_t
ibt_modify_qp(ibt_qp_hdl_t ibt_qp, ibt_cep_modify_flags_t flags,
    ibt_qp_info_t *modify_attrp, ibt_queue_sizes_t *actual_sz)
{
        ibt_status_t            retval;

        IBTF_DPRINTF_L3(ibtf_qp, "ibt_modify_qp(%p, %d, %p, %p)",
            ibt_qp, flags, modify_attrp, actual_sz);

        ibtl_qp_flow_control_enter();
        retval = (IBTL_CHAN2CIHCAOPS_P(ibt_qp)->ibc_modify_qp)(
            IBTL_CHAN2CIHCA(ibt_qp), IBTL_CHAN2CIQP(ibt_qp), flags,
            modify_attrp, actual_sz);
        ibtl_qp_flow_control_exit();
        if (retval == IBT_SUCCESS) {
                ibt_qp->ch_current_state = modify_attrp->qp_state;
                if (ibt_qp->ch_qp.qp_type == IBT_UD_SRV) {
                        if (flags & (IBT_CEP_SET_PORT | IBT_CEP_SET_RESET_INIT))
                                ibt_qp->ch_transport.ud.ud_port_num =
                                    modify_attrp->qp_transport.ud.ud_port;
                        if (flags & (IBT_CEP_SET_QKEY | IBT_CEP_SET_RESET_INIT))
                                ibt_qp->ch_transport.ud.ud_qkey =
                                    modify_attrp->qp_transport.ud.ud_qkey;
                }
        } else {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_modify_qp: failed on chan %p: %d",
                    ibt_qp, retval);

                if (retval == IBT_CHAN_STATE_INVALID) {
                        /* That means our cache had an invalid QP state value. */
                        ibt_qp_query_attr_t     qp_attr;

                        /* Query the channel (QP) */
                        if (ibt_query_qp(ibt_qp, &qp_attr) == IBT_SUCCESS)
                                ibt_qp->ch_current_state =
                                    qp_attr.qp_info.qp_state;
                }
        }
        return (retval);
}


/*
 * Function:
 *      ibt_migrate_path
 * Input:
 *      rc_chan         A previously allocated RC channel handle.
 * Output:
 *      none.
 * Returns:
 *      IBT_SUCCESS on Success else appropriate error.
 * Description:
 *      Force the CI to use the alternate path. The alternate path becomes
 *      the primary path. A new alternate path should be loaded and enabled.
 *      Assumes that the given channel is in RTS/SQD state.
 */
ibt_status_t
ibt_migrate_path(ibt_channel_hdl_t rc_chan)
{
        ibt_status_t            retval;
        ibt_qp_info_t           qp_info;
        ibt_qp_query_attr_t     qp_attr;
        ibt_cep_modify_flags_t  cep_flags;
        int                     retries = 1;

        IBTF_DPRINTF_L3(ibtf_qp, "ibt_migrate_path: channel %p", rc_chan);

        if (rc_chan->ch_qp.qp_type != IBT_RC_SRV) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_migrate_path: "
                    "Invalid Channel type: Applicable only to RC Channel");
                return (IBT_CHAN_SRV_TYPE_INVALID);
        }

        if (rc_chan->ch_current_state != IBT_STATE_RTS &&
            rc_chan->ch_current_state != IBT_STATE_SQD) {
                if (ibt_query_qp(rc_chan, &qp_attr) == IBT_SUCCESS) {
                        /* ch_current_state is fixed by ibt_query_qp */
                        if (rc_chan->ch_current_state != IBT_STATE_RTS &&
                            rc_chan->ch_current_state != IBT_STATE_SQD)
                                return (IBT_CHAN_STATE_INVALID);
                        retries = 0;
                } else /* query_qp should never really fail */
                        return (IBT_CHAN_STATE_INVALID);
        }

retry:
        /* Call modify_qp */
        cep_flags = IBT_CEP_SET_MIG | IBT_CEP_SET_STATE;
        qp_info.qp_state = rc_chan->ch_current_state;
        qp_info.qp_current_state = rc_chan->ch_current_state;
        qp_info.qp_trans = IBT_RC_SRV;
        qp_info.qp_transport.rc.rc_mig_state = IBT_STATE_MIGRATED;
        retval = ibt_modify_qp(rc_chan, cep_flags, &qp_info, NULL);

        if (retval != IBT_SUCCESS) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_migrate_path:"
                    " ibt_modify_qp() returned = %d", retval);
                if (rc_chan->ch_current_state != qp_info.qp_state &&
                    --retries >= 0) {
                        /*
                         * That means our cached 'state' was invalid.
                         * We know ibt_modify_qp() fixed it up, so it
                         * might be worth retrying.
                         */
                        if (rc_chan->ch_current_state != IBT_STATE_RTS &&
                            rc_chan->ch_current_state != IBT_STATE_SQD)
                                return (IBT_CHAN_STATE_INVALID);
                        IBTF_DPRINTF_L2(ibtf_qp, "ibt_migrate_path:"
                            " retrying after 'state' fixed");
                        goto retry;
                }
        }
        return (retval);
}


/*
 * Function:
 *      ibt_set_qp_private
 * Input:
 *      ibt_qp          The ibt_qp_hdl_t of the allocated QP.
 *      clnt_private    The client private data.
 * Output:
 *      none.
 * Returns:
 *      none.
 * Description:
 *      Set the client private data.
 */
void
ibt_set_qp_private(ibt_qp_hdl_t ibt_qp, void *clnt_private)
{
        ibt_qp->ch_clnt_private = clnt_private;
}


/*
 * Function:
 *      ibt_get_qp_private
 * Input:
 *      ibt_qp          The ibt_qp_hdl_t of the allocated QP.
 * Output:
 *      none.
 * Returns:
 *      The client private data.
 * Description:
 *      Get the client private data.
 */
void *
ibt_get_qp_private(ibt_qp_hdl_t ibt_qp)
{
        return (ibt_qp->ch_clnt_private);
}


/*
 * Function:
 *      ibt_qp_to_hca_guid
 * Input:
 *      ibt_qp          The ibt_qp_hdl_t of the allocated QP.
 * Output:
 *      none.
 * Returns:
 *      hca_guid        Returned HCA GUID on which the specified QP is
 *                      allocated. Valid if it is non-NULL on return.
 * Description:
 *      A helper function to retrieve HCA GUID for the specified QP.
 */
ib_guid_t
ibt_qp_to_hca_guid(ibt_qp_hdl_t ibt_qp)
{
        IBTF_DPRINTF_L3(ibtf_qp, "ibt_qp_to_hca_guid(%p)", ibt_qp);

        return (IBTL_HCA2HCAGUID(IBTL_CHAN2HCA(ibt_qp)));
}


/*
 * Function:
 *      ibt_recover_ud_qp
 * Input:
 *      ibt_qp          A UD QP Handle which is in SQError state.
 * Output:
 *      none.
 * Returns:
 *      IBT_SUCCESS
 *      IBT_QP_SRV_TYPE_INVALID
 *      IBT_QP_STATE_INVALID.
 * Description:
 *      Recover a UD QP which has transitioned to SQ Error state. The
 *      ibt_recover_ud_qp() transitions the QP from SQ Error state to
 *      Ready-To-Send QP state.
 *
 *      If a work request posted to a UD QP's send queue completes with an
 *      error (see ibt_wc_status_t), the QP gets transitioned to SQ Error
 *      state. In order to reuse this QP, ibt_recover_ud_qp() can be used
 *      to recover the QP to a usable (Ready-to-Send) state.
 */
ibt_status_t
ibt_recover_ud_qp(ibt_qp_hdl_t ibt_qp)
{
        IBTF_DPRINTF_L3(ibtf_qp, "ibt_recover_ud_qp(%p)", ibt_qp);

        return (ibt_recover_ud_channel(IBTL_QP2CHAN(ibt_qp)));
}


/*
 * Function:
 *      ibt_recycle_ud
 * Input:
 *      ud_chan         The IBT UD QP Handle.
 *      various attributes
 *
 * Output:
 *      none
 * Returns:
 *      IBT_SUCCESS
 *      IBT_CHAN_SRV_TYPE_INVALID
 *      IBT_CHAN_STATE_INVALID
 *
 * Description:
 *      Revert the UD QP back to a usable state.
 */
ibt_status_t
ibt_recycle_ud(ibt_channel_hdl_t ud_chan, uint8_t hca_port_num,
    uint16_t pkey_ix, ib_qkey_t qkey)
{
        ibt_qp_query_attr_t     qp_attr;
        ibt_status_t            retval;

        IBTF_DPRINTF_L3(ibtf_qp, "ibt_recycle_ud(%p, %d, %x, %x): ",
            ud_chan, hca_port_num, pkey_ix, qkey);

        if (ud_chan->ch_qp.qp_type != IBT_UD_SRV) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
                    "chan %p is not a UD channel", ud_chan);
                return (IBT_CHAN_SRV_TYPE_INVALID);
        }

        retval = ibt_query_qp(ud_chan, &qp_attr);
        if (retval != IBT_SUCCESS) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
                    "ibt_query_qp failed on chan %p: %d", ud_chan, retval);
                return (retval);
        }
        if (qp_attr.qp_info.qp_state != IBT_STATE_ERROR) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
                    "chan %p is in state %d (not in ERROR state)",
                    ud_chan, qp_attr.qp_info.qp_state);
                ud_chan->ch_current_state = qp_attr.qp_info.qp_state;
                return (IBT_CHAN_STATE_INVALID);
        }

        /* transition the QP from ERROR to RESET */
        qp_attr.qp_info.qp_state = IBT_STATE_RESET;
        qp_attr.qp_info.qp_trans = ud_chan->ch_qp.qp_type;
        retval = ibt_modify_qp(ud_chan, IBT_CEP_SET_STATE, &qp_attr.qp_info,
            NULL);
        if (retval != IBT_SUCCESS) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
                    "ibt_modify_qp(ERROR=>RESET) failed on chan %p: %d",
                    ud_chan, retval);
                return (retval);
        }
        ud_chan->ch_current_state = IBT_STATE_RESET;

        /* transition the QP back to RTS */
        qp_attr.qp_info.qp_transport.ud.ud_port = hca_port_num;
        qp_attr.qp_info.qp_transport.ud.ud_qkey = qkey;
        qp_attr.qp_info.qp_transport.ud.ud_pkey_ix = pkey_ix;
        retval = ibt_initialize_qp(ud_chan, &qp_attr.qp_info);
        if (retval != IBT_SUCCESS) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
                    "ibt_initialize_qp failed on chan %p: %d", ud_chan, retval);
                /* the man page says the QP should be left in ERROR state */
                (void) ibt_flush_qp(ud_chan);
        }
        return (retval);
}

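/*
 * Illustrative use of ibt_recycle_ud() (added sketch; ud_chan, the port
 * number, P_Key index and Q_Key below are assumed client-side values, not
 * from the original source): once a send WR completes in error and the UD
 * channel drops into the ERROR state, the channel can be reclaimed in
 * place instead of being freed and reallocated:
 *
 *      if (ibt_recycle_ud(ud_chan, 1, my_pkey_ix, my_qkey) != IBT_SUCCESS)
 *              (fall back to ibt_free_qp()/ibt_alloc_qp())
 */
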
/*
 * Function:
 *      ibt_pause_sendq
 * Input:
 *      chan            The IBT QP Handle.
 *      modify_flags    IBT_CEP_SET_NOTHING or IBT_CEP_SET_SQD_EVENT
 *
 * Output:
 *      none.
 * Returns:
 *      IBT_SUCCESS
 *      IBT_CHAN_HDL_INVALID
 *      IBT_CHAN_STATE_INVALID
 *      IBT_INVALID_PARAM
 *
 * Description:
 *      Place the send queue of the specified channel into the send queue
 *      drained (SQD) state.
 *
 */
ibt_status_t
ibt_pause_sendq(ibt_channel_hdl_t chan, ibt_cep_modify_flags_t modify_flags)
{
        ibt_qp_info_t           modify_attr;
        ibt_status_t            retval;

        IBTF_DPRINTF_L3(ibtf_qp, "ibt_pause_sendq(%p, %x)", chan, modify_flags);

        modify_flags &= IBT_CEP_SET_SQD_EVENT;  /* ignore other bits */
        modify_flags |= IBT_CEP_SET_STATE;

        bzero(&modify_attr, sizeof (ibt_qp_info_t));
        /*
         * Set the QP state to SQD.
         */
        modify_attr.qp_state = IBT_STATE_SQD;
        modify_attr.qp_trans = chan->ch_qp.qp_type;

        retval = ibt_modify_qp(chan, modify_flags, &modify_attr, NULL);

        if (retval != IBT_SUCCESS) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_pause_sendq: "
                    "failed on chan %p: %d", chan, retval);
        }
        return (retval);
}


/*
 * Function:
 *      ibt_unpause_sendq
 * Input:
 *      chan            The IBT Channel Handle.
 * Output:
 *      none.
 * Returns:
 *      IBT_SUCCESS
 *      IBT_CHAN_HDL_INVALID
 *      IBT_CHAN_STATE_INVALID
 * Description:
 *      Un-pauses the previously paused channel. This call will transition the
 *      QP from SQD to RTS state.
 */
ibt_status_t
ibt_unpause_sendq(ibt_channel_hdl_t chan)
{
        ibt_qp_info_t           modify_attr;
        ibt_status_t            retval;

        IBTF_DPRINTF_L3(ibtf_qp, "ibt_unpause_sendq(%p)", chan);

        bzero(&modify_attr, sizeof (ibt_qp_info_t));

        /*
         * Set the QP state to RTS.
         */
        modify_attr.qp_current_state = IBT_STATE_SQD;
        modify_attr.qp_state = IBT_STATE_RTS;
        modify_attr.qp_trans = chan->ch_qp.qp_type;

        retval = ibt_modify_qp(chan, IBT_CEP_SET_STATE, &modify_attr, NULL);
        if (retval != IBT_SUCCESS) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_unpause_sendq: "
                    "failed on chan %p: %d", chan, retval);
        }
        return (retval);
}


/*
 * Function:
 *      ibt_resize_queues
 * Input:
 *      chan            A previously allocated channel handle.
 *      flags           QP Flags
 *                              IBT_SEND_Q
 *                              IBT_RECV_Q
 *      request_sz      Requested new sizes.
 * Output:
 *      actual_sz       Returned actual sizes.
 * Returns:
 *      IBT_SUCCESS
 * Description:
 *      Resize the SendQ/RecvQ sizes of a channel. Can only be called on
 *      a previously opened channel.
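 *
 *      Illustrative sketch (added; chan and the requested depths are
 *      assumed client values):
 *
 *              ibt_queue_sizes_t       req, act;
 *
 *              req.qs_sq = 256;
 *              req.qs_rq = 512;
 *              status = ibt_resize_queues(chan, IBT_SEND_Q | IBT_RECV_Q,
 *                  &req, &act);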
 */
ibt_status_t
ibt_resize_queues(ibt_channel_hdl_t chan, ibt_qflags_t flags,
    ibt_queue_sizes_t *request_sz, ibt_queue_sizes_t *actual_sz)
{
        ibt_cep_modify_flags_t  modify_flags = IBT_CEP_SET_STATE;
        ibt_qp_info_t           modify_attr;
        ibt_status_t            retval;

        IBTF_DPRINTF_L3(ibtf_qp, "ibt_resize_queues(%p, 0x%X, %p, %p)",
            chan, flags, request_sz, actual_sz);

        if ((flags & (IBT_SEND_Q | IBT_RECV_Q)) == 0) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_resize_queues: "
                    "Flags <0x%X> not set", flags);
                return (IBT_INVALID_PARAM);
        }

        bzero(&modify_attr, sizeof (ibt_qp_info_t));

        modify_attr.qp_current_state = chan->ch_current_state;
        modify_attr.qp_trans = chan->ch_qp.qp_type;
        modify_attr.qp_state = chan->ch_current_state;

        if (flags & IBT_SEND_Q) {
                modify_attr.qp_sq_sz = request_sz->qs_sq;
                modify_flags |= IBT_CEP_SET_SQ_SIZE;
        }

        if (flags & IBT_RECV_Q) {
                modify_attr.qp_rq_sz = request_sz->qs_rq;
                modify_flags |= IBT_CEP_SET_RQ_SIZE;
        }

        retval = ibt_modify_qp(chan, modify_flags, &modify_attr, actual_sz);
        if (retval != IBT_SUCCESS) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_resize_queues: "
                    "failed on QP %p: %d", chan, retval);
        }

        return (retval);
}


/*
 * Function:
 *      ibt_query_queues
 * Input:
 *      chan            A previously allocated channel handle.
 * Output:
 *      actual_sz       Returned actual sizes.
 * Returns:
 *      IBT_SUCCESS
 * Description:
 *      Query the SendQ/RecvQ sizes of a channel.
 */
ibt_status_t
ibt_query_queues(ibt_channel_hdl_t chan, ibt_queue_sizes_t *actual_sz)
{
        ibt_status_t            retval;
        ibt_qp_query_attr_t     qp_query_attr;

        IBTF_DPRINTF_L3(ibtf_qp, "ibt_query_queues(%p)", chan);

        /* Perform Query QP and retrieve QP sizes. */
        retval = ibt_query_qp(chan, &qp_query_attr);
        if (retval != IBT_SUCCESS) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_query_queues: "
                    "ibt_query_qp failed: qp %p: %d", chan, retval);
                return (retval);
        }

        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(actual_sz->qs_rq,
            actual_sz->qs_sq))
        actual_sz->qs_sq = qp_query_attr.qp_info.qp_sq_sz;
        actual_sz->qs_rq = qp_query_attr.qp_info.qp_rq_sz;
        _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(actual_sz->qs_rq,
            actual_sz->qs_sq))
        chan->ch_current_state = qp_query_attr.qp_info.qp_state;

        return (retval);
}


/*
 * Function:
 *      ibt_modify_rdma
 * Input:
 *      rc_chan         A previously allocated channel handle.
 *
 *      modify_flags    Bitwise "or" of any of the following:
 *                      IBT_CEP_SET_RDMA_R      Enable/Disable RDMA RD
 *                      IBT_CEP_SET_RDMA_W      Enable/Disable RDMA WR
 *                      IBT_CEP_SET_ATOMIC      Enable/Disable Atomics
 *
 *      flags           Channel End Point (CEP) Disable Flags (0 => enable).
 *                      IBT_CEP_NO_RDMA_RD      Disable incoming RDMA RD's
 *                      IBT_CEP_NO_RDMA_WR      Disable incoming RDMA WR's
 *                      IBT_CEP_NO_ATOMIC       Disable incoming Atomics.
 * Output:
 *      none.
 * Returns:
 *      IBT_SUCCESS
 *      IBT_QP_SRV_TYPE_INVALID
 *      IBT_CHAN_HDL_INVALID
 *      IBT_CHAN_ATOMICS_NOT_SUPPORTED
 *      IBT_CHAN_STATE_INVALID
 * Description:
 *      Enable/disable RDMA operations. To enable an operation, clear the
 *      "disable" flag. This function can be called when the channel is in
 *      the INIT, RTS or SQD states. If called in any other state,
 *      IBT_CHAN_STATE_INVALID is returned.
 *      When the operation completes, the channel state is left unchanged.
 */
ibt_status_t
ibt_modify_rdma(ibt_channel_hdl_t rc_chan,
    ibt_cep_modify_flags_t modify_flags, ibt_cep_flags_t flags)
{
        ibt_status_t            retval;
        ibt_qp_info_t           modify_attr;

        IBTF_DPRINTF_L3(ibtf_qp, "ibt_modify_rdma(%p, 0x%x, 0x%x)",
            rc_chan, modify_flags, flags);

        if (rc_chan->ch_qp.qp_type != IBT_RC_SRV) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_modify_rdma: "
                    "Invalid Channel type: 0x%X, Applicable only to RC Channel",
                    rc_chan->ch_qp.qp_type);
                return (IBT_QP_SRV_TYPE_INVALID);
        }

        bzero(&modify_attr, sizeof (ibt_qp_info_t));

        /*
         * Can only call this function when the channel is in the INIT,
         * RTS or SQD states.
         */
        if ((rc_chan->ch_current_state != IBT_STATE_INIT) &&
            (rc_chan->ch_current_state != IBT_STATE_RTS) &&
            (rc_chan->ch_current_state != IBT_STATE_SQD)) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_modify_rdma: Invalid Channel "
                    "state: 0x%X", rc_chan->ch_current_state);
                return (IBT_CHAN_STATE_INVALID);
        }

        modify_attr.qp_state = modify_attr.qp_current_state =
            rc_chan->ch_current_state;
        modify_attr.qp_trans = rc_chan->ch_qp.qp_type;
        modify_attr.qp_flags = flags;

        modify_flags &= (IBT_CEP_SET_RDMA_R | IBT_CEP_SET_RDMA_W |
            IBT_CEP_SET_ATOMIC);
        modify_flags |= IBT_CEP_SET_STATE;

        retval = ibt_modify_qp(rc_chan, modify_flags, &modify_attr, NULL);
        if (retval != IBT_SUCCESS) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_modify_rdma: "
                    "failed on chan %p: %d", rc_chan, retval);
        }
        return (retval);
}


/*
 * Function:
 *      ibt_set_rdma_resource
 * Input:
 *      chan            A previously allocated RC channel handle.
 *      modify_flags    Bitwise "or" of any of the following:
 *                      IBT_CEP_SET_RDMARA_OUT  Initiator depth (rdma_ra_out)
 *                      IBT_CEP_SET_RDMARA_IN   Responder Resources
 *                                              (rdma_ra_in)
 *      rdma_ra_out     Outgoing RDMA Reads/Atomics
 *      rdma_ra_in      Incoming RDMA Reads/Atomics
 * Output:
 *      none.
 * Returns:
 *      IBT_SUCCESS
 * Description:
 *      Change the number of resources to be used for incoming and outgoing
 *      RDMA reads & Atomics. Can only be called on a previously opened
 *      RC channel. Can only be called on a paused channel, and this will
 *      un-pause that channel.
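 *
 *      Illustrative sketch (added; chan and the counts are assumed client
 *      values): pause the send queue first, then update both resource
 *      counts.
 *
 *              status = ibt_pause_sendq(chan, IBT_CEP_SET_NOTHING);
 *              (wait for the send queue to drain)
 *              status = ibt_set_rdma_resource(chan,
 *                  IBT_CEP_SET_RDMARA_OUT | IBT_CEP_SET_RDMARA_IN, 4, 4);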
 */
ibt_status_t
ibt_set_rdma_resource(ibt_channel_hdl_t chan,
    ibt_cep_modify_flags_t modify_flags, uint8_t rdma_ra_out,
    uint8_t resp_rdma_ra_out)
{
        ibt_qp_info_t           modify_attr;
        ibt_status_t            retval;

        IBTF_DPRINTF_L3(ibtf_qp, "ibt_set_rdma_resource(%p, 0x%x, %d, %d)",
            chan, modify_flags, rdma_ra_out, resp_rdma_ra_out);

        if (chan->ch_qp.qp_type != IBT_RC_SRV) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_set_rdma_resource: "
                    "Invalid Channel type: 0x%X, Applicable only to RC Channel",
                    chan->ch_qp.qp_type);
                return (IBT_CHAN_SRV_TYPE_INVALID);
        }

        bzero(&modify_attr, sizeof (ibt_qp_info_t));

        modify_attr.qp_trans = chan->ch_qp.qp_type;
        modify_attr.qp_state = IBT_STATE_SQD;

        modify_attr.qp_transport.rc.rc_rdma_ra_out = rdma_ra_out;
        modify_attr.qp_transport.rc.rc_rdma_ra_in = resp_rdma_ra_out;
        modify_flags &= (IBT_CEP_SET_RDMARA_OUT | IBT_CEP_SET_RDMARA_IN);
        modify_flags |= IBT_CEP_SET_STATE;

        retval = ibt_modify_qp(chan, modify_flags, &modify_attr, NULL);
        if (retval != IBT_SUCCESS) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_set_rdma_resource: "
                    "failed on chan %p: %d", chan, retval);
        }
        return (retval);
}


/*
 * Function:
 *      ibt_change_port
 * Input:
 *      chan            A previously allocated RC channel handle.
 *      port_num        New HCA port.
 * Output:
 *      none.
 * Returns:
 *      IBT_SUCCESS
 * Description:
 *      Change the primary physical port of a channel. (This is done only if
 *      the HCA supports this capability.)
 */
ibt_status_t
ibt_change_port(ibt_channel_hdl_t chan, uint8_t port_num)
{
        ibt_cep_modify_flags_t  modify_flags;
        ibt_qp_info_t           modify_attr;
        ibt_status_t            retval;

        IBTF_DPRINTF_L3(ibtf_qp, "ibt_change_port(%p, %d)", chan, port_num);

        if (chan->ch_qp.qp_type != IBT_RC_SRV) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_change_port: "
                    "Invalid Channel type: 0x%X, Applicable only to RC Channel",
                    chan->ch_qp.qp_type);
                return (IBT_CHAN_SRV_TYPE_INVALID);
        }
        bzero(&modify_attr, sizeof (ibt_qp_info_t));

        modify_attr.qp_state = IBT_STATE_SQD;
        modify_attr.qp_trans = chan->ch_qp.qp_type;
        modify_attr.qp_transport.rc.rc_path.cep_hca_port_num = port_num;

        modify_flags = IBT_CEP_SET_STATE | IBT_CEP_SET_PORT;

        retval = ibt_modify_qp(chan, modify_flags, &modify_attr, NULL);
        if (retval != IBT_SUCCESS) {
                IBTF_DPRINTF_L2(ibtf_qp, "ibt_change_port: "
                    "failed on chan %p: %d", chan, retval);
        }
        return (retval);
}


void
ibtl_init_cep_states(void)
{
        int     index;
        int     ibt_nstate_inits;

        IBTF_DPRINTF_L3(ibtf_qp, "ibtl_init_cep_states()");

        ibt_nstate_inits = sizeof (ibt_cep_next_state_inits) /
            sizeof (ibt_cep_next_state_inits[0]);

        /*
         * Initialize CEP next state table, using an indirect lookup table so
         * that this code isn't dependent on the ibt_cep_state_t enum values.
         */
        for (index = 0; index < ibt_nstate_inits; index++) {
                ibt_cep_state_t state;

                state = ibt_cep_next_state_inits[index].current_state;

                ibt_cep_next_state[state].next_state =
                    ibt_cep_next_state_inits[index].next_state;

                ibt_cep_next_state[state].modify_flags =
                    ibt_cep_next_state_inits[index].modify_flags;
        }
}