/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <machine/in_cksum.h>

#include "core_priv.h"

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR] = "CQ error",
	[IB_EVENT_QP_FATAL] = "QP fatal error",
	[IB_EVENT_QP_REQ_ERR] = "QP request error",
	[IB_EVENT_QP_ACCESS_ERR] = "QP access error",
	[IB_EVENT_COMM_EST] = "communication established",
	[IB_EVENT_SQ_DRAINED] = "send queue drained",
	[IB_EVENT_PATH_MIG] = "path migration successful",
	[IB_EVENT_PATH_MIG_ERR] = "path migration error",
	[IB_EVENT_DEVICE_FATAL] = "device fatal error",
	[IB_EVENT_PORT_ACTIVE] = "port active",
	[IB_EVENT_PORT_ERR] = "port error",
	[IB_EVENT_LID_CHANGE] = "LID change",
	[IB_EVENT_PKEY_CHANGE] = "P_key change",
	[IB_EVENT_SM_CHANGE] = "SM change",
	[IB_EVENT_SRQ_ERR] = "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER] = "client reregister",
	[IB_EVENT_GID_CHANGE] = "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);
static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS] = "success",
	[IB_WC_LOC_LEN_ERR] = "local length error",
	[IB_WC_LOC_QP_OP_ERR] = "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error",
	[IB_WC_LOC_PROT_ERR] = "local protection error",
	[IB_WC_WR_FLUSH_ERR] = "WR flushed",
	[IB_WC_MW_BIND_ERR] = "memory management operation error",
	[IB_WC_BAD_RESP_ERR] = "bad response error",
	[IB_WC_LOC_ACCESS_ERR] = "local access error",
	[IB_WC_REM_INV_REQ_ERR] = "invalid request error",
	[IB_WC_REM_ACCESS_ERR] = "remote access error",
	[IB_WC_REM_OP_ERR] = "remote operation error",
	[IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR] = "operation aborted",
	[IB_WC_INV_EECN_ERR] = "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state",
	[IB_WC_FATAL_ERR] = "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR] = "response timeout error",
	[IB_WC_GENERAL_ERR] = "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);

__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller)
{
	struct ib_pd *pd;
	int mr_access_flags = 0;

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);
	pd->flags = flags;

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else
		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		pr_warn("%s: enabling unsafe global rkey\n", caller);
		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	}

	if (mr_access_flags) {
		struct ib_mr *mr;

		mr = pd->device->get_dma_mr(pd, mr_access_flags);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return ERR_CAST(mr);
		}

		mr->device = pd->device;
		mr->pd = pd;
		mr->uobject = NULL;
		mr->need_inval = false;

		pd->__internal_mr = mr;

		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
			pd->local_dma_lkey = pd->__internal_mr->lkey;

		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
	}

	return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);
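
/*
 * Illustrative usage sketch (not part of this file): a kernel consumer
 * normally allocates one PD per device through the ib_alloc_pd() wrapper
 * around __ib_alloc_pd() and releases it on teardown.  "dev" is assumed to
 * come from an ib_client add callback; error handling is abbreviated.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(dev, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */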

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->__internal_mr) {
		ret = pd->device->dereg_mr(pd->__internal_mr);
		WARN_ON(ret);
		pd->__internal_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	   requires the caller to guarantee we can't race here. */
	WARN_ON(atomic_read(&pd->usecnt));

	/* Making dealloc_pd a void return is a WIP, no driver should return
	   an error here. */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr, NULL);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

static int ib_get_header_version(const union rdma_network_hdr *hdr)
{
	const struct ip *ip4h = (const struct ip *)&hdr->roce4grh;
	struct ip ip4h_checked;
	const struct ip6_hdr *ip6h = (const struct ip6_hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if ((ip6h->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION)
		return (ip4h->ip_v == 4) ? 4 : 0;
	/* version may be 6 or 4 because the first 20 bytes could be garbled */

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ip_hl != 5)
		return 6;

	/* Verify checksum.
	 * We can't write on scattered buffers so we need to copy to
	 * temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.ip_sum = 0;
#if defined(INET) || defined(INET6)
	ip4h_checked.ip_sum = in_cksum_hdr(&ip4h_checked);
#endif
	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->ip_sum == ip4h_checked.ip_sum)
		return 4;
	return 6;
}

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						      u8 port_num,
						      const struct ib_grh *grh)
{
	int grh_version;

	if (rdma_protocol_ib(device, port_num))
		return RDMA_NETWORK_IB;

	grh_version = ib_get_header_version((const union rdma_network_hdr *)grh);

	if (grh_version == 4)
		return RDMA_NETWORK_IPV4;

	if (grh->next_hdr == IPPROTO_UDP)
		return RDMA_NETWORK_IPV6;

	return RDMA_NETWORK_ROCE_V1;
}

struct find_gid_index_context {
	u16 vlan_id;
	enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx =
		(struct find_gid_index_context *)context;

	if (ctx->gid_type != gid_attr->gid_type)
		return false;
	if (rdma_vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id)
		return false;
	return true;
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
				   u16 vlan_id, const union ib_gid *sgid,
				   enum ib_gid_type gid_type,
				   u16 *gid_index)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id,
						 .gid_type = gid_type};

	return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				     &context, gid_index);
}

static int get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
				  enum rdma_network_type net_type,
				  union ib_gid *sgid, union ib_gid *dgid)
{
	struct sockaddr_in src_in;
	struct sockaddr_in dst_in;
	__be32 src_saddr, dst_saddr;

	if (!sgid || !dgid)
		return -EINVAL;

	if (net_type == RDMA_NETWORK_IPV4) {
		memcpy(&src_in.sin_addr.s_addr,
		       &hdr->roce4grh.ip_src, 4);
		memcpy(&dst_in.sin_addr.s_addr,
		       &hdr->roce4grh.ip_dst, 4);
		src_saddr = src_in.sin_addr.s_addr;
		dst_saddr = dst_in.sin_addr.s_addr;
		ipv6_addr_set_v4mapped(src_saddr,
				       (struct in6_addr *)sgid);
		ipv6_addr_set_v4mapped(dst_saddr,
				       (struct in6_addr *)dgid);
		return 0;
	} else if (net_type == RDMA_NETWORK_IPV6 ||
		   net_type == RDMA_NETWORK_IB) {
		*dgid = hdr->ibgrh.dgid;
		*sgid = hdr->ibgrh.sgid;
		return 0;
	} else {
		return -EINVAL;
	}
}

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index = 0;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
	}
	ret = get_gids_from_rdma_hdr((const union rdma_network_hdr *)grh, net_type,
				     &sgid, &dgid);
	if (ret)
		return ret;

	if (rdma_protocol_roce(device, port_num)) {
		struct ib_gid_attr dgid_attr;
		const u16 vlan_id = (wc->wc_flags & IB_WC_WITH_VLAN) ?
				wc->vlan_id : 0xffff;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		ret = get_sgid_index_from_eth(device, port_num, vlan_id,
					      &dgid, gid_type, &gid_index);
		if (ret)
			return ret;

		ret = ib_get_cached_gid(device, port_num, gid_index, &dgid, &dgid_attr);
		if (ret)
			return ret;

		if (dgid_attr.ndev == NULL)
			return -ENODEV;

		ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid, ah_attr->dmac,
						   dgid_attr.ndev, &hoplimit);

		dev_put(dgid_attr.ndev);
		if (ret)
			return ret;
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = sgid;

		if (!rdma_cap_eth_ah(device, port_num)) {
			if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
				ret = ib_find_cached_gid_by_port(device, &dgid,
								 IB_GID_TYPE_IB,
								 port_num, NULL,
								 &gid_index);
				if (ret)
					return ret;
			}
		}

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = hoplimit;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
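
/*
 * Illustrative usage sketch (not part of this file): a UD consumer that
 * wants to reply to a received datagram usually builds an address handle
 * directly from the completion, posts the reply, and destroys the handle.
 * "wc" and "recv_buf->grh" are assumed to come from the receive completion;
 * error handling is abbreviated.
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, recv_buf->grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post the reply using ah as the UD destination ...
 *	ib_destroy_ah(ah);
 */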

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device = pd->device;
		srq->pd = pd;
		srq->uobject = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context = srq_init_attr->srq_context;
		srq->srq_type = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);
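
/*
 * Illustrative usage sketch (not part of this file): creating a basic SRQ
 * that several QPs can later share.  The queue depths are made-up values;
 * error handling is abbreviated.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = {
 *			.max_wr  = 256,
 *			.max_sge = 1,
 *		},
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq;
 *
 *	srq = ib_create_srq(pd, &srq_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */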

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
		struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *real_qp = qp;

	qp->event_handler = __ib_shared_qp_event_handler;
	qp->qp_context = qp;
	qp->pd = NULL;
	qp->send_cq = qp->recv_cq = NULL;
	qp->srq = NULL;
	qp->xrcd = qp_init_attr->xrcd;
	atomic_inc(&qp_init_attr->xrcd->usecnt);
	INIT_LIST_HEAD(&qp->open_list);

	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
			  qp_init_attr->qp_context);
	if (!IS_ERR(qp))
		__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
	else
		real_qp->device->destroy_qp(real_qp);
	return qp;
}

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
	struct ib_qp *qp;

	if (qp_init_attr->rwq_ind_tbl &&
	    (qp_init_attr->recv_cq ||
	    qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
	    qp_init_attr->cap.max_recv_sge))
		return ERR_PTR(-EINVAL);

	qp = device->create_qp(pd, qp_init_attr, NULL);
	if (IS_ERR(qp))
		return qp;

	qp->device = device;
	qp->real_qp = qp;
	qp->uobject = NULL;
	qp->qp_type = qp_init_attr->qp_type;
	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;

	atomic_set(&qp->usecnt, 0);
	spin_lock_init(&qp->mr_lock);

	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
		return ib_create_xrc_qp(qp, qp_init_attr);

	qp->event_handler = qp_init_attr->event_handler;
	qp->qp_context = qp_init_attr->qp_context;
	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
		qp->recv_cq = NULL;
		qp->srq = NULL;
	} else {
		qp->recv_cq = qp_init_attr->recv_cq;
		if (qp_init_attr->recv_cq)
			atomic_inc(&qp_init_attr->recv_cq->usecnt);
		qp->srq = qp_init_attr->srq;
		if (qp->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	qp->pd = pd;
	qp->send_cq = qp_init_attr->send_cq;
	qp->xrcd = NULL;

	atomic_inc(&pd->usecnt);
	if (qp_init_attr->send_cq)
		atomic_inc(&qp_init_attr->send_cq->usecnt);
	if (qp_init_attr->rwq_ind_tbl)
		atomic_inc(&qp->rwq_ind_tbl->usecnt);

	/*
	 * Note: all hw drivers guarantee that max_send_sge is lower than
	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
	 * max_send_sge <= max_sge_rd.
	 */
	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
				 device->attrs.max_sge_rd);

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
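
/*
 * Illustrative usage sketch (not part of this file): creating an RC QP on
 * an existing PD with previously allocated send/receive CQs.  The capacity
 * numbers are arbitrary placeholders; error handling is abbreviated.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = send_cq,
 *		.recv_cq = recv_cq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp;
 *
 *	qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */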

static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			},
		},
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 }
	}
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);

int ib_resolve_eth_dmac(struct ib_device *device,
			struct ib_ah_attr *ah_attr)
{
	struct ib_gid_attr sgid_attr;
	union ib_gid sgid;
	int hop_limit;
	int ret;

	if (ah_attr->port_num < rdma_start_port(device) ||
	    ah_attr->port_num > rdma_end_port(device))
		return -EINVAL;

	if (!rdma_cap_eth_ah(device, ah_attr->port_num))
		return 0;

	if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
		if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
			__be32 addr = 0;

			memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
			ip_eth_mc_map(addr, (char *)ah_attr->dmac);
		} else {
			ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
					(char *)ah_attr->dmac);
		}
		return 0;
	}

	ret = ib_query_gid(device,
			   ah_attr->port_num,
			   ah_attr->grh.sgid_index,
			   &sgid, &sgid_attr);
	if (ret != 0)
		return (ret);
	if (!sgid_attr.ndev)
		return -ENXIO;

	ret = rdma_addr_find_l2_eth_by_grh(&sgid,
					   &ah_attr->grh.dgid,
					   ah_attr->dmac,
					   sgid_attr.ndev, &hop_limit);
	dev_put(sgid_attr.ndev);

	ah_attr->grh.hop_limit = hop_limit;
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_dmac);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	if (qp_attr_mask & IB_QP_AV) {
		int ret;

		ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);
		if (ret)
			return ret;
	}

	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);
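
/*
 * Illustrative usage sketch (not part of this file): the attribute mask
 * passed to ib_modify_qp() must satisfy qp_state_table above.  For example,
 * moving an RC QP from RESET to INIT requires PKEY_INDEX, PORT and
 * ACCESS_FLAGS in addition to IB_QP_STATE.  The values below are
 * placeholders; error handling is abbreviated.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret;
 *
 *	ret = ib_modify_qp(qp, &attr,
 *	    IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */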

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_rwq_ind_table *ind_tbl;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;
	ind_tbl = qp->rwq_ind_tbl;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
		if (ind_tbl)
			atomic_dec(&ind_tbl->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device = device;
		cq->uobject = NULL;
		cq->comp_handler = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);
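
/*
 * Illustrative usage sketch (not part of this file): allocating a plain
 * completion queue with this low-level interface.  Consumers that want the
 * generic polling contexts (as required by the drain helpers below) use
 * ib_alloc_cq() instead.  "my_comp_handler" and "my_ctx" are hypothetical;
 * error handling is abbreviated.
 *
 *	struct ib_cq_init_attr cq_attr = {
 *		.cqe = 128,
 *	};
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, my_ctx, &cq_attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */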

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd = mr->pd;
	int ret;

	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 *
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device = pd->device;
		mr->pd = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		mr->need_inval = false;
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
{
	struct ib_qp_init_attr init_attr = {};
	struct ib_qp_attr attr = {};
	int num_eth_ports = 0;
	int port;

	/* If QP state >= init, it is assigned to a port and we can check this
	 * port only.
	 */
	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
		if (attr.qp_state >= IB_QPS_INIT) {
			if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
			    IB_LINK_LAYER_INFINIBAND)
				return true;
			goto lid_check;
		}
	}

	/* Can't get a quick answer, iterate over all ports */
	for (port = 0; port < qp->device->phys_port_cnt; port++)
		if (rdma_port_get_link_layer(qp->device, port) !=
		    IB_LINK_LAYER_INFINIBAND)
			num_eth_ports++;

	/* If we have at least one Ethernet port, RoCE annex declares that
	 * multicast LID should be ignored. We can't tell at this step if the
	 * QP belongs to an IB or Ethernet port.
	 */
	if (num_eth_ports)
		return true;

	/* If all the ports are IB, we can check according to IB spec. */
lid_check:
	return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
		 lid == be16_to_cpu(IB_LID_PERMISSIVE));
}

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;

	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;

	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);

struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

/**
 * ib_create_wq - Creates a WQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the WQ.
 * @wq_init_attr: A list of initial attributes required to create the
 * WQ. If WQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created WQ.
 *
 * wq_init_attr->max_wr and wq_init_attr->max_sge determine
 * the requested size of the WQ, and are set to the actual values allocated
 * on return.
 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
 * at least as large as the requested values.
 */
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *wq_attr)
{
	struct ib_wq *wq;

	if (!pd->device->create_wq)
		return ERR_PTR(-ENOSYS);

	wq = pd->device->create_wq(pd, wq_attr, NULL);
	if (!IS_ERR(wq)) {
		wq->event_handler = wq_attr->event_handler;
		wq->wq_context = wq_attr->wq_context;
		wq->wq_type = wq_attr->wq_type;
		wq->cq = wq_attr->cq;
		wq->device = pd->device;
		wq->pd = pd;
		wq->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_inc(&wq_attr->cq->usecnt);
		atomic_set(&wq->usecnt, 0);
	}
	return wq;
}
EXPORT_SYMBOL(ib_create_wq);

/**
 * ib_destroy_wq - Destroys the specified WQ.
 * @wq: The WQ to destroy.
 */
int ib_destroy_wq(struct ib_wq *wq)
{
	int err;
	struct ib_cq *cq = wq->cq;
	struct ib_pd *pd = wq->pd;

	if (atomic_read(&wq->usecnt))
		return -EBUSY;

	err = wq->device->destroy_wq(wq);
	if (!err) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&cq->usecnt);
	}
	return err;
}
EXPORT_SYMBOL(ib_destroy_wq);

/**
 * ib_modify_wq - Modifies the specified WQ.
 * @wq: The WQ to modify.
 * @wq_attr: On input, specifies the WQ attributes to modify.
 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
 *   are being modified.
 * On output, the current values of selected WQ attributes are returned.
 */
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		 u32 wq_attr_mask)
{
	int err;

	if (!wq->device->modify_wq)
		return -ENOSYS;

	err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
	return err;
}
EXPORT_SYMBOL(ib_modify_wq);

/*
 * ib_create_rwq_ind_table - Creates a RQ Indirection Table.
 * @device: The device on which to create the rwq indirection table.
 * @ib_rwq_ind_table_init_attr: A list of initial attributes required to
 * create the Indirection Table.
 *
 * Note: The life time of ib_rwq_ind_table_init_attr->ind_tbl is not less
 *	than the created ib_rwq_ind_table object and the caller is responsible
 *	for its memory allocation/free.
 */
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr *init_attr)
{
	struct ib_rwq_ind_table *rwq_ind_table;
	int i;
	u32 table_size;

	if (!device->create_rwq_ind_table)
		return ERR_PTR(-ENOSYS);

	table_size = (1 << init_attr->log_ind_tbl_size);
	rwq_ind_table = device->create_rwq_ind_table(device,
						     init_attr, NULL);
	if (IS_ERR(rwq_ind_table))
		return rwq_ind_table;

	rwq_ind_table->ind_tbl = init_attr->ind_tbl;
	rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
	rwq_ind_table->device = device;
	rwq_ind_table->uobject = NULL;
	atomic_set(&rwq_ind_table->usecnt, 0);

	for (i = 0; i < table_size; i++)
		atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);

	return rwq_ind_table;
}
EXPORT_SYMBOL(ib_create_rwq_ind_table);
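
/*
 * Illustrative usage sketch (not part of this file): building a two-entry
 * RSS indirection table from receive WQs created on the same PD.  Per the
 * note above, "wqs" must stay allocated for the lifetime of the table.
 * The sizes are placeholders; error handling is abbreviated.
 *
 *	static struct ib_wq *wqs[2];
 *	struct ib_rwq_ind_table_init_attr ind_attr = {
 *		.log_ind_tbl_size = 1,	(1 << 1 == 2 entries)
 *		.ind_tbl          = wqs,
 *	};
 *	struct ib_rwq_ind_table *ind_tbl;
 *
 *	... fill wqs[i] with ib_create_wq(pd, &wq_attr) results ...
 *	ind_tbl = ib_create_rwq_ind_table(device, &ind_attr);
 *	if (IS_ERR(ind_tbl))
 *		return PTR_ERR(ind_tbl);
 */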

/*
 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
 * @wq_ind_table: The Indirection Table to destroy.
 */
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
{
	int err, i;
	u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
	struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;

	if (atomic_read(&rwq_ind_table->usecnt))
		return -EBUSY;

	err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
	if (!err) {
		for (i = 0; i < table_size; i++)
			atomic_dec(&ind_tbl[i]->usecnt);
	}

	return err;
}
EXPORT_SYMBOL(ib_destroy_rwq_ind_table);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state)
{
	if (!device->set_vf_link_state)
		return -ENOSYS;

	return device->set_vf_link_state(device, vf, port, state);
}
EXPORT_SYMBOL(ib_set_vf_link_state);

int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info)
{
	if (!device->get_vf_config)
		return -ENOSYS;

	return device->get_vf_config(device, vf, port, info);
}
EXPORT_SYMBOL(ib_get_vf_config);

int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats)
{
	if (!device->get_vf_stats)
		return -ENOSYS;

	return device->get_vf_stats(device, vf, port, stats);
}
EXPORT_SYMBOL(ib_get_vf_stats);

int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type)
{
	if (!device->set_vf_guid)
		return -ENOSYS;

	return device->set_vf_guid(device, vf, port, guid, type);
}
EXPORT_SYMBOL(ib_set_vf_guid);

/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set it as the memory region.
 * @mr:            memory region
 * @sg:            dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset:     offset in bytes into sg
 * @page_size:     page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must either be aligned to page_size or virtually
 *   contiguous to the previous element. In case an sg element has a
 *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
 *   then only max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
 *   constraints holds and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size)
{
	if (unlikely(!mr->device->map_mr_sg))
		return -ENOSYS;

	mr->page_size = page_size;

	return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg);
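
/*
 * Illustrative usage sketch (not part of this file): registering a DMA
 * mapped scatterlist through a previously allocated IB_MR_TYPE_MEM_REG MR
 * and posting the registration work request.  "qp", "sgl" and "nents" are
 * assumed to exist; error handling is abbreviated.
 *
 *	struct ib_reg_wr reg_wr = {};
 *	struct ib_send_wr *bad_wr;
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	if (n != nents)
 *		return n < 0 ? n : -EINVAL;
 *
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr = mr;
 *	reg_wr.key = mr->rkey;
 *	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *	n = ib_post_send(qp, &reg_wr.wr, &bad_wr);
 */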

/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr:            memory region
 * @sgl:           dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset_p:   IN:  start offset in bytes into sg
 *                 OUT: offset in bytes for element n of the sg of the first
 *                      byte that has not been processed where n is the return
 *                      value of this function.
 * @set_page:      driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
{
	struct scatterlist *sg;
	u64 last_end_dma_addr = 0;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	unsigned int last_page_off = 0;
	u64 page_mask = ~((u64)mr->page_size - 1);
	int i, ret;

	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
		return -EINVAL;

	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
	mr->length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		u64 dma_addr = sg_dma_address(sg) + sg_offset;
		u64 prev_addr = dma_addr;
		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
		u64 end_dma_addr = dma_addr + dma_len;
		u64 page_addr = dma_addr & page_mask;

		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
			if (last_end_dma_addr != dma_addr)
				break;

			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
			goto next_page;
		}

		do {
			ret = set_page(mr, page_addr);
			if (unlikely(ret < 0)) {
				sg_offset = prev_addr - sg_dma_address(sg);
				mr->length += prev_addr - dma_addr;
				if (sg_offset_p)
					*sg_offset_p = sg_offset;
				return i || sg_offset ? i : ret;
			}
			prev_addr = page_addr;
next_page:
			page_addr += mr->page_size;
		} while (page_addr < end_dma_addr);

		mr->length += dma_len;
		last_end_dma_addr = end_dma_addr;
		last_page_off = end_dma_addr & ~page_mask;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = 0;
	return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);

struct ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
						cqe);

	complete(&cqe->done);
}

/*
 * Post a WR and block until its completion is reaped for the SQ.
 */
static void __ib_drain_sq(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe sdrain;
	struct ib_send_wr *bad_swr;
	struct ib_rdma_wr swr = {
		.wr = {
			.opcode	= IB_WR_RDMA_WRITE,
			.wr_cqe	= &sdrain.cqe,
		},
	};
	int ret;

	if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
		WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
		return;
	}

	sdrain.cqe.done = ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	ret = ib_post_send(qp, &swr.wr, &bad_swr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	wait_for_completion(&sdrain.done);
}

/*
 * Post a WR and block until its completion is reaped for the RQ.
 */
static void __ib_drain_rq(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {}, *bad_rwr;
	int ret;

	if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
		WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
		return;
	}

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	ret = ib_post_recv(qp, &rwr, &bad_rwr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	wait_for_completion(&rdrain.done);
}

/**
 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_sq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and SQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_sq(struct ib_qp *qp)
{
	if (qp->device->drain_sq)
		qp->device->drain_sq(qp);
	else
		__ib_drain_sq(qp);
}
EXPORT_SYMBOL(ib_drain_sq);

/**
 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_rq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and RQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_rq(struct ib_qp *qp)
{
	if (qp->device->drain_rq)
		qp->device->drain_rq(qp);
	else
		__ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_rq);

/**
 * ib_drain_qp() - Block until all CQEs have been consumed by the
 *		   application on both the RQ and SQ.
 * @qp:            queue pair to drain
 *
 * The caller must:
 *
 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
 * and completions.
 *
 * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_qp(struct ib_qp *qp)
{
	ib_drain_sq(qp);
	if (!qp->srq)
		ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
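
/*
 * Illustrative usage sketch (not part of this file): draining a QP before
 * connection teardown so that no completion can fire after the resources
 * are freed.  Assumes the CQs were allocated with ib_alloc_cq() and a
 * non-IB_POLL_DIRECT poll context, as required above; error handling is
 * abbreviated.
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 *	ib_free_cq(cq);
 */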