/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <machine/in_cksum.h>

#include "core_priv.h"

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR] = "CQ error",
	[IB_EVENT_QP_FATAL] = "QP fatal error",
	[IB_EVENT_QP_REQ_ERR] = "QP request error",
	[IB_EVENT_QP_ACCESS_ERR] = "QP access error",
	[IB_EVENT_COMM_EST] = "communication established",
	[IB_EVENT_SQ_DRAINED] = "send queue drained",
	[IB_EVENT_PATH_MIG] = "path migration successful",
	[IB_EVENT_PATH_MIG_ERR] = "path migration error",
	[IB_EVENT_DEVICE_FATAL] = "device fatal error",
	[IB_EVENT_PORT_ACTIVE] = "port active",
	[IB_EVENT_PORT_ERR] = "port error",
	[IB_EVENT_LID_CHANGE] = "LID change",
	[IB_EVENT_PKEY_CHANGE] = "P_key change",
	[IB_EVENT_SM_CHANGE] = "SM change",
	[IB_EVENT_SRQ_ERR] = "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER] = "client reregister",
	[IB_EVENT_GID_CHANGE] = "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);

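/*
 * Usage sketch (illustrative only; "my_qp_event_handler" and the log
 * format are assumptions, not part of this file): ib_event_msg() is
 * typically used when logging asynchronous events from a handler that
 * was installed through the event_handler field of a QP/CQ/SRQ init
 * attribute structure.
 *
 *	static void my_qp_event_handler(struct ib_event *event, void *context)
 *	{
 *		pr_info("%s: async event \"%s\"\n",
 *		    event->device->name, ib_event_msg(event->event));
 *	}
 */
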
static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS] = "success",
	[IB_WC_LOC_LEN_ERR] = "local length error",
	[IB_WC_LOC_QP_OP_ERR] = "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error",
	[IB_WC_LOC_PROT_ERR] = "local protection error",
	[IB_WC_WR_FLUSH_ERR] = "WR flushed",
	[IB_WC_MW_BIND_ERR] = "memory management operation error",
	[IB_WC_BAD_RESP_ERR] = "bad response error",
	[IB_WC_LOC_ACCESS_ERR] = "local access error",
	[IB_WC_REM_INV_REQ_ERR] = "invalid request error",
	[IB_WC_REM_ACCESS_ERR] = "remote access error",
	[IB_WC_REM_OP_ERR] = "remote operation error",
	[IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR] = "operation aborted",
	[IB_WC_INV_EECN_ERR] = "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state",
	[IB_WC_FATAL_ERR] = "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR] = "response timeout error",
	[IB_WC_GENERAL_ERR] = "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);

__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 1;
	case IB_RATE_5_GBPS: return 2;
	case IB_RATE_10_GBPS: return 4;
	case IB_RATE_20_GBPS: return 8;
	case IB_RATE_30_GBPS: return 12;
	case IB_RATE_40_GBPS: return 16;
	case IB_RATE_60_GBPS: return 24;
	case IB_RATE_80_GBPS: return 32;
	case IB_RATE_120_GBPS: return 48;
	case IB_RATE_28_GBPS: return 11;
	case IB_RATE_50_GBPS: return 20;
	case IB_RATE_400_GBPS: return 160;
	case IB_RATE_600_GBPS: return 240;
	default: return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1: return IB_RATE_2_5_GBPS;
	case 2: return IB_RATE_5_GBPS;
	case 4: return IB_RATE_10_GBPS;
	case 8: return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	case 6: return IB_RATE_14_GBPS;
	case 22: return IB_RATE_56_GBPS;
	case 45: return IB_RATE_112_GBPS;
	case 67: return IB_RATE_168_GBPS;
	case 10: return IB_RATE_25_GBPS;
	case 40: return IB_RATE_100_GBPS;
	case 80: return IB_RATE_200_GBPS;
	case 120: return IB_RATE_300_GBPS;
	case 11: return IB_RATE_28_GBPS;
	case 20: return IB_RATE_50_GBPS;
	case 160: return IB_RATE_400_GBPS;
	case 240: return IB_RATE_600_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

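/*
 * Note on units (a sketch using values from the tables above): the
 * "mult" handled by ib_rate_to_mult()/mult_to_ib_rate() expresses the
 * rate as a multiple of the 2.5 Gb/s (1x SDR) base rate, while
 * ib_rate_to_mbps() below returns the signalling rate in Mb/s.
 *
 *	int mult = ib_rate_to_mult(IB_RATE_40_GBPS);	// 16, i.e. 16 * 2.5 Gb/s
 *	enum ib_rate rate = mult_to_ib_rate(mult);	// back to IB_RATE_40_GBPS
 *	int mbps = ib_rate_to_mbps(IB_RATE_25_GBPS);	// 25781 Mb/s
 */
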
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS: return 5000;
	case IB_RATE_10_GBPS: return 10000;
	case IB_RATE_20_GBPS: return 20000;
	case IB_RATE_30_GBPS: return 30000;
	case IB_RATE_40_GBPS: return 40000;
	case IB_RATE_60_GBPS: return 60000;
	case IB_RATE_80_GBPS: return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS: return 14062;
	case IB_RATE_56_GBPS: return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS: return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	case IB_RATE_28_GBPS: return 28125;
	case IB_RATE_50_GBPS: return 53125;
	case IB_RATE_400_GBPS: return 425000;
	case IB_RATE_600_GBPS: return 637500;
	default: return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller)
{
	struct ib_pd *pd;
	int mr_access_flags = 0;

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);
	pd->flags = flags;

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else
		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		pr_warn("%s: enabling unsafe global rkey\n", caller);
		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	}

	if (mr_access_flags) {
		struct ib_mr *mr;

		mr = pd->device->get_dma_mr(pd, mr_access_flags);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return ERR_CAST(mr);
		}

		mr->device = pd->device;
		mr->pd = pd;
		mr->uobject = NULL;
		mr->need_inval = false;

		pd->__internal_mr = mr;

		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
			pd->local_dma_lkey = pd->__internal_mr->lkey;

		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
	}

	return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->__internal_mr) {
		ret = pd->device->dereg_mr(pd->__internal_mr);
		WARN_ON(ret);
		pd->__internal_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	   requires the caller to guarantee we can't race here. */
	WARN_ON(atomic_read(&pd->usecnt));

	/* Making dealloc_pd a void return is a WIP, no driver should return
	   an error here. */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);

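/*
 * Usage sketch (illustrative; error handling is abbreviated and the
 * "device" variable is assumed to come from the caller's ib_client add
 * callback): a consumer normally allocates one PD per device and frees
 * it only after every QP, MR, AH, etc. created on it has been
 * destroyed.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	// ... create QPs/MRs/AHs on "pd", use them, destroy them ...
 *	ib_dealloc_pd(pd);
 */
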
/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr, NULL);

	if (!IS_ERR(ah)) {
		ah->device = pd->device;
		ah->pd = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

static int ib_get_header_version(const union rdma_network_hdr *hdr)
{
	const struct ip *ip4h = (const struct ip *)&hdr->roce4grh;
	struct ip ip4h_checked;
	const struct ip6_hdr *ip6h = (const struct ip6_hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if ((ip6h->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION)
		return (ip4h->ip_v == 4) ? 4 : 0;
	/* version may be 6 or 4 because the first 20 bytes could be garbled */

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ip_hl != 5)
		return 6;

	/* Verify checksum.
	 * We can't write on scattered buffers so we need to copy to
	 * temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.ip_sum = 0;
#if defined(INET) || defined(INET6)
	ip4h_checked.ip_sum = in_cksum_hdr(&ip4h_checked);
#endif
	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->ip_sum == ip4h_checked.ip_sum)
		return 4;
	return 6;
}

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						     u8 port_num,
						     const struct ib_grh *grh)
{
	int grh_version;

	if (rdma_protocol_ib(device, port_num))
		return RDMA_NETWORK_IB;

	grh_version = ib_get_header_version((const union rdma_network_hdr *)grh);

	if (grh_version == 4)
		return RDMA_NETWORK_IPV4;

	if (grh->next_hdr == IPPROTO_UDP)
		return RDMA_NETWORK_IPV6;

	return RDMA_NETWORK_ROCE_V1;
}

struct find_gid_index_context {
	u16 vlan_id;
	enum ib_gid_type gid_type;
};

/*
 * This function will return true only if an inspected GID index
 * matches the request based on the GID type and VLAN configuration
 */
static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	u16 vlan_diff;
	struct find_gid_index_context *ctx =
		(struct find_gid_index_context *)context;

	if (ctx->gid_type != gid_attr->gid_type)
		return false;

	/*
	 * The following will verify:
	 * 1. VLAN ID matching for VLAN tagged requests.
	 * 2. prio-tagged/untagged to prio-tagged/untagged matching.
	 *
	 * This XOR is valid, since 0x0 < vlan_id < 0x0FFF.
	 */
	vlan_diff = rdma_vlan_dev_vlan_id(gid_attr->ndev) ^ ctx->vlan_id;

	return (vlan_diff == 0x0000 || vlan_diff == 0xFFFF);
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
				   u16 vlan_id, const union ib_gid *sgid,
				   enum ib_gid_type gid_type,
				   u16 *gid_index)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id,
						 .gid_type = gid_type};

	return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				     &context, gid_index);
}

static int get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
				  enum rdma_network_type net_type,
				  union ib_gid *sgid, union ib_gid *dgid)
{
	struct sockaddr_in src_in;
	struct sockaddr_in dst_in;
	__be32 src_saddr, dst_saddr;

	if (!sgid || !dgid)
		return -EINVAL;

	if (net_type == RDMA_NETWORK_IPV4) {
		memcpy(&src_in.sin_addr.s_addr,
		       &hdr->roce4grh.ip_src, 4);
		memcpy(&dst_in.sin_addr.s_addr,
		       &hdr->roce4grh.ip_dst, 4);
		src_saddr = src_in.sin_addr.s_addr;
		dst_saddr = dst_in.sin_addr.s_addr;
		ipv6_addr_set_v4mapped(src_saddr,
				       (struct in6_addr *)sgid);
		ipv6_addr_set_v4mapped(dst_saddr,
				       (struct in6_addr *)dgid);
		return 0;
	} else if (net_type == RDMA_NETWORK_IPV6 ||
		   net_type == RDMA_NETWORK_IB) {
		*dgid = hdr->ibgrh.dgid;
		*sgid = hdr->ibgrh.sgid;
		return 0;
	} else {
		return -EINVAL;
	}
}

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index = 0;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
	}
	ret = get_gids_from_rdma_hdr((const union rdma_network_hdr *)grh, net_type,
				     &sgid, &dgid);
	if (ret)
		return ret;

	if (rdma_protocol_roce(device, port_num)) {
		struct ib_gid_attr dgid_attr;
		const u16 vlan_id = (wc->wc_flags & IB_WC_WITH_VLAN) ?
				wc->vlan_id : 0xffff;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		ret = get_sgid_index_from_eth(device, port_num, vlan_id,
					      &dgid, gid_type, &gid_index);
		if (ret)
			return ret;

		ret = ib_get_cached_gid(device, port_num, gid_index, &dgid, &dgid_attr);
		if (ret)
			return ret;

		if (dgid_attr.ndev == NULL)
			return -ENODEV;

		ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid, ah_attr->dmac,
						   dgid_attr.ndev, &hoplimit);

		dev_put(dgid_attr.ndev);
		if (ret)
			return ret;
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = sgid;

		if (!rdma_cap_eth_ah(device, port_num)) {
			if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
				ret = ib_find_cached_gid_by_port(device, &dgid,
								 IB_GID_TYPE_IB,
								 port_num, NULL,
								 &gid_index);
				if (ret)
					return ret;
			}
		}

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = hoplimit;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

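/*
 * Usage sketch (illustrative; "recv_buf" is assumed to be the start of
 * the UD receive buffer, where the 40-byte GRH sits when IB_WC_GRH is
 * set, and "port_num" is the port the receiving QP is bound to): a UD
 * responder typically builds the reply address handle straight from
 * the completion that carried the request.
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, (struct ib_grh *)recv_buf, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	// ... post the reply to wc->src_qp using "ah" ...
 *	ib_destroy_ah(ah);
 */
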
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device = pd->device;
		srq->pd = pd;
		srq->uobject = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context = srq_init_attr->srq_context;
		srq->srq_type = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
				      struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *real_qp = qp;

	qp->event_handler = __ib_shared_qp_event_handler;
	qp->qp_context = qp;
	qp->pd = NULL;
	qp->send_cq = qp->recv_cq = NULL;
	qp->srq = NULL;
	qp->xrcd = qp_init_attr->xrcd;
	atomic_inc(&qp_init_attr->xrcd->usecnt);
	INIT_LIST_HEAD(&qp->open_list);

	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
			  qp_init_attr->qp_context);
	if (!IS_ERR(qp))
		__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
	else
		real_qp->device->destroy_qp(real_qp);
	return qp;
}

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
	struct ib_qp *qp;

	if (qp_init_attr->rwq_ind_tbl &&
	    (qp_init_attr->recv_cq ||
	    qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
	    qp_init_attr->cap.max_recv_sge))
		return ERR_PTR(-EINVAL);

	qp = device->create_qp(pd, qp_init_attr, NULL);
	if (IS_ERR(qp))
		return qp;

	qp->device = device;
	qp->real_qp = qp;
	qp->uobject = NULL;
	qp->qp_type = qp_init_attr->qp_type;
	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;

	atomic_set(&qp->usecnt, 0);
	spin_lock_init(&qp->mr_lock);

	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
		return ib_create_xrc_qp(qp, qp_init_attr);

	qp->event_handler = qp_init_attr->event_handler;
	qp->qp_context = qp_init_attr->qp_context;
	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
		qp->recv_cq = NULL;
		qp->srq = NULL;
	} else {
		qp->recv_cq = qp_init_attr->recv_cq;
		if (qp_init_attr->recv_cq)
			atomic_inc(&qp_init_attr->recv_cq->usecnt);
		qp->srq = qp_init_attr->srq;
		if (qp->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	qp->pd = pd;
	qp->send_cq = qp_init_attr->send_cq;
	qp->xrcd = NULL;

	atomic_inc(&pd->usecnt);
	if (qp_init_attr->send_cq)
		atomic_inc(&qp_init_attr->send_cq->usecnt);
	if (qp_init_attr->rwq_ind_tbl)
		atomic_inc(&qp->rwq_ind_tbl->usecnt);

	/*
	 * Note: all hw drivers guarantee that max_send_sge is lower than
	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
	 * max_send_sge <= max_sge_rd.
	 */
	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
				 device->attrs.max_sge_rd);

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);

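/*
 * Usage sketch (illustrative; the CQ pointers, WR/SGE limits and the
 * callback name are assumptions, not requirements of this API): a
 * kernel consumer fills an ib_qp_init_attr and lets the device
 * validate the capabilities it asked for.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.event_handler = my_qp_event_handler,
 *		.send_cq = send_cq,
 *		.recv_cq = recv_cq,
 *		.cap = {
 *			.max_send_wr = 64,
 *			.max_recv_wr = 64,
 *			.max_send_sge = 2,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */
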
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT] = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_INIT] = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_UC] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR] = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_RC] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			},
		},
	},
	[IB_QPS_RTR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_RTS] = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD] = IB_QP_SQ_PSN,
				[IB_QPT_UC] = IB_QP_SQ_PSN,
				[IB_QPT_RC] = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_RTS] = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD] = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_RTS] = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD] = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC] = (IB_QP_AV |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_RTS] = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 }
	}
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state < 0 || cur_state > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);

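/*
 * Usage sketch (illustrative; the attribute values are assumptions and
 * the remote QPN/PSN/path data would normally come from a CM or
 * address-resolution exchange): the table above encodes which masks
 * ib_modify_qp() accepts for each transition.  Bringing an RC QP from
 * RESET to RTS, for example, takes three calls matching the RC
 * req_param entries:
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_INIT, .pkey_index = 0,
 *		.port_num = 1, .qp_access_flags = IB_ACCESS_REMOTE_WRITE };
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *	    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 *
 *	attr.qp_state = IB_QPS_RTR;
 *	attr.path_mtu = IB_MTU_1024;
 *	attr.dest_qp_num = remote_qpn;
 *	attr.rq_psn = remote_psn;
 *	attr.max_dest_rd_atomic = 1;
 *	attr.min_rnr_timer = 12;
 *	attr.ah_attr = remote_ah_attr;
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_AV |
 *	    IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN |
 *	    IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
 *
 *	attr.qp_state = IB_QPS_RTS;
 *	attr.timeout = 14;
 *	attr.retry_cnt = 7;
 *	attr.rnr_retry = 7;
 *	attr.sq_psn = local_psn;
 *	attr.max_rd_atomic = 1;
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_TIMEOUT |
 *	    IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
 *	    IB_QP_MAX_QP_RD_ATOMIC);
 */
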
int ib_resolve_eth_dmac(struct ib_device *device,
			struct ib_ah_attr *ah_attr)
{
	struct ib_gid_attr sgid_attr;
	union ib_gid sgid;
	int hop_limit;
	int ret;

	if (ah_attr->port_num < rdma_start_port(device) ||
	    ah_attr->port_num > rdma_end_port(device))
		return -EINVAL;

	if (!rdma_cap_eth_ah(device, ah_attr->port_num))
		return 0;

	if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
		if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
			__be32 addr = 0;

			memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
			ip_eth_mc_map(addr, (char *)ah_attr->dmac);
		} else {
			ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
					(char *)ah_attr->dmac);
		}
		return 0;
	}

	ret = ib_query_gid(device,
			   ah_attr->port_num,
			   ah_attr->grh.sgid_index,
			   &sgid, &sgid_attr);
	if (ret != 0)
		return (ret);
	if (!sgid_attr.ndev)
		return -ENXIO;

	ret = rdma_addr_find_l2_eth_by_grh(&sgid,
					   &ah_attr->grh.dgid,
					   ah_attr->dmac,
					   sgid_attr.ndev, &hop_limit);
	dev_put(sgid_attr.ndev);

	ah_attr->grh.hop_limit = hop_limit;
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_dmac);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	if (qp_attr_mask & IB_QP_AV) {
		int ret;

		ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);
		if (ret)
			return ret;
	}

	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_rwq_ind_table *ind_tbl;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;
	ind_tbl = qp->rwq_ind_tbl;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
		if (ind_tbl)
			atomic_dec(&ind_tbl->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device = device;
		cq->uobject = NULL;
		cq->comp_handler = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd = mr->pd;
	int ret;

	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 *
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device = pd->device;
		mr->pd = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		mr->need_inval = false;
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);

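/*
 * Usage sketch (illustrative; the 32-entry limit is an arbitrary
 * example): a fast-registration consumer allocates the MR up front and
 * later maps an SG list into it with ib_map_mr_sg() before posting a
 * registration work request.
 *
 *	struct ib_mr *mr;
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	// ... ib_map_mr_sg() + registration work request, I/O, invalidate ...
 *	ib_dereg_mr(mr);
 */
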
/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
{
	struct ib_qp_init_attr init_attr = {};
	struct ib_qp_attr attr = {};
	int num_eth_ports = 0;
	int port;

	/* If QP state >= init, it is assigned to a port and we can check this
	 * port only.
	 */
	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
		if (attr.qp_state >= IB_QPS_INIT) {
			if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
			    IB_LINK_LAYER_INFINIBAND)
				return true;
			goto lid_check;
		}
	}

	/* Can't get a quick answer, iterate over all ports */
	for (port = 0; port < qp->device->phys_port_cnt; port++)
		if (rdma_port_get_link_layer(qp->device, port) !=
		    IB_LINK_LAYER_INFINIBAND)
			num_eth_ports++;

	/* If we have at least one Ethernet port, RoCE annex declares that
	 * multicast LID should be ignored. We can't tell at this step if the
	 * QP belongs to an IB or Ethernet port.
	 */
	if (num_eth_ports)
		return true;

	/* If all the ports are IB, we can check according to IB spec. */
lid_check:
	return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
		 lid == be16_to_cpu(IB_LID_PERMISSIVE));
}

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;

	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;

	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);

struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

/**
 * ib_create_wq - Creates a WQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the WQ.
 * @wq_init_attr: A list of initial attributes required to create the
 * WQ. If WQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created WQ.
 *
 * wq_init_attr->max_wr and wq_init_attr->max_sge determine
 * the requested size of the WQ, and set to the actual values allocated
 * on return.
 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
 * at least as large as the requested values.
 */
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *wq_attr)
{
	struct ib_wq *wq;

	if (!pd->device->create_wq)
		return ERR_PTR(-ENOSYS);

	wq = pd->device->create_wq(pd, wq_attr, NULL);
	if (!IS_ERR(wq)) {
		wq->event_handler = wq_attr->event_handler;
		wq->wq_context = wq_attr->wq_context;
		wq->wq_type = wq_attr->wq_type;
		wq->cq = wq_attr->cq;
		wq->device = pd->device;
		wq->pd = pd;
		wq->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_inc(&wq_attr->cq->usecnt);
		atomic_set(&wq->usecnt, 0);
	}
	return wq;
}
EXPORT_SYMBOL(ib_create_wq);

/**
 * ib_destroy_wq - Destroys the specified WQ.
 * @wq: The WQ to destroy.
 */
int ib_destroy_wq(struct ib_wq *wq)
{
	int err;
	struct ib_cq *cq = wq->cq;
	struct ib_pd *pd = wq->pd;

	if (atomic_read(&wq->usecnt))
		return -EBUSY;

	err = wq->device->destroy_wq(wq);
	if (!err) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&cq->usecnt);
	}
	return err;
}
EXPORT_SYMBOL(ib_destroy_wq);

/**
 * ib_modify_wq - Modifies the specified WQ.
 * @wq: The WQ to modify.
 * @wq_attr: On input, specifies the WQ attributes to modify.
 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
 *   are being modified.
 * On output, the current values of selected WQ attributes are returned.
 */
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		 u32 wq_attr_mask)
{
	int err;

	if (!wq->device->modify_wq)
		return -ENOSYS;

	err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
	return err;
}
EXPORT_SYMBOL(ib_modify_wq);

/*
 * ib_create_rwq_ind_table - Creates a RQ Indirection Table.
 * @device: The device on which to create the rwq indirection table.
 * @ib_rwq_ind_table_init_attr: A list of initial attributes required to
 * create the Indirection Table.
 *
 * Note: The life time of ib_rwq_ind_table_init_attr->ind_tbl is not less
 *	than the created ib_rwq_ind_table object and the caller is responsible
 *	for its memory allocation/free.
 */
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr *init_attr)
{
	struct ib_rwq_ind_table *rwq_ind_table;
	int i;
	u32 table_size;

	if (!device->create_rwq_ind_table)
		return ERR_PTR(-ENOSYS);

	table_size = (1 << init_attr->log_ind_tbl_size);
	rwq_ind_table = device->create_rwq_ind_table(device,
						     init_attr, NULL);
	if (IS_ERR(rwq_ind_table))
		return rwq_ind_table;

	rwq_ind_table->ind_tbl = init_attr->ind_tbl;
	rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
	rwq_ind_table->device = device;
	rwq_ind_table->uobject = NULL;
	atomic_set(&rwq_ind_table->usecnt, 0);

	for (i = 0; i < table_size; i++)
		atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);

	return rwq_ind_table;
}
EXPORT_SYMBOL(ib_create_rwq_ind_table);

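/*
 * Usage sketch (illustrative; a two-entry RSS table with assumed WQ
 * sizes and an assumed "rx_cq"): receive work queues are created
 * individually and then grouped by an indirection table, which is
 * later referenced from the rwq_ind_tbl field of ib_qp_init_attr when
 * creating the RSS QP.
 *
 *	struct ib_wq_init_attr wq_attr = {
 *		.wq_type = IB_WQT_RQ,
 *		.max_wr = 64,
 *		.max_sge = 1,
 *		.cq = rx_cq,
 *	};
 *	struct ib_wq *wqs[2];
 *	struct ib_rwq_ind_table_init_attr ind_attr = {
 *		.log_ind_tbl_size = 1,		// 1 << 1 == 2 entries
 *		.ind_tbl = wqs,
 *	};
 *	struct ib_rwq_ind_table *ind_tbl;
 *
 *	wqs[0] = ib_create_wq(pd, &wq_attr);
 *	wqs[1] = ib_create_wq(pd, &wq_attr);
 *	ind_tbl = ib_create_rwq_ind_table(device, &ind_attr);
 */
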
/*
 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
 * @wq_ind_table: The Indirection Table to destroy.
 */
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
{
	int err, i;
	u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
	struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;

	if (atomic_read(&rwq_ind_table->usecnt))
		return -EBUSY;

	err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
	if (!err) {
		for (i = 0; i < table_size; i++)
			atomic_dec(&ind_tbl[i]->usecnt);
	}

	return err;
}
EXPORT_SYMBOL(ib_destroy_rwq_ind_table);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state)
{
	if (!device->set_vf_link_state)
		return -ENOSYS;

	return device->set_vf_link_state(device, vf, port, state);
}
EXPORT_SYMBOL(ib_set_vf_link_state);

int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info)
{
	if (!device->get_vf_config)
		return -ENOSYS;

	return device->get_vf_config(device, vf, port, info);
}
EXPORT_SYMBOL(ib_get_vf_config);

int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats)
{
	if (!device->get_vf_stats)
		return -ENOSYS;

	return device->get_vf_stats(device, vf, port, stats);
}
EXPORT_SYMBOL(ib_get_vf_stats);

int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type)
{
	if (!device->set_vf_guid)
		return -ENOSYS;

	return device->set_vf_guid(device, vf, port, guid, type);
}
EXPORT_SYMBOL(ib_set_vf_guid);

/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set it on the memory region.
 * @mr:            memory region
 * @sg:            dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset:     offset in bytes into sg
 * @page_size:     page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must either be aligned to page_size or virtually
 *   contiguous to the previous element. In case an sg element has a
 *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
 *   then only max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
 *   constraints holds and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size)
{
	if (unlikely(!mr->device->map_mr_sg))
		return -ENOSYS;

	mr->page_size = page_size;

	return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg);

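/*
 * Usage sketch (illustrative; "sgl"/"nents" are assumed to describe an
 * already dma-mapped scatterlist and error handling is abbreviated):
 * the returned count must be checked against the number of entries the
 * caller wanted mapped before the MR is registered with a registration
 * work request.
 *
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	if (n != nents)
 *		return n < 0 ? n : -EINVAL;	// partial mapping
 *	// mr->iova and mr->length now describe the mapping; post the
 *	// registration work request next.
 */
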
/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr:            memory region
 * @sgl:           dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset_p:   IN:  start offset in bytes into sg
 *                 OUT: offset in bytes for element n of the sg of the first
 *                      byte that has not been processed where n is the return
 *                      value of this function.
 * @set_page:      driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
{
	struct scatterlist *sg;
	u64 last_end_dma_addr = 0;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	unsigned int last_page_off = 0;
	u64 page_mask = ~((u64)mr->page_size - 1);
	int i, ret;

	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
		return -EINVAL;

	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
	mr->length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		u64 dma_addr = sg_dma_address(sg) + sg_offset;
		u64 prev_addr = dma_addr;
		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
		u64 end_dma_addr = dma_addr + dma_len;
		u64 page_addr = dma_addr & page_mask;

		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
			if (last_end_dma_addr != dma_addr)
				break;

			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
			goto next_page;
		}

		do {
			ret = set_page(mr, page_addr);
			if (unlikely(ret < 0)) {
				sg_offset = prev_addr - sg_dma_address(sg);
				mr->length += prev_addr - dma_addr;
				if (sg_offset_p)
					*sg_offset_p = sg_offset;
				return i || sg_offset ? i : ret;
			}
			prev_addr = page_addr;
next_page:
			page_addr += mr->page_size;
		} while (page_addr < end_dma_addr);

		mr->length += dma_len;
		last_end_dma_addr = end_dma_addr;
		last_page_off = end_dma_addr & ~page_mask;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = 0;
	return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);

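/*
 * Usage sketch (illustrative; "struct my_mr", to_my_mr() and the pages
 * array are hypothetical driver-private state, not part of this API):
 * a HW driver's map_mr_sg hook typically wraps ib_sg_to_pages() with a
 * set_page callback that appends each page address to its own list.
 *
 *	static int my_set_page(struct ib_mr *ibmr, u64 addr)
 *	{
 *		struct my_mr *mr = to_my_mr(ibmr);
 *
 *		if (unlikely(mr->npages == mr->max_pages))
 *			return -ENOMEM;
 *		mr->pages[mr->npages++] = addr;
 *		return 0;
 *	}
 *
 *	static int my_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 *	    int sg_nents, unsigned int *sg_offset)
 *	{
 *		to_my_mr(ibmr)->npages = 0;
 *		return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, my_set_page);
 *	}
 */
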
struct ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
						cqe);

	complete(&cqe->done);
}

/*
 * Post a WR and block until its completion is reaped for the SQ.
 */
static void __ib_drain_sq(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe sdrain;
	struct ib_send_wr *bad_swr;
	struct ib_rdma_wr swr = {
		.wr = {
			.opcode = IB_WR_RDMA_WRITE,
			.wr_cqe = &sdrain.cqe,
		},
	};
	int ret;

	if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
		WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
		return;
	}

	sdrain.cqe.done = ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	ret = ib_post_send(qp, &swr.wr, &bad_swr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	wait_for_completion(&sdrain.done);
}

/*
 * Post a WR and block until its completion is reaped for the RQ.
 */
static void __ib_drain_rq(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {}, *bad_rwr;
	int ret;

	if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
		WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
		return;
	}

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	ret = ib_post_recv(qp, &rwr, &bad_rwr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	wait_for_completion(&rdrain.done);
}

/**
 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_sq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and SQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_sq(struct ib_qp *qp)
{
	if (qp->device->drain_sq)
		qp->device->drain_sq(qp);
	else
		__ib_drain_sq(qp);
}
EXPORT_SYMBOL(ib_drain_sq);

/**
 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_rq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and RQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_rq(struct ib_qp *qp)
{
	if (qp->device->drain_rq)
		qp->device->drain_rq(qp);
	else
		__ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_rq);

/**
 * ib_drain_qp() - Block until all CQEs have been consumed by the
 *		   application on both the RQ and SQ.
 * @qp:            queue pair to drain
 *
 * The caller must:
 *
 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
 * and completions.
 *
 * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_qp(struct ib_qp *qp)
{
	ib_drain_sq(qp);
	if (!qp->srq)
		ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
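
/*
 * Usage sketch (illustrative; assumes the QP's CQs were created with
 * ib_alloc_cq() and that the caller has already stopped posting new
 * work requests): a typical teardown path drains before destroying so
 * that no completion handler can run against freed context.
 *
 *	ib_drain_qp(qp);	// moves the QP to error and flushes SQ/RQ
 *	ib_destroy_qp(qp);
 */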