/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $FreeBSD$
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <machine/in_cksum.h>

#include "core_priv.h"

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);

static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);

__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller)
{
	struct ib_pd *pd;
	int mr_access_flags = 0;

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);
	pd->flags = flags;

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else
		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		pr_warn("%s: enabling unsafe global rkey\n", caller);
		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	}

	if (mr_access_flags) {
		struct ib_mr *mr;

		mr = pd->device->get_dma_mr(pd, mr_access_flags);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return ERR_CAST(mr);
		}

		mr->device = pd->device;
		mr->pd = pd;
		mr->uobject = NULL;
		mr->need_inval = false;

		pd->__internal_mr = mr;

		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
			pd->local_dma_lkey = pd->__internal_mr->lkey;

		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
	}

	return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);
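
/*
 * Example (illustrative sketch, not compiled here): a kernel ULP would
 * normally go through the ib_alloc_pd() wrapper declared in ib_verbs.h,
 * which resolves to __ib_alloc_pd() above.  The "dev" pointer and error
 * propagation are assumed to come from the caller's context.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(dev, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *
 *	... create CQs, QPs and MRs against "pd" ...
 *
 *	ib_dealloc_pd(pd);	(only after all of those are destroyed)
 */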

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->__internal_mr) {
		ret = pd->device->dereg_mr(pd->__internal_mr);
		WARN_ON(ret);
		pd->__internal_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	 * requires the caller to guarantee we can't race here.
	 */
	WARN_ON(atomic_read(&pd->usecnt));

	/* Making dealloc_pd a void return is a WIP, no driver should return
	 * an error here.
	 */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr, NULL);

	if (!IS_ERR(ah)) {
		ah->device = pd->device;
		ah->pd = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

static int ib_get_header_version(const union rdma_network_hdr *hdr)
{
	const struct ip *ip4h = (const struct ip *)&hdr->roce4grh;
	struct ip ip4h_checked;
	const struct ip6_hdr *ip6h = (const struct ip6_hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if ((ip6h->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION)
		return (ip4h->ip_v == 4) ? 4 : 0;
	/* version may be 6 or 4 because the first 20 bytes could be garbled */

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ip_hl != 5)
		return 6;

	/* Verify checksum.
	 * We can't write on scattered buffers so we need to copy to
	 * temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.ip_sum = 0;
#if defined(INET) || defined(INET6)
	ip4h_checked.ip_sum = in_cksum_hdr(&ip4h_checked);
#endif
	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->ip_sum == ip4h_checked.ip_sum)
		return 4;
	return 6;
}

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						     u8 port_num,
						     const struct ib_grh *grh)
{
	int grh_version;

	if (rdma_protocol_ib(device, port_num))
		return RDMA_NETWORK_IB;

	grh_version = ib_get_header_version((const union rdma_network_hdr *)grh);

	if (grh_version == 4)
		return RDMA_NETWORK_IPV4;

	if (grh->next_hdr == IPPROTO_UDP)
		return RDMA_NETWORK_IPV6;

	return RDMA_NETWORK_ROCE_V1;
}

struct find_gid_index_context {
	u16 vlan_id;
	enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx =
		(struct find_gid_index_context *)context;

	if (ctx->gid_type != gid_attr->gid_type)
		return false;

	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
	    (is_vlan_dev(gid_attr->ndev) &&
	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
		return false;

	return true;
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
				   u16 vlan_id, const union ib_gid *sgid,
				   enum ib_gid_type gid_type,
				   u16 *gid_index)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id,
						 .gid_type = gid_type};

	return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				     &context, gid_index);
}

static int get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
				  enum rdma_network_type net_type,
				  union ib_gid *sgid, union ib_gid *dgid)
{
	struct sockaddr_in src_in;
	struct sockaddr_in dst_in;
	__be32 src_saddr, dst_saddr;

	if (!sgid || !dgid)
		return -EINVAL;

	if (net_type == RDMA_NETWORK_IPV4) {
		memcpy(&src_in.sin_addr.s_addr,
		       &hdr->roce4grh.ip_src, 4);
		memcpy(&dst_in.sin_addr.s_addr,
		       &hdr->roce4grh.ip_dst, 4);
		src_saddr = src_in.sin_addr.s_addr;
		dst_saddr = dst_in.sin_addr.s_addr;
		ipv6_addr_set_v4mapped(src_saddr,
				       (struct in6_addr *)sgid);
		ipv6_addr_set_v4mapped(dst_saddr,
				       (struct in6_addr *)dgid);
		return 0;
	} else if (net_type == RDMA_NETWORK_IPV6 ||
		   net_type == RDMA_NETWORK_IB) {
		*dgid = hdr->ibgrh.dgid;
		*sgid = hdr->ibgrh.sgid;
		return 0;
	} else {
		return -EINVAL;
	}
}

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
	}
	ret = get_gids_from_rdma_hdr((const union rdma_network_hdr *)grh, net_type,
				     &sgid, &dgid);
	if (ret)
		return ret;

	if (rdma_protocol_roce(device, port_num)) {
		struct ib_gid_attr dgid_attr;
		const u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
				wc->vlan_id : 0xffff;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		ret = get_sgid_index_from_eth(device, port_num, vlan_id,
					      &dgid, gid_type, &gid_index);
		if (ret)
			return ret;

		ret = ib_get_cached_gid(device, port_num, gid_index, &dgid, &dgid_attr);
		if (ret)
			return ret;

		if (dgid_attr.ndev == NULL)
			return -ENODEV;

		ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid, ah_attr->dmac,
						   dgid_attr.ndev, &hoplimit);

		dev_put(dgid_attr.ndev);
		if (ret)
			return ret;
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = sgid;

		if (!rdma_cap_eth_ah(device, port_num)) {
			if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
				ret = ib_find_cached_gid_by_port(device, &dgid,
								 IB_GID_TYPE_IB,
								 port_num, NULL,
								 &gid_index);
				if (ret)
					return ret;
			} else {
				gid_index = 0;
			}
		}

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = hoplimit;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device = pd->device;
		srq->pd = pd;
		srq->uobject = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context = srq_init_attr->srq_context;
		srq->srq_type = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
				      struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *real_qp = qp;

	qp->event_handler = __ib_shared_qp_event_handler;
	qp->qp_context = qp;
	qp->pd = NULL;
	qp->send_cq = qp->recv_cq = NULL;
	qp->srq = NULL;
	qp->xrcd = qp_init_attr->xrcd;
	atomic_inc(&qp_init_attr->xrcd->usecnt);
	INIT_LIST_HEAD(&qp->open_list);

	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
			  qp_init_attr->qp_context);
	if (!IS_ERR(qp))
		__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
	else
		real_qp->device->destroy_qp(real_qp);
	return qp;
}

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
	struct ib_qp *qp;

	if (qp_init_attr->rwq_ind_tbl &&
	    (qp_init_attr->recv_cq ||
	     qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
	     qp_init_attr->cap.max_recv_sge))
		return ERR_PTR(-EINVAL);

	qp = device->create_qp(pd, qp_init_attr, NULL);
	if (IS_ERR(qp))
		return qp;

	qp->device = device;
	qp->real_qp = qp;
	qp->uobject = NULL;
	qp->qp_type = qp_init_attr->qp_type;
	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;

	atomic_set(&qp->usecnt, 0);
	spin_lock_init(&qp->mr_lock);

	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
		return ib_create_xrc_qp(qp, qp_init_attr);

	qp->event_handler = qp_init_attr->event_handler;
	qp->qp_context = qp_init_attr->qp_context;
	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
		qp->recv_cq = NULL;
		qp->srq = NULL;
	} else {
		qp->recv_cq = qp_init_attr->recv_cq;
		if (qp_init_attr->recv_cq)
			atomic_inc(&qp_init_attr->recv_cq->usecnt);
		qp->srq = qp_init_attr->srq;
		if (qp->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	qp->pd = pd;
	qp->send_cq = qp_init_attr->send_cq;
	qp->xrcd = NULL;

	atomic_inc(&pd->usecnt);
	if (qp_init_attr->send_cq)
		atomic_inc(&qp_init_attr->send_cq->usecnt);
	if (qp_init_attr->rwq_ind_tbl)
		atomic_inc(&qp->rwq_ind_tbl->usecnt);

	/*
	 * Note: all hw drivers guarantee that max_send_sge is lower than
	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
	 * max_send_sge <= max_sge_rd.
	 */
	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
				 device->attrs.max_sge_rd);

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);

static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV | IB_QP_PATH_MTU |
						IB_QP_DEST_QPN | IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV | IB_QP_PATH_MTU |
						IB_QP_DEST_QPN | IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV | IB_QP_PATH_MTU |
						IB_QP_DEST_QPN | IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV | IB_QP_PATH_MTU |
						IB_QP_DEST_QPN | IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			},
		},
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT | IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS | IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS | IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS | IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS | IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS | IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS | IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS | IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS | IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT | IB_QP_AV | IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX | IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT | IB_QP_AV | IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT | IB_QP_AV | IB_QP_TIMEOUT |
						IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER | IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);

int ib_resolve_eth_dmac(struct ib_device *device,
			struct ib_ah_attr *ah_attr)
{
	int ret = 0;

	if (ah_attr->port_num < rdma_start_port(device) ||
	    ah_attr->port_num > rdma_end_port(device))
		return -EINVAL;

	if (!rdma_cap_eth_ah(device, ah_attr->port_num))
		return 0;

	if (rdma_link_local_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
		rdma_get_ll_mac((struct in6_addr *)ah_attr->grh.dgid.raw,
				ah_attr->dmac);
	} else {
		union ib_gid sgid;
		struct ib_gid_attr sgid_attr;
		int hop_limit;

		ret = ib_query_gid(device,
				   ah_attr->port_num,
				   ah_attr->grh.sgid_index,
				   &sgid, &sgid_attr);

		if (ret || !sgid_attr.ndev) {
			if (!ret)
				ret = -ENXIO;
			goto out;
		}

		ret = rdma_addr_find_l2_eth_by_grh(&sgid,
						   &ah_attr->grh.dgid,
						   ah_attr->dmac,
						   sgid_attr.ndev, &hop_limit);

		dev_put(sgid_attr.ndev);

		ah_attr->grh.hop_limit = hop_limit;
	}
out:
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_dmac);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	if (qp_attr_mask & IB_QP_AV) {
		int ret;

		ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);
		if (ret)
			return ret;
	}

	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
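
/*
 * Example (illustrative sketch): bringing an RC QP from RESET to RTS with
 * ib_modify_qp(), using the attribute masks that qp_state_table above marks
 * as required for each transition.  The attribute values (pkey index, dest
 * QPN, PSNs, ah_attr contents, timeouts, ...) are placeholders the caller
 * must fill in for its fabric.
 *
 *	struct ib_qp_attr attr = {};
 *
 *	attr.qp_state = IB_QPS_INIT;
 *	attr.pkey_index = 0;
 *	attr.port_num = 1;
 *	attr.qp_access_flags = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 *
 *	attr.qp_state = IB_QPS_RTR;
 *	(fill attr.ah_attr, attr.path_mtu, attr.dest_qp_num, attr.rq_psn,
 *	 attr.max_dest_rd_atomic, attr.min_rnr_timer)
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
 *			   IB_QP_DEST_QPN | IB_QP_RQ_PSN |
 *			   IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
 *
 *	attr.qp_state = IB_QPS_RTS;
 *	(fill attr.timeout, attr.retry_cnt, attr.rnr_retry, attr.sq_psn,
 *	 attr.max_rd_atomic)
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_TIMEOUT |
 *			   IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
 *			   IB_QP_MAX_QP_RD_ATOMIC);
 */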

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_rwq_ind_table *ind_tbl;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;
	ind_tbl = qp->rwq_ind_tbl;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
		if (ind_tbl)
			atomic_dec(&ind_tbl->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device = device;
		cq->uobject = NULL;
		cq->comp_handler = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd = mr->pd;
	int ret;

	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 *
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device = pd->device;
		mr->pd = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		mr->need_inval = false;
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);
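
/*
 * Example (illustrative sketch): allocating an MR sized for fast
 * registration of up to 16 pages.  "pd" is assumed to exist already, and
 * the MR must be deregistered with ib_dereg_mr() before the PD goes away.
 *
 *	struct ib_mr *mr;
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	...
 *	ib_dereg_mr(mr);
 */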

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);

struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

/**
 * ib_create_wq - Creates a WQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the WQ.
 * @wq_init_attr: A list of initial attributes required to create the
 * WQ. If WQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created WQ.
 *
 * wq_init_attr->max_wr and wq_init_attr->max_sge determine
 * the requested size of the WQ, and are set to the actual values allocated
 * on return.
 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
 * at least as large as the requested values.
 */
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *wq_attr)
{
	struct ib_wq *wq;

	if (!pd->device->create_wq)
		return ERR_PTR(-ENOSYS);

	wq = pd->device->create_wq(pd, wq_attr, NULL);
	if (!IS_ERR(wq)) {
		wq->event_handler = wq_attr->event_handler;
		wq->wq_context = wq_attr->wq_context;
		wq->wq_type = wq_attr->wq_type;
		wq->cq = wq_attr->cq;
		wq->device = pd->device;
		wq->pd = pd;
		wq->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_inc(&wq_attr->cq->usecnt);
		atomic_set(&wq->usecnt, 0);
	}
	return wq;
}
EXPORT_SYMBOL(ib_create_wq);

/**
 * ib_destroy_wq - Destroys the specified WQ.
 * @wq: The WQ to destroy.
 */
int ib_destroy_wq(struct ib_wq *wq)
{
	int err;
	struct ib_cq *cq = wq->cq;
	struct ib_pd *pd = wq->pd;

	if (atomic_read(&wq->usecnt))
		return -EBUSY;

	err = wq->device->destroy_wq(wq);
	if (!err) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&cq->usecnt);
	}
	return err;
}
EXPORT_SYMBOL(ib_destroy_wq);

/**
 * ib_modify_wq - Modifies the specified WQ.
 * @wq: The WQ to modify.
 * @wq_attr: On input, specifies the WQ attributes to modify.
 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
 *   are being modified.
 * On output, the current values of selected WQ attributes are returned.
 */
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		 u32 wq_attr_mask)
{
	int err;

	if (!wq->device->modify_wq)
		return -ENOSYS;

	err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
	return err;
}
EXPORT_SYMBOL(ib_modify_wq);

/*
 * ib_create_rwq_ind_table - Creates a RQ Indirection Table.
 * @device: The device on which to create the rwq indirection table.
 * @ib_rwq_ind_table_init_attr: A list of initial attributes required to
 * create the Indirection Table.
 *
 * Note: The lifetime of ib_rwq_ind_table_init_attr->ind_tbl must not be
 * shorter than that of the created ib_rwq_ind_table object, and the caller
 * is responsible for its memory allocation/free.
 */
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr *init_attr)
{
	struct ib_rwq_ind_table *rwq_ind_table;
	int i;
	u32 table_size;

	if (!device->create_rwq_ind_table)
		return ERR_PTR(-ENOSYS);

	table_size = (1 << init_attr->log_ind_tbl_size);
	rwq_ind_table = device->create_rwq_ind_table(device,
						     init_attr, NULL);
	if (IS_ERR(rwq_ind_table))
		return rwq_ind_table;

	rwq_ind_table->ind_tbl = init_attr->ind_tbl;
	rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
	rwq_ind_table->device = device;
	rwq_ind_table->uobject = NULL;
	atomic_set(&rwq_ind_table->usecnt, 0);

	for (i = 0; i < table_size; i++)
		atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);

	return rwq_ind_table;
}
EXPORT_SYMBOL(ib_create_rwq_ind_table);
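
/*
 * Example (illustrative sketch): building a two-entry RSS indirection table
 * from receive WQs.  "pd", "cq" and error handling are assumed to exist;
 * IB_WQT_RQ and the attribute field names (max_wr, max_sge, ind_tbl,
 * log_ind_tbl_size) follow ib_verbs.h for this API generation.  Note that
 * log_ind_tbl_size is the log2 of the table size, so 1 means two WQs.
 *
 *	struct ib_wq_init_attr wq_attr = {
 *		.wq_type = IB_WQT_RQ,
 *		.max_wr = 256,
 *		.max_sge = 1,
 *		.cq = cq,
 *	};
 *	struct ib_wq *wqs[2];
 *	struct ib_rwq_ind_table_init_attr ind_attr = {
 *		.log_ind_tbl_size = 1,
 *		.ind_tbl = wqs,
 *	};
 *	struct ib_rwq_ind_table *ind_tbl;
 *
 *	wqs[0] = ib_create_wq(pd, &wq_attr);
 *	wqs[1] = ib_create_wq(pd, &wq_attr);
 *	ind_tbl = ib_create_rwq_ind_table(device, &ind_attr);
 */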

/*
 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
 * @wq_ind_table: The Indirection Table to destroy.
 */
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
{
	int err, i;
	u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
	struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;

	if (atomic_read(&rwq_ind_table->usecnt))
		return -EBUSY;

	err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
	if (!err) {
		for (i = 0; i < table_size; i++)
			atomic_dec(&ind_tbl[i]->usecnt);
	}

	return err;
}
EXPORT_SYMBOL(ib_destroy_rwq_ind_table);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state)
{
	if (!device->set_vf_link_state)
		return -ENOSYS;

	return device->set_vf_link_state(device, vf, port, state);
}
EXPORT_SYMBOL(ib_set_vf_link_state);

int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info)
{
	if (!device->get_vf_config)
		return -ENOSYS;

	return device->get_vf_config(device, vf, port, info);
}
EXPORT_SYMBOL(ib_get_vf_config);

int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats)
{
	if (!device->get_vf_stats)
		return -ENOSYS;

	return device->get_vf_stats(device, vf, port, stats);
}
EXPORT_SYMBOL(ib_get_vf_stats);

int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type)
{
	if (!device->set_vf_guid)
		return -ENOSYS;

	return device->set_vf_guid(device, vf, port, guid, type);
}
EXPORT_SYMBOL(ib_set_vf_guid);

/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and use it to populate the memory region.
 * @mr:            memory region
 * @sg:            dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset:     offset in bytes into sg
 * @page_size:     page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must either be aligned to page_size or virtually
 *   contiguous to the previous element. In case an sg element has a
 *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
 *   then only max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
 *   constraints holds and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size)
{
	if (unlikely(!mr->device->map_mr_sg))
		return -ENOSYS;

	mr->page_size = page_size;

	return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg);
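
/*
 * Example (illustrative sketch): registering a DMA-mapped scatterlist with
 * an IB_MR_TYPE_MEM_REG MR allocated by ib_alloc_mr(), then posting the
 * registration as an IB_WR_REG_MR work request.  The scatterlist is assumed
 * to be already mapped with ib_dma_map_sg(), and "qp", "mr", "sgl" and
 * "sg_nents" to come from the caller; struct ib_reg_wr follows ib_verbs.h
 * for this API generation.
 *
 *	struct ib_send_wr *bad_wr;
 *	struct ib_reg_wr reg_wr = {};
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);
 *	if (n < sg_nents)
 *		return n < 0 ? n : -EINVAL;
 *
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr = mr;
 *	reg_wr.key = mr->rkey;
 *	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *	ret = ib_post_send(qp, &reg_wr.wr, &bad_wr);
 */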

/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr:            memory region
 * @sgl:           dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset_p:   IN:  start offset in bytes into sg
 *                 OUT: offset in bytes for element n of the sg of the first
 *                      byte that has not been processed where n is the return
 *                      value of this function.
 * @set_page:      driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
{
	struct scatterlist *sg;
	u64 last_end_dma_addr = 0;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	unsigned int last_page_off = 0;
	u64 page_mask = ~((u64)mr->page_size - 1);
	int i, ret;

	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
		return -EINVAL;

	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
	mr->length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		u64 dma_addr = sg_dma_address(sg) + sg_offset;
		u64 prev_addr = dma_addr;
		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
		u64 end_dma_addr = dma_addr + dma_len;
		u64 page_addr = dma_addr & page_mask;

		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
			if (last_end_dma_addr != dma_addr)
				break;

			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
			goto next_page;
		}

		do {
			ret = set_page(mr, page_addr);
			if (unlikely(ret < 0)) {
				sg_offset = prev_addr - sg_dma_address(sg);
				mr->length += prev_addr - dma_addr;
				if (sg_offset_p)
					*sg_offset_p = sg_offset;
				return i || sg_offset ? i : ret;
			}
			prev_addr = page_addr;
next_page:
			page_addr += mr->page_size;
		} while (page_addr < end_dma_addr);

		mr->length += dma_len;
		last_end_dma_addr = end_dma_addr;
		last_page_off = end_dma_addr & ~page_mask;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = 0;
	return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
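
/*
 * Example (illustrative sketch): how a HW driver's map_mr_sg method would
 * typically use ib_sg_to_pages(), collecting page DMA addresses into a
 * driver-private array.  "struct my_mr", "to_my_mr()" and the "pages",
 * "npages" and "max_pages" fields are hypothetical driver state, not part
 * of this file.
 *
 *	static int my_set_page(struct ib_mr *ibmr, u64 addr)
 *	{
 *		struct my_mr *mr = to_my_mr(ibmr);
 *
 *		if (mr->npages == mr->max_pages)
 *			return -ENOMEM;
 *		mr->pages[mr->npages++] = addr;
 *		return 0;
 *	}
 *
 *	static int my_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 *				int sg_nents, unsigned int *sg_offset)
 *	{
 *		to_my_mr(ibmr)->npages = 0;
 *		return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
 *				      my_set_page);
 *	}
 */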

struct ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
						cqe);

	complete(&cqe->done);
}

/*
 * Post a WR and block until its completion is reaped for the SQ.
 */
static void __ib_drain_sq(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe sdrain;
	struct ib_send_wr swr = {}, *bad_swr;
	int ret;

	if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
		WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
		return;
	}

	swr.wr_cqe = &sdrain.cqe;
	sdrain.cqe.done = ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	ret = ib_post_send(qp, &swr, &bad_swr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	wait_for_completion(&sdrain.done);
}

/*
 * Post a WR and block until its completion is reaped for the RQ.
 */
static void __ib_drain_rq(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {}, *bad_rwr;
	int ret;

	if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
		WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
		return;
	}

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	ret = ib_post_recv(qp, &rwr, &bad_rwr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	wait_for_completion(&rdrain.done);
}

/**
 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_sq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and SQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_sq(struct ib_qp *qp)
{
	if (qp->device->drain_sq)
		qp->device->drain_sq(qp);
	else
		__ib_drain_sq(qp);
}
EXPORT_SYMBOL(ib_drain_sq);

/**
 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_rq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and RQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_rq(struct ib_qp *qp)
{
	if (qp->device->drain_rq)
		qp->device->drain_rq(qp);
	else
		__ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_rq);

/**
 * ib_drain_qp() - Block until all CQEs have been consumed by the
 *		   application on both the RQ and SQ.
 * @qp:            queue pair to drain
 *
 * The caller must:
 *
 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
 * and completions.
 *
 * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_qp(struct ib_qp *qp)
{
	ib_drain_sq(qp);
	if (!qp->srq)
		ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
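
/*
 * Example (illustrative sketch): a typical teardown sequence for a ULP whose
 * CQs were allocated with ib_alloc_cq() using a poll context other than
 * IB_POLL_DIRECT, so the generic drain path above applies.  ib_free_cq() is
 * assumed to be the companion of ib_alloc_cq() in this tree.
 *
 *	ib_drain_qp(qp);	(flushes SQ and RQ, waits for the drain CQEs)
 *	ib_destroy_qp(qp);
 *	ib_free_cq(cq);
 *	ib_dealloc_pd(pd);
 */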