1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0 3 * 4 * Copyright (c) 2005 Voltaire Inc. All rights reserved. 5 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved. 6 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved. 7 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. 8 * 9 * This software is available to you under a choice of one of two 10 * licenses. You may choose to be licensed under the terms of the GNU 11 * General Public License (GPL) Version 2, available from the file 12 * COPYING in the main directory of this source tree, or the 13 * OpenIB.org BSD license below: 14 * 15 * Redistribution and use in source and binary forms, with or 16 * without modification, are permitted provided that the following 17 * conditions are met: 18 * 19 * - Redistributions of source code must retain the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer. 22 * 23 * - Redistributions in binary form must reproduce the above 24 * copyright notice, this list of conditions and the following 25 * disclaimer in the documentation and/or other materials 26 * provided with the distribution. 27 * 28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 29 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 31 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 32 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 33 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 35 * SOFTWARE. 36 */ 37 38 #include <sys/cdefs.h> 39 __FBSDID("$FreeBSD$"); 40 41 #define LINUXKPI_PARAM_PREFIX ibcore_ 42 43 #include <linux/completion.h> 44 #include <linux/in.h> 45 #include <linux/in6.h> 46 #include <linux/mutex.h> 47 #include <linux/random.h> 48 #include <linux/idr.h> 49 #include <linux/inetdevice.h> 50 #include <linux/slab.h> 51 #include <linux/module.h> 52 #include <net/route.h> 53 54 #include <net/tcp.h> 55 #include <net/ipv6.h> 56 57 #include <netinet6/scope6_var.h> 58 #include <netinet6/ip6_var.h> 59 60 #include <rdma/rdma_cm.h> 61 #include <rdma/rdma_cm_ib.h> 62 #include <rdma/ib.h> 63 #include <rdma/ib_addr.h> 64 #include <rdma/ib_cache.h> 65 #include <rdma/ib_cm.h> 66 #include <rdma/ib_sa.h> 67 #include <rdma/iw_cm.h> 68 69 #include <sys/priv.h> 70 71 #include "core_priv.h" 72 73 MODULE_AUTHOR("Sean Hefty"); 74 MODULE_DESCRIPTION("Generic RDMA CM Agent"); 75 MODULE_LICENSE("Dual BSD/GPL"); 76 77 #define CMA_CM_RESPONSE_TIMEOUT 20 78 #define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000 79 #define CMA_MAX_CM_RETRIES 15 80 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) 81 #define CMA_IBOE_PACKET_LIFETIME 18 82 83 static const char * const cma_events[] = { 84 [RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved", 85 [RDMA_CM_EVENT_ADDR_ERROR] = "address error", 86 [RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved ", 87 [RDMA_CM_EVENT_ROUTE_ERROR] = "route error", 88 [RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request", 89 [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response", 90 [RDMA_CM_EVENT_CONNECT_ERROR] = "connect error", 91 [RDMA_CM_EVENT_UNREACHABLE] = "unreachable", 92 [RDMA_CM_EVENT_REJECTED] = "rejected", 93 [RDMA_CM_EVENT_ESTABLISHED] = "established", 94 [RDMA_CM_EVENT_DISCONNECTED] = "disconnected", 95 [RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal", 96 [RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join", 97 [RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error", 98 [RDMA_CM_EVENT_ADDR_CHANGE] = "address change", 99 [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", 100 }; 101 102 const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) 103 { 104 size_t index = event; 105 106 return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ? 107 cma_events[index] : "unrecognized event"; 108 } 109 EXPORT_SYMBOL(rdma_event_msg); 110 111 static int cma_check_linklocal(struct rdma_dev_addr *, struct sockaddr *); 112 static void cma_add_one(struct ib_device *device); 113 static void cma_remove_one(struct ib_device *device, void *client_data); 114 115 static struct ib_client cma_client = { 116 .name = "cma", 117 .add = cma_add_one, 118 .remove = cma_remove_one 119 }; 120 121 static struct ib_sa_client sa_client; 122 static struct rdma_addr_client addr_client; 123 static LIST_HEAD(dev_list); 124 static LIST_HEAD(listen_any_list); 125 static DEFINE_MUTEX(lock); 126 static struct workqueue_struct *cma_wq; 127 128 struct cma_pernet { 129 struct idr tcp_ps; 130 struct idr udp_ps; 131 struct idr ipoib_ps; 132 struct idr ib_ps; 133 }; 134 135 VNET_DEFINE(struct cma_pernet, cma_pernet); 136 137 static struct cma_pernet *cma_pernet_ptr(struct vnet *vnet) 138 { 139 struct cma_pernet *retval; 140 141 CURVNET_SET_QUIET(vnet); 142 retval = &VNET(cma_pernet); 143 CURVNET_RESTORE(); 144 145 return (retval); 146 } 147 148 static struct idr *cma_pernet_idr(struct vnet *net, enum rdma_port_space ps) 149 { 150 struct cma_pernet *pernet = cma_pernet_ptr(net); 151 152 switch (ps) { 153 case RDMA_PS_TCP: 154 return &pernet->tcp_ps; 155 case RDMA_PS_UDP: 156 return &pernet->udp_ps; 157 case RDMA_PS_IPOIB: 158 return &pernet->ipoib_ps; 159 case RDMA_PS_IB: 160 return &pernet->ib_ps; 161 default: 162 return NULL; 163 } 164 } 165 166 struct cma_device { 167 struct list_head list; 168 struct ib_device *device; 169 struct completion comp; 170 atomic_t refcount; 171 struct list_head id_list; 172 struct sysctl_ctx_list sysctl_ctx; 173 enum ib_gid_type *default_gid_type; 174 }; 175 176 struct rdma_bind_list { 177 enum rdma_port_space ps; 178 struct hlist_head owners; 179 unsigned short port; 180 }; 181 182 struct class_port_info_context { 183 struct ib_class_port_info *class_port_info; 184 struct ib_device *device; 185 struct completion done; 186 struct ib_sa_query *sa_query; 187 u8 port_num; 188 }; 189 190 static int cma_ps_alloc(struct vnet *vnet, enum rdma_port_space ps, 191 struct rdma_bind_list *bind_list, int snum) 192 { 193 struct idr *idr = cma_pernet_idr(vnet, ps); 194 195 return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL); 196 } 197 198 static struct rdma_bind_list *cma_ps_find(struct vnet *net, 199 enum rdma_port_space ps, int snum) 200 { 201 struct idr *idr = cma_pernet_idr(net, ps); 202 203 return idr_find(idr, snum); 204 } 205 206 static void cma_ps_remove(struct vnet *net, enum rdma_port_space ps, int snum) 207 { 208 struct idr *idr = cma_pernet_idr(net, ps); 209 210 idr_remove(idr, snum); 211 } 212 213 enum { 214 CMA_OPTION_AFONLY, 215 }; 216 217 void cma_ref_dev(struct cma_device *cma_dev) 218 { 219 atomic_inc(&cma_dev->refcount); 220 } 221 222 struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, 223 void *cookie) 224 { 225 struct cma_device *cma_dev; 226 struct cma_device *found_cma_dev = NULL; 227 228 mutex_lock(&lock); 229 230 list_for_each_entry(cma_dev, &dev_list, list) 231 if (filter(cma_dev->device, cookie)) { 232 found_cma_dev = cma_dev; 233 break; 234 } 235 236 if (found_cma_dev) 237 cma_ref_dev(found_cma_dev); 238 mutex_unlock(&lock); 239 return found_cma_dev; 240 } 241 242 int cma_get_default_gid_type(struct cma_device *cma_dev, 243 unsigned int port) 244 { 245 if (port < rdma_start_port(cma_dev->device) || 246 port > rdma_end_port(cma_dev->device)) 247 return -EINVAL; 248 249 return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)]; 250 } 251 252 int cma_set_default_gid_type(struct cma_device *cma_dev, 253 unsigned int port, 254 enum ib_gid_type default_gid_type) 255 { 256 unsigned long supported_gids; 257 258 if (port < rdma_start_port(cma_dev->device) || 259 port > rdma_end_port(cma_dev->device)) 260 return -EINVAL; 261 262 supported_gids = roce_gid_type_mask_support(cma_dev->device, port); 263 264 if (!(supported_gids & 1 << default_gid_type)) 265 return -EINVAL; 266 267 cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] = 268 default_gid_type; 269 270 return 0; 271 } 272 273 struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev) 274 { 275 return cma_dev->device; 276 } 277 278 /* 279 * Device removal can occur at anytime, so we need extra handling to 280 * serialize notifying the user of device removal with other callbacks. 281 * We do this by disabling removal notification while a callback is in process, 282 * and reporting it after the callback completes. 283 */ 284 struct rdma_id_private { 285 struct rdma_cm_id id; 286 287 struct rdma_bind_list *bind_list; 288 struct hlist_node node; 289 struct list_head list; /* listen_any_list or cma_device.list */ 290 struct list_head listen_list; /* per device listens */ 291 struct cma_device *cma_dev; 292 struct list_head mc_list; 293 294 int internal_id; 295 enum rdma_cm_state state; 296 spinlock_t lock; 297 struct mutex qp_mutex; 298 299 struct completion comp; 300 atomic_t refcount; 301 struct mutex handler_mutex; 302 303 int backlog; 304 int timeout_ms; 305 struct ib_sa_query *query; 306 int query_id; 307 union { 308 struct ib_cm_id *ib; 309 struct iw_cm_id *iw; 310 } cm_id; 311 312 u32 seq_num; 313 u32 qkey; 314 u32 qp_num; 315 pid_t owner; 316 u32 options; 317 u8 srq; 318 u8 tos; 319 u8 reuseaddr; 320 u8 afonly; 321 enum ib_gid_type gid_type; 322 }; 323 324 struct cma_multicast { 325 struct rdma_id_private *id_priv; 326 union { 327 struct ib_sa_multicast *ib; 328 } multicast; 329 struct list_head list; 330 void *context; 331 struct sockaddr_storage addr; 332 struct kref mcref; 333 bool igmp_joined; 334 u8 join_state; 335 }; 336 337 struct cma_work { 338 struct work_struct work; 339 struct rdma_id_private *id; 340 enum rdma_cm_state old_state; 341 enum rdma_cm_state new_state; 342 struct rdma_cm_event event; 343 }; 344 345 struct cma_ndev_work { 346 struct work_struct work; 347 struct rdma_id_private *id; 348 struct rdma_cm_event event; 349 }; 350 351 struct iboe_mcast_work { 352 struct work_struct work; 353 struct rdma_id_private *id; 354 struct cma_multicast *mc; 355 }; 356 357 union cma_ip_addr { 358 struct in6_addr ip6; 359 struct { 360 __be32 pad[3]; 361 __be32 addr; 362 } ip4; 363 }; 364 365 struct cma_hdr { 366 u8 cma_version; 367 u8 ip_version; /* IP version: 7:4 */ 368 __be16 port; 369 union cma_ip_addr src_addr; 370 union cma_ip_addr dst_addr; 371 }; 372 373 #define CMA_VERSION 0x00 374 375 struct cma_req_info { 376 struct ib_device *device; 377 int port; 378 union ib_gid local_gid; 379 __be64 service_id; 380 u16 pkey; 381 bool has_gid:1; 382 }; 383 384 static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp) 385 { 386 unsigned long flags; 387 int ret; 388 389 spin_lock_irqsave(&id_priv->lock, flags); 390 ret = (id_priv->state == comp); 391 spin_unlock_irqrestore(&id_priv->lock, flags); 392 return ret; 393 } 394 395 static int cma_comp_exch(struct rdma_id_private *id_priv, 396 enum rdma_cm_state comp, enum rdma_cm_state exch) 397 { 398 unsigned long flags; 399 int ret; 400 401 spin_lock_irqsave(&id_priv->lock, flags); 402 if ((ret = (id_priv->state == comp))) 403 id_priv->state = exch; 404 spin_unlock_irqrestore(&id_priv->lock, flags); 405 return ret; 406 } 407 408 static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv, 409 enum rdma_cm_state exch) 410 { 411 unsigned long flags; 412 enum rdma_cm_state old; 413 414 spin_lock_irqsave(&id_priv->lock, flags); 415 old = id_priv->state; 416 id_priv->state = exch; 417 spin_unlock_irqrestore(&id_priv->lock, flags); 418 return old; 419 } 420 421 static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr) 422 { 423 return hdr->ip_version >> 4; 424 } 425 426 static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver) 427 { 428 hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF); 429 } 430 431 static int cma_igmp_send(struct net_device *ndev, const union ib_gid *mgid, bool join) 432 { 433 int retval; 434 435 if (ndev) { 436 union { 437 struct sockaddr sock; 438 struct sockaddr_storage storage; 439 } addr; 440 441 rdma_gid2ip(&addr.sock, mgid); 442 443 CURVNET_SET_QUIET(ndev->if_vnet); 444 if (join) 445 retval = -if_addmulti(ndev, &addr.sock, NULL); 446 else 447 retval = -if_delmulti(ndev, &addr.sock); 448 CURVNET_RESTORE(); 449 } else { 450 retval = -ENODEV; 451 } 452 return retval; 453 } 454 455 static void _cma_attach_to_dev(struct rdma_id_private *id_priv, 456 struct cma_device *cma_dev) 457 { 458 cma_ref_dev(cma_dev); 459 id_priv->cma_dev = cma_dev; 460 id_priv->gid_type = 0; 461 id_priv->id.device = cma_dev->device; 462 id_priv->id.route.addr.dev_addr.transport = 463 rdma_node_get_transport(cma_dev->device->node_type); 464 list_add_tail(&id_priv->list, &cma_dev->id_list); 465 } 466 467 static void cma_attach_to_dev(struct rdma_id_private *id_priv, 468 struct cma_device *cma_dev) 469 { 470 _cma_attach_to_dev(id_priv, cma_dev); 471 id_priv->gid_type = 472 cma_dev->default_gid_type[id_priv->id.port_num - 473 rdma_start_port(cma_dev->device)]; 474 } 475 476 void cma_deref_dev(struct cma_device *cma_dev) 477 { 478 if (atomic_dec_and_test(&cma_dev->refcount)) 479 complete(&cma_dev->comp); 480 } 481 482 static inline void release_mc(struct kref *kref) 483 { 484 struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref); 485 486 kfree(mc->multicast.ib); 487 kfree(mc); 488 } 489 490 static void cma_release_dev(struct rdma_id_private *id_priv) 491 { 492 mutex_lock(&lock); 493 list_del(&id_priv->list); 494 cma_deref_dev(id_priv->cma_dev); 495 id_priv->cma_dev = NULL; 496 mutex_unlock(&lock); 497 } 498 499 static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv) 500 { 501 return (struct sockaddr *) &id_priv->id.route.addr.src_addr; 502 } 503 504 static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv) 505 { 506 return (struct sockaddr *) &id_priv->id.route.addr.dst_addr; 507 } 508 509 static inline unsigned short cma_family(struct rdma_id_private *id_priv) 510 { 511 return id_priv->id.route.addr.src_addr.ss_family; 512 } 513 514 static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) 515 { 516 struct ib_sa_mcmember_rec rec; 517 int ret = 0; 518 519 if (id_priv->qkey) { 520 if (qkey && id_priv->qkey != qkey) 521 return -EINVAL; 522 return 0; 523 } 524 525 if (qkey) { 526 id_priv->qkey = qkey; 527 return 0; 528 } 529 530 switch (id_priv->id.ps) { 531 case RDMA_PS_UDP: 532 case RDMA_PS_IB: 533 id_priv->qkey = RDMA_UDP_QKEY; 534 break; 535 case RDMA_PS_IPOIB: 536 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid); 537 ret = ib_sa_get_mcmember_rec(id_priv->id.device, 538 id_priv->id.port_num, &rec.mgid, 539 &rec); 540 if (!ret) 541 id_priv->qkey = be32_to_cpu(rec.qkey); 542 break; 543 default: 544 break; 545 } 546 return ret; 547 } 548 549 static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr) 550 { 551 dev_addr->dev_type = ARPHRD_INFINIBAND; 552 rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr); 553 ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey)); 554 } 555 556 static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) 557 { 558 int ret; 559 560 if (addr->sa_family != AF_IB) { 561 ret = rdma_translate_ip(addr, dev_addr); 562 } else { 563 cma_translate_ib((struct sockaddr_ib *) addr, dev_addr); 564 ret = 0; 565 } 566 567 return ret; 568 } 569 570 static inline int cma_validate_port(struct ib_device *device, u8 port, 571 enum ib_gid_type gid_type, 572 union ib_gid *gid, 573 const struct rdma_dev_addr *dev_addr) 574 { 575 const int dev_type = dev_addr->dev_type; 576 struct net_device *ndev; 577 int ret = -ENODEV; 578 579 if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port)) 580 return ret; 581 582 if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port)) 583 return ret; 584 585 if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) { 586 ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); 587 } else { 588 ndev = NULL; 589 gid_type = IB_GID_TYPE_IB; 590 } 591 592 ret = ib_find_cached_gid_by_port(device, gid, gid_type, port, 593 ndev, NULL); 594 595 if (ndev) 596 dev_put(ndev); 597 598 return ret; 599 } 600 601 static int cma_acquire_dev(struct rdma_id_private *id_priv, 602 struct rdma_id_private *listen_id_priv) 603 { 604 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 605 struct cma_device *cma_dev; 606 union ib_gid gid, iboe_gid, *gidp; 607 int ret = -ENODEV; 608 u8 port; 609 610 if (dev_addr->dev_type != ARPHRD_INFINIBAND && 611 id_priv->id.ps == RDMA_PS_IPOIB) 612 return -EINVAL; 613 614 mutex_lock(&lock); 615 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 616 &iboe_gid); 617 618 memcpy(&gid, dev_addr->src_dev_addr + 619 rdma_addr_gid_offset(dev_addr), sizeof gid); 620 621 if (listen_id_priv) { 622 cma_dev = listen_id_priv->cma_dev; 623 port = listen_id_priv->id.port_num; 624 625 if (rdma_is_port_valid(cma_dev->device, port)) { 626 gidp = rdma_protocol_roce(cma_dev->device, port) ? 627 &iboe_gid : &gid; 628 629 ret = cma_validate_port(cma_dev->device, port, 630 rdma_protocol_ib(cma_dev->device, port) ? 631 IB_GID_TYPE_IB : 632 listen_id_priv->gid_type, gidp, dev_addr); 633 if (!ret) { 634 id_priv->id.port_num = port; 635 goto out; 636 } 637 } 638 } 639 640 list_for_each_entry(cma_dev, &dev_list, list) { 641 for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) { 642 if (listen_id_priv && 643 listen_id_priv->cma_dev == cma_dev && 644 listen_id_priv->id.port_num == port) 645 continue; 646 647 gidp = rdma_protocol_roce(cma_dev->device, port) ? 648 &iboe_gid : &gid; 649 650 ret = cma_validate_port(cma_dev->device, port, 651 rdma_protocol_ib(cma_dev->device, port) ? 652 IB_GID_TYPE_IB : 653 cma_dev->default_gid_type[port - 1], 654 gidp, dev_addr); 655 if (!ret) { 656 id_priv->id.port_num = port; 657 goto out; 658 } 659 } 660 } 661 662 out: 663 if (!ret) 664 cma_attach_to_dev(id_priv, cma_dev); 665 666 mutex_unlock(&lock); 667 return ret; 668 } 669 670 /* 671 * Select the source IB device and address to reach the destination IB address. 672 */ 673 static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) 674 { 675 struct cma_device *cma_dev, *cur_dev; 676 struct sockaddr_ib *addr; 677 union ib_gid gid, sgid, *dgid; 678 u16 pkey, index; 679 u8 p; 680 int i; 681 682 cma_dev = NULL; 683 addr = (struct sockaddr_ib *) cma_dst_addr(id_priv); 684 dgid = (union ib_gid *) &addr->sib_addr; 685 pkey = ntohs(addr->sib_pkey); 686 687 list_for_each_entry(cur_dev, &dev_list, list) { 688 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { 689 if (!rdma_cap_af_ib(cur_dev->device, p)) 690 continue; 691 692 if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index)) 693 continue; 694 695 for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i, 696 &gid, NULL); 697 i++) { 698 if (!memcmp(&gid, dgid, sizeof(gid))) { 699 cma_dev = cur_dev; 700 sgid = gid; 701 id_priv->id.port_num = p; 702 goto found; 703 } 704 705 if (!cma_dev && (gid.global.subnet_prefix == 706 dgid->global.subnet_prefix)) { 707 cma_dev = cur_dev; 708 sgid = gid; 709 id_priv->id.port_num = p; 710 } 711 } 712 } 713 } 714 715 if (!cma_dev) 716 return -ENODEV; 717 718 found: 719 cma_attach_to_dev(id_priv, cma_dev); 720 addr = (struct sockaddr_ib *) cma_src_addr(id_priv); 721 memcpy(&addr->sib_addr, &sgid, sizeof sgid); 722 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); 723 return 0; 724 } 725 726 static void cma_deref_id(struct rdma_id_private *id_priv) 727 { 728 if (atomic_dec_and_test(&id_priv->refcount)) 729 complete(&id_priv->comp); 730 } 731 732 struct rdma_cm_id *rdma_create_id(struct vnet *net, 733 rdma_cm_event_handler event_handler, 734 void *context, enum rdma_port_space ps, 735 enum ib_qp_type qp_type) 736 { 737 struct rdma_id_private *id_priv; 738 739 #ifdef VIMAGE 740 if (net == NULL) 741 return ERR_PTR(-EINVAL); 742 #endif 743 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL); 744 if (!id_priv) 745 return ERR_PTR(-ENOMEM); 746 747 id_priv->owner = task_pid_nr(current); 748 id_priv->state = RDMA_CM_IDLE; 749 id_priv->id.context = context; 750 id_priv->id.event_handler = event_handler; 751 id_priv->id.ps = ps; 752 id_priv->id.qp_type = qp_type; 753 spin_lock_init(&id_priv->lock); 754 mutex_init(&id_priv->qp_mutex); 755 init_completion(&id_priv->comp); 756 atomic_set(&id_priv->refcount, 1); 757 mutex_init(&id_priv->handler_mutex); 758 INIT_LIST_HEAD(&id_priv->listen_list); 759 INIT_LIST_HEAD(&id_priv->mc_list); 760 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); 761 id_priv->id.route.addr.dev_addr.net = net; 762 763 return &id_priv->id; 764 } 765 EXPORT_SYMBOL(rdma_create_id); 766 767 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) 768 { 769 struct ib_qp_attr qp_attr; 770 int qp_attr_mask, ret; 771 772 qp_attr.qp_state = IB_QPS_INIT; 773 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 774 if (ret) 775 return ret; 776 777 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); 778 if (ret) 779 return ret; 780 781 qp_attr.qp_state = IB_QPS_RTR; 782 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE); 783 if (ret) 784 return ret; 785 786 qp_attr.qp_state = IB_QPS_RTS; 787 qp_attr.sq_psn = 0; 788 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN); 789 790 return ret; 791 } 792 793 static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) 794 { 795 struct ib_qp_attr qp_attr; 796 int qp_attr_mask, ret; 797 798 qp_attr.qp_state = IB_QPS_INIT; 799 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 800 if (ret) 801 return ret; 802 803 return ib_modify_qp(qp, &qp_attr, qp_attr_mask); 804 } 805 806 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, 807 struct ib_qp_init_attr *qp_init_attr) 808 { 809 struct rdma_id_private *id_priv; 810 struct ib_qp *qp; 811 int ret; 812 813 id_priv = container_of(id, struct rdma_id_private, id); 814 if (id->device != pd->device) 815 return -EINVAL; 816 817 qp_init_attr->port_num = id->port_num; 818 qp = ib_create_qp(pd, qp_init_attr); 819 if (IS_ERR(qp)) 820 return PTR_ERR(qp); 821 822 if (id->qp_type == IB_QPT_UD) 823 ret = cma_init_ud_qp(id_priv, qp); 824 else 825 ret = cma_init_conn_qp(id_priv, qp); 826 if (ret) 827 goto err; 828 829 id->qp = qp; 830 id_priv->qp_num = qp->qp_num; 831 id_priv->srq = (qp->srq != NULL); 832 return 0; 833 err: 834 ib_destroy_qp(qp); 835 return ret; 836 } 837 EXPORT_SYMBOL(rdma_create_qp); 838 839 void rdma_destroy_qp(struct rdma_cm_id *id) 840 { 841 struct rdma_id_private *id_priv; 842 843 id_priv = container_of(id, struct rdma_id_private, id); 844 mutex_lock(&id_priv->qp_mutex); 845 ib_destroy_qp(id_priv->id.qp); 846 id_priv->id.qp = NULL; 847 mutex_unlock(&id_priv->qp_mutex); 848 } 849 EXPORT_SYMBOL(rdma_destroy_qp); 850 851 static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, 852 struct rdma_conn_param *conn_param) 853 { 854 struct ib_qp_attr qp_attr; 855 int qp_attr_mask, ret; 856 union ib_gid sgid; 857 858 mutex_lock(&id_priv->qp_mutex); 859 if (!id_priv->id.qp) { 860 ret = 0; 861 goto out; 862 } 863 864 /* Need to update QP attributes from default values. */ 865 qp_attr.qp_state = IB_QPS_INIT; 866 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 867 if (ret) 868 goto out; 869 870 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 871 if (ret) 872 goto out; 873 874 qp_attr.qp_state = IB_QPS_RTR; 875 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 876 if (ret) 877 goto out; 878 879 ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num, 880 qp_attr.ah_attr.grh.sgid_index, &sgid, NULL); 881 if (ret) 882 goto out; 883 884 BUG_ON(id_priv->cma_dev->device != id_priv->id.device); 885 886 if (conn_param) 887 qp_attr.max_dest_rd_atomic = conn_param->responder_resources; 888 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 889 out: 890 mutex_unlock(&id_priv->qp_mutex); 891 return ret; 892 } 893 894 static int cma_modify_qp_rts(struct rdma_id_private *id_priv, 895 struct rdma_conn_param *conn_param) 896 { 897 struct ib_qp_attr qp_attr; 898 int qp_attr_mask, ret; 899 900 mutex_lock(&id_priv->qp_mutex); 901 if (!id_priv->id.qp) { 902 ret = 0; 903 goto out; 904 } 905 906 qp_attr.qp_state = IB_QPS_RTS; 907 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 908 if (ret) 909 goto out; 910 911 if (conn_param) 912 qp_attr.max_rd_atomic = conn_param->initiator_depth; 913 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 914 out: 915 mutex_unlock(&id_priv->qp_mutex); 916 return ret; 917 } 918 919 static int cma_modify_qp_err(struct rdma_id_private *id_priv) 920 { 921 struct ib_qp_attr qp_attr; 922 int ret; 923 924 mutex_lock(&id_priv->qp_mutex); 925 if (!id_priv->id.qp) { 926 ret = 0; 927 goto out; 928 } 929 930 qp_attr.qp_state = IB_QPS_ERR; 931 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE); 932 out: 933 mutex_unlock(&id_priv->qp_mutex); 934 return ret; 935 } 936 937 static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, 938 struct ib_qp_attr *qp_attr, int *qp_attr_mask) 939 { 940 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 941 int ret; 942 u16 pkey; 943 944 if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num)) 945 pkey = 0xffff; 946 else 947 pkey = ib_addr_get_pkey(dev_addr); 948 949 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, 950 pkey, &qp_attr->pkey_index); 951 if (ret) 952 return ret; 953 954 qp_attr->port_num = id_priv->id.port_num; 955 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; 956 957 if (id_priv->id.qp_type == IB_QPT_UD) { 958 ret = cma_set_qkey(id_priv, 0); 959 if (ret) 960 return ret; 961 962 qp_attr->qkey = id_priv->qkey; 963 *qp_attr_mask |= IB_QP_QKEY; 964 } else { 965 qp_attr->qp_access_flags = 0; 966 *qp_attr_mask |= IB_QP_ACCESS_FLAGS; 967 } 968 return 0; 969 } 970 971 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, 972 int *qp_attr_mask) 973 { 974 struct rdma_id_private *id_priv; 975 int ret = 0; 976 977 id_priv = container_of(id, struct rdma_id_private, id); 978 if (rdma_cap_ib_cm(id->device, id->port_num)) { 979 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) 980 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); 981 else 982 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, 983 qp_attr_mask); 984 985 if (qp_attr->qp_state == IB_QPS_RTR) 986 qp_attr->rq_psn = id_priv->seq_num; 987 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 988 if (!id_priv->cm_id.iw) { 989 qp_attr->qp_access_flags = 0; 990 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; 991 } else 992 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, 993 qp_attr_mask); 994 qp_attr->port_num = id_priv->id.port_num; 995 *qp_attr_mask |= IB_QP_PORT; 996 } else 997 ret = -ENOSYS; 998 999 return ret; 1000 } 1001 EXPORT_SYMBOL(rdma_init_qp_attr); 1002 1003 static inline int cma_zero_addr(struct sockaddr *addr) 1004 { 1005 switch (addr->sa_family) { 1006 case AF_INET: 1007 return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr); 1008 case AF_INET6: 1009 return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr); 1010 case AF_IB: 1011 return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr); 1012 default: 1013 return 0; 1014 } 1015 } 1016 1017 static inline int cma_loopback_addr(struct sockaddr *addr) 1018 { 1019 switch (addr->sa_family) { 1020 case AF_INET: 1021 return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr); 1022 case AF_INET6: 1023 return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr); 1024 case AF_IB: 1025 return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr); 1026 default: 1027 return 0; 1028 } 1029 } 1030 1031 static inline int cma_any_addr(struct sockaddr *addr) 1032 { 1033 return cma_zero_addr(addr) || cma_loopback_addr(addr); 1034 } 1035 1036 static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst) 1037 { 1038 if (src->sa_family != dst->sa_family) 1039 return -1; 1040 1041 switch (src->sa_family) { 1042 case AF_INET: 1043 return ((struct sockaddr_in *) src)->sin_addr.s_addr != 1044 ((struct sockaddr_in *) dst)->sin_addr.s_addr; 1045 case AF_INET6: 1046 return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr, 1047 &((struct sockaddr_in6 *) dst)->sin6_addr); 1048 default: 1049 return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr, 1050 &((struct sockaddr_ib *) dst)->sib_addr); 1051 } 1052 } 1053 1054 static __be16 cma_port(struct sockaddr *addr) 1055 { 1056 struct sockaddr_ib *sib; 1057 1058 switch (addr->sa_family) { 1059 case AF_INET: 1060 return ((struct sockaddr_in *) addr)->sin_port; 1061 case AF_INET6: 1062 return ((struct sockaddr_in6 *) addr)->sin6_port; 1063 case AF_IB: 1064 sib = (struct sockaddr_ib *) addr; 1065 return htons((u16) (be64_to_cpu(sib->sib_sid) & 1066 be64_to_cpu(sib->sib_sid_mask))); 1067 default: 1068 return 0; 1069 } 1070 } 1071 1072 static inline int cma_any_port(struct sockaddr *addr) 1073 { 1074 return !cma_port(addr); 1075 } 1076 1077 static void cma_save_ib_info(struct sockaddr *src_addr, 1078 struct sockaddr *dst_addr, 1079 struct rdma_cm_id *listen_id, 1080 struct ib_sa_path_rec *path) 1081 { 1082 struct sockaddr_ib *listen_ib, *ib; 1083 1084 listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr; 1085 if (src_addr) { 1086 ib = (struct sockaddr_ib *)src_addr; 1087 ib->sib_family = AF_IB; 1088 if (path) { 1089 ib->sib_pkey = path->pkey; 1090 ib->sib_flowinfo = path->flow_label; 1091 memcpy(&ib->sib_addr, &path->sgid, 16); 1092 ib->sib_sid = path->service_id; 1093 ib->sib_scope_id = 0; 1094 } else { 1095 ib->sib_pkey = listen_ib->sib_pkey; 1096 ib->sib_flowinfo = listen_ib->sib_flowinfo; 1097 ib->sib_addr = listen_ib->sib_addr; 1098 ib->sib_sid = listen_ib->sib_sid; 1099 ib->sib_scope_id = listen_ib->sib_scope_id; 1100 } 1101 ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL); 1102 } 1103 if (dst_addr) { 1104 ib = (struct sockaddr_ib *)dst_addr; 1105 ib->sib_family = AF_IB; 1106 if (path) { 1107 ib->sib_pkey = path->pkey; 1108 ib->sib_flowinfo = path->flow_label; 1109 memcpy(&ib->sib_addr, &path->dgid, 16); 1110 } 1111 } 1112 } 1113 1114 static void cma_save_ip4_info(struct sockaddr_in *src_addr, 1115 struct sockaddr_in *dst_addr, 1116 struct cma_hdr *hdr, 1117 __be16 local_port) 1118 { 1119 if (src_addr) { 1120 *src_addr = (struct sockaddr_in) { 1121 .sin_len = sizeof(struct sockaddr_in), 1122 .sin_family = AF_INET, 1123 .sin_addr.s_addr = hdr->dst_addr.ip4.addr, 1124 .sin_port = local_port, 1125 }; 1126 } 1127 1128 if (dst_addr) { 1129 *dst_addr = (struct sockaddr_in) { 1130 .sin_len = sizeof(struct sockaddr_in), 1131 .sin_family = AF_INET, 1132 .sin_addr.s_addr = hdr->src_addr.ip4.addr, 1133 .sin_port = hdr->port, 1134 }; 1135 } 1136 } 1137 1138 static void cma_ip6_clear_scope_id(struct in6_addr *addr) 1139 { 1140 /* make sure link local scope ID gets zeroed */ 1141 if (IN6_IS_SCOPE_LINKLOCAL(addr) || 1142 IN6_IS_ADDR_MC_INTFACELOCAL(addr)) { 1143 /* use byte-access to be alignment safe */ 1144 addr->s6_addr[2] = 0; 1145 addr->s6_addr[3] = 0; 1146 } 1147 } 1148 1149 static void cma_save_ip6_info(struct sockaddr_in6 *src_addr, 1150 struct sockaddr_in6 *dst_addr, 1151 struct cma_hdr *hdr, 1152 __be16 local_port) 1153 { 1154 if (src_addr) { 1155 *src_addr = (struct sockaddr_in6) { 1156 .sin6_len = sizeof(struct sockaddr_in6), 1157 .sin6_family = AF_INET6, 1158 .sin6_addr = hdr->dst_addr.ip6, 1159 .sin6_port = local_port, 1160 }; 1161 cma_ip6_clear_scope_id(&src_addr->sin6_addr); 1162 } 1163 1164 if (dst_addr) { 1165 *dst_addr = (struct sockaddr_in6) { 1166 .sin6_len = sizeof(struct sockaddr_in6), 1167 .sin6_family = AF_INET6, 1168 .sin6_addr = hdr->src_addr.ip6, 1169 .sin6_port = hdr->port, 1170 }; 1171 cma_ip6_clear_scope_id(&dst_addr->sin6_addr); 1172 } 1173 } 1174 1175 static u16 cma_port_from_service_id(__be64 service_id) 1176 { 1177 return (u16)be64_to_cpu(service_id); 1178 } 1179 1180 static int cma_save_ip_info(struct sockaddr *src_addr, 1181 struct sockaddr *dst_addr, 1182 struct ib_cm_event *ib_event, 1183 __be64 service_id) 1184 { 1185 struct cma_hdr *hdr; 1186 __be16 port; 1187 1188 hdr = ib_event->private_data; 1189 if (hdr->cma_version != CMA_VERSION) 1190 return -EINVAL; 1191 1192 port = htons(cma_port_from_service_id(service_id)); 1193 1194 switch (cma_get_ip_ver(hdr)) { 1195 case 4: 1196 cma_save_ip4_info((struct sockaddr_in *)src_addr, 1197 (struct sockaddr_in *)dst_addr, hdr, port); 1198 break; 1199 case 6: 1200 cma_save_ip6_info((struct sockaddr_in6 *)src_addr, 1201 (struct sockaddr_in6 *)dst_addr, hdr, port); 1202 break; 1203 default: 1204 return -EAFNOSUPPORT; 1205 } 1206 1207 return 0; 1208 } 1209 1210 static int cma_save_net_info(struct sockaddr *src_addr, 1211 struct sockaddr *dst_addr, 1212 struct rdma_cm_id *listen_id, 1213 struct ib_cm_event *ib_event, 1214 sa_family_t sa_family, __be64 service_id) 1215 { 1216 if (sa_family == AF_IB) { 1217 if (ib_event->event == IB_CM_REQ_RECEIVED) 1218 cma_save_ib_info(src_addr, dst_addr, listen_id, 1219 ib_event->param.req_rcvd.primary_path); 1220 else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) 1221 cma_save_ib_info(src_addr, dst_addr, listen_id, NULL); 1222 return 0; 1223 } 1224 1225 return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id); 1226 } 1227 1228 static int cma_save_req_info(const struct ib_cm_event *ib_event, 1229 struct cma_req_info *req) 1230 { 1231 const struct ib_cm_req_event_param *req_param = 1232 &ib_event->param.req_rcvd; 1233 const struct ib_cm_sidr_req_event_param *sidr_param = 1234 &ib_event->param.sidr_req_rcvd; 1235 1236 switch (ib_event->event) { 1237 case IB_CM_REQ_RECEIVED: 1238 req->device = req_param->listen_id->device; 1239 req->port = req_param->port; 1240 memcpy(&req->local_gid, &req_param->primary_path->sgid, 1241 sizeof(req->local_gid)); 1242 req->has_gid = true; 1243 req->service_id = req_param->primary_path->service_id; 1244 req->pkey = be16_to_cpu(req_param->primary_path->pkey); 1245 if (req->pkey != req_param->bth_pkey) 1246 pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n" 1247 "RDMA CMA: in the future this may cause the request to be dropped\n", 1248 req_param->bth_pkey, req->pkey); 1249 break; 1250 case IB_CM_SIDR_REQ_RECEIVED: 1251 req->device = sidr_param->listen_id->device; 1252 req->port = sidr_param->port; 1253 req->has_gid = false; 1254 req->service_id = sidr_param->service_id; 1255 req->pkey = sidr_param->pkey; 1256 if (req->pkey != sidr_param->bth_pkey) 1257 pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n" 1258 "RDMA CMA: in the future this may cause the request to be dropped\n", 1259 sidr_param->bth_pkey, req->pkey); 1260 break; 1261 default: 1262 return -EINVAL; 1263 } 1264 1265 return 0; 1266 } 1267 1268 static bool validate_ipv4_net_dev(struct net_device *net_dev, 1269 const struct sockaddr_in *dst_addr, 1270 const struct sockaddr_in *src_addr) 1271 { 1272 #ifdef INET 1273 struct sockaddr_in src_tmp = *src_addr; 1274 __be32 daddr = dst_addr->sin_addr.s_addr, 1275 saddr = src_addr->sin_addr.s_addr; 1276 struct net_device *dst_dev; 1277 struct rtentry *rte; 1278 bool ret; 1279 1280 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || 1281 ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) || 1282 ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) || 1283 ipv4_is_loopback(saddr)) 1284 return false; 1285 1286 dst_dev = ip_dev_find(net_dev->if_vnet, daddr); 1287 if (dst_dev != net_dev) { 1288 if (dst_dev != NULL) 1289 dev_put(dst_dev); 1290 return false; 1291 } 1292 dev_put(dst_dev); 1293 1294 /* 1295 * Check for loopback. 1296 */ 1297 if (saddr == daddr) 1298 return true; 1299 1300 /* 1301 * Make sure the socket address length field 1302 * is set, else rtalloc1() will fail. 1303 */ 1304 src_tmp.sin_len = sizeof(src_tmp); 1305 1306 CURVNET_SET(net_dev->if_vnet); 1307 rte = rtalloc1((struct sockaddr *)&src_tmp, 1, 0); 1308 if (rte != NULL) { 1309 ret = (rte->rt_ifp == net_dev); 1310 RTFREE_LOCKED(rte); 1311 } else { 1312 ret = false; 1313 } 1314 CURVNET_RESTORE(); 1315 return ret; 1316 #else 1317 return false; 1318 #endif 1319 } 1320 1321 static bool validate_ipv6_net_dev(struct net_device *net_dev, 1322 const struct sockaddr_in6 *dst_addr, 1323 const struct sockaddr_in6 *src_addr) 1324 { 1325 #ifdef INET6 1326 struct sockaddr_in6 src_tmp = *src_addr; 1327 struct sockaddr_in6 dst_tmp = *dst_addr; 1328 struct net_device *dst_dev; 1329 struct rtentry *rte; 1330 bool ret; 1331 1332 dst_dev = ip6_dev_find(net_dev->if_vnet, dst_tmp.sin6_addr, 1333 net_dev->if_index); 1334 if (dst_dev != net_dev) { 1335 if (dst_dev != NULL) 1336 dev_put(dst_dev); 1337 return false; 1338 } 1339 dev_put(dst_dev); 1340 1341 CURVNET_SET(net_dev->if_vnet); 1342 1343 /* 1344 * Make sure the socket address length field 1345 * is set, else rtalloc1() will fail. 1346 */ 1347 src_tmp.sin6_len = sizeof(src_tmp); 1348 1349 /* 1350 * Make sure the scope ID gets embedded, else rtalloc1() will 1351 * resolve to the loopback interface. 1352 */ 1353 src_tmp.sin6_scope_id = net_dev->if_index; 1354 sa6_embedscope(&src_tmp, 0); 1355 1356 dst_tmp.sin6_scope_id = net_dev->if_index; 1357 sa6_embedscope(&dst_tmp, 0); 1358 1359 /* 1360 * Check for loopback after scope ID 1361 * has been embedded: 1362 */ 1363 if (memcmp(&src_tmp.sin6_addr, &dst_tmp.sin6_addr, 1364 sizeof(dst_tmp.sin6_addr)) == 0) { 1365 ret = true; 1366 } else { 1367 /* non-loopback case */ 1368 rte = rtalloc1((struct sockaddr *)&src_tmp, 1, 0); 1369 if (rte != NULL) { 1370 ret = (rte->rt_ifp == net_dev); 1371 RTFREE_LOCKED(rte); 1372 } else { 1373 ret = false; 1374 } 1375 } 1376 CURVNET_RESTORE(); 1377 return ret; 1378 #else 1379 return false; 1380 #endif 1381 } 1382 1383 static bool validate_net_dev(struct net_device *net_dev, 1384 const struct sockaddr *daddr, 1385 const struct sockaddr *saddr) 1386 { 1387 const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr; 1388 const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr; 1389 const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr; 1390 const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr; 1391 1392 switch (daddr->sa_family) { 1393 case AF_INET: 1394 return saddr->sa_family == AF_INET && 1395 validate_ipv4_net_dev(net_dev, daddr4, saddr4); 1396 1397 case AF_INET6: 1398 return saddr->sa_family == AF_INET6 && 1399 validate_ipv6_net_dev(net_dev, daddr6, saddr6); 1400 1401 default: 1402 return false; 1403 } 1404 } 1405 1406 static struct net_device * 1407 roce_get_net_dev_by_cm_event(struct ib_device *device, u8 port_num, 1408 const struct ib_cm_event *ib_event) 1409 { 1410 struct ib_gid_attr sgid_attr; 1411 union ib_gid sgid; 1412 int err = -EINVAL; 1413 1414 if (ib_event->event == IB_CM_REQ_RECEIVED) { 1415 err = ib_get_cached_gid(device, port_num, 1416 ib_event->param.req_rcvd.ppath_sgid_index, &sgid, &sgid_attr); 1417 } else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) { 1418 err = ib_get_cached_gid(device, port_num, 1419 ib_event->param.sidr_req_rcvd.sgid_index, &sgid, &sgid_attr); 1420 } 1421 if (err) 1422 return (NULL); 1423 return (sgid_attr.ndev); 1424 } 1425 1426 static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event, 1427 const struct cma_req_info *req) 1428 { 1429 struct sockaddr_storage listen_addr_storage, src_addr_storage; 1430 struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage, 1431 *src_addr = (struct sockaddr *)&src_addr_storage; 1432 struct net_device *net_dev; 1433 const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL; 1434 int err; 1435 1436 err = cma_save_ip_info(listen_addr, src_addr, ib_event, 1437 req->service_id); 1438 if (err) 1439 return ERR_PTR(err); 1440 1441 if (rdma_protocol_roce(req->device, req->port)) { 1442 net_dev = roce_get_net_dev_by_cm_event(req->device, req->port, 1443 ib_event); 1444 } else { 1445 net_dev = ib_get_net_dev_by_params(req->device, req->port, 1446 req->pkey, 1447 gid, listen_addr); 1448 } 1449 if (!net_dev) 1450 return ERR_PTR(-ENODEV); 1451 1452 if (!validate_net_dev(net_dev, listen_addr, src_addr)) { 1453 dev_put(net_dev); 1454 return ERR_PTR(-EHOSTUNREACH); 1455 } 1456 1457 return net_dev; 1458 } 1459 1460 static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id) 1461 { 1462 return (be64_to_cpu(service_id) >> 16) & 0xffff; 1463 } 1464 1465 static bool cma_match_private_data(struct rdma_id_private *id_priv, 1466 const struct cma_hdr *hdr) 1467 { 1468 struct sockaddr *addr = cma_src_addr(id_priv); 1469 __be32 ip4_addr; 1470 struct in6_addr ip6_addr; 1471 1472 if (cma_any_addr(addr) && !id_priv->afonly) 1473 return true; 1474 1475 switch (addr->sa_family) { 1476 case AF_INET: 1477 ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr; 1478 if (cma_get_ip_ver(hdr) != 4) 1479 return false; 1480 if (!cma_any_addr(addr) && 1481 hdr->dst_addr.ip4.addr != ip4_addr) 1482 return false; 1483 break; 1484 case AF_INET6: 1485 ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr; 1486 if (cma_get_ip_ver(hdr) != 6) 1487 return false; 1488 cma_ip6_clear_scope_id(&ip6_addr); 1489 if (!cma_any_addr(addr) && 1490 memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr))) 1491 return false; 1492 break; 1493 case AF_IB: 1494 return true; 1495 default: 1496 return false; 1497 } 1498 1499 return true; 1500 } 1501 1502 static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num) 1503 { 1504 enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num); 1505 enum rdma_transport_type transport = 1506 rdma_node_get_transport(device->node_type); 1507 1508 return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB; 1509 } 1510 1511 static bool cma_protocol_roce(const struct rdma_cm_id *id) 1512 { 1513 struct ib_device *device = id->device; 1514 const int port_num = id->port_num ?: rdma_start_port(device); 1515 1516 return cma_protocol_roce_dev_port(device, port_num); 1517 } 1518 1519 static bool cma_match_net_dev(const struct rdma_cm_id *id, 1520 const struct net_device *net_dev, 1521 u8 port_num) 1522 { 1523 const struct rdma_addr *addr = &id->route.addr; 1524 1525 if (!net_dev) 1526 /* This request is an AF_IB request or a RoCE request */ 1527 return (!id->port_num || id->port_num == port_num) && 1528 (addr->src_addr.ss_family == AF_IB || 1529 cma_protocol_roce_dev_port(id->device, port_num)); 1530 1531 return !addr->dev_addr.bound_dev_if || 1532 (net_eq(dev_net(net_dev), addr->dev_addr.net) && 1533 addr->dev_addr.bound_dev_if == net_dev->if_index); 1534 } 1535 1536 static struct rdma_id_private *cma_find_listener( 1537 const struct rdma_bind_list *bind_list, 1538 const struct ib_cm_id *cm_id, 1539 const struct ib_cm_event *ib_event, 1540 const struct cma_req_info *req, 1541 const struct net_device *net_dev) 1542 { 1543 struct rdma_id_private *id_priv, *id_priv_dev; 1544 1545 if (!bind_list) 1546 return ERR_PTR(-EINVAL); 1547 1548 hlist_for_each_entry(id_priv, &bind_list->owners, node) { 1549 if (cma_match_private_data(id_priv, ib_event->private_data)) { 1550 if (id_priv->id.device == cm_id->device && 1551 cma_match_net_dev(&id_priv->id, net_dev, req->port)) 1552 return id_priv; 1553 list_for_each_entry(id_priv_dev, 1554 &id_priv->listen_list, 1555 listen_list) { 1556 if (id_priv_dev->id.device == cm_id->device && 1557 cma_match_net_dev(&id_priv_dev->id, net_dev, req->port)) 1558 return id_priv_dev; 1559 } 1560 } 1561 } 1562 1563 return ERR_PTR(-EINVAL); 1564 } 1565 1566 static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, 1567 struct ib_cm_event *ib_event, 1568 struct net_device **net_dev) 1569 { 1570 struct cma_req_info req; 1571 struct rdma_bind_list *bind_list; 1572 struct rdma_id_private *id_priv; 1573 int err; 1574 1575 err = cma_save_req_info(ib_event, &req); 1576 if (err) 1577 return ERR_PTR(err); 1578 1579 *net_dev = cma_get_net_dev(ib_event, &req); 1580 if (IS_ERR(*net_dev)) { 1581 if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) { 1582 /* Assuming the protocol is AF_IB */ 1583 *net_dev = NULL; 1584 } else { 1585 return ERR_CAST(*net_dev); 1586 } 1587 } 1588 1589 bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net, 1590 rdma_ps_from_service_id(req.service_id), 1591 cma_port_from_service_id(req.service_id)); 1592 id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); 1593 if (IS_ERR(id_priv) && *net_dev) { 1594 dev_put(*net_dev); 1595 *net_dev = NULL; 1596 } 1597 1598 return id_priv; 1599 } 1600 1601 static inline int cma_user_data_offset(struct rdma_id_private *id_priv) 1602 { 1603 return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr); 1604 } 1605 1606 static void cma_cancel_route(struct rdma_id_private *id_priv) 1607 { 1608 if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) { 1609 if (id_priv->query) 1610 ib_sa_cancel_query(id_priv->query_id, id_priv->query); 1611 } 1612 } 1613 1614 static void cma_cancel_listens(struct rdma_id_private *id_priv) 1615 { 1616 struct rdma_id_private *dev_id_priv; 1617 1618 /* 1619 * Remove from listen_any_list to prevent added devices from spawning 1620 * additional listen requests. 1621 */ 1622 mutex_lock(&lock); 1623 list_del(&id_priv->list); 1624 1625 while (!list_empty(&id_priv->listen_list)) { 1626 dev_id_priv = list_entry(id_priv->listen_list.next, 1627 struct rdma_id_private, listen_list); 1628 /* sync with device removal to avoid duplicate destruction */ 1629 list_del_init(&dev_id_priv->list); 1630 list_del(&dev_id_priv->listen_list); 1631 mutex_unlock(&lock); 1632 1633 rdma_destroy_id(&dev_id_priv->id); 1634 mutex_lock(&lock); 1635 } 1636 mutex_unlock(&lock); 1637 } 1638 1639 static void cma_cancel_operation(struct rdma_id_private *id_priv, 1640 enum rdma_cm_state state) 1641 { 1642 switch (state) { 1643 case RDMA_CM_ADDR_QUERY: 1644 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); 1645 break; 1646 case RDMA_CM_ROUTE_QUERY: 1647 cma_cancel_route(id_priv); 1648 break; 1649 case RDMA_CM_LISTEN: 1650 if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) 1651 cma_cancel_listens(id_priv); 1652 break; 1653 default: 1654 break; 1655 } 1656 } 1657 1658 static void cma_release_port(struct rdma_id_private *id_priv) 1659 { 1660 struct rdma_bind_list *bind_list = id_priv->bind_list; 1661 struct vnet *net = id_priv->id.route.addr.dev_addr.net; 1662 1663 if (!bind_list) 1664 return; 1665 1666 mutex_lock(&lock); 1667 hlist_del(&id_priv->node); 1668 if (hlist_empty(&bind_list->owners)) { 1669 cma_ps_remove(net, bind_list->ps, bind_list->port); 1670 kfree(bind_list); 1671 } 1672 mutex_unlock(&lock); 1673 } 1674 1675 static void cma_leave_mc_groups(struct rdma_id_private *id_priv) 1676 { 1677 struct cma_multicast *mc; 1678 1679 while (!list_empty(&id_priv->mc_list)) { 1680 mc = container_of(id_priv->mc_list.next, 1681 struct cma_multicast, list); 1682 list_del(&mc->list); 1683 if (rdma_cap_ib_mcast(id_priv->cma_dev->device, 1684 id_priv->id.port_num)) { 1685 ib_sa_free_multicast(mc->multicast.ib); 1686 kfree(mc); 1687 } else { 1688 if (mc->igmp_joined) { 1689 struct rdma_dev_addr *dev_addr = 1690 &id_priv->id.route.addr.dev_addr; 1691 struct net_device *ndev = NULL; 1692 1693 if (dev_addr->bound_dev_if) 1694 ndev = dev_get_by_index(dev_addr->net, 1695 dev_addr->bound_dev_if); 1696 if (ndev) { 1697 cma_igmp_send(ndev, 1698 &mc->multicast.ib->rec.mgid, 1699 false); 1700 dev_put(ndev); 1701 } 1702 } 1703 kref_put(&mc->mcref, release_mc); 1704 } 1705 } 1706 } 1707 1708 void rdma_destroy_id(struct rdma_cm_id *id) 1709 { 1710 struct rdma_id_private *id_priv; 1711 enum rdma_cm_state state; 1712 1713 id_priv = container_of(id, struct rdma_id_private, id); 1714 state = cma_exch(id_priv, RDMA_CM_DESTROYING); 1715 cma_cancel_operation(id_priv, state); 1716 1717 /* 1718 * Wait for any active callback to finish. New callbacks will find 1719 * the id_priv state set to destroying and abort. 1720 */ 1721 mutex_lock(&id_priv->handler_mutex); 1722 mutex_unlock(&id_priv->handler_mutex); 1723 1724 if (id_priv->cma_dev) { 1725 if (rdma_cap_ib_cm(id_priv->id.device, 1)) { 1726 if (id_priv->cm_id.ib) 1727 ib_destroy_cm_id(id_priv->cm_id.ib); 1728 } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) { 1729 if (id_priv->cm_id.iw) 1730 iw_destroy_cm_id(id_priv->cm_id.iw); 1731 } 1732 cma_leave_mc_groups(id_priv); 1733 cma_release_dev(id_priv); 1734 } 1735 1736 cma_release_port(id_priv); 1737 cma_deref_id(id_priv); 1738 wait_for_completion(&id_priv->comp); 1739 1740 if (id_priv->internal_id) 1741 cma_deref_id(id_priv->id.context); 1742 1743 kfree(id_priv->id.route.path_rec); 1744 kfree(id_priv); 1745 } 1746 EXPORT_SYMBOL(rdma_destroy_id); 1747 1748 static int cma_rep_recv(struct rdma_id_private *id_priv) 1749 { 1750 int ret; 1751 1752 ret = cma_modify_qp_rtr(id_priv, NULL); 1753 if (ret) 1754 goto reject; 1755 1756 ret = cma_modify_qp_rts(id_priv, NULL); 1757 if (ret) 1758 goto reject; 1759 1760 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); 1761 if (ret) 1762 goto reject; 1763 1764 return 0; 1765 reject: 1766 cma_modify_qp_err(id_priv); 1767 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, 1768 NULL, 0, NULL, 0); 1769 return ret; 1770 } 1771 1772 static void cma_set_rep_event_data(struct rdma_cm_event *event, 1773 struct ib_cm_rep_event_param *rep_data, 1774 void *private_data) 1775 { 1776 event->param.conn.private_data = private_data; 1777 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; 1778 event->param.conn.responder_resources = rep_data->responder_resources; 1779 event->param.conn.initiator_depth = rep_data->initiator_depth; 1780 event->param.conn.flow_control = rep_data->flow_control; 1781 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; 1782 event->param.conn.srq = rep_data->srq; 1783 event->param.conn.qp_num = rep_data->remote_qpn; 1784 } 1785 1786 static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 1787 { 1788 struct rdma_id_private *id_priv = cm_id->context; 1789 struct rdma_cm_event event; 1790 int ret = 0; 1791 1792 mutex_lock(&id_priv->handler_mutex); 1793 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && 1794 id_priv->state != RDMA_CM_CONNECT) || 1795 (ib_event->event == IB_CM_TIMEWAIT_EXIT && 1796 id_priv->state != RDMA_CM_DISCONNECT)) 1797 goto out; 1798 1799 memset(&event, 0, sizeof event); 1800 switch (ib_event->event) { 1801 case IB_CM_REQ_ERROR: 1802 case IB_CM_REP_ERROR: 1803 event.event = RDMA_CM_EVENT_UNREACHABLE; 1804 event.status = -ETIMEDOUT; 1805 break; 1806 case IB_CM_REP_RECEIVED: 1807 if (id_priv->id.qp) { 1808 event.status = cma_rep_recv(id_priv); 1809 event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR : 1810 RDMA_CM_EVENT_ESTABLISHED; 1811 } else { 1812 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE; 1813 } 1814 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd, 1815 ib_event->private_data); 1816 break; 1817 case IB_CM_RTU_RECEIVED: 1818 case IB_CM_USER_ESTABLISHED: 1819 event.event = RDMA_CM_EVENT_ESTABLISHED; 1820 break; 1821 case IB_CM_DREQ_ERROR: 1822 event.status = -ETIMEDOUT; /* fall through */ 1823 case IB_CM_DREQ_RECEIVED: 1824 case IB_CM_DREP_RECEIVED: 1825 if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, 1826 RDMA_CM_DISCONNECT)) 1827 goto out; 1828 event.event = RDMA_CM_EVENT_DISCONNECTED; 1829 break; 1830 case IB_CM_TIMEWAIT_EXIT: 1831 event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT; 1832 break; 1833 case IB_CM_MRA_RECEIVED: 1834 /* ignore event */ 1835 goto out; 1836 case IB_CM_REJ_RECEIVED: 1837 cma_modify_qp_err(id_priv); 1838 event.status = ib_event->param.rej_rcvd.reason; 1839 event.event = RDMA_CM_EVENT_REJECTED; 1840 event.param.conn.private_data = ib_event->private_data; 1841 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; 1842 break; 1843 default: 1844 pr_err("RDMA CMA: unexpected IB CM event: %d\n", 1845 ib_event->event); 1846 goto out; 1847 } 1848 1849 ret = id_priv->id.event_handler(&id_priv->id, &event); 1850 if (ret) { 1851 /* Destroy the CM ID by returning a non-zero value. */ 1852 id_priv->cm_id.ib = NULL; 1853 cma_exch(id_priv, RDMA_CM_DESTROYING); 1854 mutex_unlock(&id_priv->handler_mutex); 1855 rdma_destroy_id(&id_priv->id); 1856 return ret; 1857 } 1858 out: 1859 mutex_unlock(&id_priv->handler_mutex); 1860 return ret; 1861 } 1862 1863 static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, 1864 struct ib_cm_event *ib_event, 1865 struct net_device *net_dev) 1866 { 1867 struct rdma_id_private *id_priv; 1868 struct rdma_cm_id *id; 1869 struct rdma_route *rt; 1870 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; 1871 const __be64 service_id = 1872 ib_event->param.req_rcvd.primary_path->service_id; 1873 int ret; 1874 1875 id = rdma_create_id(listen_id->route.addr.dev_addr.net, 1876 listen_id->event_handler, listen_id->context, 1877 listen_id->ps, ib_event->param.req_rcvd.qp_type); 1878 if (IS_ERR(id)) 1879 return NULL; 1880 1881 id_priv = container_of(id, struct rdma_id_private, id); 1882 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, 1883 (struct sockaddr *)&id->route.addr.dst_addr, 1884 listen_id, ib_event, ss_family, service_id)) 1885 goto err; 1886 1887 rt = &id->route; 1888 rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1; 1889 rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths, 1890 GFP_KERNEL); 1891 if (!rt->path_rec) 1892 goto err; 1893 1894 rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path; 1895 if (rt->num_paths == 2) 1896 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; 1897 1898 if (net_dev) { 1899 ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL); 1900 if (ret) 1901 goto err; 1902 } else { 1903 if (!cma_protocol_roce(listen_id) && 1904 cma_any_addr(cma_src_addr(id_priv))) { 1905 rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; 1906 rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); 1907 ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); 1908 } else if (!cma_any_addr(cma_src_addr(id_priv))) { 1909 ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); 1910 if (ret) 1911 goto err; 1912 } 1913 } 1914 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); 1915 1916 id_priv->state = RDMA_CM_CONNECT; 1917 return id_priv; 1918 1919 err: 1920 rdma_destroy_id(id); 1921 return NULL; 1922 } 1923 1924 static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id, 1925 struct ib_cm_event *ib_event, 1926 struct net_device *net_dev) 1927 { 1928 struct rdma_id_private *id_priv; 1929 struct rdma_cm_id *id; 1930 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; 1931 struct vnet *net = listen_id->route.addr.dev_addr.net; 1932 int ret; 1933 1934 id = rdma_create_id(net, listen_id->event_handler, listen_id->context, 1935 listen_id->ps, IB_QPT_UD); 1936 if (IS_ERR(id)) 1937 return NULL; 1938 1939 id_priv = container_of(id, struct rdma_id_private, id); 1940 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, 1941 (struct sockaddr *)&id->route.addr.dst_addr, 1942 listen_id, ib_event, ss_family, 1943 ib_event->param.sidr_req_rcvd.service_id)) 1944 goto err; 1945 1946 if (net_dev) { 1947 ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL); 1948 if (ret) 1949 goto err; 1950 } else { 1951 if (!cma_any_addr(cma_src_addr(id_priv))) { 1952 ret = cma_translate_addr(cma_src_addr(id_priv), 1953 &id->route.addr.dev_addr); 1954 if (ret) 1955 goto err; 1956 } 1957 } 1958 1959 id_priv->state = RDMA_CM_CONNECT; 1960 return id_priv; 1961 err: 1962 rdma_destroy_id(id); 1963 return NULL; 1964 } 1965 1966 static void cma_set_req_event_data(struct rdma_cm_event *event, 1967 struct ib_cm_req_event_param *req_data, 1968 void *private_data, int offset) 1969 { 1970 event->param.conn.private_data = (char *)private_data + offset; 1971 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset; 1972 event->param.conn.responder_resources = req_data->responder_resources; 1973 event->param.conn.initiator_depth = req_data->initiator_depth; 1974 event->param.conn.flow_control = req_data->flow_control; 1975 event->param.conn.retry_count = req_data->retry_count; 1976 event->param.conn.rnr_retry_count = req_data->rnr_retry_count; 1977 event->param.conn.srq = req_data->srq; 1978 event->param.conn.qp_num = req_data->remote_qpn; 1979 } 1980 1981 static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event) 1982 { 1983 return (((ib_event->event == IB_CM_REQ_RECEIVED) && 1984 (ib_event->param.req_rcvd.qp_type == id->qp_type)) || 1985 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && 1986 (id->qp_type == IB_QPT_UD)) || 1987 (!id->qp_type)); 1988 } 1989 1990 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 1991 { 1992 struct rdma_id_private *listen_id, *conn_id = NULL; 1993 struct rdma_cm_event event; 1994 struct net_device *net_dev; 1995 int offset, ret; 1996 1997 listen_id = cma_id_from_event(cm_id, ib_event, &net_dev); 1998 if (IS_ERR(listen_id)) 1999 return PTR_ERR(listen_id); 2000 2001 if (!cma_check_req_qp_type(&listen_id->id, ib_event)) { 2002 ret = -EINVAL; 2003 goto net_dev_put; 2004 } 2005 2006 mutex_lock(&listen_id->handler_mutex); 2007 if (listen_id->state != RDMA_CM_LISTEN) { 2008 ret = -ECONNABORTED; 2009 goto err1; 2010 } 2011 2012 memset(&event, 0, sizeof event); 2013 offset = cma_user_data_offset(listen_id); 2014 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 2015 if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) { 2016 conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev); 2017 event.param.ud.private_data = (char *)ib_event->private_data + offset; 2018 event.param.ud.private_data_len = 2019 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset; 2020 } else { 2021 conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev); 2022 cma_set_req_event_data(&event, &ib_event->param.req_rcvd, 2023 ib_event->private_data, offset); 2024 } 2025 if (!conn_id) { 2026 ret = -ENOMEM; 2027 goto err1; 2028 } 2029 2030 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 2031 ret = cma_acquire_dev(conn_id, listen_id); 2032 if (ret) 2033 goto err2; 2034 2035 conn_id->cm_id.ib = cm_id; 2036 cm_id->context = conn_id; 2037 cm_id->cm_handler = cma_ib_handler; 2038 2039 /* 2040 * Protect against the user destroying conn_id from another thread 2041 * until we're done accessing it. 2042 */ 2043 atomic_inc(&conn_id->refcount); 2044 ret = conn_id->id.event_handler(&conn_id->id, &event); 2045 if (ret) 2046 goto err3; 2047 /* 2048 * Acquire mutex to prevent user executing rdma_destroy_id() 2049 * while we're accessing the cm_id. 2050 */ 2051 mutex_lock(&lock); 2052 if (cma_comp(conn_id, RDMA_CM_CONNECT) && 2053 (conn_id->id.qp_type != IB_QPT_UD)) 2054 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 2055 mutex_unlock(&lock); 2056 mutex_unlock(&conn_id->handler_mutex); 2057 mutex_unlock(&listen_id->handler_mutex); 2058 cma_deref_id(conn_id); 2059 if (net_dev) 2060 dev_put(net_dev); 2061 return 0; 2062 2063 err3: 2064 cma_deref_id(conn_id); 2065 /* Destroy the CM ID by returning a non-zero value. */ 2066 conn_id->cm_id.ib = NULL; 2067 err2: 2068 cma_exch(conn_id, RDMA_CM_DESTROYING); 2069 mutex_unlock(&conn_id->handler_mutex); 2070 err1: 2071 mutex_unlock(&listen_id->handler_mutex); 2072 if (conn_id) 2073 rdma_destroy_id(&conn_id->id); 2074 2075 net_dev_put: 2076 if (net_dev) 2077 dev_put(net_dev); 2078 2079 return ret; 2080 } 2081 2082 __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr) 2083 { 2084 if (addr->sa_family == AF_IB) 2085 return ((struct sockaddr_ib *) addr)->sib_sid; 2086 2087 return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr))); 2088 } 2089 EXPORT_SYMBOL(rdma_get_service_id); 2090 2091 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) 2092 { 2093 struct rdma_id_private *id_priv = iw_id->context; 2094 struct rdma_cm_event event; 2095 int ret = 0; 2096 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 2097 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 2098 2099 mutex_lock(&id_priv->handler_mutex); 2100 if (id_priv->state != RDMA_CM_CONNECT) 2101 goto out; 2102 2103 memset(&event, 0, sizeof event); 2104 switch (iw_event->event) { 2105 case IW_CM_EVENT_CLOSE: 2106 event.event = RDMA_CM_EVENT_DISCONNECTED; 2107 break; 2108 case IW_CM_EVENT_CONNECT_REPLY: 2109 memcpy(cma_src_addr(id_priv), laddr, 2110 rdma_addr_size(laddr)); 2111 memcpy(cma_dst_addr(id_priv), raddr, 2112 rdma_addr_size(raddr)); 2113 switch (iw_event->status) { 2114 case 0: 2115 event.event = RDMA_CM_EVENT_ESTABLISHED; 2116 event.param.conn.initiator_depth = iw_event->ird; 2117 event.param.conn.responder_resources = iw_event->ord; 2118 break; 2119 case -ECONNRESET: 2120 case -ECONNREFUSED: 2121 event.event = RDMA_CM_EVENT_REJECTED; 2122 break; 2123 case -ETIMEDOUT: 2124 event.event = RDMA_CM_EVENT_UNREACHABLE; 2125 break; 2126 default: 2127 event.event = RDMA_CM_EVENT_CONNECT_ERROR; 2128 break; 2129 } 2130 break; 2131 case IW_CM_EVENT_ESTABLISHED: 2132 event.event = RDMA_CM_EVENT_ESTABLISHED; 2133 event.param.conn.initiator_depth = iw_event->ird; 2134 event.param.conn.responder_resources = iw_event->ord; 2135 break; 2136 default: 2137 BUG_ON(1); 2138 } 2139 2140 event.status = iw_event->status; 2141 event.param.conn.private_data = iw_event->private_data; 2142 event.param.conn.private_data_len = iw_event->private_data_len; 2143 ret = id_priv->id.event_handler(&id_priv->id, &event); 2144 if (ret) { 2145 /* Destroy the CM ID by returning a non-zero value. */ 2146 id_priv->cm_id.iw = NULL; 2147 cma_exch(id_priv, RDMA_CM_DESTROYING); 2148 mutex_unlock(&id_priv->handler_mutex); 2149 rdma_destroy_id(&id_priv->id); 2150 return ret; 2151 } 2152 2153 out: 2154 mutex_unlock(&id_priv->handler_mutex); 2155 return ret; 2156 } 2157 2158 static int iw_conn_req_handler(struct iw_cm_id *cm_id, 2159 struct iw_cm_event *iw_event) 2160 { 2161 struct rdma_cm_id *new_cm_id; 2162 struct rdma_id_private *listen_id, *conn_id; 2163 struct rdma_cm_event event; 2164 int ret = -ECONNABORTED; 2165 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 2166 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 2167 2168 listen_id = cm_id->context; 2169 2170 mutex_lock(&listen_id->handler_mutex); 2171 if (listen_id->state != RDMA_CM_LISTEN) 2172 goto out; 2173 2174 /* Create a new RDMA id for the new IW CM ID */ 2175 new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net, 2176 listen_id->id.event_handler, 2177 listen_id->id.context, 2178 RDMA_PS_TCP, IB_QPT_RC); 2179 if (IS_ERR(new_cm_id)) { 2180 ret = -ENOMEM; 2181 goto out; 2182 } 2183 conn_id = container_of(new_cm_id, struct rdma_id_private, id); 2184 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 2185 conn_id->state = RDMA_CM_CONNECT; 2186 2187 ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr); 2188 if (ret) { 2189 mutex_unlock(&conn_id->handler_mutex); 2190 rdma_destroy_id(new_cm_id); 2191 goto out; 2192 } 2193 2194 ret = cma_acquire_dev(conn_id, listen_id); 2195 if (ret) { 2196 mutex_unlock(&conn_id->handler_mutex); 2197 rdma_destroy_id(new_cm_id); 2198 goto out; 2199 } 2200 2201 conn_id->cm_id.iw = cm_id; 2202 cm_id->context = conn_id; 2203 cm_id->cm_handler = cma_iw_handler; 2204 2205 memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr)); 2206 memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr)); 2207 2208 memset(&event, 0, sizeof event); 2209 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 2210 event.param.conn.private_data = iw_event->private_data; 2211 event.param.conn.private_data_len = iw_event->private_data_len; 2212 event.param.conn.initiator_depth = iw_event->ird; 2213 event.param.conn.responder_resources = iw_event->ord; 2214 2215 /* 2216 * Protect against the user destroying conn_id from another thread 2217 * until we're done accessing it. 2218 */ 2219 atomic_inc(&conn_id->refcount); 2220 ret = conn_id->id.event_handler(&conn_id->id, &event); 2221 if (ret) { 2222 /* User wants to destroy the CM ID */ 2223 conn_id->cm_id.iw = NULL; 2224 cma_exch(conn_id, RDMA_CM_DESTROYING); 2225 mutex_unlock(&conn_id->handler_mutex); 2226 cma_deref_id(conn_id); 2227 rdma_destroy_id(&conn_id->id); 2228 goto out; 2229 } 2230 2231 mutex_unlock(&conn_id->handler_mutex); 2232 cma_deref_id(conn_id); 2233 2234 out: 2235 mutex_unlock(&listen_id->handler_mutex); 2236 return ret; 2237 } 2238 2239 static int cma_ib_listen(struct rdma_id_private *id_priv) 2240 { 2241 struct sockaddr *addr; 2242 struct ib_cm_id *id; 2243 __be64 svc_id; 2244 2245 addr = cma_src_addr(id_priv); 2246 svc_id = rdma_get_service_id(&id_priv->id, addr); 2247 id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id); 2248 if (IS_ERR(id)) 2249 return PTR_ERR(id); 2250 id_priv->cm_id.ib = id; 2251 2252 return 0; 2253 } 2254 2255 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) 2256 { 2257 int ret; 2258 struct iw_cm_id *id; 2259 2260 id = iw_create_cm_id(id_priv->id.device, 2261 iw_conn_req_handler, 2262 id_priv); 2263 if (IS_ERR(id)) 2264 return PTR_ERR(id); 2265 2266 id->tos = id_priv->tos; 2267 id_priv->cm_id.iw = id; 2268 2269 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), 2270 rdma_addr_size(cma_src_addr(id_priv))); 2271 2272 ret = iw_cm_listen(id_priv->cm_id.iw, backlog); 2273 2274 if (ret) { 2275 iw_destroy_cm_id(id_priv->cm_id.iw); 2276 id_priv->cm_id.iw = NULL; 2277 } 2278 2279 return ret; 2280 } 2281 2282 static int cma_listen_handler(struct rdma_cm_id *id, 2283 struct rdma_cm_event *event) 2284 { 2285 struct rdma_id_private *id_priv = id->context; 2286 2287 id->context = id_priv->id.context; 2288 id->event_handler = id_priv->id.event_handler; 2289 return id_priv->id.event_handler(id, event); 2290 } 2291 2292 static void cma_listen_on_dev(struct rdma_id_private *id_priv, 2293 struct cma_device *cma_dev) 2294 { 2295 struct rdma_id_private *dev_id_priv; 2296 struct rdma_cm_id *id; 2297 struct vnet *net = id_priv->id.route.addr.dev_addr.net; 2298 int ret; 2299 2300 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) 2301 return; 2302 2303 id = rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps, 2304 id_priv->id.qp_type); 2305 if (IS_ERR(id)) 2306 return; 2307 2308 dev_id_priv = container_of(id, struct rdma_id_private, id); 2309 2310 dev_id_priv->state = RDMA_CM_ADDR_BOUND; 2311 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), 2312 rdma_addr_size(cma_src_addr(id_priv))); 2313 2314 _cma_attach_to_dev(dev_id_priv, cma_dev); 2315 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); 2316 atomic_inc(&id_priv->refcount); 2317 dev_id_priv->internal_id = 1; 2318 dev_id_priv->afonly = id_priv->afonly; 2319 2320 ret = rdma_listen(id, id_priv->backlog); 2321 if (ret) 2322 pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n", 2323 ret, cma_dev->device->name); 2324 } 2325 2326 static void cma_listen_on_all(struct rdma_id_private *id_priv) 2327 { 2328 struct cma_device *cma_dev; 2329 2330 mutex_lock(&lock); 2331 list_add_tail(&id_priv->list, &listen_any_list); 2332 list_for_each_entry(cma_dev, &dev_list, list) 2333 cma_listen_on_dev(id_priv, cma_dev); 2334 mutex_unlock(&lock); 2335 } 2336 2337 void rdma_set_service_type(struct rdma_cm_id *id, int tos) 2338 { 2339 struct rdma_id_private *id_priv; 2340 2341 id_priv = container_of(id, struct rdma_id_private, id); 2342 id_priv->tos = (u8) tos; 2343 } 2344 EXPORT_SYMBOL(rdma_set_service_type); 2345 2346 static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec, 2347 void *context) 2348 { 2349 struct cma_work *work = context; 2350 struct rdma_route *route; 2351 2352 route = &work->id->id.route; 2353 2354 if (!status) { 2355 route->num_paths = 1; 2356 *route->path_rec = *path_rec; 2357 } else { 2358 work->old_state = RDMA_CM_ROUTE_QUERY; 2359 work->new_state = RDMA_CM_ADDR_RESOLVED; 2360 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; 2361 work->event.status = status; 2362 } 2363 2364 queue_work(cma_wq, &work->work); 2365 } 2366 2367 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, 2368 struct cma_work *work) 2369 { 2370 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 2371 struct ib_sa_path_rec path_rec; 2372 ib_sa_comp_mask comp_mask; 2373 struct sockaddr_in6 *sin6; 2374 struct sockaddr_ib *sib; 2375 2376 memset(&path_rec, 0, sizeof path_rec); 2377 rdma_addr_get_sgid(dev_addr, &path_rec.sgid); 2378 rdma_addr_get_dgid(dev_addr, &path_rec.dgid); 2379 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 2380 path_rec.numb_path = 1; 2381 path_rec.reversible = 1; 2382 path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 2383 2384 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | 2385 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | 2386 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID; 2387 2388 switch (cma_family(id_priv)) { 2389 case AF_INET: 2390 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); 2391 comp_mask |= IB_SA_PATH_REC_QOS_CLASS; 2392 break; 2393 case AF_INET6: 2394 sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); 2395 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20); 2396 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; 2397 break; 2398 case AF_IB: 2399 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); 2400 path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20); 2401 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; 2402 break; 2403 } 2404 2405 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, 2406 id_priv->id.port_num, &path_rec, 2407 comp_mask, timeout_ms, 2408 GFP_KERNEL, cma_query_handler, 2409 work, &id_priv->query); 2410 2411 return (id_priv->query_id < 0) ? id_priv->query_id : 0; 2412 } 2413 2414 static void cma_work_handler(struct work_struct *_work) 2415 { 2416 struct cma_work *work = container_of(_work, struct cma_work, work); 2417 struct rdma_id_private *id_priv = work->id; 2418 int destroy = 0; 2419 2420 mutex_lock(&id_priv->handler_mutex); 2421 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) 2422 goto out; 2423 2424 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 2425 cma_exch(id_priv, RDMA_CM_DESTROYING); 2426 destroy = 1; 2427 } 2428 out: 2429 mutex_unlock(&id_priv->handler_mutex); 2430 cma_deref_id(id_priv); 2431 if (destroy) 2432 rdma_destroy_id(&id_priv->id); 2433 kfree(work); 2434 } 2435 2436 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) 2437 { 2438 struct rdma_route *route = &id_priv->id.route; 2439 struct cma_work *work; 2440 int ret; 2441 2442 work = kzalloc(sizeof *work, GFP_KERNEL); 2443 if (!work) 2444 return -ENOMEM; 2445 2446 work->id = id_priv; 2447 INIT_WORK(&work->work, cma_work_handler); 2448 work->old_state = RDMA_CM_ROUTE_QUERY; 2449 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2450 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2451 2452 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); 2453 if (!route->path_rec) { 2454 ret = -ENOMEM; 2455 goto err1; 2456 } 2457 2458 ret = cma_query_ib_route(id_priv, timeout_ms, work); 2459 if (ret) 2460 goto err2; 2461 2462 return 0; 2463 err2: 2464 kfree(route->path_rec); 2465 route->path_rec = NULL; 2466 err1: 2467 kfree(work); 2468 return ret; 2469 } 2470 2471 int rdma_set_ib_paths(struct rdma_cm_id *id, 2472 struct ib_sa_path_rec *path_rec, int num_paths) 2473 { 2474 struct rdma_id_private *id_priv; 2475 int ret; 2476 2477 id_priv = container_of(id, struct rdma_id_private, id); 2478 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 2479 RDMA_CM_ROUTE_RESOLVED)) 2480 return -EINVAL; 2481 2482 id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths, 2483 GFP_KERNEL); 2484 if (!id->route.path_rec) { 2485 ret = -ENOMEM; 2486 goto err; 2487 } 2488 2489 id->route.num_paths = num_paths; 2490 return 0; 2491 err: 2492 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); 2493 return ret; 2494 } 2495 EXPORT_SYMBOL(rdma_set_ib_paths); 2496 2497 static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) 2498 { 2499 struct cma_work *work; 2500 2501 work = kzalloc(sizeof *work, GFP_KERNEL); 2502 if (!work) 2503 return -ENOMEM; 2504 2505 work->id = id_priv; 2506 INIT_WORK(&work->work, cma_work_handler); 2507 work->old_state = RDMA_CM_ROUTE_QUERY; 2508 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2509 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2510 queue_work(cma_wq, &work->work); 2511 return 0; 2512 } 2513 2514 static int iboe_tos_to_sl(struct net_device *ndev, int tos) 2515 { 2516 /* get service level, SL, from IPv4 type of service, TOS */ 2517 int sl = (tos >> 5) & 0x7; 2518 2519 /* final mappings are done by the vendor specific drivers */ 2520 return sl; 2521 } 2522 2523 static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type, 2524 unsigned long supported_gids, 2525 enum ib_gid_type default_gid) 2526 { 2527 if ((network_type == RDMA_NETWORK_IPV4 || 2528 network_type == RDMA_NETWORK_IPV6) && 2529 test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) 2530 return IB_GID_TYPE_ROCE_UDP_ENCAP; 2531 2532 return default_gid; 2533 } 2534 2535 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) 2536 { 2537 struct rdma_route *route = &id_priv->id.route; 2538 struct rdma_addr *addr = &route->addr; 2539 struct cma_work *work; 2540 int ret; 2541 struct net_device *ndev = NULL; 2542 2543 2544 work = kzalloc(sizeof *work, GFP_KERNEL); 2545 if (!work) 2546 return -ENOMEM; 2547 2548 work->id = id_priv; 2549 INIT_WORK(&work->work, cma_work_handler); 2550 2551 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL); 2552 if (!route->path_rec) { 2553 ret = -ENOMEM; 2554 goto err1; 2555 } 2556 2557 route->num_paths = 1; 2558 2559 if (addr->dev_addr.bound_dev_if) { 2560 unsigned long supported_gids; 2561 2562 ndev = dev_get_by_index(addr->dev_addr.net, 2563 addr->dev_addr.bound_dev_if); 2564 if (!ndev) { 2565 ret = -ENODEV; 2566 goto err2; 2567 } 2568 2569 route->path_rec->net = ndev->if_vnet; 2570 route->path_rec->ifindex = ndev->if_index; 2571 supported_gids = roce_gid_type_mask_support(id_priv->id.device, 2572 id_priv->id.port_num); 2573 route->path_rec->gid_type = 2574 cma_route_gid_type(addr->dev_addr.network, 2575 supported_gids, 2576 id_priv->gid_type); 2577 } 2578 if (!ndev) { 2579 ret = -ENODEV; 2580 goto err2; 2581 } 2582 2583 memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN); 2584 2585 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 2586 &route->path_rec->sgid); 2587 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, 2588 &route->path_rec->dgid); 2589 2590 /* Use the hint from IP Stack to select GID Type */ 2591 if (route->path_rec->gid_type < ib_network_to_gid_type(addr->dev_addr.network)) 2592 route->path_rec->gid_type = ib_network_to_gid_type(addr->dev_addr.network); 2593 if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB) 2594 /* TODO: get the hoplimit from the inet/inet6 device */ 2595 route->path_rec->hop_limit = addr->dev_addr.hoplimit; 2596 else 2597 route->path_rec->hop_limit = 1; 2598 route->path_rec->reversible = 1; 2599 route->path_rec->pkey = cpu_to_be16(0xffff); 2600 route->path_rec->mtu_selector = IB_SA_EQ; 2601 route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos); 2602 route->path_rec->traffic_class = id_priv->tos; 2603 route->path_rec->mtu = iboe_get_mtu(ndev->if_mtu); 2604 route->path_rec->rate_selector = IB_SA_EQ; 2605 route->path_rec->rate = iboe_get_rate(ndev); 2606 dev_put(ndev); 2607 route->path_rec->packet_life_time_selector = IB_SA_EQ; 2608 route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME; 2609 if (!route->path_rec->mtu) { 2610 ret = -EINVAL; 2611 goto err2; 2612 } 2613 2614 work->old_state = RDMA_CM_ROUTE_QUERY; 2615 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2616 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2617 work->event.status = 0; 2618 2619 queue_work(cma_wq, &work->work); 2620 2621 return 0; 2622 2623 err2: 2624 kfree(route->path_rec); 2625 route->path_rec = NULL; 2626 err1: 2627 kfree(work); 2628 return ret; 2629 } 2630 2631 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) 2632 { 2633 struct rdma_id_private *id_priv; 2634 int ret; 2635 2636 id_priv = container_of(id, struct rdma_id_private, id); 2637 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) 2638 return -EINVAL; 2639 2640 atomic_inc(&id_priv->refcount); 2641 if (rdma_cap_ib_sa(id->device, id->port_num)) 2642 ret = cma_resolve_ib_route(id_priv, timeout_ms); 2643 else if (rdma_protocol_roce(id->device, id->port_num)) 2644 ret = cma_resolve_iboe_route(id_priv); 2645 else if (rdma_protocol_iwarp(id->device, id->port_num)) 2646 ret = cma_resolve_iw_route(id_priv, timeout_ms); 2647 else 2648 ret = -ENOSYS; 2649 2650 if (ret) 2651 goto err; 2652 2653 return 0; 2654 err: 2655 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); 2656 cma_deref_id(id_priv); 2657 return ret; 2658 } 2659 EXPORT_SYMBOL(rdma_resolve_route); 2660 2661 static void cma_set_loopback(struct sockaddr *addr) 2662 { 2663 switch (addr->sa_family) { 2664 case AF_INET: 2665 ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK); 2666 break; 2667 case AF_INET6: 2668 ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr, 2669 0, 0, 0, htonl(1)); 2670 break; 2671 default: 2672 ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr, 2673 0, 0, 0, htonl(1)); 2674 break; 2675 } 2676 } 2677 2678 static int cma_bind_loopback(struct rdma_id_private *id_priv) 2679 { 2680 struct cma_device *cma_dev, *cur_dev; 2681 struct ib_port_attr port_attr; 2682 union ib_gid gid; 2683 u16 pkey; 2684 int ret; 2685 u8 p; 2686 2687 cma_dev = NULL; 2688 mutex_lock(&lock); 2689 list_for_each_entry(cur_dev, &dev_list, list) { 2690 if (cma_family(id_priv) == AF_IB && 2691 !rdma_cap_ib_cm(cur_dev->device, 1)) 2692 continue; 2693 2694 if (!cma_dev) 2695 cma_dev = cur_dev; 2696 2697 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { 2698 if (!ib_query_port(cur_dev->device, p, &port_attr) && 2699 port_attr.state == IB_PORT_ACTIVE) { 2700 cma_dev = cur_dev; 2701 goto port_found; 2702 } 2703 } 2704 } 2705 2706 if (!cma_dev) { 2707 ret = -ENODEV; 2708 goto out; 2709 } 2710 2711 p = 1; 2712 2713 port_found: 2714 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid, NULL); 2715 if (ret) 2716 goto out; 2717 2718 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); 2719 if (ret) 2720 goto out; 2721 2722 id_priv->id.route.addr.dev_addr.dev_type = 2723 (rdma_protocol_ib(cma_dev->device, p)) ? 2724 ARPHRD_INFINIBAND : ARPHRD_ETHER; 2725 2726 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); 2727 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); 2728 id_priv->id.port_num = p; 2729 cma_attach_to_dev(id_priv, cma_dev); 2730 cma_set_loopback(cma_src_addr(id_priv)); 2731 out: 2732 mutex_unlock(&lock); 2733 return ret; 2734 } 2735 2736 static void addr_handler(int status, struct sockaddr *src_addr, 2737 struct rdma_dev_addr *dev_addr, void *context) 2738 { 2739 struct rdma_id_private *id_priv = context; 2740 struct rdma_cm_event event; 2741 2742 memset(&event, 0, sizeof event); 2743 mutex_lock(&id_priv->handler_mutex); 2744 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, 2745 RDMA_CM_ADDR_RESOLVED)) 2746 goto out; 2747 2748 memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr)); 2749 if (!status && !id_priv->cma_dev) 2750 status = cma_acquire_dev(id_priv, NULL); 2751 2752 if (status) { 2753 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 2754 RDMA_CM_ADDR_BOUND)) 2755 goto out; 2756 event.event = RDMA_CM_EVENT_ADDR_ERROR; 2757 event.status = status; 2758 } else 2759 event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2760 2761 if (id_priv->id.event_handler(&id_priv->id, &event)) { 2762 cma_exch(id_priv, RDMA_CM_DESTROYING); 2763 mutex_unlock(&id_priv->handler_mutex); 2764 cma_deref_id(id_priv); 2765 rdma_destroy_id(&id_priv->id); 2766 return; 2767 } 2768 out: 2769 mutex_unlock(&id_priv->handler_mutex); 2770 cma_deref_id(id_priv); 2771 } 2772 2773 static int cma_resolve_loopback(struct rdma_id_private *id_priv) 2774 { 2775 struct cma_work *work; 2776 union ib_gid gid; 2777 int ret; 2778 2779 work = kzalloc(sizeof *work, GFP_KERNEL); 2780 if (!work) 2781 return -ENOMEM; 2782 2783 if (!id_priv->cma_dev) { 2784 ret = cma_bind_loopback(id_priv); 2785 if (ret) 2786 goto err; 2787 } 2788 2789 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); 2790 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); 2791 2792 work->id = id_priv; 2793 INIT_WORK(&work->work, cma_work_handler); 2794 work->old_state = RDMA_CM_ADDR_QUERY; 2795 work->new_state = RDMA_CM_ADDR_RESOLVED; 2796 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2797 queue_work(cma_wq, &work->work); 2798 return 0; 2799 err: 2800 kfree(work); 2801 return ret; 2802 } 2803 2804 static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) 2805 { 2806 struct cma_work *work; 2807 int ret; 2808 2809 work = kzalloc(sizeof *work, GFP_KERNEL); 2810 if (!work) 2811 return -ENOMEM; 2812 2813 if (!id_priv->cma_dev) { 2814 ret = cma_resolve_ib_dev(id_priv); 2815 if (ret) 2816 goto err; 2817 } 2818 2819 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) 2820 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); 2821 2822 work->id = id_priv; 2823 INIT_WORK(&work->work, cma_work_handler); 2824 work->old_state = RDMA_CM_ADDR_QUERY; 2825 work->new_state = RDMA_CM_ADDR_RESOLVED; 2826 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2827 queue_work(cma_wq, &work->work); 2828 return 0; 2829 err: 2830 kfree(work); 2831 return ret; 2832 } 2833 2834 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 2835 struct sockaddr *dst_addr) 2836 { 2837 if (!src_addr || !src_addr->sa_family) { 2838 src_addr = (struct sockaddr *) &id->route.addr.src_addr; 2839 src_addr->sa_family = dst_addr->sa_family; 2840 if (dst_addr->sa_family == AF_INET6) { 2841 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; 2842 struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; 2843 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; 2844 if (IN6_IS_SCOPE_LINKLOCAL(&dst_addr6->sin6_addr) || 2845 IN6_IS_ADDR_MC_INTFACELOCAL(&dst_addr6->sin6_addr)) 2846 id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; 2847 } else if (dst_addr->sa_family == AF_IB) { 2848 ((struct sockaddr_ib *) src_addr)->sib_pkey = 2849 ((struct sockaddr_ib *) dst_addr)->sib_pkey; 2850 } 2851 } 2852 return rdma_bind_addr(id, src_addr); 2853 } 2854 2855 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 2856 struct sockaddr *dst_addr, int timeout_ms) 2857 { 2858 struct rdma_id_private *id_priv; 2859 int ret; 2860 2861 id_priv = container_of(id, struct rdma_id_private, id); 2862 if (id_priv->state == RDMA_CM_IDLE) { 2863 ret = cma_bind_addr(id, src_addr, dst_addr); 2864 if (ret) 2865 return ret; 2866 } 2867 2868 if (cma_family(id_priv) != dst_addr->sa_family) 2869 return -EINVAL; 2870 2871 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) 2872 return -EINVAL; 2873 2874 atomic_inc(&id_priv->refcount); 2875 memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); 2876 if (cma_any_addr(dst_addr)) { 2877 ret = cma_resolve_loopback(id_priv); 2878 } else { 2879 if (dst_addr->sa_family == AF_IB) { 2880 ret = cma_resolve_ib_addr(id_priv); 2881 } else { 2882 ret = cma_check_linklocal(&id->route.addr.dev_addr, dst_addr); 2883 if (ret) 2884 goto err; 2885 2886 ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv), 2887 dst_addr, &id->route.addr.dev_addr, 2888 timeout_ms, addr_handler, id_priv); 2889 } 2890 } 2891 if (ret) 2892 goto err; 2893 2894 return 0; 2895 err: 2896 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); 2897 cma_deref_id(id_priv); 2898 return ret; 2899 } 2900 EXPORT_SYMBOL(rdma_resolve_addr); 2901 2902 int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) 2903 { 2904 struct rdma_id_private *id_priv; 2905 unsigned long flags; 2906 int ret; 2907 2908 id_priv = container_of(id, struct rdma_id_private, id); 2909 spin_lock_irqsave(&id_priv->lock, flags); 2910 if (reuse || id_priv->state == RDMA_CM_IDLE) { 2911 id_priv->reuseaddr = reuse; 2912 ret = 0; 2913 } else { 2914 ret = -EINVAL; 2915 } 2916 spin_unlock_irqrestore(&id_priv->lock, flags); 2917 return ret; 2918 } 2919 EXPORT_SYMBOL(rdma_set_reuseaddr); 2920 2921 int rdma_set_afonly(struct rdma_cm_id *id, int afonly) 2922 { 2923 struct rdma_id_private *id_priv; 2924 unsigned long flags; 2925 int ret; 2926 2927 id_priv = container_of(id, struct rdma_id_private, id); 2928 spin_lock_irqsave(&id_priv->lock, flags); 2929 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { 2930 id_priv->options |= (1 << CMA_OPTION_AFONLY); 2931 id_priv->afonly = afonly; 2932 ret = 0; 2933 } else { 2934 ret = -EINVAL; 2935 } 2936 spin_unlock_irqrestore(&id_priv->lock, flags); 2937 return ret; 2938 } 2939 EXPORT_SYMBOL(rdma_set_afonly); 2940 2941 static void cma_bind_port(struct rdma_bind_list *bind_list, 2942 struct rdma_id_private *id_priv) 2943 { 2944 struct sockaddr *addr; 2945 struct sockaddr_ib *sib; 2946 u64 sid, mask; 2947 __be16 port; 2948 2949 addr = cma_src_addr(id_priv); 2950 port = htons(bind_list->port); 2951 2952 switch (addr->sa_family) { 2953 case AF_INET: 2954 ((struct sockaddr_in *) addr)->sin_port = port; 2955 break; 2956 case AF_INET6: 2957 ((struct sockaddr_in6 *) addr)->sin6_port = port; 2958 break; 2959 case AF_IB: 2960 sib = (struct sockaddr_ib *) addr; 2961 sid = be64_to_cpu(sib->sib_sid); 2962 mask = be64_to_cpu(sib->sib_sid_mask); 2963 sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port)); 2964 sib->sib_sid_mask = cpu_to_be64(~0ULL); 2965 break; 2966 } 2967 id_priv->bind_list = bind_list; 2968 hlist_add_head(&id_priv->node, &bind_list->owners); 2969 } 2970 2971 static int cma_alloc_port(enum rdma_port_space ps, 2972 struct rdma_id_private *id_priv, unsigned short snum) 2973 { 2974 struct rdma_bind_list *bind_list; 2975 int ret; 2976 2977 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); 2978 if (!bind_list) 2979 return -ENOMEM; 2980 2981 ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, 2982 snum); 2983 if (ret < 0) 2984 goto err; 2985 2986 bind_list->ps = ps; 2987 bind_list->port = (unsigned short)ret; 2988 cma_bind_port(bind_list, id_priv); 2989 return 0; 2990 err: 2991 kfree(bind_list); 2992 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret; 2993 } 2994 2995 static int cma_alloc_any_port(enum rdma_port_space ps, 2996 struct rdma_id_private *id_priv) 2997 { 2998 static unsigned int last_used_port; 2999 int low, high, remaining; 3000 unsigned int rover; 3001 struct vnet *net = id_priv->id.route.addr.dev_addr.net; 3002 u32 rand; 3003 3004 inet_get_local_port_range(net, &low, &high); 3005 remaining = (high - low) + 1; 3006 get_random_bytes(&rand, sizeof(rand)); 3007 rover = rand % remaining + low; 3008 retry: 3009 if (last_used_port != rover && 3010 !cma_ps_find(net, ps, (unsigned short)rover)) { 3011 int ret = cma_alloc_port(ps, id_priv, rover); 3012 /* 3013 * Remember previously used port number in order to avoid 3014 * re-using same port immediately after it is closed. 3015 */ 3016 if (!ret) 3017 last_used_port = rover; 3018 if (ret != -EADDRNOTAVAIL) 3019 return ret; 3020 } 3021 if (--remaining) { 3022 rover++; 3023 if ((rover < low) || (rover > high)) 3024 rover = low; 3025 goto retry; 3026 } 3027 return -EADDRNOTAVAIL; 3028 } 3029 3030 /* 3031 * Check that the requested port is available. This is called when trying to 3032 * bind to a specific port, or when trying to listen on a bound port. In 3033 * the latter case, the provided id_priv may already be on the bind_list, but 3034 * we still need to check that it's okay to start listening. 3035 */ 3036 static int cma_check_port(struct rdma_bind_list *bind_list, 3037 struct rdma_id_private *id_priv, uint8_t reuseaddr) 3038 { 3039 struct rdma_id_private *cur_id; 3040 struct sockaddr *addr, *cur_addr; 3041 3042 addr = cma_src_addr(id_priv); 3043 hlist_for_each_entry(cur_id, &bind_list->owners, node) { 3044 if (id_priv == cur_id) 3045 continue; 3046 3047 if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr && 3048 cur_id->reuseaddr) 3049 continue; 3050 3051 cur_addr = cma_src_addr(cur_id); 3052 if (id_priv->afonly && cur_id->afonly && 3053 (addr->sa_family != cur_addr->sa_family)) 3054 continue; 3055 3056 if (cma_any_addr(addr) || cma_any_addr(cur_addr)) 3057 return -EADDRNOTAVAIL; 3058 3059 if (!cma_addr_cmp(addr, cur_addr)) 3060 return -EADDRINUSE; 3061 } 3062 return 0; 3063 } 3064 3065 static int cma_use_port(enum rdma_port_space ps, 3066 struct rdma_id_private *id_priv) 3067 { 3068 struct rdma_bind_list *bind_list; 3069 unsigned short snum; 3070 int ret; 3071 3072 snum = ntohs(cma_port(cma_src_addr(id_priv))); 3073 if (snum < IPPORT_RESERVED && 3074 priv_check(curthread, PRIV_NETINET_BINDANY) != 0) 3075 return -EACCES; 3076 3077 bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); 3078 if (!bind_list) { 3079 ret = cma_alloc_port(ps, id_priv, snum); 3080 } else { 3081 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); 3082 if (!ret) 3083 cma_bind_port(bind_list, id_priv); 3084 } 3085 return ret; 3086 } 3087 3088 static int cma_bind_listen(struct rdma_id_private *id_priv) 3089 { 3090 struct rdma_bind_list *bind_list = id_priv->bind_list; 3091 int ret = 0; 3092 3093 mutex_lock(&lock); 3094 if (bind_list->owners.first->next) 3095 ret = cma_check_port(bind_list, id_priv, 0); 3096 mutex_unlock(&lock); 3097 return ret; 3098 } 3099 3100 static enum rdma_port_space cma_select_inet_ps( 3101 struct rdma_id_private *id_priv) 3102 { 3103 switch (id_priv->id.ps) { 3104 case RDMA_PS_TCP: 3105 case RDMA_PS_UDP: 3106 case RDMA_PS_IPOIB: 3107 case RDMA_PS_IB: 3108 return id_priv->id.ps; 3109 default: 3110 3111 return 0; 3112 } 3113 } 3114 3115 static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv) 3116 { 3117 enum rdma_port_space ps = 0; 3118 struct sockaddr_ib *sib; 3119 u64 sid_ps, mask, sid; 3120 3121 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); 3122 mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK; 3123 sid = be64_to_cpu(sib->sib_sid) & mask; 3124 3125 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { 3126 sid_ps = RDMA_IB_IP_PS_IB; 3127 ps = RDMA_PS_IB; 3128 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && 3129 (sid == (RDMA_IB_IP_PS_TCP & mask))) { 3130 sid_ps = RDMA_IB_IP_PS_TCP; 3131 ps = RDMA_PS_TCP; 3132 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && 3133 (sid == (RDMA_IB_IP_PS_UDP & mask))) { 3134 sid_ps = RDMA_IB_IP_PS_UDP; 3135 ps = RDMA_PS_UDP; 3136 } 3137 3138 if (ps) { 3139 sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib))); 3140 sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK | 3141 be64_to_cpu(sib->sib_sid_mask)); 3142 } 3143 return ps; 3144 } 3145 3146 static int cma_get_port(struct rdma_id_private *id_priv) 3147 { 3148 enum rdma_port_space ps; 3149 int ret; 3150 3151 if (cma_family(id_priv) != AF_IB) 3152 ps = cma_select_inet_ps(id_priv); 3153 else 3154 ps = cma_select_ib_ps(id_priv); 3155 if (!ps) 3156 return -EPROTONOSUPPORT; 3157 3158 mutex_lock(&lock); 3159 if (cma_any_port(cma_src_addr(id_priv))) 3160 ret = cma_alloc_any_port(ps, id_priv); 3161 else 3162 ret = cma_use_port(ps, id_priv); 3163 mutex_unlock(&lock); 3164 3165 return ret; 3166 } 3167 3168 static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, 3169 struct sockaddr *addr) 3170 { 3171 #ifdef INET6 3172 struct sockaddr_in6 sin6; 3173 3174 if (addr->sa_family != AF_INET6) 3175 return 0; 3176 3177 sin6 = *(struct sockaddr_in6 *)addr; 3178 3179 if (IN6_IS_SCOPE_LINKLOCAL(&sin6.sin6_addr) || 3180 IN6_IS_ADDR_MC_INTFACELOCAL(&sin6.sin6_addr)) { 3181 bool failure; 3182 3183 CURVNET_SET_QUIET(dev_addr->net); 3184 failure = sa6_recoverscope(&sin6) || sin6.sin6_scope_id == 0; 3185 CURVNET_RESTORE(); 3186 3187 /* check if IPv6 scope ID is not set */ 3188 if (failure) 3189 return -EINVAL; 3190 dev_addr->bound_dev_if = sin6.sin6_scope_id; 3191 } 3192 #endif 3193 return 0; 3194 } 3195 3196 int rdma_listen(struct rdma_cm_id *id, int backlog) 3197 { 3198 struct rdma_id_private *id_priv; 3199 int ret; 3200 3201 id_priv = container_of(id, struct rdma_id_private, id); 3202 if (id_priv->state == RDMA_CM_IDLE) { 3203 id->route.addr.src_addr.ss_family = AF_INET; 3204 ret = rdma_bind_addr(id, cma_src_addr(id_priv)); 3205 if (ret) 3206 return ret; 3207 } 3208 3209 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) 3210 return -EINVAL; 3211 3212 if (id_priv->reuseaddr) { 3213 ret = cma_bind_listen(id_priv); 3214 if (ret) 3215 goto err; 3216 } 3217 3218 id_priv->backlog = backlog; 3219 if (id->device) { 3220 if (rdma_cap_ib_cm(id->device, 1)) { 3221 ret = cma_ib_listen(id_priv); 3222 if (ret) 3223 goto err; 3224 } else if (rdma_cap_iw_cm(id->device, 1)) { 3225 ret = cma_iw_listen(id_priv, backlog); 3226 if (ret) 3227 goto err; 3228 } else { 3229 ret = -ENOSYS; 3230 goto err; 3231 } 3232 } else 3233 cma_listen_on_all(id_priv); 3234 3235 return 0; 3236 err: 3237 id_priv->backlog = 0; 3238 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); 3239 return ret; 3240 } 3241 EXPORT_SYMBOL(rdma_listen); 3242 3243 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) 3244 { 3245 struct rdma_id_private *id_priv; 3246 int ret; 3247 3248 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 && 3249 addr->sa_family != AF_IB) 3250 return -EAFNOSUPPORT; 3251 3252 id_priv = container_of(id, struct rdma_id_private, id); 3253 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) 3254 return -EINVAL; 3255 3256 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); 3257 if (ret) 3258 goto err1; 3259 3260 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); 3261 if (!cma_any_addr(addr)) { 3262 ret = cma_translate_addr(addr, &id->route.addr.dev_addr); 3263 if (ret) 3264 goto err1; 3265 3266 ret = cma_acquire_dev(id_priv, NULL); 3267 if (ret) 3268 goto err1; 3269 } 3270 3271 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { 3272 if (addr->sa_family == AF_INET) 3273 id_priv->afonly = 1; 3274 #ifdef INET6 3275 else if (addr->sa_family == AF_INET6) { 3276 CURVNET_SET_QUIET(id_priv->id.route.addr.dev_addr.net); 3277 id_priv->afonly = V_ip6_v6only; 3278 CURVNET_RESTORE(); 3279 } 3280 #endif 3281 } 3282 ret = cma_get_port(id_priv); 3283 if (ret) 3284 goto err2; 3285 3286 return 0; 3287 err2: 3288 if (id_priv->cma_dev) 3289 cma_release_dev(id_priv); 3290 err1: 3291 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); 3292 return ret; 3293 } 3294 EXPORT_SYMBOL(rdma_bind_addr); 3295 3296 static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) 3297 { 3298 struct cma_hdr *cma_hdr; 3299 3300 cma_hdr = hdr; 3301 cma_hdr->cma_version = CMA_VERSION; 3302 if (cma_family(id_priv) == AF_INET) { 3303 struct sockaddr_in *src4, *dst4; 3304 3305 src4 = (struct sockaddr_in *) cma_src_addr(id_priv); 3306 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv); 3307 3308 cma_set_ip_ver(cma_hdr, 4); 3309 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 3310 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 3311 cma_hdr->port = src4->sin_port; 3312 } else if (cma_family(id_priv) == AF_INET6) { 3313 struct sockaddr_in6 *src6, *dst6; 3314 3315 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); 3316 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); 3317 3318 cma_set_ip_ver(cma_hdr, 6); 3319 cma_hdr->src_addr.ip6 = src6->sin6_addr; 3320 cma_hdr->dst_addr.ip6 = dst6->sin6_addr; 3321 cma_hdr->port = src6->sin6_port; 3322 cma_ip6_clear_scope_id(&cma_hdr->src_addr.ip6); 3323 cma_ip6_clear_scope_id(&cma_hdr->dst_addr.ip6); 3324 } 3325 return 0; 3326 } 3327 3328 static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, 3329 struct ib_cm_event *ib_event) 3330 { 3331 struct rdma_id_private *id_priv = cm_id->context; 3332 struct rdma_cm_event event; 3333 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; 3334 int ret = 0; 3335 3336 mutex_lock(&id_priv->handler_mutex); 3337 if (id_priv->state != RDMA_CM_CONNECT) 3338 goto out; 3339 3340 memset(&event, 0, sizeof event); 3341 switch (ib_event->event) { 3342 case IB_CM_SIDR_REQ_ERROR: 3343 event.event = RDMA_CM_EVENT_UNREACHABLE; 3344 event.status = -ETIMEDOUT; 3345 break; 3346 case IB_CM_SIDR_REP_RECEIVED: 3347 event.param.ud.private_data = ib_event->private_data; 3348 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; 3349 if (rep->status != IB_SIDR_SUCCESS) { 3350 event.event = RDMA_CM_EVENT_UNREACHABLE; 3351 event.status = ib_event->param.sidr_rep_rcvd.status; 3352 break; 3353 } 3354 ret = cma_set_qkey(id_priv, rep->qkey); 3355 if (ret) { 3356 event.event = RDMA_CM_EVENT_ADDR_ERROR; 3357 event.status = ret; 3358 break; 3359 } 3360 ret = ib_init_ah_from_path(id_priv->id.device, 3361 id_priv->id.port_num, 3362 id_priv->id.route.path_rec, 3363 &event.param.ud.ah_attr); 3364 if (ret) { 3365 event.event = RDMA_CM_EVENT_ADDR_ERROR; 3366 event.status = ret; 3367 break; 3368 } 3369 event.param.ud.qp_num = rep->qpn; 3370 event.param.ud.qkey = rep->qkey; 3371 event.event = RDMA_CM_EVENT_ESTABLISHED; 3372 event.status = 0; 3373 break; 3374 default: 3375 pr_err("RDMA CMA: unexpected IB CM event: %d\n", 3376 ib_event->event); 3377 goto out; 3378 } 3379 3380 ret = id_priv->id.event_handler(&id_priv->id, &event); 3381 if (ret) { 3382 /* Destroy the CM ID by returning a non-zero value. */ 3383 id_priv->cm_id.ib = NULL; 3384 cma_exch(id_priv, RDMA_CM_DESTROYING); 3385 mutex_unlock(&id_priv->handler_mutex); 3386 rdma_destroy_id(&id_priv->id); 3387 return ret; 3388 } 3389 out: 3390 mutex_unlock(&id_priv->handler_mutex); 3391 return ret; 3392 } 3393 3394 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, 3395 struct rdma_conn_param *conn_param) 3396 { 3397 struct ib_cm_sidr_req_param req; 3398 struct ib_cm_id *id; 3399 void *private_data; 3400 int offset, ret; 3401 3402 memset(&req, 0, sizeof req); 3403 offset = cma_user_data_offset(id_priv); 3404 req.private_data_len = offset + conn_param->private_data_len; 3405 if (req.private_data_len < conn_param->private_data_len) 3406 return -EINVAL; 3407 3408 if (req.private_data_len) { 3409 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 3410 if (!private_data) 3411 return -ENOMEM; 3412 } else { 3413 private_data = NULL; 3414 } 3415 3416 if (conn_param->private_data && conn_param->private_data_len) 3417 memcpy((char *)private_data + offset, conn_param->private_data, 3418 conn_param->private_data_len); 3419 3420 if (private_data) { 3421 ret = cma_format_hdr(private_data, id_priv); 3422 if (ret) 3423 goto out; 3424 req.private_data = private_data; 3425 } 3426 3427 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, 3428 id_priv); 3429 if (IS_ERR(id)) { 3430 ret = PTR_ERR(id); 3431 goto out; 3432 } 3433 id_priv->cm_id.ib = id; 3434 3435 req.path = id_priv->id.route.path_rec; 3436 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 3437 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); 3438 req.max_cm_retries = CMA_MAX_CM_RETRIES; 3439 3440 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); 3441 if (ret) { 3442 ib_destroy_cm_id(id_priv->cm_id.ib); 3443 id_priv->cm_id.ib = NULL; 3444 } 3445 out: 3446 kfree(private_data); 3447 return ret; 3448 } 3449 3450 static int cma_connect_ib(struct rdma_id_private *id_priv, 3451 struct rdma_conn_param *conn_param) 3452 { 3453 struct ib_cm_req_param req; 3454 struct rdma_route *route; 3455 void *private_data; 3456 struct ib_cm_id *id; 3457 int offset, ret; 3458 3459 memset(&req, 0, sizeof req); 3460 offset = cma_user_data_offset(id_priv); 3461 req.private_data_len = offset + conn_param->private_data_len; 3462 if (req.private_data_len < conn_param->private_data_len) 3463 return -EINVAL; 3464 3465 if (req.private_data_len) { 3466 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 3467 if (!private_data) 3468 return -ENOMEM; 3469 } else { 3470 private_data = NULL; 3471 } 3472 3473 if (conn_param->private_data && conn_param->private_data_len) 3474 memcpy((char *)private_data + offset, conn_param->private_data, 3475 conn_param->private_data_len); 3476 3477 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv); 3478 if (IS_ERR(id)) { 3479 ret = PTR_ERR(id); 3480 goto out; 3481 } 3482 id_priv->cm_id.ib = id; 3483 3484 route = &id_priv->id.route; 3485 if (private_data) { 3486 ret = cma_format_hdr(private_data, id_priv); 3487 if (ret) 3488 goto out; 3489 req.private_data = private_data; 3490 } 3491 3492 req.primary_path = &route->path_rec[0]; 3493 if (route->num_paths == 2) 3494 req.alternate_path = &route->path_rec[1]; 3495 3496 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 3497 req.qp_num = id_priv->qp_num; 3498 req.qp_type = id_priv->id.qp_type; 3499 req.starting_psn = id_priv->seq_num; 3500 req.responder_resources = conn_param->responder_resources; 3501 req.initiator_depth = conn_param->initiator_depth; 3502 req.flow_control = conn_param->flow_control; 3503 req.retry_count = min_t(u8, 7, conn_param->retry_count); 3504 req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 3505 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 3506 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 3507 req.max_cm_retries = CMA_MAX_CM_RETRIES; 3508 req.srq = id_priv->srq ? 1 : 0; 3509 3510 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); 3511 out: 3512 if (ret && !IS_ERR(id)) { 3513 ib_destroy_cm_id(id); 3514 id_priv->cm_id.ib = NULL; 3515 } 3516 3517 kfree(private_data); 3518 return ret; 3519 } 3520 3521 static int cma_connect_iw(struct rdma_id_private *id_priv, 3522 struct rdma_conn_param *conn_param) 3523 { 3524 struct iw_cm_id *cm_id; 3525 int ret; 3526 struct iw_cm_conn_param iw_param; 3527 3528 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); 3529 if (IS_ERR(cm_id)) 3530 return PTR_ERR(cm_id); 3531 3532 cm_id->tos = id_priv->tos; 3533 id_priv->cm_id.iw = cm_id; 3534 3535 memcpy(&cm_id->local_addr, cma_src_addr(id_priv), 3536 rdma_addr_size(cma_src_addr(id_priv))); 3537 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), 3538 rdma_addr_size(cma_dst_addr(id_priv))); 3539 3540 ret = cma_modify_qp_rtr(id_priv, conn_param); 3541 if (ret) 3542 goto out; 3543 3544 if (conn_param) { 3545 iw_param.ord = conn_param->initiator_depth; 3546 iw_param.ird = conn_param->responder_resources; 3547 iw_param.private_data = conn_param->private_data; 3548 iw_param.private_data_len = conn_param->private_data_len; 3549 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; 3550 } else { 3551 memset(&iw_param, 0, sizeof iw_param); 3552 iw_param.qpn = id_priv->qp_num; 3553 } 3554 ret = iw_cm_connect(cm_id, &iw_param); 3555 out: 3556 if (ret) { 3557 iw_destroy_cm_id(cm_id); 3558 id_priv->cm_id.iw = NULL; 3559 } 3560 return ret; 3561 } 3562 3563 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 3564 { 3565 struct rdma_id_private *id_priv; 3566 int ret; 3567 3568 id_priv = container_of(id, struct rdma_id_private, id); 3569 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) 3570 return -EINVAL; 3571 3572 if (!id->qp) { 3573 id_priv->qp_num = conn_param->qp_num; 3574 id_priv->srq = conn_param->srq; 3575 } 3576 3577 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3578 if (id->qp_type == IB_QPT_UD) 3579 ret = cma_resolve_ib_udp(id_priv, conn_param); 3580 else 3581 ret = cma_connect_ib(id_priv, conn_param); 3582 } else if (rdma_cap_iw_cm(id->device, id->port_num)) 3583 ret = cma_connect_iw(id_priv, conn_param); 3584 else 3585 ret = -ENOSYS; 3586 if (ret) 3587 goto err; 3588 3589 return 0; 3590 err: 3591 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); 3592 return ret; 3593 } 3594 EXPORT_SYMBOL(rdma_connect); 3595 3596 static int cma_accept_ib(struct rdma_id_private *id_priv, 3597 struct rdma_conn_param *conn_param) 3598 { 3599 struct ib_cm_rep_param rep; 3600 int ret; 3601 3602 ret = cma_modify_qp_rtr(id_priv, conn_param); 3603 if (ret) 3604 goto out; 3605 3606 ret = cma_modify_qp_rts(id_priv, conn_param); 3607 if (ret) 3608 goto out; 3609 3610 memset(&rep, 0, sizeof rep); 3611 rep.qp_num = id_priv->qp_num; 3612 rep.starting_psn = id_priv->seq_num; 3613 rep.private_data = conn_param->private_data; 3614 rep.private_data_len = conn_param->private_data_len; 3615 rep.responder_resources = conn_param->responder_resources; 3616 rep.initiator_depth = conn_param->initiator_depth; 3617 rep.failover_accepted = 0; 3618 rep.flow_control = conn_param->flow_control; 3619 rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 3620 rep.srq = id_priv->srq ? 1 : 0; 3621 3622 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); 3623 out: 3624 return ret; 3625 } 3626 3627 static int cma_accept_iw(struct rdma_id_private *id_priv, 3628 struct rdma_conn_param *conn_param) 3629 { 3630 struct iw_cm_conn_param iw_param; 3631 int ret; 3632 3633 ret = cma_modify_qp_rtr(id_priv, conn_param); 3634 if (ret) 3635 return ret; 3636 3637 iw_param.ord = conn_param->initiator_depth; 3638 iw_param.ird = conn_param->responder_resources; 3639 iw_param.private_data = conn_param->private_data; 3640 iw_param.private_data_len = conn_param->private_data_len; 3641 if (id_priv->id.qp) { 3642 iw_param.qpn = id_priv->qp_num; 3643 } else 3644 iw_param.qpn = conn_param->qp_num; 3645 3646 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); 3647 } 3648 3649 static int cma_send_sidr_rep(struct rdma_id_private *id_priv, 3650 enum ib_cm_sidr_status status, u32 qkey, 3651 const void *private_data, int private_data_len) 3652 { 3653 struct ib_cm_sidr_rep_param rep; 3654 int ret; 3655 3656 memset(&rep, 0, sizeof rep); 3657 rep.status = status; 3658 if (status == IB_SIDR_SUCCESS) { 3659 ret = cma_set_qkey(id_priv, qkey); 3660 if (ret) 3661 return ret; 3662 rep.qp_num = id_priv->qp_num; 3663 rep.qkey = id_priv->qkey; 3664 } 3665 rep.private_data = private_data; 3666 rep.private_data_len = private_data_len; 3667 3668 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); 3669 } 3670 3671 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 3672 { 3673 struct rdma_id_private *id_priv; 3674 int ret; 3675 3676 id_priv = container_of(id, struct rdma_id_private, id); 3677 3678 id_priv->owner = task_pid_nr(current); 3679 3680 if (!cma_comp(id_priv, RDMA_CM_CONNECT)) 3681 return -EINVAL; 3682 3683 if (!id->qp && conn_param) { 3684 id_priv->qp_num = conn_param->qp_num; 3685 id_priv->srq = conn_param->srq; 3686 } 3687 3688 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3689 if (id->qp_type == IB_QPT_UD) { 3690 if (conn_param) 3691 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 3692 conn_param->qkey, 3693 conn_param->private_data, 3694 conn_param->private_data_len); 3695 else 3696 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 3697 0, NULL, 0); 3698 } else { 3699 if (conn_param) 3700 ret = cma_accept_ib(id_priv, conn_param); 3701 else 3702 ret = cma_rep_recv(id_priv); 3703 } 3704 } else if (rdma_cap_iw_cm(id->device, id->port_num)) 3705 ret = cma_accept_iw(id_priv, conn_param); 3706 else 3707 ret = -ENOSYS; 3708 3709 if (ret) 3710 goto reject; 3711 3712 return 0; 3713 reject: 3714 cma_modify_qp_err(id_priv); 3715 rdma_reject(id, NULL, 0); 3716 return ret; 3717 } 3718 EXPORT_SYMBOL(rdma_accept); 3719 3720 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) 3721 { 3722 struct rdma_id_private *id_priv; 3723 int ret; 3724 3725 id_priv = container_of(id, struct rdma_id_private, id); 3726 if (!id_priv->cm_id.ib) 3727 return -EINVAL; 3728 3729 switch (id->device->node_type) { 3730 case RDMA_NODE_IB_CA: 3731 ret = ib_cm_notify(id_priv->cm_id.ib, event); 3732 break; 3733 default: 3734 ret = 0; 3735 break; 3736 } 3737 return ret; 3738 } 3739 EXPORT_SYMBOL(rdma_notify); 3740 3741 int rdma_reject(struct rdma_cm_id *id, const void *private_data, 3742 u8 private_data_len) 3743 { 3744 struct rdma_id_private *id_priv; 3745 int ret; 3746 3747 id_priv = container_of(id, struct rdma_id_private, id); 3748 if (!id_priv->cm_id.ib) 3749 return -EINVAL; 3750 3751 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3752 if (id->qp_type == IB_QPT_UD) 3753 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, 3754 private_data, private_data_len); 3755 else 3756 ret = ib_send_cm_rej(id_priv->cm_id.ib, 3757 IB_CM_REJ_CONSUMER_DEFINED, NULL, 3758 0, private_data, private_data_len); 3759 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 3760 ret = iw_cm_reject(id_priv->cm_id.iw, 3761 private_data, private_data_len); 3762 } else 3763 ret = -ENOSYS; 3764 3765 return ret; 3766 } 3767 EXPORT_SYMBOL(rdma_reject); 3768 3769 int rdma_disconnect(struct rdma_cm_id *id) 3770 { 3771 struct rdma_id_private *id_priv; 3772 int ret; 3773 3774 id_priv = container_of(id, struct rdma_id_private, id); 3775 if (!id_priv->cm_id.ib) 3776 return -EINVAL; 3777 3778 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3779 ret = cma_modify_qp_err(id_priv); 3780 if (ret) 3781 goto out; 3782 /* Initiate or respond to a disconnect. */ 3783 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) 3784 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); 3785 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 3786 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); 3787 } else 3788 ret = -EINVAL; 3789 3790 out: 3791 return ret; 3792 } 3793 EXPORT_SYMBOL(rdma_disconnect); 3794 3795 static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) 3796 { 3797 struct rdma_id_private *id_priv; 3798 struct cma_multicast *mc = multicast->context; 3799 struct rdma_cm_event event; 3800 int ret = 0; 3801 3802 id_priv = mc->id_priv; 3803 mutex_lock(&id_priv->handler_mutex); 3804 if (id_priv->state != RDMA_CM_ADDR_BOUND && 3805 id_priv->state != RDMA_CM_ADDR_RESOLVED) 3806 goto out; 3807 3808 if (!status) 3809 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); 3810 mutex_lock(&id_priv->qp_mutex); 3811 if (!status && id_priv->id.qp) 3812 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, 3813 be16_to_cpu(multicast->rec.mlid)); 3814 mutex_unlock(&id_priv->qp_mutex); 3815 3816 memset(&event, 0, sizeof event); 3817 event.status = status; 3818 event.param.ud.private_data = mc->context; 3819 if (!status) { 3820 struct rdma_dev_addr *dev_addr = 3821 &id_priv->id.route.addr.dev_addr; 3822 struct net_device *ndev = 3823 dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); 3824 enum ib_gid_type gid_type = 3825 id_priv->cma_dev->default_gid_type[id_priv->id.port_num - 3826 rdma_start_port(id_priv->cma_dev->device)]; 3827 3828 event.event = RDMA_CM_EVENT_MULTICAST_JOIN; 3829 ret = ib_init_ah_from_mcmember(id_priv->id.device, 3830 id_priv->id.port_num, 3831 &multicast->rec, 3832 ndev, gid_type, 3833 &event.param.ud.ah_attr); 3834 if (ret) 3835 event.event = RDMA_CM_EVENT_MULTICAST_ERROR; 3836 3837 event.param.ud.qp_num = 0xFFFFFF; 3838 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey); 3839 if (ndev) 3840 dev_put(ndev); 3841 } else 3842 event.event = RDMA_CM_EVENT_MULTICAST_ERROR; 3843 3844 ret = id_priv->id.event_handler(&id_priv->id, &event); 3845 if (ret) { 3846 cma_exch(id_priv, RDMA_CM_DESTROYING); 3847 mutex_unlock(&id_priv->handler_mutex); 3848 rdma_destroy_id(&id_priv->id); 3849 return 0; 3850 } 3851 3852 out: 3853 mutex_unlock(&id_priv->handler_mutex); 3854 return 0; 3855 } 3856 3857 static void cma_set_mgid(struct rdma_id_private *id_priv, 3858 struct sockaddr *addr, union ib_gid *mgid) 3859 { 3860 unsigned char mc_map[MAX_ADDR_LEN]; 3861 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3862 struct sockaddr_in *sin = (struct sockaddr_in *) addr; 3863 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr; 3864 3865 if (cma_any_addr(addr)) { 3866 memset(mgid, 0, sizeof *mgid); 3867 } else if ((addr->sa_family == AF_INET6) && 3868 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) == 3869 0xFF10A01B)) { 3870 /* IPv6 address is an SA assigned MGID. */ 3871 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 3872 } else if (addr->sa_family == AF_IB) { 3873 memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid); 3874 } else if (addr->sa_family == AF_INET6) { 3875 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map); 3876 if (id_priv->id.ps == RDMA_PS_UDP) 3877 mc_map[7] = 0x01; /* Use RDMA CM signature */ 3878 *mgid = *(union ib_gid *) (mc_map + 4); 3879 } else { 3880 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); 3881 if (id_priv->id.ps == RDMA_PS_UDP) 3882 mc_map[7] = 0x01; /* Use RDMA CM signature */ 3883 *mgid = *(union ib_gid *) (mc_map + 4); 3884 } 3885 } 3886 3887 static void cma_query_sa_classport_info_cb(int status, 3888 struct ib_class_port_info *rec, 3889 void *context) 3890 { 3891 struct class_port_info_context *cb_ctx = context; 3892 3893 WARN_ON(!context); 3894 3895 if (status || !rec) { 3896 pr_debug("RDMA CM: %s port %u failed query ClassPortInfo status: %d\n", 3897 cb_ctx->device->name, cb_ctx->port_num, status); 3898 goto out; 3899 } 3900 3901 memcpy(cb_ctx->class_port_info, rec, sizeof(struct ib_class_port_info)); 3902 3903 out: 3904 complete(&cb_ctx->done); 3905 } 3906 3907 static int cma_query_sa_classport_info(struct ib_device *device, u8 port_num, 3908 struct ib_class_port_info *class_port_info) 3909 { 3910 struct class_port_info_context *cb_ctx; 3911 int ret; 3912 3913 cb_ctx = kmalloc(sizeof(*cb_ctx), GFP_KERNEL); 3914 if (!cb_ctx) 3915 return -ENOMEM; 3916 3917 cb_ctx->device = device; 3918 cb_ctx->class_port_info = class_port_info; 3919 cb_ctx->port_num = port_num; 3920 init_completion(&cb_ctx->done); 3921 3922 ret = ib_sa_classport_info_rec_query(&sa_client, device, port_num, 3923 CMA_QUERY_CLASSPORT_INFO_TIMEOUT, 3924 GFP_KERNEL, cma_query_sa_classport_info_cb, 3925 cb_ctx, &cb_ctx->sa_query); 3926 if (ret < 0) { 3927 pr_err("RDMA CM: %s port %u failed to send ClassPortInfo query, ret: %d\n", 3928 device->name, port_num, ret); 3929 goto out; 3930 } 3931 3932 wait_for_completion(&cb_ctx->done); 3933 3934 out: 3935 kfree(cb_ctx); 3936 return ret; 3937 } 3938 3939 static int cma_join_ib_multicast(struct rdma_id_private *id_priv, 3940 struct cma_multicast *mc) 3941 { 3942 struct ib_sa_mcmember_rec rec; 3943 struct ib_class_port_info class_port_info; 3944 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3945 ib_sa_comp_mask comp_mask; 3946 int ret; 3947 3948 ib_addr_get_mgid(dev_addr, &rec.mgid); 3949 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, 3950 &rec.mgid, &rec); 3951 if (ret) 3952 return ret; 3953 3954 ret = cma_set_qkey(id_priv, 0); 3955 if (ret) 3956 return ret; 3957 3958 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); 3959 rec.qkey = cpu_to_be32(id_priv->qkey); 3960 rdma_addr_get_sgid(dev_addr, &rec.port_gid); 3961 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 3962 rec.join_state = mc->join_state; 3963 3964 if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) { 3965 ret = cma_query_sa_classport_info(id_priv->id.device, 3966 id_priv->id.port_num, 3967 &class_port_info); 3968 3969 if (ret) 3970 return ret; 3971 3972 if (!(ib_get_cpi_capmask2(&class_port_info) & 3973 IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) { 3974 pr_warn("RDMA CM: %s port %u Unable to multicast join\n" 3975 "RDMA CM: SM doesn't support Send Only Full Member option\n", 3976 id_priv->id.device->name, id_priv->id.port_num); 3977 return -EOPNOTSUPP; 3978 } 3979 } 3980 3981 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | 3982 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE | 3983 IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL | 3984 IB_SA_MCMEMBER_REC_FLOW_LABEL | 3985 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; 3986 3987 if (id_priv->id.ps == RDMA_PS_IPOIB) 3988 comp_mask |= IB_SA_MCMEMBER_REC_RATE | 3989 IB_SA_MCMEMBER_REC_RATE_SELECTOR | 3990 IB_SA_MCMEMBER_REC_MTU_SELECTOR | 3991 IB_SA_MCMEMBER_REC_MTU | 3992 IB_SA_MCMEMBER_REC_HOP_LIMIT; 3993 3994 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, 3995 id_priv->id.port_num, &rec, 3996 comp_mask, GFP_KERNEL, 3997 cma_ib_mc_handler, mc); 3998 return PTR_ERR_OR_ZERO(mc->multicast.ib); 3999 } 4000 4001 static void iboe_mcast_work_handler(struct work_struct *work) 4002 { 4003 struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work); 4004 struct cma_multicast *mc = mw->mc; 4005 struct ib_sa_multicast *m = mc->multicast.ib; 4006 4007 mc->multicast.ib->context = mc; 4008 cma_ib_mc_handler(0, m); 4009 kref_put(&mc->mcref, release_mc); 4010 kfree(mw); 4011 } 4012 4013 static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, 4014 enum ib_gid_type gid_type) 4015 { 4016 struct sockaddr_in *sin = (struct sockaddr_in *)addr; 4017 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr; 4018 4019 if (cma_any_addr(addr)) { 4020 memset(mgid, 0, sizeof *mgid); 4021 } else if (addr->sa_family == AF_INET6) { 4022 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 4023 } else { 4024 mgid->raw[0] = 4025 (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff; 4026 mgid->raw[1] = 4027 (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e; 4028 mgid->raw[2] = 0; 4029 mgid->raw[3] = 0; 4030 mgid->raw[4] = 0; 4031 mgid->raw[5] = 0; 4032 mgid->raw[6] = 0; 4033 mgid->raw[7] = 0; 4034 mgid->raw[8] = 0; 4035 mgid->raw[9] = 0; 4036 mgid->raw[10] = 0xff; 4037 mgid->raw[11] = 0xff; 4038 *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr; 4039 } 4040 } 4041 4042 static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, 4043 struct cma_multicast *mc) 4044 { 4045 struct iboe_mcast_work *work; 4046 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 4047 int err = 0; 4048 struct sockaddr *addr = (struct sockaddr *)&mc->addr; 4049 struct net_device *ndev = NULL; 4050 enum ib_gid_type gid_type; 4051 bool send_only; 4052 4053 send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); 4054 4055 if (cma_zero_addr((struct sockaddr *)&mc->addr)) 4056 return -EINVAL; 4057 4058 work = kzalloc(sizeof *work, GFP_KERNEL); 4059 if (!work) 4060 return -ENOMEM; 4061 4062 mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL); 4063 if (!mc->multicast.ib) { 4064 err = -ENOMEM; 4065 goto out1; 4066 } 4067 4068 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - 4069 rdma_start_port(id_priv->cma_dev->device)]; 4070 cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid, gid_type); 4071 4072 mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff); 4073 if (id_priv->id.ps == RDMA_PS_UDP) 4074 mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); 4075 4076 if (dev_addr->bound_dev_if) 4077 ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); 4078 if (!ndev) { 4079 err = -ENODEV; 4080 goto out2; 4081 } 4082 mc->multicast.ib->rec.rate = iboe_get_rate(ndev); 4083 mc->multicast.ib->rec.hop_limit = 1; 4084 mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->if_mtu); 4085 4086 if (addr->sa_family == AF_INET || addr->sa_family == AF_INET6) { 4087 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { 4088 mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; 4089 if (!send_only) { 4090 err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, 4091 true); 4092 if (!err) 4093 mc->igmp_joined = true; 4094 } 4095 } 4096 } else { 4097 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) 4098 err = -ENOTSUPP; 4099 } 4100 dev_put(ndev); 4101 if (err || !mc->multicast.ib->rec.mtu) { 4102 if (!err) 4103 err = -EINVAL; 4104 goto out2; 4105 } 4106 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 4107 &mc->multicast.ib->rec.port_gid); 4108 work->id = id_priv; 4109 work->mc = mc; 4110 INIT_WORK(&work->work, iboe_mcast_work_handler); 4111 kref_get(&mc->mcref); 4112 queue_work(cma_wq, &work->work); 4113 4114 return 0; 4115 4116 out2: 4117 kfree(mc->multicast.ib); 4118 out1: 4119 kfree(work); 4120 return err; 4121 } 4122 4123 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, 4124 u8 join_state, void *context) 4125 { 4126 struct rdma_id_private *id_priv; 4127 struct cma_multicast *mc; 4128 int ret; 4129 4130 if (!id->device) 4131 return -EINVAL; 4132 4133 id_priv = container_of(id, struct rdma_id_private, id); 4134 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && 4135 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) 4136 return -EINVAL; 4137 4138 mc = kmalloc(sizeof *mc, GFP_KERNEL); 4139 if (!mc) 4140 return -ENOMEM; 4141 4142 memcpy(&mc->addr, addr, rdma_addr_size(addr)); 4143 mc->context = context; 4144 mc->id_priv = id_priv; 4145 mc->igmp_joined = false; 4146 mc->join_state = join_state; 4147 spin_lock(&id_priv->lock); 4148 list_add(&mc->list, &id_priv->mc_list); 4149 spin_unlock(&id_priv->lock); 4150 4151 if (rdma_protocol_roce(id->device, id->port_num)) { 4152 kref_init(&mc->mcref); 4153 ret = cma_iboe_join_multicast(id_priv, mc); 4154 } else if (rdma_cap_ib_mcast(id->device, id->port_num)) 4155 ret = cma_join_ib_multicast(id_priv, mc); 4156 else 4157 ret = -ENOSYS; 4158 4159 if (ret) { 4160 spin_lock_irq(&id_priv->lock); 4161 list_del(&mc->list); 4162 spin_unlock_irq(&id_priv->lock); 4163 kfree(mc); 4164 } 4165 return ret; 4166 } 4167 EXPORT_SYMBOL(rdma_join_multicast); 4168 4169 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) 4170 { 4171 struct rdma_id_private *id_priv; 4172 struct cma_multicast *mc; 4173 4174 id_priv = container_of(id, struct rdma_id_private, id); 4175 spin_lock_irq(&id_priv->lock); 4176 list_for_each_entry(mc, &id_priv->mc_list, list) { 4177 if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) { 4178 list_del(&mc->list); 4179 spin_unlock_irq(&id_priv->lock); 4180 4181 if (id->qp) 4182 ib_detach_mcast(id->qp, 4183 &mc->multicast.ib->rec.mgid, 4184 be16_to_cpu(mc->multicast.ib->rec.mlid)); 4185 4186 BUG_ON(id_priv->cma_dev->device != id->device); 4187 4188 if (rdma_cap_ib_mcast(id->device, id->port_num)) { 4189 ib_sa_free_multicast(mc->multicast.ib); 4190 kfree(mc); 4191 } else if (rdma_protocol_roce(id->device, id->port_num)) { 4192 if (mc->igmp_joined) { 4193 struct rdma_dev_addr *dev_addr = 4194 &id->route.addr.dev_addr; 4195 struct net_device *ndev = NULL; 4196 4197 if (dev_addr->bound_dev_if) 4198 ndev = dev_get_by_index(dev_addr->net, 4199 dev_addr->bound_dev_if); 4200 if (ndev) { 4201 cma_igmp_send(ndev, 4202 &mc->multicast.ib->rec.mgid, 4203 false); 4204 dev_put(ndev); 4205 } 4206 mc->igmp_joined = false; 4207 } 4208 kref_put(&mc->mcref, release_mc); 4209 } 4210 return; 4211 } 4212 } 4213 spin_unlock_irq(&id_priv->lock); 4214 } 4215 EXPORT_SYMBOL(rdma_leave_multicast); 4216 4217 static int 4218 sysctl_cma_default_roce_mode(SYSCTL_HANDLER_ARGS) 4219 { 4220 struct cma_device *cma_dev = arg1; 4221 const int port = arg2; 4222 char buf[64]; 4223 int error; 4224 4225 strlcpy(buf, ib_cache_gid_type_str( 4226 cma_get_default_gid_type(cma_dev, port)), sizeof(buf)); 4227 4228 error = sysctl_handle_string(oidp, buf, sizeof(buf), req); 4229 if (error != 0 || req->newptr == NULL) 4230 goto done; 4231 4232 error = ib_cache_gid_parse_type_str(buf); 4233 if (error < 0) { 4234 error = EINVAL; 4235 goto done; 4236 } 4237 4238 cma_set_default_gid_type(cma_dev, port, error); 4239 error = 0; 4240 done: 4241 return (error); 4242 } 4243 4244 static void cma_add_one(struct ib_device *device) 4245 { 4246 struct cma_device *cma_dev; 4247 struct rdma_id_private *id_priv; 4248 unsigned int i; 4249 4250 cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL); 4251 if (!cma_dev) 4252 return; 4253 4254 sysctl_ctx_init(&cma_dev->sysctl_ctx); 4255 4256 cma_dev->device = device; 4257 cma_dev->default_gid_type = kcalloc(device->phys_port_cnt, 4258 sizeof(*cma_dev->default_gid_type), 4259 GFP_KERNEL); 4260 if (!cma_dev->default_gid_type) { 4261 kfree(cma_dev); 4262 return; 4263 } 4264 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { 4265 unsigned long supported_gids; 4266 unsigned int default_gid_type; 4267 4268 supported_gids = roce_gid_type_mask_support(device, i); 4269 4270 if (WARN_ON(!supported_gids)) { 4271 /* set something valid */ 4272 default_gid_type = 0; 4273 } else if (test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) { 4274 /* prefer RoCEv2, if supported */ 4275 default_gid_type = IB_GID_TYPE_ROCE_UDP_ENCAP; 4276 } else { 4277 default_gid_type = find_first_bit(&supported_gids, 4278 BITS_PER_LONG); 4279 } 4280 cma_dev->default_gid_type[i - rdma_start_port(device)] = 4281 default_gid_type; 4282 } 4283 4284 init_completion(&cma_dev->comp); 4285 atomic_set(&cma_dev->refcount, 1); 4286 INIT_LIST_HEAD(&cma_dev->id_list); 4287 ib_set_client_data(device, &cma_client, cma_dev); 4288 4289 mutex_lock(&lock); 4290 list_add_tail(&cma_dev->list, &dev_list); 4291 list_for_each_entry(id_priv, &listen_any_list, list) 4292 cma_listen_on_dev(id_priv, cma_dev); 4293 mutex_unlock(&lock); 4294 4295 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { 4296 char buf[64]; 4297 4298 snprintf(buf, sizeof(buf), "default_roce_mode_port%d", i); 4299 4300 (void) SYSCTL_ADD_PROC(&cma_dev->sysctl_ctx, 4301 SYSCTL_CHILDREN(device->ports_parent->parent->oidp), 4302 OID_AUTO, buf, CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 4303 cma_dev, i, &sysctl_cma_default_roce_mode, "A", 4304 "Default RoCE mode. Valid values: IB/RoCE v1 and RoCE v2"); 4305 } 4306 } 4307 4308 static int cma_remove_id_dev(struct rdma_id_private *id_priv) 4309 { 4310 struct rdma_cm_event event; 4311 enum rdma_cm_state state; 4312 int ret = 0; 4313 4314 /* Record that we want to remove the device */ 4315 state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL); 4316 if (state == RDMA_CM_DESTROYING) 4317 return 0; 4318 4319 cma_cancel_operation(id_priv, state); 4320 mutex_lock(&id_priv->handler_mutex); 4321 4322 /* Check for destruction from another callback. */ 4323 if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL)) 4324 goto out; 4325 4326 memset(&event, 0, sizeof event); 4327 event.event = RDMA_CM_EVENT_DEVICE_REMOVAL; 4328 ret = id_priv->id.event_handler(&id_priv->id, &event); 4329 out: 4330 mutex_unlock(&id_priv->handler_mutex); 4331 return ret; 4332 } 4333 4334 static void cma_process_remove(struct cma_device *cma_dev) 4335 { 4336 struct rdma_id_private *id_priv; 4337 int ret; 4338 4339 mutex_lock(&lock); 4340 while (!list_empty(&cma_dev->id_list)) { 4341 id_priv = list_entry(cma_dev->id_list.next, 4342 struct rdma_id_private, list); 4343 4344 list_del(&id_priv->listen_list); 4345 list_del_init(&id_priv->list); 4346 atomic_inc(&id_priv->refcount); 4347 mutex_unlock(&lock); 4348 4349 ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv); 4350 cma_deref_id(id_priv); 4351 if (ret) 4352 rdma_destroy_id(&id_priv->id); 4353 4354 mutex_lock(&lock); 4355 } 4356 mutex_unlock(&lock); 4357 4358 cma_deref_dev(cma_dev); 4359 wait_for_completion(&cma_dev->comp); 4360 } 4361 4362 static void cma_remove_one(struct ib_device *device, void *client_data) 4363 { 4364 struct cma_device *cma_dev = client_data; 4365 4366 if (!cma_dev) 4367 return; 4368 4369 mutex_lock(&lock); 4370 list_del(&cma_dev->list); 4371 mutex_unlock(&lock); 4372 4373 cma_process_remove(cma_dev); 4374 sysctl_ctx_free(&cma_dev->sysctl_ctx); 4375 kfree(cma_dev->default_gid_type); 4376 kfree(cma_dev); 4377 } 4378 4379 static void cma_init_vnet(void *arg) 4380 { 4381 struct cma_pernet *pernet = &VNET(cma_pernet); 4382 4383 idr_init(&pernet->tcp_ps); 4384 idr_init(&pernet->udp_ps); 4385 idr_init(&pernet->ipoib_ps); 4386 idr_init(&pernet->ib_ps); 4387 } 4388 VNET_SYSINIT(cma_init_vnet, SI_SUB_OFED_MODINIT - 1, SI_ORDER_FIRST, cma_init_vnet, NULL); 4389 4390 static void cma_destroy_vnet(void *arg) 4391 { 4392 struct cma_pernet *pernet = &VNET(cma_pernet); 4393 4394 idr_destroy(&pernet->tcp_ps); 4395 idr_destroy(&pernet->udp_ps); 4396 idr_destroy(&pernet->ipoib_ps); 4397 idr_destroy(&pernet->ib_ps); 4398 } 4399 VNET_SYSUNINIT(cma_destroy_vnet, SI_SUB_OFED_MODINIT - 1, SI_ORDER_SECOND, cma_destroy_vnet, NULL); 4400 4401 static int __init cma_init(void) 4402 { 4403 int ret; 4404 4405 cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM); 4406 if (!cma_wq) 4407 return -ENOMEM; 4408 4409 ib_sa_register_client(&sa_client); 4410 rdma_addr_register_client(&addr_client); 4411 4412 ret = ib_register_client(&cma_client); 4413 if (ret) 4414 goto err; 4415 4416 cma_configfs_init(); 4417 4418 return 0; 4419 4420 err: 4421 rdma_addr_unregister_client(&addr_client); 4422 ib_sa_unregister_client(&sa_client); 4423 destroy_workqueue(cma_wq); 4424 return ret; 4425 } 4426 4427 static void __exit cma_cleanup(void) 4428 { 4429 cma_configfs_exit(); 4430 ib_unregister_client(&cma_client); 4431 rdma_addr_unregister_client(&addr_client); 4432 ib_sa_unregister_client(&sa_client); 4433 destroy_workqueue(cma_wq); 4434 } 4435 4436 module_init(cma_init); 4437 module_exit(cma_cleanup); 4438