/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
#define LINUXKPI_PARAM_PREFIX ibcore_

#include "opt_inet.h"
#include "opt_inet6.h"

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>
#include <net/route/nhop.h>

#include <net/tcp.h>
#include <net/ipv6.h>

#include <netinet/in_fib.h>

#include <netinet6/in6_fib.h>
#include <netinet6/scope6_var.h>
#include <netinet6/ip6_var.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_sdp.h>
#include <rdma/ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

#include <sys/priv.h>

#include "core_priv.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18

static const char * const cma_events[] = {
	[RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved",
	[RDMA_CM_EVENT_ADDR_ERROR] = "address error",
	[RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved ",
	[RDMA_CM_EVENT_ROUTE_ERROR] = "route error",
	[RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request",
	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
	[RDMA_CM_EVENT_CONNECT_ERROR] = "connect error",
	[RDMA_CM_EVENT_UNREACHABLE] = "unreachable",
	[RDMA_CM_EVENT_REJECTED] = "rejected",
	[RDMA_CM_EVENT_ESTABLISHED] = "established",
	[RDMA_CM_EVENT_DISCONNECTED] = "disconnected",
	[RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal",
	[RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join",
	[RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error",
	[RDMA_CM_EVENT_ADDR_CHANGE] = "address change",
	[RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
};

const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
	    cma_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(rdma_event_msg);

const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
    int reason)
{
	if (rdma_ib_or_roce(id->device, id->port_num))
		return ibcm_reject_msg(reason);

	if (rdma_protocol_iwarp(id->device, id->port_num))
		return iwcm_reject_msg(reason);

	WARN_ON_ONCE(1);
	return "unrecognized transport";
}
EXPORT_SYMBOL(rdma_reject_msg);

static int cma_check_linklocal(struct rdma_dev_addr *, struct sockaddr *);
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);
static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id);

static struct ib_client cma_client = {
	.name = "cma",
	.add = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;

struct cma_pernet {
	struct idr tcp_ps;
	struct idr udp_ps;
	struct idr ipoib_ps;
	struct idr ib_ps;
	struct idr sdp_ps;
};

VNET_DEFINE(struct cma_pernet, cma_pernet);

static struct cma_pernet *cma_pernet_ptr(struct vnet *vnet)
{
	struct cma_pernet *retval;

	CURVNET_SET_QUIET(vnet);
	retval = &VNET(cma_pernet);
	CURVNET_RESTORE();

	return (retval);
}

static struct idr *cma_pernet_idr(struct vnet *net, enum rdma_port_space ps)
{
	struct cma_pernet *pernet = cma_pernet_ptr(net);

	switch (ps) {
	case RDMA_PS_TCP:
		return &pernet->tcp_ps;
	case RDMA_PS_UDP:
		return &pernet->udp_ps;
	case RDMA_PS_IPOIB:
		return &pernet->ipoib_ps;
	case RDMA_PS_IB:
		return &pernet->ib_ps;
	case RDMA_PS_SDP:
		return &pernet->sdp_ps;
	default:
		return NULL;
	}
}

struct cma_device {
	struct list_head list;
	struct ib_device *device;
	struct completion comp;
	atomic_t refcount;
	struct list_head id_list;
	struct sysctl_ctx_list sysctl_ctx;
	enum ib_gid_type *default_gid_type;
};

struct rdma_bind_list {
	enum rdma_port_space ps;
	struct hlist_head owners;
	unsigned short port;
};

struct class_port_info_context {
	struct ib_class_port_info *class_port_info;
	struct ib_device *device;
	struct completion done;
	struct ib_sa_query *sa_query;
	u8 port_num;
};

static int cma_ps_alloc(struct vnet *vnet, enum rdma_port_space ps,
    struct rdma_bind_list *bind_list, int snum)
{
	struct idr *idr = cma_pernet_idr(vnet, ps);

	return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL);
}

static struct rdma_bind_list *cma_ps_find(struct vnet *net,
    enum rdma_port_space ps, int snum)
{
	struct idr *idr = cma_pernet_idr(net, ps);

	return idr_find(idr, snum);
}

static void cma_ps_remove(struct vnet *net, enum rdma_port_space ps, int snum)
{
	struct idr *idr = cma_pernet_idr(net, ps);

	idr_remove(idr, snum);
}

enum {
	CMA_OPTION_AFONLY,
};

void cma_ref_dev(struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
}

struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
    void *cookie)
{
	struct cma_device *cma_dev;
	struct cma_device *found_cma_dev = NULL;

	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list)
		if (filter(cma_dev->device, cookie)) {
			found_cma_dev = cma_dev;
			break;
		}

	if (found_cma_dev)
		cma_ref_dev(found_cma_dev);
	mutex_unlock(&lock);
	return found_cma_dev;
}

int cma_get_default_gid_type(struct cma_device *cma_dev,
    unsigned int port)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_gid_type(struct cma_device *cma_dev,
    unsigned int port,
    enum ib_gid_type default_gid_type)
{
	unsigned long supported_gids;

	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	supported_gids = roce_gid_type_mask_support(cma_dev->device, port);

	if (!(supported_gids & 1 << default_gid_type))
		return -EINVAL;

	cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
	    default_gid_type;

	return 0;
}

struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
{
	return cma_dev->device;
}

/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id id;

	struct rdma_bind_list *bind_list;
	struct hlist_node node;
	struct list_head list; /* listen_any_list or cma_device.list */
	struct list_head listen_list; /* per device listens */
	struct cma_device *cma_dev;
	struct list_head mc_list;

	int internal_id;
	enum rdma_cm_state state;
	spinlock_t lock;
	struct mutex qp_mutex;

	struct completion comp;
	atomic_t refcount;
	struct mutex handler_mutex;

	int backlog;
	int timeout_ms;
	struct ib_sa_query *query;
	int query_id;
	union {
		struct ib_cm_id *ib;
		struct iw_cm_id *iw;
	} cm_id;

	u32 seq_num;
	u32 qkey;
	u32 qp_num;
	pid_t owner;
	u32 options;
	u8 srq;
	u8 tos;
	u8 timeout_set:1;
	u8 reuseaddr;
	u8 afonly;
	u8 timeout;
	enum ib_gid_type gid_type;
};

struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head list;
	void *context;
	struct sockaddr_storage addr;
	struct kref mcref;
	bool igmp_joined;
	u8 join_state;
};

struct cma_work {
	struct work_struct work;
	struct rdma_id_private *id;
	enum rdma_cm_state old_state;
	enum rdma_cm_state new_state;
	struct rdma_cm_event event;
};

struct cma_ndev_work {
	struct work_struct work;
	struct rdma_id_private *id;
	struct rdma_cm_event event;
};

struct iboe_mcast_work {
	struct work_struct work;
	struct rdma_id_private *id;
	struct cma_multicast *mc;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2

struct cma_req_info {
	struct ib_device *device;
	int port;
	union ib_gid local_gid;
	__be64 service_id;
	u16 pkey;
	bool has_gid:1;
};

static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
    enum rdma_cm_state comp, enum rdma_cm_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
    enum rdma_cm_state exch)
{
	unsigned long flags;
	enum rdma_cm_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}

static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(const struct sdp_hh *hh)
{
	return hh->ipv_cap >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ipv_cap = (ip_ver << 4) | (hh->ipv_cap & 0xF);
}

static int cma_igmp_send(if_t ndev, const union ib_gid *mgid, bool join)
{
	int retval;

	if (ndev) {
		union rdma_sockaddr addr;

		rdma_gid2ip(&addr._sockaddr, mgid);

		CURVNET_SET_QUIET(if_getvnet(ndev));
		if (join)
			retval = -if_addmulti(ndev, &addr._sockaddr, NULL);
		else
			retval = -if_delmulti(ndev, &addr._sockaddr);
		CURVNET_RESTORE();
	} else {
		retval = -ENODEV;
	}
	return retval;
}

static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
    struct cma_device *cma_dev)
{
	cma_ref_dev(cma_dev);
	id_priv->cma_dev = cma_dev;
	id_priv->gid_type = 0;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
	    rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
    struct cma_device *cma_dev)
{
	_cma_attach_to_dev(id_priv, cma_dev);
	id_priv->gid_type =
	    cma_dev->default_gid_type[id_priv->id.port_num -
	    rdma_start_port(cma_dev->device)];
}

void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static inline void release_mc(struct kref *kref)
{
	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

	kfree(mc->multicast.ib);
	kfree(mc);
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	mutex_unlock(&lock);
}

static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
	return id_priv->id.route.addr.src_addr.ss_family;
}

static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	if (id_priv->qkey) {
		if (qkey && id_priv->qkey != qkey)
			return -EINVAL;
		return 0;
	}

	if (qkey) {
		id_priv->qkey = qkey;
		return 0;
	}

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
	case RDMA_PS_IB:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
		    id_priv->id.port_num, &rec.mgid,
		    &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
	dev_addr->dev_type = ARPHRD_INFINIBAND;
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
	int ret;

	if (addr->sa_family != AF_IB) {
		ret = rdma_translate_ip(addr, dev_addr);
	} else {
		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
		ret = 0;
	}

	return ret;
}

static inline int cma_validate_port(struct ib_device *device, u8 port,
    enum ib_gid_type gid_type,
    union ib_gid *gid,
    const struct rdma_dev_addr *dev_addr)
{
	const int dev_type = dev_addr->dev_type;
	if_t ndev;
	int ret = -ENODEV;

	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
		return ret;

	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
		return ret;

	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
	} else {
		ndev = NULL;
		gid_type = IB_GID_TYPE_IB;
	}

	ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
	    ndev, NULL);

	if (ndev)
		dev_put(ndev);

	return ret;
}

static int cma_acquire_dev(struct rdma_id_private *id_priv,
    struct rdma_id_private *listen_id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid, iboe_gid, *gidp;
	int ret = -ENODEV;
	u8 port;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	mutex_lock(&lock);
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
	    &iboe_gid);

	memcpy(&gid, dev_addr->src_dev_addr +
	    rdma_addr_gid_offset(dev_addr), sizeof gid);

	if (listen_id_priv) {
		cma_dev = listen_id_priv->cma_dev;
		port = listen_id_priv->id.port_num;

		if (rdma_is_port_valid(cma_dev->device, port)) {
			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			    &iboe_gid : &gid;

			ret = cma_validate_port(cma_dev->device, port,
			    rdma_protocol_ib(cma_dev->device, port) ?
			    IB_GID_TYPE_IB :
			    listen_id_priv->gid_type, gidp, dev_addr);
			if (!ret) {
				id_priv->id.port_num = port;
				goto out;
			}
		}
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
			if (listen_id_priv &&
			    listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)
				continue;

			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			    &iboe_gid : &gid;

			ret = cma_validate_port(cma_dev->device, port,
			    rdma_protocol_ib(cma_dev->device, port) ?
			    IB_GID_TYPE_IB :
			    cma_dev->default_gid_type[port - 1],
			    gidp, dev_addr);
			if (!ret) {
				id_priv->id.port_num = port;
				goto out;
			}
		}
	}

out:
	if (!ret)
		cma_attach_to_dev(id_priv, cma_dev);

	mutex_unlock(&lock);
	return ret;
}

/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct sockaddr_ib *addr;
	union ib_gid gid, sgid, *dgid;
	u16 pkey, index;
	u8 p;
	int i;

	cma_dev = NULL;
	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);

	list_for_each_entry(cur_dev, &dev_list, list) {
		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (!rdma_cap_af_ib(cur_dev->device, p))
				continue;

			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
				continue;

			for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i,
			    &gid, NULL);
			    i++) {
				if (!memcmp(&gid, dgid, sizeof(gid))) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}

				if (!cma_dev && (gid.global.subnet_prefix ==
				    dgid->global.subnet_prefix)) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
				}
			}
		}
	}

	if (!cma_dev)
		return -ENODEV;

found:
	cma_attach_to_dev(id_priv, cma_dev);
	addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
	memcpy(&addr->sib_addr, &sgid, sizeof sgid);
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
	return 0;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

struct rdma_cm_id *rdma_create_id(struct vnet *net,
    rdma_cm_event_handler event_handler,
    void *context, enum rdma_port_space ps,
    enum ib_qp_type qp_type)
{
	struct rdma_id_private *id_priv;

#ifdef VIMAGE
	if (net == NULL)
		return ERR_PTR(-EINVAL);
#endif
	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->owner = task_pid_nr(current);
	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	id_priv->id.qp_type = qp_type;
	id_priv->timeout_set = false;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
	id_priv->seq_num &= 0x00ffffff;
	id_priv->id.route.addr.dev_addr.net = net;

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);

static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
    struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp_init_attr->port_num = id->port_num;
	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (id->qp_type == IB_QPT_UD)
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);

static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
    struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	union ib_gid sgid;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
	    qp_attr.ah_attr.grh.sgid_index, &sgid, NULL);
	if (ret)
		goto out;

	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
    struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
    struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;
	u16 pkey;

	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
		pkey = 0xffff;
	else
		pkey = ib_addr_get_pkey(dev_addr);

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
	    pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (id_priv->id.qp_type == IB_QPT_UD) {
		ret = cma_set_qkey(id_priv, 0);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
    int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
			    qp_attr_mask);

		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
			    qp_attr_mask);
		qp_attr->port_num = id_priv->id.port_num;
		*qp_attr_mask |= IB_QP_PORT;
	} else
		ret = -ENOSYS;

	if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
		qp_attr->timeout = id_priv->timeout;

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline int cma_zero_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
	case AF_IB:
		return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	/*
	 * ipv4_is_loopback() requires an inet variable via vnet,
	 * not present if INET is not included.
	 */
	case AF_INET:
		return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
#endif
#ifdef INET6
	case AF_INET6:
		return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
#endif
	case AF_IB:
		return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

static inline bool cma_any_addr(struct vnet *vnet, struct sockaddr *addr)
{
	bool ret;

	CURVNET_SET_QUIET(vnet);
	ret = cma_zero_addr(addr) || cma_loopback_addr(addr);
	CURVNET_RESTORE();

	return (ret);
}

static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
{
	if (src->sa_family != dst->sa_family)
		return -1;

	switch (src->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
		    ((struct sockaddr_in *) dst)->sin_addr.s_addr;
	case AF_INET6:
		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
		    &((struct sockaddr_in6 *) dst)->sin6_addr);
	default:
		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
		    &((struct sockaddr_ib *) dst)->sib_addr);
	}
}

static __be16 cma_port(struct sockaddr *addr)
{
	struct sockaddr_ib *sib;

	switch (addr->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) addr)->sin_port;
	case AF_INET6:
		return ((struct sockaddr_in6 *) addr)->sin6_port;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		return htons((u16) (be64_to_cpu(sib->sib_sid) &
		    be64_to_cpu(sib->sib_sid_mask)));
	default:
		return 0;
	}
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}

static void cma_save_ib_info(struct sockaddr *src_addr,
    struct sockaddr *dst_addr,
    struct rdma_cm_id *listen_id,
    struct ib_sa_path_rec *path)
{
	struct sockaddr_ib *listen_ib, *ib;

	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
	if (src_addr) {
		ib = (struct sockaddr_ib *)src_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->sgid, 16);
			ib->sib_sid = path->service_id;
			ib->sib_scope_id = 0;
		} else {
			ib->sib_pkey = listen_ib->sib_pkey;
			ib->sib_flowinfo = listen_ib->sib_flowinfo;
			ib->sib_addr = listen_ib->sib_addr;
			ib->sib_sid = listen_ib->sib_sid;
			ib->sib_scope_id = listen_ib->sib_scope_id;
		}
		ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
	}
	if (dst_addr) {
		ib = (struct sockaddr_ib *)dst_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->dgid, 16);
		}
	}
}

static void cma_save_ip4_info(struct sockaddr_in *src_addr,
    struct sockaddr_in *dst_addr,
    struct cma_hdr *hdr,
    __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in) {
			.sin_len = sizeof(struct sockaddr_in),
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->dst_addr.ip4.addr,
			.sin_port = local_port,
		};
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in) {
			.sin_len = sizeof(struct sockaddr_in),
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->src_addr.ip4.addr,
			.sin_port = hdr->port,
		};
	}
}

static void cma_ip6_clear_scope_id(struct in6_addr *addr)
{
	/* make sure link local scope ID gets zeroed */
	if (IN6_IS_SCOPE_LINKLOCAL(addr) ||
	    IN6_IS_ADDR_MC_INTFACELOCAL(addr)) {
		/* use byte-access to be alignment safe */
		addr->s6_addr[2] = 0;
		addr->s6_addr[3] = 0;
	}
}

static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
    struct sockaddr_in6 *dst_addr,
    struct cma_hdr *hdr,
    __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in6) {
			.sin6_len = sizeof(struct sockaddr_in6),
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->dst_addr.ip6,
			.sin6_port = local_port,
		};
		cma_ip6_clear_scope_id(&src_addr->sin6_addr);
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in6) {
			.sin6_len = sizeof(struct sockaddr_in6),
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->src_addr.ip6,
			.sin6_port = hdr->port,
		};
		cma_ip6_clear_scope_id(&dst_addr->sin6_addr);
	}
}

static u16 cma_port_from_service_id(__be64 service_id)
{
	return (u16)be64_to_cpu(service_id);
}

static int sdp_save_ip_info(struct sockaddr *src_addr,
    struct sockaddr *dst_addr,
    const struct sdp_hh *hdr,
    __be64 service_id)
{
	__be16 local_port;

	BUG_ON(src_addr == NULL || dst_addr == NULL);

	if (sdp_get_majv(hdr->majv_minv) != SDP_MAJ_VERSION)
		return -EINVAL;

	local_port = htons(cma_port_from_service_id(service_id));

	switch (sdp_get_ip_ver(hdr)) {
	case 4: {
		struct sockaddr_in *s4, *d4;

		s4 = (void *)src_addr;
		d4 = (void *)dst_addr;

		*s4 = (struct sockaddr_in) {
			.sin_len = sizeof(*s4),
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->dst_addr.ip4.addr,
			.sin_port = local_port,
		};
		*d4 = (struct sockaddr_in) {
			.sin_len = sizeof(*d4),
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->src_addr.ip4.addr,
			.sin_port = hdr->port,
		};
		break;
	}
	case 6: {
		struct sockaddr_in6 *s6, *d6;

		s6 = (void *)src_addr;
		d6 = (void *)dst_addr;

		*s6 = (struct sockaddr_in6) {
			.sin6_len = sizeof(*s6),
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->dst_addr.ip6,
			.sin6_port = local_port,
		};
		*d6 = (struct sockaddr_in6) {
			.sin6_len = sizeof(*d6),
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->src_addr.ip6,
			.sin6_port = hdr->port,
		};
		cma_ip6_clear_scope_id(&s6->sin6_addr);
		cma_ip6_clear_scope_id(&d6->sin6_addr);
		break;
	}
	default:
		return -EAFNOSUPPORT;
	}

	return 0;
}

static int cma_save_ip_info(struct sockaddr *src_addr,
    struct sockaddr *dst_addr,
    struct ib_cm_event *ib_event,
    __be64 service_id)
{
	struct cma_hdr *hdr;
	__be16 port;

	if (rdma_ps_from_service_id(service_id) == RDMA_PS_SDP)
		return sdp_save_ip_info(src_addr, dst_addr,
		    ib_event->private_data, service_id);

	hdr = ib_event->private_data;
	if (hdr->cma_version != CMA_VERSION)
		return -EINVAL;

	port = htons(cma_port_from_service_id(service_id));

	switch (cma_get_ip_ver(hdr)) {
	case 4:
		cma_save_ip4_info((struct sockaddr_in *)src_addr,
		    (struct sockaddr_in *)dst_addr, hdr, port);
		break;
	case 6:
		cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
		    (struct sockaddr_in6 *)dst_addr, hdr, port);
		break;
	default:
		return -EAFNOSUPPORT;
	}

	return 0;
}

static int cma_save_net_info(struct sockaddr *src_addr,
    struct sockaddr *dst_addr,
    struct rdma_cm_id *listen_id,
    struct ib_cm_event *ib_event,
    sa_family_t sa_family, __be64 service_id)
{
	if (sa_family == AF_IB) {
		if (ib_event->event == IB_CM_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id,
			    ib_event->param.req_rcvd.primary_path);
		else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
		return 0;
	}

	return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
}

static int cma_save_req_info(const struct ib_cm_event *ib_event,
    struct cma_req_info *req)
{
	const struct ib_cm_req_event_param *req_param =
	    &ib_event->param.req_rcvd;
	const struct ib_cm_sidr_req_event_param *sidr_param =
	    &ib_event->param.sidr_req_rcvd;

	switch (ib_event->event) {
	case IB_CM_REQ_RECEIVED:
		req->device = req_param->listen_id->device;
		req->port = req_param->port;
		memcpy(&req->local_gid, &req_param->primary_path->sgid,
		    sizeof(req->local_gid));
		req->has_gid = true;
		req->service_id = req_param->primary_path->service_id;
		req->pkey = be16_to_cpu(req_param->primary_path->pkey);
		if (req->pkey != req_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
			    "RDMA CMA: in the future this may cause the request to be dropped\n",
			    req_param->bth_pkey, req->pkey);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		req->device = sidr_param->listen_id->device;
		req->port = sidr_param->port;
		req->has_gid = false;
		req->service_id = sidr_param->service_id;
		req->pkey = sidr_param->pkey;
		if (req->pkey != sidr_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
			    "RDMA CMA: in the future this may cause the request to be dropped\n",
			    sidr_param->bth_pkey, req->pkey);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

#ifdef INET
static bool validate_ipv4_net_dev_addr(struct vnet *vnet,
    const __be32 saddr, const __be32 daddr)
{
	bool ret;
	CURVNET_SET(vnet);
	ret = ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
	    ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
	    ipv4_is_loopback(saddr);
	CURVNET_RESTORE();
	return (ret);
}
#endif

static bool validate_ipv4_net_dev(if_t net_dev,
    const struct sockaddr_in *dst_addr,
    const struct sockaddr_in *src_addr)
{
#ifdef INET
	__be32 daddr = dst_addr->sin_addr.s_addr,
	    saddr = src_addr->sin_addr.s_addr;
	if_t dst_dev;
	struct nhop_object *nh;
	bool ret;

	if (validate_ipv4_net_dev_addr(if_getvnet(net_dev), saddr, daddr))
		return false;

	dst_dev = ip_ifp_find(if_getvnet(net_dev), daddr);
	if (dst_dev != net_dev) {
		if (dst_dev != NULL)
			dev_put(dst_dev);
		return false;
	}
	dev_put(dst_dev);

	/*
	 * Check for loopback.
	 */
	if (saddr == daddr)
		return true;

	CURVNET_SET(if_getvnet(net_dev));
	nh = fib4_lookup(RT_DEFAULT_FIB, src_addr->sin_addr, 0, NHR_NONE, 0);
	if (nh != NULL)
		ret = (nh->nh_ifp == net_dev);
	else
		ret = false;
	CURVNET_RESTORE();
	return ret;
#else
	return false;
#endif
}

static bool validate_ipv6_net_dev(if_t net_dev,
    const struct sockaddr_in6 *dst_addr,
    const struct sockaddr_in6 *src_addr)
{
#ifdef INET6
	struct sockaddr_in6 src_tmp = *src_addr;
	struct sockaddr_in6 dst_tmp = *dst_addr;
	if_t dst_dev;
	struct nhop_object *nh;
	bool ret;

	dst_dev = ip6_ifp_find(if_getvnet(net_dev), dst_tmp.sin6_addr,
	    if_getindex(net_dev));
	if (dst_dev != net_dev) {
		if (dst_dev != NULL)
			dev_put(dst_dev);
		return false;
	}
	dev_put(dst_dev);

	CURVNET_SET(if_getvnet(net_dev));

	/*
	 * Make sure the scope ID gets embedded.
	 */
	src_tmp.sin6_scope_id = if_getindex(net_dev);
	sa6_embedscope(&src_tmp, 0);

	dst_tmp.sin6_scope_id = if_getindex(net_dev);
	sa6_embedscope(&dst_tmp, 0);

	/*
	 * Check for loopback after scope ID
	 * has been embedded:
	 */
	if (memcmp(&src_tmp.sin6_addr, &dst_tmp.sin6_addr,
	    sizeof(dst_tmp.sin6_addr)) == 0) {
		ret = true;
	} else {
		/* non-loopback case */
		nh = fib6_lookup(RT_DEFAULT_FIB, &src_addr->sin6_addr,
		    if_getindex(net_dev), NHR_NONE, 0);
		if (nh != NULL)
			ret = (nh->nh_ifp == net_dev);
		else
			ret = false;
	}
	CURVNET_RESTORE();
	return ret;
#else
	return false;
#endif
}

static bool validate_net_dev(if_t net_dev,
    const struct sockaddr *daddr,
    const struct sockaddr *saddr)
{
	const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
	const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
	const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;

	switch (daddr->sa_family) {
	case AF_INET:
		return saddr->sa_family == AF_INET &&
		    validate_ipv4_net_dev(net_dev, daddr4, saddr4);

	case AF_INET6:
		return saddr->sa_family == AF_INET6 &&
		    validate_ipv6_net_dev(net_dev, daddr6, saddr6);

	default:
		return false;
	}
}

static if_t
roce_get_net_dev_by_cm_event(struct ib_device *device, u8 port_num,
    const struct ib_cm_event *ib_event)
{
	struct ib_gid_attr sgid_attr;
	union ib_gid sgid;
	int err = -EINVAL;

	if (ib_event->event == IB_CM_REQ_RECEIVED) {
		err = ib_get_cached_gid(device, port_num,
		    ib_event->param.req_rcvd.ppath_sgid_index, &sgid, &sgid_attr);
	} else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
		err = ib_get_cached_gid(device, port_num,
		    ib_event->param.sidr_req_rcvd.sgid_index, &sgid, &sgid_attr);
	}
	if (err)
		return (NULL);
	return (sgid_attr.ndev);
}

static if_t cma_get_net_dev(struct ib_cm_event *ib_event,
    const struct cma_req_info *req)
{
	struct sockaddr_storage listen_addr_storage, src_addr_storage;
	struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
	    *src_addr = (struct sockaddr *)&src_addr_storage;
	if_t net_dev;
	const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
	struct epoch_tracker et;
	int err;

	err = cma_save_ip_info(listen_addr, src_addr, ib_event,
	    req->service_id);
	if (err)
		return ERR_PTR(err);

	if (rdma_protocol_roce(req->device, req->port)) {
		net_dev = roce_get_net_dev_by_cm_event(req->device, req->port,
		    ib_event);
	} else {
		net_dev = ib_get_net_dev_by_params(req->device, req->port,
		    req->pkey,
		    gid, listen_addr);
	}
	if (!net_dev)
		return ERR_PTR(-ENODEV);

	NET_EPOCH_ENTER(et);
	if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
		NET_EPOCH_EXIT(et);
		dev_put(net_dev);
		return ERR_PTR(-EHOSTUNREACH);
	}
	NET_EPOCH_EXIT(et);

	return net_dev;
}

static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id)
{
	return (be64_to_cpu(service_id) >> 16) & 0xffff;
}

static bool sdp_match_private_data(struct rdma_id_private *id_priv,
    const struct sdp_hh *hdr,
    struct sockaddr *addr)
{
	__be32 ip4_addr;
	struct in6_addr ip6_addr;
	struct vnet *vnet = id_priv->id.route.addr.dev_addr.net;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
		if (sdp_get_ip_ver(hdr) != 4)
			return false;
		if (!cma_any_addr(vnet, addr) &&
		    hdr->dst_addr.ip4.addr != ip4_addr)
			return false;
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
		if (sdp_get_ip_ver(hdr) != 6)
			return false;
		cma_ip6_clear_scope_id(&ip6_addr);
		if (!cma_any_addr(vnet, addr) &&
		    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
			return false;
		break;
	case AF_IB:
		return true;
	default:
		return false;
	}

	return true;
}

static bool cma_match_private_data(struct rdma_id_private *id_priv,
    const void *vhdr)
{
	const struct cma_hdr *hdr = vhdr;
	struct sockaddr *addr = cma_src_addr(id_priv);
	struct vnet *vnet = id_priv->id.route.addr.dev_addr.net;
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	if (cma_any_addr(vnet, addr) && !id_priv->afonly)
		return true;

	if (id_priv->id.ps == RDMA_PS_SDP)
		return sdp_match_private_data(id_priv, vhdr, addr);

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
		if (cma_get_ip_ver(hdr) != 4)
			return false;
		if (!cma_any_addr(vnet, addr) &&
		    hdr->dst_addr.ip4.addr != ip4_addr)
			return false;
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
		if (cma_get_ip_ver(hdr) != 6)
			return false;
		cma_ip6_clear_scope_id(&ip6_addr);
		if (!cma_any_addr(vnet, addr) &&
		    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
			return false;
		break;
	case AF_IB:
		return true;
	default:
		return false;
	}

	return true;
}

static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num);
	enum rdma_transport_type transport =
	    rdma_node_get_transport(device->node_type);

	return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB;
}

static bool cma_protocol_roce(const struct rdma_cm_id *id)
{
	struct ib_device *device = id->device;
	const int port_num = id->port_num ?: rdma_start_port(device);

	return cma_protocol_roce_dev_port(device, port_num);
}

static bool cma_match_net_dev(const struct rdma_cm_id *id,
    const if_t net_dev,
    u8 port_num)
{
	const struct rdma_addr *addr = &id->route.addr;

	if (!net_dev) {
		if (id->port_num && id->port_num != port_num)
			return false;

		if (id->ps == RDMA_PS_SDP) {
			if (addr->src_addr.ss_family == AF_INET ||
			    addr->src_addr.ss_family == AF_INET6)
				return true;
			return false;
		}
		/* This request is an AF_IB request or a RoCE request */
		return addr->src_addr.ss_family == AF_IB ||
		    cma_protocol_roce_dev_port(id->device, port_num);
	}

	return !addr->dev_addr.bound_dev_if ||
	    (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
	    addr->dev_addr.bound_dev_if == if_getindex(net_dev));
}

static struct rdma_id_private *cma_find_listener(
    const struct rdma_bind_list *bind_list,
    const struct ib_cm_id *cm_id,
    const struct ib_cm_event *ib_event,
    const struct cma_req_info *req,
    const if_t net_dev)
{
	struct rdma_id_private *id_priv, *id_priv_dev;

	if (!bind_list)
		return ERR_PTR(-EINVAL);

	hlist_for_each_entry(id_priv, &bind_list->owners, node) {
		if (cma_match_private_data(id_priv, ib_event->private_data)) {
			if (id_priv->id.device == cm_id->device &&
			    cma_match_net_dev(&id_priv->id, net_dev, req->port))
				return id_priv;
			list_for_each_entry(id_priv_dev,
			    &id_priv->listen_list,
			    listen_list) {
				if (id_priv_dev->id.device == cm_id->device &&
				    cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
					return id_priv_dev;
			}
		}
	}

	return ERR_PTR(-EINVAL);
}

static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
    struct ib_cm_event *ib_event,
    if_t *net_dev)
{
	struct cma_req_info req;
	struct rdma_bind_list *bind_list;
	struct rdma_id_private *id_priv;
	int err;

	err = cma_save_req_info(ib_event, &req);
	if (err)
		return ERR_PTR(err);

	if (rdma_ps_from_service_id(cm_id->service_id) == RDMA_PS_SDP) {
		*net_dev = NULL;
		goto there_is_no_net_dev;
	}

	*net_dev = cma_get_net_dev(ib_event, &req);
	if (IS_ERR(*net_dev)) {
		if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
			/* Assuming the protocol is AF_IB */
			*net_dev = NULL;
		} else {
			return ERR_CAST(*net_dev);
		}
	}

there_is_no_net_dev:
	bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
	    rdma_ps_from_service_id(req.service_id),
	    cma_port_from_service_id(req.service_id));
	id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
	if (IS_ERR(id_priv) && *net_dev) {
		dev_put(*net_dev);
		*net_dev = NULL;
	}

	return id_priv;
}

static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
{
	if (cma_family(id_priv) == AF_IB)
		return 0;
	if (id_priv->id.ps == RDMA_PS_SDP)
		return 0;
	return sizeof(struct cma_hdr);
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
	}
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
		    struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
    enum rdma_cm_state state)
{
	struct vnet *vnet = id_priv->id.route.addr.dev_addr.net;

	switch (state) {
	case RDMA_CM_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case RDMA_CM_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case RDMA_CM_LISTEN:
		if (cma_any_addr(vnet, cma_src_addr(id_priv)) && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	struct vnet *net = id_priv->id.route.addr.dev_addr.net;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		cma_ps_remove(net, bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
		    struct cma_multicast, list);
		list_del(&mc->list);
		if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
		    id_priv->id.port_num)) {
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
		} else {
			if (mc->igmp_joined) {
				struct rdma_dev_addr *dev_addr =
				    &id_priv->id.route.addr.dev_addr;
				if_t ndev = NULL;

				if (dev_addr->bound_dev_if)
					ndev = dev_get_by_index(dev_addr->net,
					    dev_addr->bound_dev_if);
				if (ndev) {
					cma_igmp_send(ndev,
					    &mc->multicast.ib->rec.mgid,
					    false);
					dev_put(ndev);
				}
			}
			kref_put(&mc->mcref, release_mc);
		}
	}
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum rdma_cm_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
	cma_cancel_operation(id_priv, state);

	/*
	 * Wait for any active callback to finish. New callbacks will find
	 * the id_priv state set to destroying and abort.
	 */
	mutex_lock(&id_priv->handler_mutex);
	mutex_unlock(&id_priv->handler_mutex);

	if (id_priv->cma_dev) {
		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
		} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
		}
		cma_leave_mc_groups(id_priv);
		cma_release_dev(id_priv);
	}

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
	    NULL, 0, NULL, 0);
	return ret;
}

static int sdp_verify_rep(const struct sdp_hah *data)
{
	if (sdp_get_majv(data->majv_minv) != SDP_MAJ_VERSION)
		return -EINVAL;
	return 0;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
    struct ib_cm_rep_event_param *rep_data,
    void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}

static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	mutex_lock(&id_priv->handler_mutex);
	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	    id_priv->state != RDMA_CM_CONNECT) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	    id_priv->state != RDMA_CM_DISCONNECT))
		goto out;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		if (id_priv->id.ps == RDMA_PS_SDP) {
			event.status = sdp_verify_rep(ib_event->private_data);
			if (event.status)
				event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			else
				event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		} else {
			if (id_priv->id.qp) {
				event.status = cma_rep_recv(id_priv);
				event.event = event.status ?
				    RDMA_CM_EVENT_CONNECT_ERROR :
				    RDMA_CM_EVENT_ESTABLISHED;
			} else {
				event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
			}
		}
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
		    ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
		    RDMA_CM_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
		    ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
    struct ib_cm_event *ib_event,
    if_t net_dev)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
	const __be64 service_id =
	    ib_event->param.req_rcvd.primary_path->service_id;
	struct vnet *vnet = listen_id->route.addr.dev_addr.net;
	int ret;

	id = rdma_create_id(vnet,
	    listen_id->event_handler, listen_id->context,
	    listen_id->ps, ib_event->param.req_rcvd.qp_type);
	if (IS_ERR(id))
		return NULL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
	    (struct sockaddr *)&id->route.addr.dst_addr,
	    listen_id, ib_event, ss_family, service_id))
		goto err;

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
2 : 1; 2080 rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths, 2081 GFP_KERNEL); 2082 if (!rt->path_rec) 2083 goto err; 2084 2085 rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path; 2086 if (rt->num_paths == 2) 2087 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; 2088 2089 if (net_dev) { 2090 ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL); 2091 if (ret) 2092 goto err; 2093 } else { 2094 if (!cma_protocol_roce(listen_id) && 2095 cma_any_addr(vnet, cma_src_addr(id_priv))) { 2096 rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; 2097 rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); 2098 ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); 2099 } else if (!cma_any_addr(vnet, cma_src_addr(id_priv))) { 2100 ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); 2101 if (ret) 2102 goto err; 2103 } 2104 } 2105 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); 2106 2107 id_priv->state = RDMA_CM_CONNECT; 2108 return id_priv; 2109 2110 err: 2111 rdma_destroy_id(id); 2112 return NULL; 2113 } 2114 2115 static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id, 2116 struct ib_cm_event *ib_event, 2117 if_t net_dev) 2118 { 2119 struct rdma_id_private *id_priv; 2120 struct rdma_cm_id *id; 2121 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; 2122 struct vnet *vnet = listen_id->route.addr.dev_addr.net; 2123 int ret; 2124 2125 id = rdma_create_id(vnet, listen_id->event_handler, listen_id->context, 2126 listen_id->ps, IB_QPT_UD); 2127 if (IS_ERR(id)) 2128 return NULL; 2129 2130 id_priv = container_of(id, struct rdma_id_private, id); 2131 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, 2132 (struct sockaddr *)&id->route.addr.dst_addr, 2133 listen_id, ib_event, ss_family, 2134 ib_event->param.sidr_req_rcvd.service_id)) 2135 goto err; 2136 2137 if (net_dev) { 2138 ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL); 2139 if (ret) 2140 goto err; 2141 } else { 2142 if (!cma_any_addr(vnet, cma_src_addr(id_priv))) { 2143 ret = cma_translate_addr(cma_src_addr(id_priv), 2144 &id->route.addr.dev_addr); 2145 if (ret) 2146 goto err; 2147 } 2148 } 2149 2150 id_priv->state = RDMA_CM_CONNECT; 2151 return id_priv; 2152 err: 2153 rdma_destroy_id(id); 2154 return NULL; 2155 } 2156 2157 static void cma_set_req_event_data(struct rdma_cm_event *event, 2158 struct ib_cm_req_event_param *req_data, 2159 void *private_data, int offset) 2160 { 2161 event->param.conn.private_data = (char *)private_data + offset; 2162 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset; 2163 event->param.conn.responder_resources = req_data->responder_resources; 2164 event->param.conn.initiator_depth = req_data->initiator_depth; 2165 event->param.conn.flow_control = req_data->flow_control; 2166 event->param.conn.retry_count = req_data->retry_count; 2167 event->param.conn.rnr_retry_count = req_data->rnr_retry_count; 2168 event->param.conn.srq = req_data->srq; 2169 event->param.conn.qp_num = req_data->remote_qpn; 2170 } 2171 2172 static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event) 2173 { 2174 return (((ib_event->event == IB_CM_REQ_RECEIVED) && 2175 (ib_event->param.req_rcvd.qp_type == id->qp_type)) || 2176 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && 2177 (id->qp_type == IB_QPT_UD)) || 2178 (!id->qp_type)); 2179 } 2180 2181 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 2182 { 2183 struct 
rdma_id_private *listen_id, *conn_id = NULL; 2184 struct rdma_cm_event event; 2185 if_t net_dev; 2186 int offset, ret; 2187 2188 listen_id = cma_id_from_event(cm_id, ib_event, &net_dev); 2189 if (IS_ERR(listen_id)) 2190 return PTR_ERR(listen_id); 2191 2192 if (!cma_check_req_qp_type(&listen_id->id, ib_event)) { 2193 ret = -EINVAL; 2194 goto net_dev_put; 2195 } 2196 2197 mutex_lock(&listen_id->handler_mutex); 2198 if (listen_id->state != RDMA_CM_LISTEN) { 2199 ret = -ECONNABORTED; 2200 goto err1; 2201 } 2202 2203 memset(&event, 0, sizeof event); 2204 offset = cma_user_data_offset(listen_id); 2205 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 2206 if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) { 2207 conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev); 2208 event.param.ud.private_data = (char *)ib_event->private_data + offset; 2209 event.param.ud.private_data_len = 2210 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset; 2211 } else { 2212 conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev); 2213 cma_set_req_event_data(&event, &ib_event->param.req_rcvd, 2214 ib_event->private_data, offset); 2215 } 2216 if (!conn_id) { 2217 ret = -ENOMEM; 2218 goto err1; 2219 } 2220 2221 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 2222 ret = cma_acquire_dev(conn_id, listen_id); 2223 if (ret) 2224 goto err2; 2225 2226 conn_id->cm_id.ib = cm_id; 2227 cm_id->context = conn_id; 2228 cm_id->cm_handler = cma_ib_handler; 2229 2230 /* 2231 * Protect against the user destroying conn_id from another thread 2232 * until we're done accessing it. 2233 */ 2234 atomic_inc(&conn_id->refcount); 2235 ret = conn_id->id.event_handler(&conn_id->id, &event); 2236 if (ret) 2237 goto err3; 2238 /* 2239 * Acquire mutex to prevent user executing rdma_destroy_id() 2240 * while we're accessing the cm_id. 2241 */ 2242 mutex_lock(&lock); 2243 if (cma_comp(conn_id, RDMA_CM_CONNECT) && 2244 (conn_id->id.qp_type != IB_QPT_UD)) 2245 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 2246 mutex_unlock(&lock); 2247 mutex_unlock(&conn_id->handler_mutex); 2248 mutex_unlock(&listen_id->handler_mutex); 2249 cma_deref_id(conn_id); 2250 if (net_dev) 2251 dev_put(net_dev); 2252 return 0; 2253 2254 err3: 2255 cma_deref_id(conn_id); 2256 /* Destroy the CM ID by returning a non-zero value. 
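The IB CM frees the cm_id it handed to this handler once a non-zero value is returned, so conn_id->cm_id.ib is cleared here to keep rdma_destroy_id() from destroying that cm_id a second time.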
*/ 2257 conn_id->cm_id.ib = NULL; 2258 err2: 2259 cma_exch(conn_id, RDMA_CM_DESTROYING); 2260 mutex_unlock(&conn_id->handler_mutex); 2261 err1: 2262 mutex_unlock(&listen_id->handler_mutex); 2263 if (conn_id) 2264 rdma_destroy_id(&conn_id->id); 2265 2266 net_dev_put: 2267 if (net_dev) 2268 dev_put(net_dev); 2269 2270 return ret; 2271 } 2272 2273 __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr) 2274 { 2275 if (addr->sa_family == AF_IB) 2276 return ((struct sockaddr_ib *) addr)->sib_sid; 2277 2278 return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr))); 2279 } 2280 EXPORT_SYMBOL(rdma_get_service_id); 2281 2282 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) 2283 { 2284 struct rdma_id_private *id_priv = iw_id->context; 2285 struct rdma_cm_event event; 2286 int ret = 0; 2287 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 2288 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 2289 2290 mutex_lock(&id_priv->handler_mutex); 2291 if (id_priv->state != RDMA_CM_CONNECT) 2292 goto out; 2293 2294 memset(&event, 0, sizeof event); 2295 switch (iw_event->event) { 2296 case IW_CM_EVENT_CLOSE: 2297 event.event = RDMA_CM_EVENT_DISCONNECTED; 2298 break; 2299 case IW_CM_EVENT_CONNECT_REPLY: 2300 memcpy(cma_src_addr(id_priv), laddr, 2301 rdma_addr_size(laddr)); 2302 memcpy(cma_dst_addr(id_priv), raddr, 2303 rdma_addr_size(raddr)); 2304 switch (iw_event->status) { 2305 case 0: 2306 event.event = RDMA_CM_EVENT_ESTABLISHED; 2307 event.param.conn.initiator_depth = iw_event->ird; 2308 event.param.conn.responder_resources = iw_event->ord; 2309 break; 2310 case -ECONNRESET: 2311 case -ECONNREFUSED: 2312 event.event = RDMA_CM_EVENT_REJECTED; 2313 break; 2314 case -ETIMEDOUT: 2315 event.event = RDMA_CM_EVENT_UNREACHABLE; 2316 break; 2317 default: 2318 event.event = RDMA_CM_EVENT_CONNECT_ERROR; 2319 break; 2320 } 2321 break; 2322 case IW_CM_EVENT_ESTABLISHED: 2323 event.event = RDMA_CM_EVENT_ESTABLISHED; 2324 event.param.conn.initiator_depth = iw_event->ird; 2325 event.param.conn.responder_resources = iw_event->ord; 2326 break; 2327 default: 2328 BUG_ON(1); 2329 } 2330 2331 event.status = iw_event->status; 2332 event.param.conn.private_data = iw_event->private_data; 2333 event.param.conn.private_data_len = iw_event->private_data_len; 2334 ret = id_priv->id.event_handler(&id_priv->id, &event); 2335 if (ret) { 2336 /* Destroy the CM ID by returning a non-zero value. 
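The iWARP CM likewise frees the cm_id once this handler returns non-zero, so cm_id.iw is cleared before rdma_destroy_id() runs to avoid a double destroy.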
*/ 2337 id_priv->cm_id.iw = NULL; 2338 cma_exch(id_priv, RDMA_CM_DESTROYING); 2339 mutex_unlock(&id_priv->handler_mutex); 2340 rdma_destroy_id(&id_priv->id); 2341 return ret; 2342 } 2343 2344 out: 2345 mutex_unlock(&id_priv->handler_mutex); 2346 return ret; 2347 } 2348 2349 static int iw_conn_req_handler(struct iw_cm_id *cm_id, 2350 struct iw_cm_event *iw_event) 2351 { 2352 struct rdma_cm_id *new_cm_id; 2353 struct rdma_id_private *listen_id, *conn_id; 2354 struct rdma_cm_event event; 2355 int ret = -ECONNABORTED; 2356 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 2357 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 2358 2359 listen_id = cm_id->context; 2360 2361 mutex_lock(&listen_id->handler_mutex); 2362 if (listen_id->state != RDMA_CM_LISTEN) 2363 goto out; 2364 2365 /* Create a new RDMA id for the new IW CM ID */ 2366 new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net, 2367 listen_id->id.event_handler, 2368 listen_id->id.context, 2369 RDMA_PS_TCP, IB_QPT_RC); 2370 if (IS_ERR(new_cm_id)) { 2371 ret = -ENOMEM; 2372 goto out; 2373 } 2374 conn_id = container_of(new_cm_id, struct rdma_id_private, id); 2375 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 2376 conn_id->state = RDMA_CM_CONNECT; 2377 2378 ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr); 2379 if (ret) { 2380 mutex_unlock(&conn_id->handler_mutex); 2381 rdma_destroy_id(new_cm_id); 2382 goto out; 2383 } 2384 2385 ret = cma_acquire_dev(conn_id, listen_id); 2386 if (ret) { 2387 mutex_unlock(&conn_id->handler_mutex); 2388 rdma_destroy_id(new_cm_id); 2389 goto out; 2390 } 2391 2392 conn_id->cm_id.iw = cm_id; 2393 cm_id->context = conn_id; 2394 cm_id->cm_handler = cma_iw_handler; 2395 2396 memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr)); 2397 memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr)); 2398 2399 memset(&event, 0, sizeof event); 2400 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 2401 event.param.conn.private_data = iw_event->private_data; 2402 event.param.conn.private_data_len = iw_event->private_data_len; 2403 event.param.conn.initiator_depth = iw_event->ird; 2404 event.param.conn.responder_resources = iw_event->ord; 2405 2406 /* 2407 * Protect against the user destroying conn_id from another thread 2408 * until we're done accessing it. 
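 * The reference taken just below is dropped with cma_deref_id() once the
 * event handler has returned, or after the id has been torn down on error.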
2409 */ 2410 atomic_inc(&conn_id->refcount); 2411 ret = conn_id->id.event_handler(&conn_id->id, &event); 2412 if (ret) { 2413 /* User wants to destroy the CM ID */ 2414 conn_id->cm_id.iw = NULL; 2415 cma_exch(conn_id, RDMA_CM_DESTROYING); 2416 mutex_unlock(&conn_id->handler_mutex); 2417 cma_deref_id(conn_id); 2418 rdma_destroy_id(&conn_id->id); 2419 goto out; 2420 } 2421 2422 mutex_unlock(&conn_id->handler_mutex); 2423 cma_deref_id(conn_id); 2424 2425 out: 2426 mutex_unlock(&listen_id->handler_mutex); 2427 return ret; 2428 } 2429 2430 static int cma_ib_listen(struct rdma_id_private *id_priv) 2431 { 2432 struct sockaddr *addr; 2433 struct ib_cm_id *id; 2434 __be64 svc_id; 2435 2436 addr = cma_src_addr(id_priv); 2437 svc_id = rdma_get_service_id(&id_priv->id, addr); 2438 id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id); 2439 if (IS_ERR(id)) 2440 return PTR_ERR(id); 2441 id_priv->cm_id.ib = id; 2442 2443 return 0; 2444 } 2445 2446 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) 2447 { 2448 int ret; 2449 struct iw_cm_id *id; 2450 2451 id = iw_create_cm_id(id_priv->id.device, 2452 iw_conn_req_handler, 2453 id_priv); 2454 if (IS_ERR(id)) 2455 return PTR_ERR(id); 2456 2457 id->tos = id_priv->tos; 2458 id_priv->cm_id.iw = id; 2459 2460 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), 2461 rdma_addr_size(cma_src_addr(id_priv))); 2462 2463 ret = iw_cm_listen(id_priv->cm_id.iw, backlog); 2464 2465 if (ret) { 2466 iw_destroy_cm_id(id_priv->cm_id.iw); 2467 id_priv->cm_id.iw = NULL; 2468 } 2469 2470 return ret; 2471 } 2472 2473 static int cma_listen_handler(struct rdma_cm_id *id, 2474 struct rdma_cm_event *event) 2475 { 2476 struct rdma_id_private *id_priv = id->context; 2477 2478 id->context = id_priv->id.context; 2479 id->event_handler = id_priv->id.event_handler; 2480 return id_priv->id.event_handler(id, event); 2481 } 2482 2483 static void cma_listen_on_dev(struct rdma_id_private *id_priv, 2484 struct cma_device *cma_dev) 2485 { 2486 struct rdma_id_private *dev_id_priv; 2487 struct rdma_cm_id *id; 2488 struct vnet *net = id_priv->id.route.addr.dev_addr.net; 2489 int ret; 2490 2491 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) 2492 return; 2493 2494 id = rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps, 2495 id_priv->id.qp_type); 2496 if (IS_ERR(id)) 2497 return; 2498 2499 dev_id_priv = container_of(id, struct rdma_id_private, id); 2500 2501 dev_id_priv->state = RDMA_CM_ADDR_BOUND; 2502 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), 2503 rdma_addr_size(cma_src_addr(id_priv))); 2504 2505 _cma_attach_to_dev(dev_id_priv, cma_dev); 2506 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); 2507 atomic_inc(&id_priv->refcount); 2508 dev_id_priv->internal_id = 1; 2509 dev_id_priv->afonly = id_priv->afonly; 2510 2511 ret = rdma_listen(id, id_priv->backlog); 2512 if (ret) 2513 pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n", 2514 ret, cma_dev->device->name); 2515 } 2516 2517 static void cma_listen_on_all(struct rdma_id_private *id_priv) 2518 { 2519 struct cma_device *cma_dev; 2520 2521 mutex_lock(&lock); 2522 list_add_tail(&id_priv->list, &listen_any_list); 2523 list_for_each_entry(cma_dev, &dev_list, list) 2524 cma_listen_on_dev(id_priv, cma_dev); 2525 mutex_unlock(&lock); 2526 } 2527 2528 void rdma_set_service_type(struct rdma_cm_id *id, int tos) 2529 { 2530 struct rdma_id_private *id_priv; 2531 2532 id_priv = container_of(id, struct rdma_id_private, id); 2533 
id_priv->tos = (u8) tos;
2534 }
2535 EXPORT_SYMBOL(rdma_set_service_type);
2536
2537 /**
2538  * rdma_set_ack_timeout() - Set the ack timeout of the QP associated
2539  * with a connection identifier.
2540  * @id: Communication identifier whose QP the ack timeout is set on.
2541  * @timeout: Ack timeout to set on the QP, expressed as 4.096 * 2^(timeout) usec.
2542  *
2543  * This function should be called before rdma_connect() on the active side,
2544  * and before rdma_accept() on the passive side. It applies to the primary
2545  * path only. The timeout affects only the local side of the QP; it is not
2546  * negotiated with the remote side, and zero disables the timer.
2547  *
2548  * Return: 0 for success
2549  */
2550 int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
2551 {
2552 struct rdma_id_private *id_priv;
2553
2554 if (id->qp_type != IB_QPT_RC)
2555 return -EINVAL;
2556
2557 id_priv = container_of(id, struct rdma_id_private, id);
2558 id_priv->timeout = timeout;
2559 id_priv->timeout_set = true;
2560
2561 return 0;
2562 }
2563 EXPORT_SYMBOL(rdma_set_ack_timeout);
2564
2565 static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
2566 void *context)
2567 {
2568 struct cma_work *work = context;
2569 struct rdma_route *route;
2570
2571 route = &work->id->id.route;
2572
2573 if (!status) {
2574 route->num_paths = 1;
2575 *route->path_rec = *path_rec;
2576 } else {
2577 work->old_state = RDMA_CM_ROUTE_QUERY;
2578 work->new_state = RDMA_CM_ADDR_RESOLVED;
2579 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
2580 work->event.status = status;
2581 }
2582
2583 queue_work(cma_wq, &work->work);
2584 }
2585
2586 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
2587 struct cma_work *work)
2588 {
2589 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2590 struct ib_sa_path_rec path_rec;
2591 ib_sa_comp_mask comp_mask;
2592 struct sockaddr_in6 *sin6;
2593 struct sockaddr_ib *sib;
2594
2595 memset(&path_rec, 0, sizeof path_rec);
2596 rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
2597 rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
2598 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
2599 path_rec.numb_path = 1;
2600 path_rec.reversible = 1;
2601 path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
2602
2603 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
2604 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
2605 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
2606
2607 switch (cma_family(id_priv)) {
2608 case AF_INET:
2609 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
2610 comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
2611 break;
2612 case AF_INET6:
2613 sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
2614 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
2615 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
2616 break;
2617 case AF_IB:
2618 sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
2619 path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
2620 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
2621 break;
2622 }
2623
2624 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
2625 id_priv->id.port_num, &path_rec,
2626 comp_mask, timeout_ms,
2627 GFP_KERNEL, cma_query_handler,
2628 work, &id_priv->query);
2629
2630 return (id_priv->query_id < 0) ?
id_priv->query_id : 0; 2631 } 2632 2633 static void cma_work_handler(struct work_struct *_work) 2634 { 2635 struct cma_work *work = container_of(_work, struct cma_work, work); 2636 struct rdma_id_private *id_priv = work->id; 2637 int destroy = 0; 2638 2639 mutex_lock(&id_priv->handler_mutex); 2640 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) 2641 goto out; 2642 2643 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 2644 cma_exch(id_priv, RDMA_CM_DESTROYING); 2645 destroy = 1; 2646 } 2647 out: 2648 mutex_unlock(&id_priv->handler_mutex); 2649 cma_deref_id(id_priv); 2650 if (destroy) 2651 rdma_destroy_id(&id_priv->id); 2652 kfree(work); 2653 } 2654 2655 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) 2656 { 2657 struct rdma_route *route = &id_priv->id.route; 2658 struct cma_work *work; 2659 int ret; 2660 2661 work = kzalloc(sizeof *work, GFP_KERNEL); 2662 if (!work) 2663 return -ENOMEM; 2664 2665 work->id = id_priv; 2666 INIT_WORK(&work->work, cma_work_handler); 2667 work->old_state = RDMA_CM_ROUTE_QUERY; 2668 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2669 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2670 2671 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); 2672 if (!route->path_rec) { 2673 ret = -ENOMEM; 2674 goto err1; 2675 } 2676 2677 ret = cma_query_ib_route(id_priv, timeout_ms, work); 2678 if (ret) 2679 goto err2; 2680 2681 return 0; 2682 err2: 2683 kfree(route->path_rec); 2684 route->path_rec = NULL; 2685 err1: 2686 kfree(work); 2687 return ret; 2688 } 2689 2690 int rdma_set_ib_paths(struct rdma_cm_id *id, 2691 struct ib_sa_path_rec *path_rec, int num_paths) 2692 { 2693 struct rdma_id_private *id_priv; 2694 int ret; 2695 2696 id_priv = container_of(id, struct rdma_id_private, id); 2697 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 2698 RDMA_CM_ROUTE_RESOLVED)) 2699 return -EINVAL; 2700 2701 id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths, 2702 GFP_KERNEL); 2703 if (!id->route.path_rec) { 2704 ret = -ENOMEM; 2705 goto err; 2706 } 2707 2708 id->route.num_paths = num_paths; 2709 return 0; 2710 err: 2711 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); 2712 return ret; 2713 } 2714 EXPORT_SYMBOL(rdma_set_ib_paths); 2715 2716 static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) 2717 { 2718 struct cma_work *work; 2719 2720 work = kzalloc(sizeof *work, GFP_KERNEL); 2721 if (!work) 2722 return -ENOMEM; 2723 2724 work->id = id_priv; 2725 INIT_WORK(&work->work, cma_work_handler); 2726 work->old_state = RDMA_CM_ROUTE_QUERY; 2727 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2728 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2729 queue_work(cma_wq, &work->work); 2730 return 0; 2731 } 2732 2733 static int iboe_tos_to_sl(if_t ndev, int tos) 2734 { 2735 /* get service level, SL, from IPv4 type of service, TOS */ 2736 int sl = (tos >> 5) & 0x7; 2737 2738 /* final mappings are done by the vendor specific drivers */ 2739 return sl; 2740 } 2741 2742 static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type, 2743 unsigned long supported_gids, 2744 enum ib_gid_type default_gid) 2745 { 2746 if ((network_type == RDMA_NETWORK_IPV4 || 2747 network_type == RDMA_NETWORK_IPV6) && 2748 test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) 2749 return IB_GID_TYPE_ROCE_UDP_ENCAP; 2750 2751 return default_gid; 2752 } 2753 2754 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) 2755 { 2756 struct rdma_route *route = 
&id_priv->id.route; 2757 struct rdma_addr *addr = &route->addr; 2758 struct cma_work *work; 2759 int ret; 2760 if_t ndev = NULL; 2761 2762 2763 work = kzalloc(sizeof *work, GFP_KERNEL); 2764 if (!work) 2765 return -ENOMEM; 2766 2767 work->id = id_priv; 2768 INIT_WORK(&work->work, cma_work_handler); 2769 2770 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL); 2771 if (!route->path_rec) { 2772 ret = -ENOMEM; 2773 goto err1; 2774 } 2775 2776 route->num_paths = 1; 2777 2778 if (addr->dev_addr.bound_dev_if) { 2779 unsigned long supported_gids; 2780 2781 ndev = dev_get_by_index(addr->dev_addr.net, 2782 addr->dev_addr.bound_dev_if); 2783 if (!ndev) { 2784 ret = -ENODEV; 2785 goto err2; 2786 } 2787 2788 route->path_rec->net = if_getvnet(ndev); 2789 route->path_rec->ifindex = if_getindex(ndev); 2790 supported_gids = roce_gid_type_mask_support(id_priv->id.device, 2791 id_priv->id.port_num); 2792 route->path_rec->gid_type = 2793 cma_route_gid_type(addr->dev_addr.network, 2794 supported_gids, 2795 id_priv->gid_type); 2796 } 2797 if (!ndev) { 2798 ret = -ENODEV; 2799 goto err2; 2800 } 2801 2802 memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN); 2803 2804 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 2805 &route->path_rec->sgid); 2806 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, 2807 &route->path_rec->dgid); 2808 2809 /* Use the hint from IP Stack to select GID Type */ 2810 if (route->path_rec->gid_type < ib_network_to_gid_type(addr->dev_addr.network)) 2811 route->path_rec->gid_type = ib_network_to_gid_type(addr->dev_addr.network); 2812 if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB) 2813 /* TODO: get the hoplimit from the inet/inet6 device */ 2814 route->path_rec->hop_limit = addr->dev_addr.hoplimit; 2815 else 2816 route->path_rec->hop_limit = 1; 2817 route->path_rec->reversible = 1; 2818 route->path_rec->pkey = cpu_to_be16(0xffff); 2819 route->path_rec->mtu_selector = IB_SA_EQ; 2820 route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos); 2821 route->path_rec->traffic_class = id_priv->tos; 2822 route->path_rec->mtu = iboe_get_mtu(if_getmtu(ndev)); 2823 route->path_rec->rate_selector = IB_SA_EQ; 2824 route->path_rec->rate = iboe_get_rate(ndev); 2825 dev_put(ndev); 2826 route->path_rec->packet_life_time_selector = IB_SA_EQ; 2827 route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME; 2828 if (!route->path_rec->mtu) { 2829 ret = -EINVAL; 2830 goto err2; 2831 } 2832 2833 work->old_state = RDMA_CM_ROUTE_QUERY; 2834 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2835 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2836 work->event.status = 0; 2837 2838 queue_work(cma_wq, &work->work); 2839 2840 return 0; 2841 2842 err2: 2843 kfree(route->path_rec); 2844 route->path_rec = NULL; 2845 err1: 2846 kfree(work); 2847 return ret; 2848 } 2849 2850 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) 2851 { 2852 struct rdma_id_private *id_priv; 2853 int ret; 2854 2855 id_priv = container_of(id, struct rdma_id_private, id); 2856 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) 2857 return -EINVAL; 2858 2859 atomic_inc(&id_priv->refcount); 2860 if (rdma_cap_ib_sa(id->device, id->port_num)) 2861 ret = cma_resolve_ib_route(id_priv, timeout_ms); 2862 else if (rdma_protocol_roce(id->device, id->port_num)) 2863 ret = cma_resolve_iboe_route(id_priv); 2864 else if (rdma_protocol_iwarp(id->device, id->port_num)) 2865 ret = cma_resolve_iw_route(id_priv, timeout_ms); 2866 else 2867 ret = 
-ENOSYS; 2868 2869 if (ret) 2870 goto err; 2871 2872 return 0; 2873 err: 2874 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); 2875 cma_deref_id(id_priv); 2876 return ret; 2877 } 2878 EXPORT_SYMBOL(rdma_resolve_route); 2879 2880 static void cma_set_loopback(struct sockaddr *addr) 2881 { 2882 switch (addr->sa_family) { 2883 case AF_INET: 2884 ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK); 2885 break; 2886 case AF_INET6: 2887 ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr, 2888 0, 0, 0, htonl(1)); 2889 break; 2890 default: 2891 ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr, 2892 0, 0, 0, htonl(1)); 2893 break; 2894 } 2895 } 2896 2897 static int cma_bind_loopback(struct rdma_id_private *id_priv) 2898 { 2899 struct cma_device *cma_dev, *cur_dev; 2900 struct ib_port_attr port_attr; 2901 union ib_gid gid; 2902 u16 pkey; 2903 int ret; 2904 u8 p; 2905 2906 cma_dev = NULL; 2907 mutex_lock(&lock); 2908 list_for_each_entry(cur_dev, &dev_list, list) { 2909 if (cma_family(id_priv) == AF_IB && 2910 !rdma_cap_ib_cm(cur_dev->device, 1)) 2911 continue; 2912 2913 if (!cma_dev) 2914 cma_dev = cur_dev; 2915 2916 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { 2917 if (!ib_query_port(cur_dev->device, p, &port_attr) && 2918 port_attr.state == IB_PORT_ACTIVE) { 2919 cma_dev = cur_dev; 2920 goto port_found; 2921 } 2922 } 2923 } 2924 2925 if (!cma_dev) { 2926 ret = -ENODEV; 2927 goto out; 2928 } 2929 2930 p = 1; 2931 2932 port_found: 2933 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid, NULL); 2934 if (ret) 2935 goto out; 2936 2937 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); 2938 if (ret) 2939 goto out; 2940 2941 id_priv->id.route.addr.dev_addr.dev_type = 2942 (rdma_protocol_ib(cma_dev->device, p)) ? 
2943 ARPHRD_INFINIBAND : ARPHRD_ETHER; 2944 2945 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); 2946 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); 2947 id_priv->id.port_num = p; 2948 cma_attach_to_dev(id_priv, cma_dev); 2949 cma_set_loopback(cma_src_addr(id_priv)); 2950 out: 2951 mutex_unlock(&lock); 2952 return ret; 2953 } 2954 2955 static void addr_handler(int status, struct sockaddr *src_addr, 2956 struct rdma_dev_addr *dev_addr, void *context) 2957 { 2958 struct rdma_id_private *id_priv = context; 2959 struct rdma_cm_event event; 2960 2961 memset(&event, 0, sizeof event); 2962 mutex_lock(&id_priv->handler_mutex); 2963 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, 2964 RDMA_CM_ADDR_RESOLVED)) 2965 goto out; 2966 2967 memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr)); 2968 if (!status && !id_priv->cma_dev) 2969 status = cma_acquire_dev(id_priv, NULL); 2970 2971 if (status) { 2972 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 2973 RDMA_CM_ADDR_BOUND)) 2974 goto out; 2975 event.event = RDMA_CM_EVENT_ADDR_ERROR; 2976 event.status = status; 2977 } else 2978 event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2979 2980 if (id_priv->id.event_handler(&id_priv->id, &event)) { 2981 cma_exch(id_priv, RDMA_CM_DESTROYING); 2982 mutex_unlock(&id_priv->handler_mutex); 2983 cma_deref_id(id_priv); 2984 rdma_destroy_id(&id_priv->id); 2985 return; 2986 } 2987 out: 2988 mutex_unlock(&id_priv->handler_mutex); 2989 cma_deref_id(id_priv); 2990 } 2991 2992 static int cma_resolve_loopback(struct rdma_id_private *id_priv) 2993 { 2994 struct cma_work *work; 2995 union ib_gid gid; 2996 int ret; 2997 2998 work = kzalloc(sizeof *work, GFP_KERNEL); 2999 if (!work) 3000 return -ENOMEM; 3001 3002 if (!id_priv->cma_dev) { 3003 ret = cma_bind_loopback(id_priv); 3004 if (ret) 3005 goto err; 3006 } 3007 3008 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); 3009 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); 3010 3011 work->id = id_priv; 3012 INIT_WORK(&work->work, cma_work_handler); 3013 work->old_state = RDMA_CM_ADDR_QUERY; 3014 work->new_state = RDMA_CM_ADDR_RESOLVED; 3015 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 3016 queue_work(cma_wq, &work->work); 3017 return 0; 3018 err: 3019 kfree(work); 3020 return ret; 3021 } 3022 3023 static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) 3024 { 3025 struct cma_work *work; 3026 int ret; 3027 3028 work = kzalloc(sizeof *work, GFP_KERNEL); 3029 if (!work) 3030 return -ENOMEM; 3031 3032 if (!id_priv->cma_dev) { 3033 ret = cma_resolve_ib_dev(id_priv); 3034 if (ret) 3035 goto err; 3036 } 3037 3038 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) 3039 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); 3040 3041 work->id = id_priv; 3042 INIT_WORK(&work->work, cma_work_handler); 3043 work->old_state = RDMA_CM_ADDR_QUERY; 3044 work->new_state = RDMA_CM_ADDR_RESOLVED; 3045 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 3046 queue_work(cma_wq, &work->work); 3047 return 0; 3048 err: 3049 kfree(work); 3050 return ret; 3051 } 3052 3053 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 3054 struct sockaddr *dst_addr) 3055 { 3056 if (!src_addr || !src_addr->sa_family) { 3057 src_addr = (struct sockaddr *) &id->route.addr.src_addr; 3058 src_addr->sa_family = dst_addr->sa_family; 3059 if (dst_addr->sa_family == AF_INET6) { 3060 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; 3061 struct sockaddr_in6 *dst_addr6 = (struct 
sockaddr_in6 *) dst_addr; 3062 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; 3063 if (IN6_IS_SCOPE_LINKLOCAL(&dst_addr6->sin6_addr) || 3064 IN6_IS_ADDR_MC_INTFACELOCAL(&dst_addr6->sin6_addr)) 3065 id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; 3066 } else if (dst_addr->sa_family == AF_IB) { 3067 ((struct sockaddr_ib *) src_addr)->sib_pkey = 3068 ((struct sockaddr_ib *) dst_addr)->sib_pkey; 3069 } 3070 } 3071 return rdma_bind_addr(id, src_addr); 3072 } 3073 3074 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 3075 struct sockaddr *dst_addr, int timeout_ms) 3076 { 3077 struct rdma_id_private *id_priv; 3078 struct vnet *vnet = id->route.addr.dev_addr.net; 3079 int ret; 3080 3081 id_priv = container_of(id, struct rdma_id_private, id); 3082 if (id_priv->state == RDMA_CM_IDLE) { 3083 ret = cma_bind_addr(id, src_addr, dst_addr); 3084 if (ret) 3085 return ret; 3086 } 3087 3088 if (cma_family(id_priv) != dst_addr->sa_family) 3089 return -EINVAL; 3090 3091 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) 3092 return -EINVAL; 3093 3094 atomic_inc(&id_priv->refcount); 3095 memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); 3096 if (cma_any_addr(vnet, dst_addr)) { 3097 ret = cma_resolve_loopback(id_priv); 3098 } else { 3099 if (dst_addr->sa_family == AF_IB) { 3100 ret = cma_resolve_ib_addr(id_priv); 3101 } else { 3102 ret = cma_check_linklocal(&id->route.addr.dev_addr, dst_addr); 3103 if (ret) 3104 goto err; 3105 3106 ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv), 3107 dst_addr, &id->route.addr.dev_addr, 3108 timeout_ms, addr_handler, id_priv); 3109 } 3110 } 3111 if (ret) 3112 goto err; 3113 3114 return 0; 3115 err: 3116 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); 3117 cma_deref_id(id_priv); 3118 return ret; 3119 } 3120 EXPORT_SYMBOL(rdma_resolve_addr); 3121 3122 int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) 3123 { 3124 struct rdma_id_private *id_priv; 3125 unsigned long flags; 3126 int ret; 3127 3128 id_priv = container_of(id, struct rdma_id_private, id); 3129 spin_lock_irqsave(&id_priv->lock, flags); 3130 if (reuse || id_priv->state == RDMA_CM_IDLE) { 3131 id_priv->reuseaddr = reuse; 3132 ret = 0; 3133 } else { 3134 ret = -EINVAL; 3135 } 3136 spin_unlock_irqrestore(&id_priv->lock, flags); 3137 return ret; 3138 } 3139 EXPORT_SYMBOL(rdma_set_reuseaddr); 3140 3141 int rdma_set_afonly(struct rdma_cm_id *id, int afonly) 3142 { 3143 struct rdma_id_private *id_priv; 3144 unsigned long flags; 3145 int ret; 3146 3147 id_priv = container_of(id, struct rdma_id_private, id); 3148 spin_lock_irqsave(&id_priv->lock, flags); 3149 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { 3150 id_priv->options |= (1 << CMA_OPTION_AFONLY); 3151 id_priv->afonly = afonly; 3152 ret = 0; 3153 } else { 3154 ret = -EINVAL; 3155 } 3156 spin_unlock_irqrestore(&id_priv->lock, flags); 3157 return ret; 3158 } 3159 EXPORT_SYMBOL(rdma_set_afonly); 3160 3161 static void cma_bind_port(struct rdma_bind_list *bind_list, 3162 struct rdma_id_private *id_priv) 3163 { 3164 struct sockaddr *addr; 3165 struct sockaddr_ib *sib; 3166 u64 sid, mask; 3167 __be16 port; 3168 3169 addr = cma_src_addr(id_priv); 3170 port = htons(bind_list->port); 3171 3172 switch (addr->sa_family) { 3173 case AF_INET: 3174 ((struct sockaddr_in *) addr)->sin_port = port; 3175 break; 3176 case AF_INET6: 3177 ((struct sockaddr_in6 *) addr)->sin6_port = port; 3178 break; 3179 case AF_IB: 3180 sib = (struct sockaddr_ib *) 
addr; 3181 sid = be64_to_cpu(sib->sib_sid); 3182 mask = be64_to_cpu(sib->sib_sid_mask); 3183 sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port)); 3184 sib->sib_sid_mask = cpu_to_be64(~0ULL); 3185 break; 3186 } 3187 id_priv->bind_list = bind_list; 3188 hlist_add_head(&id_priv->node, &bind_list->owners); 3189 } 3190 3191 static int cma_alloc_port(enum rdma_port_space ps, 3192 struct rdma_id_private *id_priv, unsigned short snum) 3193 { 3194 struct rdma_bind_list *bind_list; 3195 int ret; 3196 3197 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); 3198 if (!bind_list) 3199 return -ENOMEM; 3200 3201 ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, 3202 snum); 3203 if (ret < 0) 3204 goto err; 3205 3206 bind_list->ps = ps; 3207 bind_list->port = (unsigned short)ret; 3208 cma_bind_port(bind_list, id_priv); 3209 return 0; 3210 err: 3211 kfree(bind_list); 3212 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret; 3213 } 3214 3215 static int cma_alloc_any_port(enum rdma_port_space ps, 3216 struct rdma_id_private *id_priv) 3217 { 3218 static unsigned int last_used_port; 3219 int low, high, remaining; 3220 unsigned int rover; 3221 struct vnet *net = id_priv->id.route.addr.dev_addr.net; 3222 u32 rand; 3223 3224 inet_get_local_port_range(net, &low, &high); 3225 remaining = (high - low) + 1; 3226 get_random_bytes(&rand, sizeof(rand)); 3227 rover = rand % remaining + low; 3228 retry: 3229 if (last_used_port != rover && 3230 !cma_ps_find(net, ps, (unsigned short)rover)) { 3231 int ret = cma_alloc_port(ps, id_priv, rover); 3232 /* 3233 * Remember previously used port number in order to avoid 3234 * re-using same port immediately after it is closed. 3235 */ 3236 if (!ret) 3237 last_used_port = rover; 3238 if (ret != -EADDRNOTAVAIL) 3239 return ret; 3240 } 3241 if (--remaining) { 3242 rover++; 3243 if ((rover < low) || (rover > high)) 3244 rover = low; 3245 goto retry; 3246 } 3247 return -EADDRNOTAVAIL; 3248 } 3249 3250 /* 3251 * Check that the requested port is available. This is called when trying to 3252 * bind to a specific port, or when trying to listen on a bound port. In 3253 * the latter case, the provided id_priv may already be on the bind_list, but 3254 * we still need to check that it's okay to start listening. 
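 * Callers hold the global 'lock' mutex while walking bind_list->owners. A
 * conflicting id on the same port is tolerated only when both ids requested
 * reuseaddr and the existing id is not yet listening; a wildcard address on
 * either side conflicts outright.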
3255 */ 3256 static int cma_check_port(struct rdma_bind_list *bind_list, 3257 struct rdma_id_private *id_priv, uint8_t reuseaddr) 3258 { 3259 struct rdma_id_private *cur_id; 3260 struct sockaddr *addr, *cur_addr; 3261 struct vnet *vnet; 3262 3263 addr = cma_src_addr(id_priv); 3264 hlist_for_each_entry(cur_id, &bind_list->owners, node) { 3265 if (id_priv == cur_id) 3266 continue; 3267 3268 if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr && 3269 cur_id->reuseaddr) 3270 continue; 3271 3272 cur_addr = cma_src_addr(cur_id); 3273 if (id_priv->afonly && cur_id->afonly && 3274 (addr->sa_family != cur_addr->sa_family)) 3275 continue; 3276 3277 vnet = cur_id->id.route.addr.dev_addr.net; 3278 if (cma_any_addr(vnet, addr) || cma_any_addr(vnet, cur_addr)) 3279 return -EADDRNOTAVAIL; 3280 3281 if (!cma_addr_cmp(addr, cur_addr)) 3282 return -EADDRINUSE; 3283 } 3284 return 0; 3285 } 3286 3287 static int cma_use_port(enum rdma_port_space ps, 3288 struct rdma_id_private *id_priv) 3289 { 3290 struct rdma_bind_list *bind_list; 3291 unsigned short snum; 3292 int ret; 3293 3294 snum = ntohs(cma_port(cma_src_addr(id_priv))); 3295 if (snum < IPPORT_RESERVED && 3296 priv_check(curthread, PRIV_NETINET_BINDANY) != 0) 3297 return -EACCES; 3298 3299 bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); 3300 if (!bind_list) { 3301 ret = cma_alloc_port(ps, id_priv, snum); 3302 } else { 3303 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); 3304 if (!ret) 3305 cma_bind_port(bind_list, id_priv); 3306 } 3307 return ret; 3308 } 3309 3310 static int cma_bind_listen(struct rdma_id_private *id_priv) 3311 { 3312 struct rdma_bind_list *bind_list = id_priv->bind_list; 3313 int ret = 0; 3314 3315 mutex_lock(&lock); 3316 if (bind_list->owners.first->next) 3317 ret = cma_check_port(bind_list, id_priv, 0); 3318 mutex_unlock(&lock); 3319 return ret; 3320 } 3321 3322 static enum rdma_port_space cma_select_inet_ps( 3323 struct rdma_id_private *id_priv) 3324 { 3325 switch (id_priv->id.ps) { 3326 case RDMA_PS_TCP: 3327 case RDMA_PS_UDP: 3328 case RDMA_PS_IPOIB: 3329 case RDMA_PS_IB: 3330 case RDMA_PS_SDP: 3331 return id_priv->id.ps; 3332 default: 3333 3334 return 0; 3335 } 3336 } 3337 3338 static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv) 3339 { 3340 enum rdma_port_space ps = 0; 3341 struct sockaddr_ib *sib; 3342 u64 sid_ps, mask, sid; 3343 3344 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); 3345 mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK; 3346 sid = be64_to_cpu(sib->sib_sid) & mask; 3347 3348 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { 3349 sid_ps = RDMA_IB_IP_PS_IB; 3350 ps = RDMA_PS_IB; 3351 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && 3352 (sid == (RDMA_IB_IP_PS_TCP & mask))) { 3353 sid_ps = RDMA_IB_IP_PS_TCP; 3354 ps = RDMA_PS_TCP; 3355 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && 3356 (sid == (RDMA_IB_IP_PS_UDP & mask))) { 3357 sid_ps = RDMA_IB_IP_PS_UDP; 3358 ps = RDMA_PS_UDP; 3359 } 3360 3361 if (ps) { 3362 sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib))); 3363 sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK | 3364 be64_to_cpu(sib->sib_sid_mask)); 3365 } 3366 return ps; 3367 } 3368 3369 static int cma_get_port(struct rdma_id_private *id_priv) 3370 { 3371 enum rdma_port_space ps; 3372 int ret; 3373 3374 if (cma_family(id_priv) != AF_IB) 3375 ps = cma_select_inet_ps(id_priv); 3376 else 3377 ps = 
cma_select_ib_ps(id_priv); 3378 if (!ps) 3379 return -EPROTONOSUPPORT; 3380 3381 mutex_lock(&lock); 3382 if (cma_any_port(cma_src_addr(id_priv))) 3383 ret = cma_alloc_any_port(ps, id_priv); 3384 else 3385 ret = cma_use_port(ps, id_priv); 3386 mutex_unlock(&lock); 3387 3388 return ret; 3389 } 3390 3391 static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, 3392 struct sockaddr *addr) 3393 { 3394 #ifdef INET6 3395 struct sockaddr_in6 sin6; 3396 3397 if (addr->sa_family != AF_INET6) 3398 return 0; 3399 3400 sin6 = *(struct sockaddr_in6 *)addr; 3401 3402 if (IN6_IS_SCOPE_LINKLOCAL(&sin6.sin6_addr) || 3403 IN6_IS_ADDR_MC_INTFACELOCAL(&sin6.sin6_addr)) { 3404 bool failure; 3405 3406 CURVNET_SET_QUIET(dev_addr->net); 3407 failure = sa6_recoverscope(&sin6) || sin6.sin6_scope_id == 0; 3408 CURVNET_RESTORE(); 3409 3410 /* check if IPv6 scope ID is not set */ 3411 if (failure) 3412 return -EINVAL; 3413 dev_addr->bound_dev_if = sin6.sin6_scope_id; 3414 } 3415 #endif 3416 return 0; 3417 } 3418 3419 int rdma_listen(struct rdma_cm_id *id, int backlog) 3420 { 3421 struct rdma_id_private *id_priv; 3422 int ret; 3423 3424 id_priv = container_of(id, struct rdma_id_private, id); 3425 if (id_priv->state == RDMA_CM_IDLE) { 3426 id->route.addr.src_addr.ss_family = AF_INET; 3427 ret = rdma_bind_addr(id, cma_src_addr(id_priv)); 3428 if (ret) 3429 return ret; 3430 } 3431 3432 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) 3433 return -EINVAL; 3434 3435 if (id_priv->reuseaddr) { 3436 ret = cma_bind_listen(id_priv); 3437 if (ret) 3438 goto err; 3439 } 3440 3441 id_priv->backlog = backlog; 3442 if (id->device) { 3443 if (rdma_cap_ib_cm(id->device, 1)) { 3444 ret = cma_ib_listen(id_priv); 3445 if (ret) 3446 goto err; 3447 } else if (rdma_cap_iw_cm(id->device, 1)) { 3448 ret = cma_iw_listen(id_priv, backlog); 3449 if (ret) 3450 goto err; 3451 } else { 3452 ret = -ENOSYS; 3453 goto err; 3454 } 3455 } else 3456 cma_listen_on_all(id_priv); 3457 3458 return 0; 3459 err: 3460 id_priv->backlog = 0; 3461 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); 3462 return ret; 3463 } 3464 EXPORT_SYMBOL(rdma_listen); 3465 3466 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) 3467 { 3468 struct rdma_id_private *id_priv; 3469 struct vnet *vnet = id->route.addr.dev_addr.net; 3470 int ret; 3471 3472 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 && 3473 addr->sa_family != AF_IB) 3474 return -EAFNOSUPPORT; 3475 3476 id_priv = container_of(id, struct rdma_id_private, id); 3477 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) 3478 return -EINVAL; 3479 3480 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); 3481 if (ret) 3482 goto err1; 3483 3484 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); 3485 if (!cma_any_addr(vnet, addr)) { 3486 ret = cma_translate_addr(addr, &id->route.addr.dev_addr); 3487 if (ret) 3488 goto err1; 3489 3490 ret = cma_acquire_dev(id_priv, NULL); 3491 if (ret) 3492 goto err1; 3493 } 3494 3495 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { 3496 if (addr->sa_family == AF_INET) 3497 id_priv->afonly = 1; 3498 #ifdef INET6 3499 else if (addr->sa_family == AF_INET6) { 3500 CURVNET_SET_QUIET(vnet); 3501 id_priv->afonly = V_ip6_v6only; 3502 CURVNET_RESTORE(); 3503 } 3504 #endif 3505 } 3506 ret = cma_get_port(id_priv); 3507 if (ret) 3508 goto err2; 3509 3510 return 0; 3511 err2: 3512 if (id_priv->cma_dev) 3513 cma_release_dev(id_priv); 3514 err1: 3515 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); 3516 return 
ret; 3517 } 3518 EXPORT_SYMBOL(rdma_bind_addr); 3519 3520 static int sdp_format_hdr(struct sdp_hh *sdp_hdr, struct rdma_id_private *id_priv) 3521 { 3522 /* 3523 * XXXCEM: CMA just sets the version itself rather than relying on 3524 * passed in packet to have the major version set. Should we? 3525 */ 3526 if (sdp_get_majv(sdp_hdr->majv_minv) != SDP_MAJ_VERSION) 3527 return -EINVAL; 3528 3529 if (cma_family(id_priv) == AF_INET) { 3530 struct sockaddr_in *src4, *dst4; 3531 3532 src4 = (struct sockaddr_in *) cma_src_addr(id_priv); 3533 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv); 3534 3535 sdp_set_ip_ver(sdp_hdr, 4); 3536 sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 3537 sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 3538 sdp_hdr->port = src4->sin_port; 3539 } else if (cma_family(id_priv) == AF_INET6) { 3540 struct sockaddr_in6 *src6, *dst6; 3541 3542 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); 3543 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); 3544 3545 sdp_set_ip_ver(sdp_hdr, 6); 3546 sdp_hdr->src_addr.ip6 = src6->sin6_addr; 3547 sdp_hdr->dst_addr.ip6 = dst6->sin6_addr; 3548 sdp_hdr->port = src6->sin6_port; 3549 cma_ip6_clear_scope_id(&sdp_hdr->src_addr.ip6); 3550 cma_ip6_clear_scope_id(&sdp_hdr->dst_addr.ip6); 3551 } else 3552 return -EAFNOSUPPORT; 3553 return 0; 3554 } 3555 3556 static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) 3557 { 3558 struct cma_hdr *cma_hdr; 3559 3560 if (id_priv->id.ps == RDMA_PS_SDP) 3561 return sdp_format_hdr(hdr, id_priv); 3562 3563 cma_hdr = hdr; 3564 cma_hdr->cma_version = CMA_VERSION; 3565 if (cma_family(id_priv) == AF_INET) { 3566 struct sockaddr_in *src4, *dst4; 3567 3568 src4 = (struct sockaddr_in *) cma_src_addr(id_priv); 3569 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv); 3570 3571 cma_set_ip_ver(cma_hdr, 4); 3572 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 3573 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 3574 cma_hdr->port = src4->sin_port; 3575 } else if (cma_family(id_priv) == AF_INET6) { 3576 struct sockaddr_in6 *src6, *dst6; 3577 3578 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); 3579 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); 3580 3581 cma_set_ip_ver(cma_hdr, 6); 3582 cma_hdr->src_addr.ip6 = src6->sin6_addr; 3583 cma_hdr->dst_addr.ip6 = dst6->sin6_addr; 3584 cma_hdr->port = src6->sin6_port; 3585 cma_ip6_clear_scope_id(&cma_hdr->src_addr.ip6); 3586 cma_ip6_clear_scope_id(&cma_hdr->dst_addr.ip6); 3587 } 3588 return 0; 3589 } 3590 3591 static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, 3592 struct ib_cm_event *ib_event) 3593 { 3594 struct rdma_id_private *id_priv = cm_id->context; 3595 struct rdma_cm_event event; 3596 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; 3597 int ret = 0; 3598 3599 mutex_lock(&id_priv->handler_mutex); 3600 if (id_priv->state != RDMA_CM_CONNECT) 3601 goto out; 3602 3603 memset(&event, 0, sizeof event); 3604 switch (ib_event->event) { 3605 case IB_CM_SIDR_REQ_ERROR: 3606 event.event = RDMA_CM_EVENT_UNREACHABLE; 3607 event.status = -ETIMEDOUT; 3608 break; 3609 case IB_CM_SIDR_REP_RECEIVED: 3610 event.param.ud.private_data = ib_event->private_data; 3611 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; 3612 if (rep->status != IB_SIDR_SUCCESS) { 3613 event.event = RDMA_CM_EVENT_UNREACHABLE; 3614 event.status = ib_event->param.sidr_rep_rcvd.status; 3615 break; 3616 } 3617 ret = cma_set_qkey(id_priv, rep->qkey); 3618 if (ret) { 3619 event.event = RDMA_CM_EVENT_ADDR_ERROR; 3620 
event.status = ret; 3621 break; 3622 } 3623 ret = ib_init_ah_from_path(id_priv->id.device, 3624 id_priv->id.port_num, 3625 id_priv->id.route.path_rec, 3626 &event.param.ud.ah_attr); 3627 if (ret) { 3628 event.event = RDMA_CM_EVENT_ADDR_ERROR; 3629 event.status = ret; 3630 break; 3631 } 3632 event.param.ud.qp_num = rep->qpn; 3633 event.param.ud.qkey = rep->qkey; 3634 event.event = RDMA_CM_EVENT_ESTABLISHED; 3635 event.status = 0; 3636 break; 3637 default: 3638 pr_err("RDMA CMA: unexpected IB CM event: %d\n", 3639 ib_event->event); 3640 goto out; 3641 } 3642 3643 ret = id_priv->id.event_handler(&id_priv->id, &event); 3644 if (ret) { 3645 /* Destroy the CM ID by returning a non-zero value. */ 3646 id_priv->cm_id.ib = NULL; 3647 cma_exch(id_priv, RDMA_CM_DESTROYING); 3648 mutex_unlock(&id_priv->handler_mutex); 3649 rdma_destroy_id(&id_priv->id); 3650 return ret; 3651 } 3652 out: 3653 mutex_unlock(&id_priv->handler_mutex); 3654 return ret; 3655 } 3656 3657 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, 3658 struct rdma_conn_param *conn_param) 3659 { 3660 struct ib_cm_sidr_req_param req; 3661 struct ib_cm_id *id; 3662 void *private_data; 3663 int offset, ret; 3664 3665 memset(&req, 0, sizeof req); 3666 offset = cma_user_data_offset(id_priv); 3667 req.private_data_len = offset + conn_param->private_data_len; 3668 if (req.private_data_len < conn_param->private_data_len) 3669 return -EINVAL; 3670 3671 if (req.private_data_len) { 3672 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 3673 if (!private_data) 3674 return -ENOMEM; 3675 } else { 3676 private_data = NULL; 3677 } 3678 3679 if (conn_param->private_data && conn_param->private_data_len) 3680 memcpy((char *)private_data + offset, conn_param->private_data, 3681 conn_param->private_data_len); 3682 3683 if (private_data) { 3684 ret = cma_format_hdr(private_data, id_priv); 3685 if (ret) 3686 goto out; 3687 req.private_data = private_data; 3688 } 3689 3690 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, 3691 id_priv); 3692 if (IS_ERR(id)) { 3693 ret = PTR_ERR(id); 3694 goto out; 3695 } 3696 id_priv->cm_id.ib = id; 3697 3698 req.path = id_priv->id.route.path_rec; 3699 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 3700 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); 3701 req.max_cm_retries = CMA_MAX_CM_RETRIES; 3702 3703 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); 3704 if (ret) { 3705 ib_destroy_cm_id(id_priv->cm_id.ib); 3706 id_priv->cm_id.ib = NULL; 3707 } 3708 out: 3709 kfree(private_data); 3710 return ret; 3711 } 3712 3713 static int cma_connect_ib(struct rdma_id_private *id_priv, 3714 struct rdma_conn_param *conn_param) 3715 { 3716 struct ib_cm_req_param req; 3717 struct rdma_route *route; 3718 void *private_data; 3719 struct ib_cm_id *id; 3720 int offset, ret; 3721 3722 memset(&req, 0, sizeof req); 3723 offset = cma_user_data_offset(id_priv); 3724 req.private_data_len = offset + conn_param->private_data_len; 3725 if (req.private_data_len < conn_param->private_data_len) 3726 return -EINVAL; 3727 3728 if (req.private_data_len) { 3729 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 3730 if (!private_data) 3731 return -ENOMEM; 3732 } else { 3733 private_data = NULL; 3734 } 3735 3736 if (conn_param->private_data && conn_param->private_data_len) 3737 memcpy((char *)private_data + offset, conn_param->private_data, 3738 conn_param->private_data_len); 3739 3740 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv); 3741 if (IS_ERR(id)) { 3742 ret = 
PTR_ERR(id); 3743 goto out; 3744 } 3745 id_priv->cm_id.ib = id; 3746 3747 route = &id_priv->id.route; 3748 if (private_data) { 3749 ret = cma_format_hdr(private_data, id_priv); 3750 if (ret) 3751 goto out; 3752 req.private_data = private_data; 3753 } 3754 3755 req.primary_path = &route->path_rec[0]; 3756 if (route->num_paths == 2) 3757 req.alternate_path = &route->path_rec[1]; 3758 3759 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 3760 req.qp_num = id_priv->qp_num; 3761 req.qp_type = id_priv->id.qp_type; 3762 req.starting_psn = id_priv->seq_num; 3763 req.responder_resources = conn_param->responder_resources; 3764 req.initiator_depth = conn_param->initiator_depth; 3765 req.flow_control = conn_param->flow_control; 3766 req.retry_count = min_t(u8, 7, conn_param->retry_count); 3767 req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 3768 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 3769 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 3770 req.max_cm_retries = CMA_MAX_CM_RETRIES; 3771 req.srq = id_priv->srq ? 1 : 0; 3772 3773 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); 3774 out: 3775 if (ret && !IS_ERR(id)) { 3776 ib_destroy_cm_id(id); 3777 id_priv->cm_id.ib = NULL; 3778 } 3779 3780 kfree(private_data); 3781 return ret; 3782 } 3783 3784 static int cma_connect_iw(struct rdma_id_private *id_priv, 3785 struct rdma_conn_param *conn_param) 3786 { 3787 struct iw_cm_id *cm_id; 3788 int ret; 3789 struct iw_cm_conn_param iw_param; 3790 3791 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); 3792 if (IS_ERR(cm_id)) 3793 return PTR_ERR(cm_id); 3794 3795 cm_id->tos = id_priv->tos; 3796 id_priv->cm_id.iw = cm_id; 3797 3798 memcpy(&cm_id->local_addr, cma_src_addr(id_priv), 3799 rdma_addr_size(cma_src_addr(id_priv))); 3800 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), 3801 rdma_addr_size(cma_dst_addr(id_priv))); 3802 3803 ret = cma_modify_qp_rtr(id_priv, conn_param); 3804 if (ret) 3805 goto out; 3806 3807 if (conn_param) { 3808 iw_param.ord = conn_param->initiator_depth; 3809 iw_param.ird = conn_param->responder_resources; 3810 iw_param.private_data = conn_param->private_data; 3811 iw_param.private_data_len = conn_param->private_data_len; 3812 iw_param.qpn = id_priv->id.qp ? 
id_priv->qp_num : conn_param->qp_num; 3813 } else { 3814 memset(&iw_param, 0, sizeof iw_param); 3815 iw_param.qpn = id_priv->qp_num; 3816 } 3817 ret = iw_cm_connect(cm_id, &iw_param); 3818 out: 3819 if (ret) { 3820 iw_destroy_cm_id(cm_id); 3821 id_priv->cm_id.iw = NULL; 3822 } 3823 return ret; 3824 } 3825 3826 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 3827 { 3828 struct rdma_id_private *id_priv; 3829 int ret; 3830 3831 id_priv = container_of(id, struct rdma_id_private, id); 3832 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) 3833 return -EINVAL; 3834 3835 if (!id->qp) { 3836 id_priv->qp_num = conn_param->qp_num; 3837 id_priv->srq = conn_param->srq; 3838 } 3839 3840 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3841 if (id->qp_type == IB_QPT_UD) 3842 ret = cma_resolve_ib_udp(id_priv, conn_param); 3843 else 3844 ret = cma_connect_ib(id_priv, conn_param); 3845 } else if (rdma_cap_iw_cm(id->device, id->port_num)) 3846 ret = cma_connect_iw(id_priv, conn_param); 3847 else 3848 ret = -ENOSYS; 3849 if (ret) 3850 goto err; 3851 3852 return 0; 3853 err: 3854 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); 3855 return ret; 3856 } 3857 EXPORT_SYMBOL(rdma_connect); 3858 3859 static int cma_accept_ib(struct rdma_id_private *id_priv, 3860 struct rdma_conn_param *conn_param) 3861 { 3862 struct ib_cm_rep_param rep; 3863 int ret; 3864 3865 ret = cma_modify_qp_rtr(id_priv, conn_param); 3866 if (ret) 3867 goto out; 3868 3869 ret = cma_modify_qp_rts(id_priv, conn_param); 3870 if (ret) 3871 goto out; 3872 3873 memset(&rep, 0, sizeof rep); 3874 rep.qp_num = id_priv->qp_num; 3875 rep.starting_psn = id_priv->seq_num; 3876 rep.private_data = conn_param->private_data; 3877 rep.private_data_len = conn_param->private_data_len; 3878 rep.responder_resources = conn_param->responder_resources; 3879 rep.initiator_depth = conn_param->initiator_depth; 3880 rep.failover_accepted = 0; 3881 rep.flow_control = conn_param->flow_control; 3882 rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 3883 rep.srq = id_priv->srq ? 
1 : 0; 3884 3885 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); 3886 out: 3887 return ret; 3888 } 3889 3890 static int cma_accept_iw(struct rdma_id_private *id_priv, 3891 struct rdma_conn_param *conn_param) 3892 { 3893 struct iw_cm_conn_param iw_param; 3894 int ret; 3895 3896 ret = cma_modify_qp_rtr(id_priv, conn_param); 3897 if (ret) 3898 return ret; 3899 3900 iw_param.ord = conn_param->initiator_depth; 3901 iw_param.ird = conn_param->responder_resources; 3902 iw_param.private_data = conn_param->private_data; 3903 iw_param.private_data_len = conn_param->private_data_len; 3904 if (id_priv->id.qp) { 3905 iw_param.qpn = id_priv->qp_num; 3906 } else 3907 iw_param.qpn = conn_param->qp_num; 3908 3909 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); 3910 } 3911 3912 static int cma_send_sidr_rep(struct rdma_id_private *id_priv, 3913 enum ib_cm_sidr_status status, u32 qkey, 3914 const void *private_data, int private_data_len) 3915 { 3916 struct ib_cm_sidr_rep_param rep; 3917 int ret; 3918 3919 memset(&rep, 0, sizeof rep); 3920 rep.status = status; 3921 if (status == IB_SIDR_SUCCESS) { 3922 ret = cma_set_qkey(id_priv, qkey); 3923 if (ret) 3924 return ret; 3925 rep.qp_num = id_priv->qp_num; 3926 rep.qkey = id_priv->qkey; 3927 } 3928 rep.private_data = private_data; 3929 rep.private_data_len = private_data_len; 3930 3931 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); 3932 } 3933 3934 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 3935 { 3936 struct rdma_id_private *id_priv; 3937 int ret; 3938 3939 id_priv = container_of(id, struct rdma_id_private, id); 3940 3941 id_priv->owner = task_pid_nr(current); 3942 3943 if (!cma_comp(id_priv, RDMA_CM_CONNECT)) 3944 return -EINVAL; 3945 3946 if (!id->qp && conn_param) { 3947 id_priv->qp_num = conn_param->qp_num; 3948 id_priv->srq = conn_param->srq; 3949 } 3950 3951 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3952 if (id->qp_type == IB_QPT_UD) { 3953 if (conn_param) 3954 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 3955 conn_param->qkey, 3956 conn_param->private_data, 3957 conn_param->private_data_len); 3958 else 3959 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 3960 0, NULL, 0); 3961 } else { 3962 if (conn_param) 3963 ret = cma_accept_ib(id_priv, conn_param); 3964 else 3965 ret = cma_rep_recv(id_priv); 3966 } 3967 } else if (rdma_cap_iw_cm(id->device, id->port_num)) 3968 ret = cma_accept_iw(id_priv, conn_param); 3969 else 3970 ret = -ENOSYS; 3971 3972 if (ret) 3973 goto reject; 3974 3975 return 0; 3976 reject: 3977 cma_modify_qp_err(id_priv); 3978 rdma_reject(id, NULL, 0); 3979 return ret; 3980 } 3981 EXPORT_SYMBOL(rdma_accept); 3982 3983 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) 3984 { 3985 struct rdma_id_private *id_priv; 3986 int ret; 3987 3988 id_priv = container_of(id, struct rdma_id_private, id); 3989 if (!id_priv->cm_id.ib) 3990 return -EINVAL; 3991 3992 switch (id->device->node_type) { 3993 case RDMA_NODE_IB_CA: 3994 ret = ib_cm_notify(id_priv->cm_id.ib, event); 3995 break; 3996 default: 3997 ret = 0; 3998 break; 3999 } 4000 return ret; 4001 } 4002 EXPORT_SYMBOL(rdma_notify); 4003 4004 int rdma_reject(struct rdma_cm_id *id, const void *private_data, 4005 u8 private_data_len) 4006 { 4007 struct rdma_id_private *id_priv; 4008 int ret; 4009 4010 id_priv = container_of(id, struct rdma_id_private, id); 4011 if (!id_priv->cm_id.ib) 4012 return -EINVAL; 4013 4014 if (rdma_cap_ib_cm(id->device, id->port_num)) { 4015 if (id->qp_type == IB_QPT_UD) 4016 ret = cma_send_sidr_rep(id_priv, 
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
	} else
		ret = -ENOSYS;

	return ret;
}
EXPORT_SYMBOL(rdma_reject);

int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
	} else
		ret = -EINVAL;

out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);

static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret = 0;

	id_priv = mc->id_priv;
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != RDMA_CM_ADDR_BOUND &&
	    id_priv->state != RDMA_CM_ADDR_RESOLVED)
		goto out;

	if (!status)
		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 be16_to_cpu(multicast->rec.mlid));
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		struct rdma_dev_addr *dev_addr =
		    &id_priv->id.route.addr.dev_addr;
		if_t ndev =
		    dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
		enum ib_gid_type gid_type =
		    id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
		    rdma_start_port(id_priv->cma_dev->device)];

		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ret = ib_init_ah_from_mcmember(id_priv->id.device,
					       id_priv->id.port_num,
					       &multicast->rec,
					       ndev, gid_type,
					       &event.param.ud.ah_attr);
		if (ret)
			event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
		if (ndev)
			dev_put(ndev);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}
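/*
 * Derive the multicast GID for an IB join from the caller-supplied
 * socket address: a wildcard address yields the zero MGID, an IPv6
 * address carrying an SA-assigned MGID and AF_IB addresses are used
 * verbatim, and other IPv4/IPv6 addresses are run through the IP-over-IB
 * multicast mapping (with the RDMA CM signature byte for RDMA_PS_UDP).
 */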
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(dev_addr->net, addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
		    0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_IB) {
		memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}

static void cma_query_sa_classport_info_cb(int status,
					   struct ib_class_port_info *rec,
					   void *context)
{
	struct class_port_info_context *cb_ctx = context;

	WARN_ON(!context);

	if (status || !rec) {
		pr_debug("RDMA CM: %s port %u failed query ClassPortInfo status: %d\n",
			 cb_ctx->device->name, cb_ctx->port_num, status);
		goto out;
	}

	memcpy(cb_ctx->class_port_info, rec, sizeof(struct ib_class_port_info));

out:
	complete(&cb_ctx->done);
}

static int cma_query_sa_classport_info(struct ib_device *device, u8 port_num,
				       struct ib_class_port_info *class_port_info)
{
	struct class_port_info_context *cb_ctx;
	int ret;

	cb_ctx = kmalloc(sizeof(*cb_ctx), GFP_KERNEL);
	if (!cb_ctx)
		return -ENOMEM;

	cb_ctx->device = device;
	cb_ctx->class_port_info = class_port_info;
	cb_ctx->port_num = port_num;
	init_completion(&cb_ctx->done);

	ret = ib_sa_classport_info_rec_query(&sa_client, device, port_num,
					     CMA_QUERY_CLASSPORT_INFO_TIMEOUT,
					     GFP_KERNEL, cma_query_sa_classport_info_cb,
					     cb_ctx, &cb_ctx->sa_query);
	if (ret < 0) {
		pr_err("RDMA CM: %s port %u failed to send ClassPortInfo query, ret: %d\n",
		       device->name, port_num, ret);
		goto out;
	}

	wait_for_completion(&cb_ctx->done);

out:
	kfree(cb_ctx);
	return ret;
}
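/*
 * Join an IB multicast group through the SA: build the MCMember record
 * (MGID, port GID, P_Key, Q_Key and join state) and issue the join.
 * For send-only full-member joins, first check via ClassPortInfo that
 * the SM advertises IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT.
 */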
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct ib_class_port_info class_port_info;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	ret = cma_set_qkey(id_priv, 0);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	rec.qkey = cpu_to_be32(id_priv->qkey);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = mc->join_state;

	if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) {
		ret = cma_query_sa_classport_info(id_priv->id.device,
						  id_priv->id.port_num,
						  &class_port_info);

		if (ret)
			return ret;

		if (!(ib_get_cpi_capmask2(&class_port_info) &
		      IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) {
			pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
				"RDMA CM: SM doesn't support Send Only Full Member option\n",
				id_priv->id.device->name, id_priv->id.port_num);
			return -EOPNOTSUPP;
		}
	}

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU |
			     IB_SA_MCMEMBER_REC_HOP_LIMIT;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	return PTR_ERR_OR_ZERO(mc->multicast.ib);
}

static void iboe_mcast_work_handler(struct work_struct *work)
{
	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
	struct cma_multicast *mc = mw->mc;
	struct ib_sa_multicast *m = mc->multicast.ib;

	mc->multicast.ib->context = mc;
	cma_ib_mc_handler(0, m);
	kref_put(&mc->mcref, release_mc);
	kfree(mw);
}

static void cma_iboe_set_mgid(struct vnet *vnet, struct sockaddr *addr,
			      union ib_gid *mgid, enum ib_gid_type gid_type)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(vnet, addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] =
		    (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff;
		mgid->raw[1] =
		    (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}
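/*
 * RoCE (IBoE) multicast join.  No SA is involved, so the MCMember
 * record is synthesized locally from the bound net device (rate, MTU,
 * hop limit) and the join completion is reported asynchronously from
 * the workqueue through cma_ib_mc_handler().  For RoCEv2 the group is
 * additionally joined on the net device via cma_igmp_send(), unless
 * the join is send-only.
 */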
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err = 0;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	if_t ndev = NULL;
	enum ib_gid_type gid_type;
	bool send_only;

	send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
		   rdma_start_port(id_priv->cma_dev->device)];
	cma_iboe_set_mgid(dev_addr->net, addr, &mc->multicast.ib->rec.mgid, gid_type);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}
	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(if_getmtu(ndev));

	if (addr->sa_family == AF_INET || addr->sa_family == AF_INET6) {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
			if (!send_only) {
				err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
						    true);
				if (!err)
					mc->igmp_joined = true;
			}
		}
	} else {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			err = -ENOTSUPP;
	}
	dev_put(ndev);
	if (err || !mc->multicast.ib->rec.mtu) {
		if (!err)
			err = -EINVAL;
		goto out2;
	}
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &mc->multicast.ib->rec.port_gid);
	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}
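/*
 * rdma_join_multicast - join a multicast group on behalf of an id.
 * The id must have a bound or resolved address.  The request is linked
 * onto the id's mc_list and dispatched to the RoCE or IB (SA) join
 * path depending on the port's transport.
 */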
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			u8 join_state, void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	if (!id->device)
		return -EINVAL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;
	mc->igmp_joined = false;
	mc->join_state = join_state;
	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	if (rdma_protocol_roce(id->device, id->port_num)) {
		kref_init(&mc->mcref);
		ret = cma_iboe_join_multicast(id_priv, mc);
	} else if (rdma_cap_ib_mcast(id->device, id->port_num))
		ret = cma_join_ib_multicast(id_priv, mc);
	else
		ret = -ENOSYS;

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);

void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						be16_to_cpu(mc->multicast.ib->rec.mlid));

			BUG_ON(id_priv->cma_dev->device != id->device);

			if (rdma_cap_ib_mcast(id->device, id->port_num)) {
				ib_sa_free_multicast(mc->multicast.ib);
				kfree(mc);
			} else if (rdma_protocol_roce(id->device, id->port_num)) {
				if (mc->igmp_joined) {
					struct rdma_dev_addr *dev_addr =
					    &id->route.addr.dev_addr;
					if_t ndev = NULL;

					if (dev_addr->bound_dev_if)
						ndev = dev_get_by_index(dev_addr->net,
									dev_addr->bound_dev_if);
					if (ndev) {
						cma_igmp_send(ndev,
							      &mc->multicast.ib->rec.mgid,
							      false);
						dev_put(ndev);
					}
					mc->igmp_joined = false;
				}
				kref_put(&mc->mcref, release_mc);
			}
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
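/*
 * Sysctl handler backing the per-port "default_roce_mode_port<N>"
 * nodes: report the current default GID type as a string and, on
 * write, parse and apply the new value via cma_set_default_gid_type().
 */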
static int
sysctl_cma_default_roce_mode(SYSCTL_HANDLER_ARGS)
{
	struct cma_device *cma_dev = arg1;
	const int port = arg2;
	char buf[64];
	int error;

	strlcpy(buf, ib_cache_gid_type_str(
	    cma_get_default_gid_type(cma_dev, port)), sizeof(buf));

	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		goto done;

	error = ib_cache_gid_parse_type_str(buf);
	if (error < 0) {
		error = EINVAL;
		goto done;
	}

	cma_set_default_gid_type(cma_dev, port, error);
	error = 0;
done:
	return (error);
}

static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	unsigned int i;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	sysctl_ctx_init(&cma_dev->sysctl_ctx);

	cma_dev->device = device;
	cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_gid_type),
					    GFP_KERNEL);
	if (!cma_dev->default_gid_type) {
		kfree(cma_dev);
		return;
	}
	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		unsigned long supported_gids;
		unsigned int default_gid_type;

		supported_gids = roce_gid_type_mask_support(device, i);

		if (WARN_ON(!supported_gids)) {
			/* set something valid */
			default_gid_type = 0;
		} else if (test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) {
			/* prefer RoCEv2, if supported */
			default_gid_type = IB_GID_TYPE_ROCE_UDP_ENCAP;
		} else {
			default_gid_type = find_first_bit(&supported_gids,
							  BITS_PER_LONG);
		}
		cma_dev->default_gid_type[i - rdma_start_port(device)] =
		    default_gid_type;
	}

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		char buf[64];

		snprintf(buf, sizeof(buf), "default_roce_mode_port%d", i);

		(void) SYSCTL_ADD_PROC(&cma_dev->sysctl_ctx,
		    SYSCTL_CHILDREN(device->ports_parent->parent->oidp),
		    OID_AUTO, buf, CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
		    cma_dev, i, &sysctl_cma_default_roce_mode, "A",
		    "Default RoCE mode. Valid values: IB/RoCE v1 and RoCE v2");
	}
}
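/*
 * Notify one id that its device is being removed: switch it to the
 * DEVICE_REMOVAL state, cancel any pending work and deliver
 * RDMA_CM_EVENT_DEVICE_REMOVAL.  A non-zero return from the consumer's
 * handler tells the caller to destroy the id.
 */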
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum rdma_cm_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
	if (state == RDMA_CM_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device, void *client_data)
{
	struct cma_device *cma_dev = client_data;

	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	sysctl_ctx_free(&cma_dev->sysctl_ctx);
	kfree(cma_dev->default_gid_type);
	kfree(cma_dev);
}

static void cma_init_vnet(void *arg)
{
	struct cma_pernet *pernet = &VNET(cma_pernet);

	idr_init(&pernet->tcp_ps);
	idr_init(&pernet->udp_ps);
	idr_init(&pernet->ipoib_ps);
	idr_init(&pernet->ib_ps);
	idr_init(&pernet->sdp_ps);
}
VNET_SYSINIT(cma_init_vnet, SI_SUB_OFED_MODINIT - 1, SI_ORDER_FIRST, cma_init_vnet, NULL);

static void cma_destroy_vnet(void *arg)
{
	struct cma_pernet *pernet = &VNET(cma_pernet);

	idr_destroy(&pernet->tcp_ps);
	idr_destroy(&pernet->udp_ps);
	idr_destroy(&pernet->ipoib_ps);
	idr_destroy(&pernet->ib_ps);
	idr_destroy(&pernet->sdp_ps);
}
VNET_SYSUNINIT(cma_destroy_vnet, SI_SUB_OFED_MODINIT - 1, SI_ORDER_SECOND, cma_destroy_vnet, NULL);

static int __init cma_init(void)
{
	int ret;

	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	cma_configfs_init();

	return 0;

err:
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}

static void __exit cma_cleanup(void)
{
	cma_configfs_exit();
	ib_unregister_client(&cma_client);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
}

module_init_order(cma_init, SI_ORDER_FOURTH);
module_exit_order(cma_cleanup, SI_ORDER_FOURTH);