/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <sys/filio.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
	struct workqueue_struct	*close_wq;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
	/* mark that device is in process of destroying the internal HW
	 * resources, protected by the global mut
	 */
	int			closing;
	/* sync between removal event and id destroy, protected by file mut */
	int			destroying;
	struct work_struct	close_work;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	u8			join_state;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct	close_work;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
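
/*
 * Look up a context by id and check that it belongs to the given file.
 * The caller must hold the global "mut" mutex.
 */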
static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file || !ctx->cm_id)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			atomic_inc(&ctx->ref);
	}
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

/*
 * Same as ucma_get_ctx but requires that ->cm_id->device is valid, e.g. that
 * the CM_ID is bound.
 */
static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
{
	struct ucma_context *ctx = ucma_get_ctx(file, id);

	if (IS_ERR(ctx))
		return ctx;
	if (!ctx->cm_id->device) {
		ucma_put_ctx(ctx);
		return ERR_PTR(-EINVAL);
	}
	return ctx;
}

static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* Once all inflight tasks are finished, we close all underlying
	 * resources. The context is still alive until it is explicitly
	 * destroyed by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}
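
/*
 * Allocate a context, assign it an id from ctx_idr, and link it onto the
 * owning file's context list. Returns NULL on allocation failure.
 */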
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}
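
/*
 * Fill in the uid/id pair that identifies the event to userspace.
 * Multicast events carry the ucma_multicast pointer in their UD
 * private data.
 */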
static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = __DECONST(struct ucma_multicast *,
		    event->param.ud.private_data);
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* Only if the context points at this cm_id does it own it and can it
	 * be queued to be closed; otherwise the cm_id is an inflight one that
	 * sits on the context's event list, pending to be detached and
	 * reattached to its new context as part of ucma_get_event. That case
	 * is handled separately below.
	 */
	if (ctx->cm_id == cm_id) {
		mutex_lock(&mut);
		ctx->closing = 1;
		mutex_unlock(&mut);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}
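
/*
 * rdma_cm event callback: translate the event into a ucma_event, queue it
 * on the owning file's event list and wake up pollers. Device removal is
 * forwarded to ucma_removal_event_handler().
 */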
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context. This can only happen if an error occurs on a
		 * new connection before the user accepts it. This is okay,
		 * since the accept will just fail later. However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof(uevent->resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof(uevent->resp))) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct rdma_cm_id *cm_id;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	cm_id = rdma_create_id(TD_TO_VNET(curthread),
			       ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}

	ctx->cm_id = cm_id;
	return 0;

err2:
	rdma_destroy_id(cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	mutex_lock(&file->mut);
	list_del(&ctx->list);
	mutex_unlock(&file->mut);
	kfree(ctx);
	return ret;
}
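
/*
 * Release all multicast joins still attached to a context and free their
 * multicast_idr entries.
 */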
static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to clean up the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing. We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context pending events list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/* At this point it's guaranteed that there is no inflight closing
	 * task.
	 */
	mutex_lock(&mut);
	if (!ctx->closing) {
		mutex_unlock(&mut);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		mutex_unlock(&mut);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
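
/*
 * Bind the CM_ID to an IPv4/IPv6 address passed in via the BIND_IP command
 * (the IP-only predecessor of the BIND command below).
 */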
static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved || !cmd.addr_size ||
	    cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
	    !rdma_addr_size_in6(&cmd.dst_addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved ||
	    (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}
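
/*
 * RoCE variant of the route copy: while no path records exist, the GIDs
 * are derived directly from the IP addresses.
 */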
static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}
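
/*
 * Report the bound source/destination addresses and, when the id is bound
 * to a device, the transport specific route (IB, RoCE or iWARP).
 */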
static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof(resp));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof(resp));

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		ib_sa_pack_path(&ctx->cm_id->route.path_rec[i],
				&resp->path_data[i].path_rec);
	}

	if (copy_to_user(response, resp,
			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}
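
/*
 * Return the source and destination addresses as AF_IB sockaddrs,
 * synthesizing them from the resolved GID, pkey and service id when the
 * id was bound with an IP address.
 */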
static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof(resp));

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = (void __user *)(unsigned long) cmd.response;
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}
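
/*
 * Start listening on the CM_ID. A requested backlog outside the range
 * (0, max_backlog) is clamped to max_backlog.
 */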
static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = rdma_accept(ctx->cm_id, &conn_param);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}
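
/*
 * Apply an RDMA_OPTION_ID level option (TOS, REUSEADDR, AFONLY or
 * ACK_TIMEOUT) after validating the option length.
 */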
static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_ACK_TIMEOUT:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_ack_timeout(ctx->cm_id, *((u8 *)optval));
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof(event));
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}
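
/*
 * Common worker for the multicast join commands: allocate a
 * ucma_multicast, join on the CM_ID and report the new multicast id to
 * userspace.
 */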
static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;
	u8 join_state;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
		return -EINVAL;

	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
		join_state = BIT(FULLMEMBER_JOIN);
	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
	else
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}
	mc->join_state = join_state;
	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
				  join_state, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long) cmd->response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	return ucma_process_join(file, &cmd, out_len);
}
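
/*
 * Detach from a multicast group joined through this file, flush its
 * queued events and report the number of events already delivered.
 */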
static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!atomic_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire the mutexes in pointer order to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}
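
/*
 * Move a CM_ID, along with any events still queued for it, from the file
 * it was created on to the calling file descriptor.
 */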
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID] 	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};

static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
		return -EACCES;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
						 WQ_MEM_RECLAIM);
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		flush_workqueue(file->close_wq);
		/* Once the ctx has been marked as destroying and the
		 * workqueue flushed, we are safe from any inflight handlers
		 * that might queue another closing task.
		 */
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
			ucma_put_ctx(ctx);
			wait_for_completion(&ctx->comp);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			mutex_unlock(&mut);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}

static long
ucma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return (0);
	default:
		return (-ENOTTY);
	}
}

static const struct file_operations ucma_fops = {
	.owner 		= THIS_MODULE,
	.open 		= ucma_open,
	.release 	= ucma_close,
	.write		= ucma_write,
	.unlocked_ioctl = ucma_ioctl,
	.poll		= ucma_poll,
	.llseek		= no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	return 0;
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init_order(ucma_init, SI_ORDER_FIFTH);
module_exit_order(ucma_cleanup, SI_ORDER_FIFTH);