/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define	LINUXKPI_PARAM_PREFIX ibcore_

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rbtree.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"

#include <sys/priv.h>

struct uverbs_lock_class {
	char name[16];
};

static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
static struct uverbs_lock_class wq_lock_class = { .name = "WQ-uobj" };
static struct uverbs_lock_class rwq_ind_table_lock_class = { .name = "IND_TBL-uobj" };

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr write operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.  For read operations, rcu_read_lock() is used
 *   instead, and the kref reference is likewise grabbed before
 *   rcu_read_unlock().
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
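/*
 * Illustrative sketch (editor's note, not part of the driver): a typical
 * command handler applies the lookup/use/put pattern described above using
 * the helpers defined below.  The handle value and surrounding handler are
 * hypothetical; only the helper names are taken from this file.
 *
 *	struct ib_pd *pd;
 *
 *	pd = idr_read_pd(pd_handle, file->ucontext);
 *	if (!pd)
 *		return -EINVAL;		// stale handle or wrong context
 *	// ... use pd while its rwsem is held for reading ...
 *	put_pd_read(pd);		// up_read() + kref_put()
 */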
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	uobj->live = 0;
}

static void release_uobj(struct kref *kref)
{
	kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
}

static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}

static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}

static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	rcu_read_lock();
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	rcu_read_unlock();

	return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}

static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}

static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_wq *idr_read_wq(int wq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_wq_idr, wq_handle, context, 0);
}

static void put_wq_read(struct ib_wq *wq)
{
	put_uobj_read(wq->uobject);
}

static struct ib_rwq_ind_table *idr_read_rwq_indirection_table(int ind_table_handle,
								struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_rwq_ind_tbl_idr, ind_table_handle, context, 0);
}

static void put_rwq_indirection_table_read(struct ib_rwq_ind_table *ind_table)
{
	put_uobj_read(ind_table->uobject);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}

ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ib_dev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->wq_list);
	INIT_LIST_HEAD(&ucontext->rwq_ind_tbl_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	rcu_read_lock();
	ucontext->tgid = get_pid(task_pid_group_leader(current));
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = (u32)(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;
	resp.lid = attr.lid;
	resp.sm_lid = attr.sm_lid;
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	pd = uobj->object;

	if (atomic_read(&pd->usecnt)) {
		ret = -EBUSY;
		goto err_put;
	}

	ret = pd->device->dealloc_pd(uobj->object);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
	if (ret)
		goto err_put;

	uobj->live = 0;
	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;

err_put:
	put_uobj_write(uobj);
	return ret;
}

struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}

ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = f.file->f_dentry->d_inode;
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor.  Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	struct ib_xrcd *xrcd = NULL;
	struct inode *inode = NULL;
	struct ib_uxrcd_object *obj;
	int live;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd = uobj->object;
	inode = xrcd->inode;
	obj = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}

void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}

ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
			      file->ucontext);

	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		put_pd_read(pd);

put_uobjs:

	put_uobj_write(mr->uobject);

	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr *mr;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	uverbs_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw *mw;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = uverbs_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}

static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr,
			       file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return obj;

err_cb:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));

	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}

static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	tmp.slid = wc->slid;
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq = uobj->object;
	ev_file = cq->cq_context;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	char *buf;
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && priv_check(curthread, PRIV_NET_RAW) != 0)
		return -EPERM;

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext,
		  &qp_lock_class);
	mutex_init(&obj->mcast_lock);
	down_write(&obj->uevent.uobject.mutex);
	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
		      (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = idr_read_rwq_indirection_table(cmd->rwq_ind_tbl_handle,
							 file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if ((cmd_sz >= offsetof(typeof(*cmd), reserved1) +
		       sizeof(cmd->reserved1)) && cmd->reserved1) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd = idr_read_xrcd(cmd->pd_handle, file->ucontext,
				     &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = idr_read_srq(cmd->srq_handle,
						   file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = idr_read_cq(cmd->recv_cq_handle,
							  file->ucontext, 0);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = idr_read_pd(cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS)) {
		ret = -EINVAL;
		goto err_put;
	}

	buf = (char *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, uhw);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp = qp;
		qp->device = device;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->rwq_ind_tbl = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	} else {
		/* It is done in _ib_create_qp for other QP types */
		qp->uobject = &obj->uevent.uobject;
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);
	if (ind_tbl)
		put_rwq_indirection_table_read(ind_tbl);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;
err_cb:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);
	if (ind_tbl)
		put_rwq_indirection_table_read(ind_tbl);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata ucore;
	struct ib_udata uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
		   resp_size);
	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}

static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}

ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
ib_destroy_qp(qp); 2221 2222 err_put: 2223 put_xrcd_read(xrcd_uobj); 2224 put_uobj_write(&obj->uevent.uobject); 2225 return ret; 2226 } 2227 2228 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file, 2229 struct ib_device *ib_dev, 2230 const char __user *buf, int in_len, 2231 int out_len) 2232 { 2233 struct ib_uverbs_query_qp cmd; 2234 struct ib_uverbs_query_qp_resp resp; 2235 struct ib_qp *qp; 2236 struct ib_qp_attr *attr; 2237 struct ib_qp_init_attr *init_attr; 2238 int ret; 2239 2240 if (copy_from_user(&cmd, buf, sizeof cmd)) 2241 return -EFAULT; 2242 2243 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2244 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL); 2245 if (!attr || !init_attr) { 2246 ret = -ENOMEM; 2247 goto out; 2248 } 2249 2250 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 2251 if (!qp) { 2252 ret = -EINVAL; 2253 goto out; 2254 } 2255 2256 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr); 2257 2258 put_qp_read(qp); 2259 2260 if (ret) 2261 goto out; 2262 2263 memset(&resp, 0, sizeof resp); 2264 2265 resp.qp_state = attr->qp_state; 2266 resp.cur_qp_state = attr->cur_qp_state; 2267 resp.path_mtu = attr->path_mtu; 2268 resp.path_mig_state = attr->path_mig_state; 2269 resp.qkey = attr->qkey; 2270 resp.rq_psn = attr->rq_psn; 2271 resp.sq_psn = attr->sq_psn; 2272 resp.dest_qp_num = attr->dest_qp_num; 2273 resp.qp_access_flags = attr->qp_access_flags; 2274 resp.pkey_index = attr->pkey_index; 2275 resp.alt_pkey_index = attr->alt_pkey_index; 2276 resp.sq_draining = attr->sq_draining; 2277 resp.max_rd_atomic = attr->max_rd_atomic; 2278 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic; 2279 resp.min_rnr_timer = attr->min_rnr_timer; 2280 resp.port_num = attr->port_num; 2281 resp.timeout = attr->timeout; 2282 resp.retry_cnt = attr->retry_cnt; 2283 resp.rnr_retry = attr->rnr_retry; 2284 resp.alt_port_num = attr->alt_port_num; 2285 resp.alt_timeout = attr->alt_timeout; 2286 2287 memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16); 2288 resp.dest.flow_label = attr->ah_attr.grh.flow_label; 2289 resp.dest.sgid_index = attr->ah_attr.grh.sgid_index; 2290 resp.dest.hop_limit = attr->ah_attr.grh.hop_limit; 2291 resp.dest.traffic_class = attr->ah_attr.grh.traffic_class; 2292 resp.dest.dlid = attr->ah_attr.dlid; 2293 resp.dest.sl = attr->ah_attr.sl; 2294 resp.dest.src_path_bits = attr->ah_attr.src_path_bits; 2295 resp.dest.static_rate = attr->ah_attr.static_rate; 2296 resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH); 2297 resp.dest.port_num = attr->ah_attr.port_num; 2298 2299 memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16); 2300 resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label; 2301 resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index; 2302 resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit; 2303 resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class; 2304 resp.alt_dest.dlid = attr->alt_ah_attr.dlid; 2305 resp.alt_dest.sl = attr->alt_ah_attr.sl; 2306 resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits; 2307 resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate; 2308 resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH); 2309 resp.alt_dest.port_num = attr->alt_ah_attr.port_num; 2310 2311 resp.max_send_wr = init_attr->cap.max_send_wr; 2312 resp.max_recv_wr = init_attr->cap.max_recv_wr; 2313 resp.max_send_sge = init_attr->cap.max_send_sge; 2314 resp.max_recv_sge = init_attr->cap.max_recv_sge; 2315 resp.max_inline_data = init_attr->cap.max_inline_data; 2316 resp.sq_sig_all = init_attr->sq_sig_type == 
IB_SIGNAL_ALL_WR; 2317 2318 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2319 &resp, sizeof resp)) 2320 ret = -EFAULT; 2321 2322 out: 2323 kfree(attr); 2324 kfree(init_attr); 2325 2326 return ret ? ret : in_len; 2327 } 2328 2329 /* Remove ignored fields set in the attribute mask */ 2330 static int modify_qp_mask(enum ib_qp_type qp_type, int mask) 2331 { 2332 switch (qp_type) { 2333 case IB_QPT_XRC_INI: 2334 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER); 2335 case IB_QPT_XRC_TGT: 2336 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT | 2337 IB_QP_RNR_RETRY); 2338 default: 2339 return mask; 2340 } 2341 } 2342 2343 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, 2344 struct ib_device *ib_dev, 2345 const char __user *buf, int in_len, 2346 int out_len) 2347 { 2348 struct ib_uverbs_modify_qp cmd; 2349 struct ib_udata udata; 2350 struct ib_qp *qp; 2351 struct ib_qp_attr *attr; 2352 int ret; 2353 2354 if (copy_from_user(&cmd, buf, sizeof cmd)) 2355 return -EFAULT; 2356 2357 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, 2358 out_len); 2359 2360 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2361 if (!attr) 2362 return -ENOMEM; 2363 2364 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 2365 if (!qp) { 2366 ret = -EINVAL; 2367 goto out; 2368 } 2369 2370 if ((cmd.attr_mask & IB_QP_PORT) && 2371 !rdma_is_port_valid(qp->device, cmd.port_num)) { 2372 ret = -EINVAL; 2373 goto release_qp; 2374 } 2375 2376 if ((cmd.attr_mask & IB_QP_AV) && 2377 !rdma_is_port_valid(qp->device, cmd.dest.port_num)) { 2378 ret = -EINVAL; 2379 goto release_qp; 2380 } 2381 2382 if ((cmd.attr_mask & IB_QP_ALT_PATH) && 2383 (!rdma_is_port_valid(qp->device, cmd.alt_port_num) || 2384 !rdma_is_port_valid(qp->device, cmd.alt_dest.port_num))) { 2385 ret = -EINVAL; 2386 goto release_qp; 2387 } 2388 2389 attr->qp_state = cmd.qp_state; 2390 attr->cur_qp_state = cmd.cur_qp_state; 2391 attr->path_mtu = cmd.path_mtu; 2392 attr->path_mig_state = cmd.path_mig_state; 2393 attr->qkey = cmd.qkey; 2394 attr->rq_psn = cmd.rq_psn; 2395 attr->sq_psn = cmd.sq_psn; 2396 attr->dest_qp_num = cmd.dest_qp_num; 2397 attr->qp_access_flags = cmd.qp_access_flags; 2398 attr->pkey_index = cmd.pkey_index; 2399 attr->alt_pkey_index = cmd.alt_pkey_index; 2400 attr->en_sqd_async_notify = cmd.en_sqd_async_notify; 2401 attr->max_rd_atomic = cmd.max_rd_atomic; 2402 attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic; 2403 attr->min_rnr_timer = cmd.min_rnr_timer; 2404 attr->port_num = cmd.port_num; 2405 attr->timeout = cmd.timeout; 2406 attr->retry_cnt = cmd.retry_cnt; 2407 attr->rnr_retry = cmd.rnr_retry; 2408 attr->alt_port_num = cmd.alt_port_num; 2409 attr->alt_timeout = cmd.alt_timeout; 2410 2411 memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16); 2412 attr->ah_attr.grh.flow_label = cmd.dest.flow_label; 2413 attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index; 2414 attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit; 2415 attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class; 2416 attr->ah_attr.dlid = cmd.dest.dlid; 2417 attr->ah_attr.sl = cmd.dest.sl; 2418 attr->ah_attr.src_path_bits = cmd.dest.src_path_bits; 2419 attr->ah_attr.static_rate = cmd.dest.static_rate; 2420 attr->ah_attr.ah_flags = cmd.dest.is_global ? 
IB_AH_GRH : 0; 2421 attr->ah_attr.port_num = cmd.dest.port_num; 2422 2423 memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16); 2424 attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label; 2425 attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index; 2426 attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit; 2427 attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class; 2428 attr->alt_ah_attr.dlid = cmd.alt_dest.dlid; 2429 attr->alt_ah_attr.sl = cmd.alt_dest.sl; 2430 attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits; 2431 attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate; 2432 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0; 2433 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; 2434 2435 if (qp->real_qp == qp) { 2436 if (cmd.attr_mask & IB_QP_AV) { 2437 ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr); 2438 if (ret) 2439 goto release_qp; 2440 } 2441 ret = qp->device->modify_qp(qp, attr, 2442 modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); 2443 } else { 2444 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask)); 2445 } 2446 2447 if (ret) 2448 goto release_qp; 2449 2450 ret = in_len; 2451 2452 release_qp: 2453 put_qp_read(qp); 2454 2455 out: 2456 kfree(attr); 2457 2458 return ret; 2459 } 2460 2461 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, 2462 struct ib_device *ib_dev, 2463 const char __user *buf, int in_len, 2464 int out_len) 2465 { 2466 struct ib_uverbs_destroy_qp cmd; 2467 struct ib_uverbs_destroy_qp_resp resp; 2468 struct ib_uobject *uobj; 2469 struct ib_qp *qp; 2470 struct ib_uqp_object *obj; 2471 int ret = -EINVAL; 2472 2473 if (copy_from_user(&cmd, buf, sizeof cmd)) 2474 return -EFAULT; 2475 2476 memset(&resp, 0, sizeof resp); 2477 2478 uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext); 2479 if (!uobj) 2480 return -EINVAL; 2481 qp = uobj->object; 2482 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject); 2483 2484 if (!list_empty(&obj->mcast_list)) { 2485 put_uobj_write(uobj); 2486 return -EBUSY; 2487 } 2488 2489 ret = ib_destroy_qp(qp); 2490 if (!ret) 2491 uobj->live = 0; 2492 2493 put_uobj_write(uobj); 2494 2495 if (ret) 2496 return ret; 2497 2498 if (obj->uxrcd) 2499 atomic_dec(&obj->uxrcd->refcnt); 2500 2501 idr_remove_uobj(&ib_uverbs_qp_idr, uobj); 2502 2503 mutex_lock(&file->mutex); 2504 list_del(&uobj->list); 2505 mutex_unlock(&file->mutex); 2506 2507 ib_uverbs_release_uevent(file, &obj->uevent); 2508 2509 resp.events_reported = obj->uevent.events_reported; 2510 2511 put_uobj(uobj); 2512 2513 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2514 &resp, sizeof resp)) 2515 return -EFAULT; 2516 2517 return in_len; 2518 } 2519 2520 static void *alloc_wr(size_t wr_size, __u32 num_sge) 2521 { 2522 return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) + 2523 num_sge * sizeof (struct ib_sge), GFP_KERNEL); 2524 }; 2525 2526 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, 2527 struct ib_device *ib_dev, 2528 const char __user *buf, int in_len, 2529 int out_len) 2530 { 2531 struct ib_uverbs_post_send cmd; 2532 struct ib_uverbs_post_send_resp resp; 2533 struct ib_uverbs_send_wr *user_wr; 2534 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr; 2535 struct ib_qp *qp; 2536 int i, sg_ind; 2537 int is_ud; 2538 ssize_t ret = -EINVAL; 2539 size_t next_size; 2540 2541 if (copy_from_user(&cmd, buf, sizeof cmd)) 2542 return -EFAULT; 2543 2544 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count + 2545 cmd.sge_count * sizeof (struct 
ib_uverbs_sge)) 2546 return -EINVAL; 2547 2548 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr)) 2549 return -EINVAL; 2550 2551 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL); 2552 if (!user_wr) 2553 return -ENOMEM; 2554 2555 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 2556 if (!qp) 2557 goto out; 2558 2559 is_ud = qp->qp_type == IB_QPT_UD; 2560 sg_ind = 0; 2561 last = NULL; 2562 for (i = 0; i < cmd.wr_count; ++i) { 2563 if (copy_from_user(user_wr, 2564 buf + sizeof cmd + i * cmd.wqe_size, 2565 cmd.wqe_size)) { 2566 ret = -EFAULT; 2567 goto out_put; 2568 } 2569 2570 if (user_wr->num_sge + sg_ind > cmd.sge_count) { 2571 ret = -EINVAL; 2572 goto out_put; 2573 } 2574 2575 if (is_ud) { 2576 struct ib_ud_wr *ud; 2577 2578 if (user_wr->opcode != IB_WR_SEND && 2579 user_wr->opcode != IB_WR_SEND_WITH_IMM) { 2580 ret = -EINVAL; 2581 goto out_put; 2582 } 2583 2584 next_size = sizeof(*ud); 2585 ud = alloc_wr(next_size, user_wr->num_sge); 2586 if (!ud) { 2587 ret = -ENOMEM; 2588 goto out_put; 2589 } 2590 2591 ud->ah = idr_read_ah(user_wr->wr.ud.ah, file->ucontext); 2592 if (!ud->ah) { 2593 kfree(ud); 2594 ret = -EINVAL; 2595 goto out_put; 2596 } 2597 ud->remote_qpn = user_wr->wr.ud.remote_qpn; 2598 ud->remote_qkey = user_wr->wr.ud.remote_qkey; 2599 2600 next = &ud->wr; 2601 } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM || 2602 user_wr->opcode == IB_WR_RDMA_WRITE || 2603 user_wr->opcode == IB_WR_RDMA_READ) { 2604 struct ib_rdma_wr *rdma; 2605 2606 next_size = sizeof(*rdma); 2607 rdma = alloc_wr(next_size, user_wr->num_sge); 2608 if (!rdma) { 2609 ret = -ENOMEM; 2610 goto out_put; 2611 } 2612 2613 rdma->remote_addr = user_wr->wr.rdma.remote_addr; 2614 rdma->rkey = user_wr->wr.rdma.rkey; 2615 2616 next = &rdma->wr; 2617 } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || 2618 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { 2619 struct ib_atomic_wr *atomic; 2620 2621 next_size = sizeof(*atomic); 2622 atomic = alloc_wr(next_size, user_wr->num_sge); 2623 if (!atomic) { 2624 ret = -ENOMEM; 2625 goto out_put; 2626 } 2627 2628 atomic->remote_addr = user_wr->wr.atomic.remote_addr; 2629 atomic->compare_add = user_wr->wr.atomic.compare_add; 2630 atomic->swap = user_wr->wr.atomic.swap; 2631 atomic->rkey = user_wr->wr.atomic.rkey; 2632 2633 next = &atomic->wr; 2634 } else if (user_wr->opcode == IB_WR_SEND || 2635 user_wr->opcode == IB_WR_SEND_WITH_IMM || 2636 user_wr->opcode == IB_WR_SEND_WITH_INV) { 2637 next_size = sizeof(*next); 2638 next = alloc_wr(next_size, user_wr->num_sge); 2639 if (!next) { 2640 ret = -ENOMEM; 2641 goto out_put; 2642 } 2643 } else { 2644 ret = -EINVAL; 2645 goto out_put; 2646 } 2647 2648 if (user_wr->opcode == IB_WR_SEND_WITH_IMM || 2649 user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { 2650 next->ex.imm_data = 2651 (__be32 __force) user_wr->ex.imm_data; 2652 } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) { 2653 next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey; 2654 } 2655 2656 if (!last) 2657 wr = next; 2658 else 2659 last->next = next; 2660 last = next; 2661 2662 next->next = NULL; 2663 next->wr_id = user_wr->wr_id; 2664 next->num_sge = user_wr->num_sge; 2665 next->opcode = user_wr->opcode; 2666 next->send_flags = user_wr->send_flags; 2667 2668 if (next->num_sge) { 2669 next->sg_list = (void *)((char *)next + 2670 ALIGN(next_size, sizeof(struct ib_sge))); 2671 if (copy_from_user(next->sg_list, 2672 (const char *)buf + sizeof cmd + 2673 cmd.wr_count * cmd.wqe_size + 2674 sg_ind * sizeof (struct ib_sge), 2675 next->num_sge * sizeof (struct ib_sge))) { 2676 
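/* The scatter/gather list could not be copied in from user space. */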
ret = -EFAULT; 2677 goto out_put; 2678 } 2679 sg_ind += next->num_sge; 2680 } else 2681 next->sg_list = NULL; 2682 } 2683 2684 resp.bad_wr = 0; 2685 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr); 2686 if (ret) 2687 for (next = wr; next; next = next->next) { 2688 ++resp.bad_wr; 2689 if (next == bad_wr) 2690 break; 2691 } 2692 2693 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2694 &resp, sizeof resp)) 2695 ret = -EFAULT; 2696 2697 out_put: 2698 put_qp_read(qp); 2699 2700 while (wr) { 2701 if (is_ud && ud_wr(wr)->ah) 2702 put_ah_read(ud_wr(wr)->ah); 2703 next = wr->next; 2704 kfree(wr); 2705 wr = next; 2706 } 2707 2708 out: 2709 kfree(user_wr); 2710 2711 return ret ? ret : in_len; 2712 } 2713 2714 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf, 2715 int in_len, 2716 u32 wr_count, 2717 u32 sge_count, 2718 u32 wqe_size) 2719 { 2720 struct ib_uverbs_recv_wr *user_wr; 2721 struct ib_recv_wr *wr = NULL, *last, *next; 2722 int sg_ind; 2723 int i; 2724 int ret; 2725 2726 if (in_len < wqe_size * wr_count + 2727 sge_count * sizeof (struct ib_uverbs_sge)) 2728 return ERR_PTR(-EINVAL); 2729 2730 if (wqe_size < sizeof (struct ib_uverbs_recv_wr)) 2731 return ERR_PTR(-EINVAL); 2732 2733 user_wr = kmalloc(wqe_size, GFP_KERNEL); 2734 if (!user_wr) 2735 return ERR_PTR(-ENOMEM); 2736 2737 sg_ind = 0; 2738 last = NULL; 2739 for (i = 0; i < wr_count; ++i) { 2740 if (copy_from_user(user_wr, buf + i * wqe_size, 2741 wqe_size)) { 2742 ret = -EFAULT; 2743 goto err; 2744 } 2745 2746 if (user_wr->num_sge + sg_ind > sge_count) { 2747 ret = -EINVAL; 2748 goto err; 2749 } 2750 2751 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + 2752 user_wr->num_sge * sizeof (struct ib_sge), 2753 GFP_KERNEL); 2754 if (!next) { 2755 ret = -ENOMEM; 2756 goto err; 2757 } 2758 2759 if (!last) 2760 wr = next; 2761 else 2762 last->next = next; 2763 last = next; 2764 2765 next->next = NULL; 2766 next->wr_id = user_wr->wr_id; 2767 next->num_sge = user_wr->num_sge; 2768 2769 if (next->num_sge) { 2770 next->sg_list = (void *)((char *)next + 2771 ALIGN(sizeof *next, sizeof (struct ib_sge))); 2772 if (copy_from_user(next->sg_list, 2773 (const char *)buf + wr_count * wqe_size + 2774 sg_ind * sizeof (struct ib_sge), 2775 next->num_sge * sizeof (struct ib_sge))) { 2776 ret = -EFAULT; 2777 goto err; 2778 } 2779 sg_ind += next->num_sge; 2780 } else 2781 next->sg_list = NULL; 2782 } 2783 2784 kfree(user_wr); 2785 return wr; 2786 2787 err: 2788 kfree(user_wr); 2789 2790 while (wr) { 2791 next = wr->next; 2792 kfree(wr); 2793 wr = next; 2794 } 2795 2796 return ERR_PTR(ret); 2797 } 2798 2799 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file, 2800 struct ib_device *ib_dev, 2801 const char __user *buf, int in_len, 2802 int out_len) 2803 { 2804 struct ib_uverbs_post_recv cmd; 2805 struct ib_uverbs_post_recv_resp resp; 2806 struct ib_recv_wr *wr, *next, *bad_wr; 2807 struct ib_qp *qp; 2808 ssize_t ret = -EINVAL; 2809 2810 if (copy_from_user(&cmd, buf, sizeof cmd)) 2811 return -EFAULT; 2812 2813 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 2814 in_len - sizeof cmd, cmd.wr_count, 2815 cmd.sge_count, cmd.wqe_size); 2816 if (IS_ERR(wr)) 2817 return PTR_ERR(wr); 2818 2819 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 2820 if (!qp) 2821 goto out; 2822 2823 resp.bad_wr = 0; 2824 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr); 2825 2826 put_qp_read(qp); 2827 2828 if (ret) 2829 for (next = wr; next; next = next->next) { 2830 ++resp.bad_wr; 2831 if (next == bad_wr) 2832 break; 2833 } 
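/*
 * On error, resp.bad_wr counts the work requests walked up to and
 * including the one bad_wr points at, so user space can tell which of
 * its receive WRs the driver rejected.
 */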
2834 2835 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2836 &resp, sizeof resp)) 2837 ret = -EFAULT; 2838 2839 out: 2840 while (wr) { 2841 next = wr->next; 2842 kfree(wr); 2843 wr = next; 2844 } 2845 2846 return ret ? ret : in_len; 2847 } 2848 2849 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file, 2850 struct ib_device *ib_dev, 2851 const char __user *buf, int in_len, 2852 int out_len) 2853 { 2854 struct ib_uverbs_post_srq_recv cmd; 2855 struct ib_uverbs_post_srq_recv_resp resp; 2856 struct ib_recv_wr *wr, *next, *bad_wr; 2857 struct ib_srq *srq; 2858 ssize_t ret = -EINVAL; 2859 2860 if (copy_from_user(&cmd, buf, sizeof cmd)) 2861 return -EFAULT; 2862 2863 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 2864 in_len - sizeof cmd, cmd.wr_count, 2865 cmd.sge_count, cmd.wqe_size); 2866 if (IS_ERR(wr)) 2867 return PTR_ERR(wr); 2868 2869 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 2870 if (!srq) 2871 goto out; 2872 2873 resp.bad_wr = 0; 2874 ret = srq->device->post_srq_recv(srq, wr, &bad_wr); 2875 2876 put_srq_read(srq); 2877 2878 if (ret) 2879 for (next = wr; next; next = next->next) { 2880 ++resp.bad_wr; 2881 if (next == bad_wr) 2882 break; 2883 } 2884 2885 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2886 &resp, sizeof resp)) 2887 ret = -EFAULT; 2888 2889 out: 2890 while (wr) { 2891 next = wr->next; 2892 kfree(wr); 2893 wr = next; 2894 } 2895 2896 return ret ? ret : in_len; 2897 } 2898 2899 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, 2900 struct ib_device *ib_dev, 2901 const char __user *buf, int in_len, 2902 int out_len) 2903 { 2904 struct ib_uverbs_create_ah cmd; 2905 struct ib_uverbs_create_ah_resp resp; 2906 struct ib_uobject *uobj; 2907 struct ib_pd *pd; 2908 struct ib_ah *ah; 2909 struct ib_ah_attr attr; 2910 int ret; 2911 struct ib_udata udata; 2912 2913 if (out_len < sizeof resp) 2914 return -ENOSPC; 2915 2916 if (copy_from_user(&cmd, buf, sizeof cmd)) 2917 return -EFAULT; 2918 2919 if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) 2920 return -EINVAL; 2921 2922 INIT_UDATA(&udata, buf + sizeof(cmd), 2923 (unsigned long)cmd.response + sizeof(resp), 2924 in_len - sizeof(cmd), out_len - sizeof(resp)); 2925 2926 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 2927 if (!uobj) 2928 return -ENOMEM; 2929 2930 init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class); 2931 down_write(&uobj->mutex); 2932 2933 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 2934 if (!pd) { 2935 ret = -EINVAL; 2936 goto err; 2937 } 2938 2939 attr.dlid = cmd.attr.dlid; 2940 attr.sl = cmd.attr.sl; 2941 attr.src_path_bits = cmd.attr.src_path_bits; 2942 attr.static_rate = cmd.attr.static_rate; 2943 attr.ah_flags = cmd.attr.is_global ? 
IB_AH_GRH : 0; 2944 attr.port_num = cmd.attr.port_num; 2945 attr.grh.flow_label = cmd.attr.grh.flow_label; 2946 attr.grh.sgid_index = cmd.attr.grh.sgid_index; 2947 attr.grh.hop_limit = cmd.attr.grh.hop_limit; 2948 attr.grh.traffic_class = cmd.attr.grh.traffic_class; 2949 memset(&attr.dmac, 0, sizeof(attr.dmac)); 2950 memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16); 2951 2952 ah = pd->device->create_ah(pd, &attr, &udata); 2953 2954 if (IS_ERR(ah)) { 2955 ret = PTR_ERR(ah); 2956 goto err_put; 2957 } 2958 2959 ah->device = pd->device; 2960 ah->pd = pd; 2961 atomic_inc(&pd->usecnt); 2962 ah->uobject = uobj; 2963 uobj->object = ah; 2964 2965 ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj); 2966 if (ret) 2967 goto err_destroy; 2968 2969 resp.ah_handle = uobj->id; 2970 2971 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2972 &resp, sizeof resp)) { 2973 ret = -EFAULT; 2974 goto err_copy; 2975 } 2976 2977 put_pd_read(pd); 2978 2979 mutex_lock(&file->mutex); 2980 list_add_tail(&uobj->list, &file->ucontext->ah_list); 2981 mutex_unlock(&file->mutex); 2982 2983 uobj->live = 1; 2984 2985 up_write(&uobj->mutex); 2986 2987 return in_len; 2988 2989 err_copy: 2990 idr_remove_uobj(&ib_uverbs_ah_idr, uobj); 2991 2992 err_destroy: 2993 ib_destroy_ah(ah); 2994 2995 err_put: 2996 put_pd_read(pd); 2997 2998 err: 2999 put_uobj_write(uobj); 3000 return ret; 3001 } 3002 3003 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file, 3004 struct ib_device *ib_dev, 3005 const char __user *buf, int in_len, int out_len) 3006 { 3007 struct ib_uverbs_destroy_ah cmd; 3008 struct ib_ah *ah; 3009 struct ib_uobject *uobj; 3010 int ret; 3011 3012 if (copy_from_user(&cmd, buf, sizeof cmd)) 3013 return -EFAULT; 3014 3015 uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext); 3016 if (!uobj) 3017 return -EINVAL; 3018 ah = uobj->object; 3019 3020 ret = ib_destroy_ah(ah); 3021 if (!ret) 3022 uobj->live = 0; 3023 3024 put_uobj_write(uobj); 3025 3026 if (ret) 3027 return ret; 3028 3029 idr_remove_uobj(&ib_uverbs_ah_idr, uobj); 3030 3031 mutex_lock(&file->mutex); 3032 list_del(&uobj->list); 3033 mutex_unlock(&file->mutex); 3034 3035 put_uobj(uobj); 3036 3037 return in_len; 3038 } 3039 3040 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file, 3041 struct ib_device *ib_dev, 3042 const char __user *buf, int in_len, 3043 int out_len) 3044 { 3045 struct ib_uverbs_attach_mcast cmd; 3046 struct ib_qp *qp; 3047 struct ib_uqp_object *obj; 3048 struct ib_uverbs_mcast_entry *mcast; 3049 int ret; 3050 3051 if (copy_from_user(&cmd, buf, sizeof cmd)) 3052 return -EFAULT; 3053 3054 qp = idr_write_qp(cmd.qp_handle, file->ucontext); 3055 if (!qp) 3056 return -EINVAL; 3057 3058 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); 3059 3060 mutex_lock(&obj->mcast_lock); 3061 list_for_each_entry(mcast, &obj->mcast_list, list) 3062 if (cmd.mlid == mcast->lid && 3063 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) { 3064 ret = 0; 3065 goto out_put; 3066 } 3067 3068 mcast = kmalloc(sizeof *mcast, GFP_KERNEL); 3069 if (!mcast) { 3070 ret = -ENOMEM; 3071 goto out_put; 3072 } 3073 3074 mcast->lid = cmd.mlid; 3075 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw); 3076 3077 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid); 3078 if (!ret) 3079 list_add_tail(&mcast->list, &obj->mcast_list); 3080 else 3081 kfree(mcast); 3082 3083 out_put: 3084 mutex_unlock(&obj->mcast_lock); 3085 put_qp_write(qp); 3086 3087 return ret ? 
ret : in_len; 3088 } 3089 3090 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file, 3091 struct ib_device *ib_dev, 3092 const char __user *buf, int in_len, 3093 int out_len) 3094 { 3095 struct ib_uverbs_detach_mcast cmd; 3096 struct ib_uqp_object *obj; 3097 struct ib_qp *qp; 3098 struct ib_uverbs_mcast_entry *mcast; 3099 int ret = -EINVAL; 3100 bool found = false; 3101 3102 if (copy_from_user(&cmd, buf, sizeof cmd)) 3103 return -EFAULT; 3104 3105 qp = idr_write_qp(cmd.qp_handle, file->ucontext); 3106 if (!qp) 3107 return -EINVAL; 3108 3109 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); 3110 mutex_lock(&obj->mcast_lock); 3111 3112 list_for_each_entry(mcast, &obj->mcast_list, list) 3113 if (cmd.mlid == mcast->lid && 3114 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) { 3115 list_del(&mcast->list); 3116 kfree(mcast); 3117 found = true; 3118 break; 3119 } 3120 3121 if (!found) { 3122 ret = -EINVAL; 3123 goto out_put; 3124 } 3125 3126 ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid); 3127 3128 out_put: 3129 mutex_unlock(&obj->mcast_lock); 3130 put_qp_write(qp); 3131 3132 return ret ? ret : in_len; 3133 } 3134 3135 static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec) 3136 { 3137 /* Returns user space filter size, includes padding */ 3138 return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2; 3139 } 3140 3141 static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size, 3142 u16 ib_real_filter_sz) 3143 { 3144 /* 3145 * User space filter structures must be 64 bit aligned, otherwise this 3146 * may pass, but we won't handle additional new attributes. 3147 */ 3148 3149 if (kern_filter_size > ib_real_filter_sz) { 3150 if (memchr_inv((char *)kern_spec_filter + 3151 ib_real_filter_sz, 0, 3152 kern_filter_size - ib_real_filter_sz)) 3153 return -EINVAL; 3154 return ib_real_filter_sz; 3155 } 3156 return kern_filter_size; 3157 } 3158 3159 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, 3160 union ib_flow_spec *ib_spec) 3161 { 3162 ssize_t actual_filter_sz; 3163 ssize_t kern_filter_sz; 3164 ssize_t ib_filter_sz; 3165 void *kern_spec_mask; 3166 void *kern_spec_val; 3167 3168 if (kern_spec->reserved) 3169 return -EINVAL; 3170 3171 ib_spec->type = kern_spec->type; 3172 3173 kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr); 3174 /* User flow spec size must be aligned to 4 bytes */ 3175 if (kern_filter_sz != ALIGN(kern_filter_sz, 4)) 3176 return -EINVAL; 3177 3178 kern_spec_val = (char *)kern_spec + 3179 sizeof(struct ib_uverbs_flow_spec_hdr); 3180 kern_spec_mask = (char *)kern_spec_val + kern_filter_sz; 3181 3182 switch (ib_spec->type) { 3183 case IB_FLOW_SPEC_ETH: 3184 ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz); 3185 actual_filter_sz = spec_filter_size(kern_spec_mask, 3186 kern_filter_sz, 3187 ib_filter_sz); 3188 if (actual_filter_sz <= 0) 3189 return -EINVAL; 3190 ib_spec->size = sizeof(struct ib_flow_spec_eth); 3191 memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz); 3192 memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz); 3193 break; 3194 case IB_FLOW_SPEC_IPV4: 3195 ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz); 3196 actual_filter_sz = spec_filter_size(kern_spec_mask, 3197 kern_filter_sz, 3198 ib_filter_sz); 3199 if (actual_filter_sz <= 0) 3200 return -EINVAL; 3201 ib_spec->size = sizeof(struct ib_flow_spec_ipv4); 3202 memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz); 3203 memcpy(&ib_spec->ipv4.mask, kern_spec_mask, 
actual_filter_sz); 3204 break; 3205 case IB_FLOW_SPEC_IPV6: 3206 ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz); 3207 actual_filter_sz = spec_filter_size(kern_spec_mask, 3208 kern_filter_sz, 3209 ib_filter_sz); 3210 if (actual_filter_sz <= 0) 3211 return -EINVAL; 3212 ib_spec->size = sizeof(struct ib_flow_spec_ipv6); 3213 memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz); 3214 memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz); 3215 3216 if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) || 3217 (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20)) 3218 return -EINVAL; 3219 break; 3220 case IB_FLOW_SPEC_TCP: 3221 case IB_FLOW_SPEC_UDP: 3222 ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz); 3223 actual_filter_sz = spec_filter_size(kern_spec_mask, 3224 kern_filter_sz, 3225 ib_filter_sz); 3226 if (actual_filter_sz <= 0) 3227 return -EINVAL; 3228 ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp); 3229 memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz); 3230 memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz); 3231 break; 3232 default: 3233 return -EINVAL; 3234 } 3235 return 0; 3236 } 3237 3238 int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file, 3239 struct ib_device *ib_dev, 3240 struct ib_udata *ucore, 3241 struct ib_udata *uhw) 3242 { 3243 struct ib_uverbs_ex_create_wq cmd = {}; 3244 struct ib_uverbs_ex_create_wq_resp resp = {}; 3245 struct ib_uwq_object *obj; 3246 int err = 0; 3247 struct ib_cq *cq; 3248 struct ib_pd *pd; 3249 struct ib_wq *wq; 3250 struct ib_wq_init_attr wq_init_attr = {}; 3251 size_t required_cmd_sz; 3252 size_t required_resp_len; 3253 3254 required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge); 3255 required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn); 3256 3257 if (ucore->inlen < required_cmd_sz) 3258 return -EINVAL; 3259 3260 if (ucore->outlen < required_resp_len) 3261 return -ENOSPC; 3262 3263 if (ucore->inlen > sizeof(cmd) && 3264 !ib_is_udata_cleared(ucore, sizeof(cmd), 3265 ucore->inlen - sizeof(cmd))) 3266 return -EOPNOTSUPP; 3267 3268 err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3269 if (err) 3270 return err; 3271 3272 if (cmd.comp_mask) 3273 return -EOPNOTSUPP; 3274 3275 obj = kmalloc(sizeof(*obj), GFP_KERNEL); 3276 if (!obj) 3277 return -ENOMEM; 3278 3279 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, 3280 &wq_lock_class); 3281 down_write(&obj->uevent.uobject.mutex); 3282 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 3283 if (!pd) { 3284 err = -EINVAL; 3285 goto err_uobj; 3286 } 3287 3288 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); 3289 if (!cq) { 3290 err = -EINVAL; 3291 goto err_put_pd; 3292 } 3293 3294 wq_init_attr.cq = cq; 3295 wq_init_attr.max_sge = cmd.max_sge; 3296 wq_init_attr.max_wr = cmd.max_wr; 3297 wq_init_attr.wq_context = file; 3298 wq_init_attr.wq_type = cmd.wq_type; 3299 wq_init_attr.event_handler = ib_uverbs_wq_event_handler; 3300 obj->uevent.events_reported = 0; 3301 INIT_LIST_HEAD(&obj->uevent.event_list); 3302 wq = pd->device->create_wq(pd, &wq_init_attr, uhw); 3303 if (IS_ERR(wq)) { 3304 err = PTR_ERR(wq); 3305 goto err_put_cq; 3306 } 3307 3308 wq->uobject = &obj->uevent.uobject; 3309 obj->uevent.uobject.object = wq; 3310 wq->wq_type = wq_init_attr.wq_type; 3311 wq->cq = cq; 3312 wq->pd = pd; 3313 wq->device = pd->device; 3314 wq->wq_context = wq_init_attr.wq_context; 3315 atomic_set(&wq->usecnt, 0); 3316 atomic_inc(&pd->usecnt); 3317 atomic_inc(&cq->usecnt); 3318 wq->uobject = 
&obj->uevent.uobject; 3319 obj->uevent.uobject.object = wq; 3320 err = idr_add_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject); 3321 if (err) 3322 goto destroy_wq; 3323 3324 memset(&resp, 0, sizeof(resp)); 3325 resp.wq_handle = obj->uevent.uobject.id; 3326 resp.max_sge = wq_init_attr.max_sge; 3327 resp.max_wr = wq_init_attr.max_wr; 3328 resp.wqn = wq->wq_num; 3329 resp.response_length = required_resp_len; 3330 err = ib_copy_to_udata(ucore, 3331 &resp, resp.response_length); 3332 if (err) 3333 goto err_copy; 3334 3335 put_pd_read(pd); 3336 put_cq_read(cq); 3337 3338 mutex_lock(&file->mutex); 3339 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->wq_list); 3340 mutex_unlock(&file->mutex); 3341 3342 obj->uevent.uobject.live = 1; 3343 up_write(&obj->uevent.uobject.mutex); 3344 return 0; 3345 3346 err_copy: 3347 idr_remove_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject); 3348 destroy_wq: 3349 ib_destroy_wq(wq); 3350 err_put_cq: 3351 put_cq_read(cq); 3352 err_put_pd: 3353 put_pd_read(pd); 3354 err_uobj: 3355 put_uobj_write(&obj->uevent.uobject); 3356 3357 return err; 3358 } 3359 3360 int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file, 3361 struct ib_device *ib_dev, 3362 struct ib_udata *ucore, 3363 struct ib_udata *uhw) 3364 { 3365 struct ib_uverbs_ex_destroy_wq cmd = {}; 3366 struct ib_uverbs_ex_destroy_wq_resp resp = {}; 3367 struct ib_wq *wq; 3368 struct ib_uobject *uobj; 3369 struct ib_uwq_object *obj; 3370 size_t required_cmd_sz; 3371 size_t required_resp_len; 3372 int ret; 3373 3374 required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle); 3375 required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); 3376 3377 if (ucore->inlen < required_cmd_sz) 3378 return -EINVAL; 3379 3380 if (ucore->outlen < required_resp_len) 3381 return -ENOSPC; 3382 3383 if (ucore->inlen > sizeof(cmd) && 3384 !ib_is_udata_cleared(ucore, sizeof(cmd), 3385 ucore->inlen - sizeof(cmd))) 3386 return -EOPNOTSUPP; 3387 3388 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3389 if (ret) 3390 return ret; 3391 3392 if (cmd.comp_mask) 3393 return -EOPNOTSUPP; 3394 3395 resp.response_length = required_resp_len; 3396 uobj = idr_write_uobj(&ib_uverbs_wq_idr, cmd.wq_handle, 3397 file->ucontext); 3398 if (!uobj) 3399 return -EINVAL; 3400 3401 wq = uobj->object; 3402 obj = container_of(uobj, struct ib_uwq_object, uevent.uobject); 3403 ret = ib_destroy_wq(wq); 3404 if (!ret) 3405 uobj->live = 0; 3406 3407 put_uobj_write(uobj); 3408 if (ret) 3409 return ret; 3410 3411 idr_remove_uobj(&ib_uverbs_wq_idr, uobj); 3412 3413 mutex_lock(&file->mutex); 3414 list_del(&uobj->list); 3415 mutex_unlock(&file->mutex); 3416 3417 ib_uverbs_release_uevent(file, &obj->uevent); 3418 resp.events_reported = obj->uevent.events_reported; 3419 put_uobj(uobj); 3420 3421 ret = ib_copy_to_udata(ucore, &resp, resp.response_length); 3422 if (ret) 3423 return ret; 3424 3425 return 0; 3426 } 3427 3428 int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file, 3429 struct ib_device *ib_dev, 3430 struct ib_udata *ucore, 3431 struct ib_udata *uhw) 3432 { 3433 struct ib_uverbs_ex_modify_wq cmd = {}; 3434 struct ib_wq *wq; 3435 struct ib_wq_attr wq_attr = {}; 3436 size_t required_cmd_sz; 3437 int ret; 3438 3439 required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state); 3440 if (ucore->inlen < required_cmd_sz) 3441 return -EINVAL; 3442 3443 if (ucore->inlen > sizeof(cmd) && 3444 !ib_is_udata_cleared(ucore, sizeof(cmd), 3445 ucore->inlen - sizeof(cmd))) 3446 return -EOPNOTSUPP; 
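/*
 * Extended-command convention: input beyond the structure this kernel
 * understands must be zeroed (checked above); only the known portion
 * of the command is copied in below.
 */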
3447 3448 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3449 if (ret) 3450 return ret; 3451 3452 if (!cmd.attr_mask) 3453 return -EINVAL; 3454 3455 if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE)) 3456 return -EINVAL; 3457 3458 wq = idr_read_wq(cmd.wq_handle, file->ucontext); 3459 if (!wq) 3460 return -EINVAL; 3461 3462 wq_attr.curr_wq_state = cmd.curr_wq_state; 3463 wq_attr.wq_state = cmd.wq_state; 3464 ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw); 3465 put_wq_read(wq); 3466 return ret; 3467 } 3468 3469 int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file, 3470 struct ib_device *ib_dev, 3471 struct ib_udata *ucore, 3472 struct ib_udata *uhw) 3473 { 3474 struct ib_uverbs_ex_create_rwq_ind_table cmd = {}; 3475 struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {}; 3476 struct ib_uobject *uobj; 3477 int err = 0; 3478 struct ib_rwq_ind_table_init_attr init_attr = {}; 3479 struct ib_rwq_ind_table *rwq_ind_tbl; 3480 struct ib_wq **wqs = NULL; 3481 u32 *wqs_handles = NULL; 3482 struct ib_wq *wq = NULL; 3483 int i, j, num_read_wqs; 3484 u32 num_wq_handles; 3485 u32 expected_in_size; 3486 size_t required_cmd_sz_header; 3487 size_t required_resp_len; 3488 3489 required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size); 3490 required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num); 3491 3492 if (ucore->inlen < required_cmd_sz_header) 3493 return -EINVAL; 3494 3495 if (ucore->outlen < required_resp_len) 3496 return -ENOSPC; 3497 3498 err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header); 3499 if (err) 3500 return err; 3501 3502 ucore->inbuf = (const char *)ucore->inbuf + required_cmd_sz_header; 3503 ucore->inlen -= required_cmd_sz_header; 3504 3505 if (cmd.comp_mask) 3506 return -EOPNOTSUPP; 3507 3508 if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE) 3509 return -EINVAL; 3510 3511 num_wq_handles = 1 << cmd.log_ind_tbl_size; 3512 expected_in_size = num_wq_handles * sizeof(__u32); 3513 if (num_wq_handles == 1) 3514 /* input size for wq handles is u64 aligned */ 3515 expected_in_size += sizeof(__u32); 3516 3517 if (ucore->inlen < expected_in_size) 3518 return -EINVAL; 3519 3520 if (ucore->inlen > expected_in_size && 3521 !ib_is_udata_cleared(ucore, expected_in_size, 3522 ucore->inlen - expected_in_size)) 3523 return -EOPNOTSUPP; 3524 3525 wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles), 3526 GFP_KERNEL); 3527 if (!wqs_handles) 3528 return -ENOMEM; 3529 3530 err = ib_copy_from_udata(wqs_handles, ucore, 3531 num_wq_handles * sizeof(__u32)); 3532 if (err) 3533 goto err_free; 3534 3535 wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL); 3536 if (!wqs) { 3537 err = -ENOMEM; 3538 goto err_free; 3539 } 3540 3541 for (num_read_wqs = 0; num_read_wqs < num_wq_handles; 3542 num_read_wqs++) { 3543 wq = idr_read_wq(wqs_handles[num_read_wqs], file->ucontext); 3544 if (!wq) { 3545 err = -EINVAL; 3546 goto put_wqs; 3547 } 3548 3549 wqs[num_read_wqs] = wq; 3550 } 3551 3552 uobj = kmalloc(sizeof(*uobj), GFP_KERNEL); 3553 if (!uobj) { 3554 err = -ENOMEM; 3555 goto put_wqs; 3556 } 3557 3558 init_uobj(uobj, 0, file->ucontext, &rwq_ind_table_lock_class); 3559 down_write(&uobj->mutex); 3560 init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size; 3561 init_attr.ind_tbl = wqs; 3562 rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw); 3563 3564 if (IS_ERR(rwq_ind_tbl)) { 3565 err = PTR_ERR(rwq_ind_tbl); 3566 goto err_uobj; 3567 } 3568 3569 rwq_ind_tbl->ind_tbl 
= wqs; 3570 rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size; 3571 rwq_ind_tbl->uobject = uobj; 3572 uobj->object = rwq_ind_tbl; 3573 rwq_ind_tbl->device = ib_dev; 3574 atomic_set(&rwq_ind_tbl->usecnt, 0); 3575 3576 for (i = 0; i < num_wq_handles; i++) 3577 atomic_inc(&wqs[i]->usecnt); 3578 3579 err = idr_add_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj); 3580 if (err) 3581 goto destroy_ind_tbl; 3582 3583 resp.ind_tbl_handle = uobj->id; 3584 resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num; 3585 resp.response_length = required_resp_len; 3586 3587 err = ib_copy_to_udata(ucore, 3588 &resp, resp.response_length); 3589 if (err) 3590 goto err_copy; 3591 3592 kfree(wqs_handles); 3593 3594 for (j = 0; j < num_read_wqs; j++) 3595 put_wq_read(wqs[j]); 3596 3597 mutex_lock(&file->mutex); 3598 list_add_tail(&uobj->list, &file->ucontext->rwq_ind_tbl_list); 3599 mutex_unlock(&file->mutex); 3600 3601 uobj->live = 1; 3602 3603 up_write(&uobj->mutex); 3604 return 0; 3605 3606 err_copy: 3607 idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj); 3608 destroy_ind_tbl: 3609 ib_destroy_rwq_ind_table(rwq_ind_tbl); 3610 err_uobj: 3611 put_uobj_write(uobj); 3612 put_wqs: 3613 for (j = 0; j < num_read_wqs; j++) 3614 put_wq_read(wqs[j]); 3615 err_free: 3616 kfree(wqs_handles); 3617 kfree(wqs); 3618 return err; 3619 } 3620 3621 int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file, 3622 struct ib_device *ib_dev, 3623 struct ib_udata *ucore, 3624 struct ib_udata *uhw) 3625 { 3626 struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {}; 3627 struct ib_rwq_ind_table *rwq_ind_tbl; 3628 struct ib_uobject *uobj; 3629 int ret; 3630 struct ib_wq **ind_tbl; 3631 size_t required_cmd_sz; 3632 3633 required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle); 3634 3635 if (ucore->inlen < required_cmd_sz) 3636 return -EINVAL; 3637 3638 if (ucore->inlen > sizeof(cmd) && 3639 !ib_is_udata_cleared(ucore, sizeof(cmd), 3640 ucore->inlen - sizeof(cmd))) 3641 return -EOPNOTSUPP; 3642 3643 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3644 if (ret) 3645 return ret; 3646 3647 if (cmd.comp_mask) 3648 return -EOPNOTSUPP; 3649 3650 uobj = idr_write_uobj(&ib_uverbs_rwq_ind_tbl_idr, cmd.ind_tbl_handle, 3651 file->ucontext); 3652 if (!uobj) 3653 return -EINVAL; 3654 rwq_ind_tbl = uobj->object; 3655 ind_tbl = rwq_ind_tbl->ind_tbl; 3656 3657 ret = ib_destroy_rwq_ind_table(rwq_ind_tbl); 3658 if (!ret) 3659 uobj->live = 0; 3660 3661 put_uobj_write(uobj); 3662 3663 if (ret) 3664 return ret; 3665 3666 idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj); 3667 3668 mutex_lock(&file->mutex); 3669 list_del(&uobj->list); 3670 mutex_unlock(&file->mutex); 3671 3672 put_uobj(uobj); 3673 kfree(ind_tbl); 3674 return ret; 3675 } 3676 3677 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, 3678 struct ib_device *ib_dev, 3679 struct ib_udata *ucore, 3680 struct ib_udata *uhw) 3681 { 3682 struct ib_uverbs_create_flow cmd; 3683 struct ib_uverbs_create_flow_resp resp; 3684 struct ib_uobject *uobj; 3685 struct ib_flow *flow_id; 3686 struct ib_uverbs_flow_attr *kern_flow_attr; 3687 struct ib_flow_attr *flow_attr; 3688 struct ib_qp *qp; 3689 int err = 0; 3690 void *kern_spec; 3691 void *ib_spec; 3692 int i; 3693 3694 if (ucore->inlen < sizeof(cmd)) 3695 return -EINVAL; 3696 3697 if (ucore->outlen < sizeof(resp)) 3698 return -ENOSPC; 3699 3700 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 3701 if (err) 3702 return err; 3703 3704 ucore->inbuf = (const char *)ucore->inbuf + sizeof(cmd); 3705 ucore->inlen -= 
sizeof(cmd); 3706 3707 if (cmd.comp_mask) 3708 return -EINVAL; 3709 3710 if (priv_check(curthread, PRIV_NET_RAW) != 0) 3711 return -EPERM; 3712 3713 if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED) 3714 return -EINVAL; 3715 3716 if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) && 3717 ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) || 3718 (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT))) 3719 return -EINVAL; 3720 3721 if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS) 3722 return -EINVAL; 3723 3724 if (cmd.flow_attr.size > ucore->inlen || 3725 cmd.flow_attr.size > 3726 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec))) 3727 return -EINVAL; 3728 3729 if (cmd.flow_attr.reserved[0] || 3730 cmd.flow_attr.reserved[1]) 3731 return -EINVAL; 3732 3733 if (cmd.flow_attr.num_of_specs) { 3734 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size, 3735 GFP_KERNEL); 3736 if (!kern_flow_attr) 3737 return -ENOMEM; 3738 3739 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr)); 3740 err = ib_copy_from_udata(kern_flow_attr + 1, ucore, 3741 cmd.flow_attr.size); 3742 if (err) 3743 goto err_free_attr; 3744 } else { 3745 kern_flow_attr = &cmd.flow_attr; 3746 } 3747 3748 uobj = kmalloc(sizeof(*uobj), GFP_KERNEL); 3749 if (!uobj) { 3750 err = -ENOMEM; 3751 goto err_free_attr; 3752 } 3753 init_uobj(uobj, 0, file->ucontext, &rule_lock_class); 3754 down_write(&uobj->mutex); 3755 3756 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 3757 if (!qp) { 3758 err = -EINVAL; 3759 goto err_uobj; 3760 } 3761 3762 flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs * 3763 sizeof(union ib_flow_spec), GFP_KERNEL); 3764 if (!flow_attr) { 3765 err = -ENOMEM; 3766 goto err_put; 3767 } 3768 3769 flow_attr->type = kern_flow_attr->type; 3770 flow_attr->priority = kern_flow_attr->priority; 3771 flow_attr->num_of_specs = kern_flow_attr->num_of_specs; 3772 flow_attr->port = kern_flow_attr->port; 3773 flow_attr->flags = kern_flow_attr->flags; 3774 flow_attr->size = sizeof(*flow_attr); 3775 3776 kern_spec = kern_flow_attr + 1; 3777 ib_spec = flow_attr + 1; 3778 for (i = 0; i < flow_attr->num_of_specs && 3779 cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) && 3780 cmd.flow_attr.size >= 3781 ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) { 3782 err = kern_spec_to_ib_spec(kern_spec, ib_spec); 3783 if (err) 3784 goto err_free; 3785 flow_attr->size += 3786 ((union ib_flow_spec *) ib_spec)->size; 3787 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size; 3788 kern_spec = (char *)kern_spec + ((struct ib_uverbs_flow_spec *) kern_spec)->size; 3789 ib_spec = (char *)ib_spec + ((union ib_flow_spec *)ib_spec)->size; 3790 } 3791 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { 3792 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n", 3793 i, cmd.flow_attr.size); 3794 err = -EINVAL; 3795 goto err_free; 3796 } 3797 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER); 3798 if (IS_ERR(flow_id)) { 3799 err = PTR_ERR(flow_id); 3800 goto err_free; 3801 } 3802 flow_id->qp = qp; 3803 flow_id->uobject = uobj; 3804 uobj->object = flow_id; 3805 3806 err = idr_add_uobj(&ib_uverbs_rule_idr, uobj); 3807 if (err) 3808 goto destroy_flow; 3809 3810 memset(&resp, 0, sizeof(resp)); 3811 resp.flow_handle = uobj->id; 3812 3813 err = ib_copy_to_udata(ucore, 3814 &resp, sizeof(resp)); 3815 if (err) 3816 goto err_copy; 3817 3818 put_qp_read(qp); 3819 mutex_lock(&file->mutex); 3820 list_add_tail(&uobj->list, 
&file->ucontext->rule_list); 3821 mutex_unlock(&file->mutex); 3822 3823 uobj->live = 1; 3824 3825 up_write(&uobj->mutex); 3826 kfree(flow_attr); 3827 if (cmd.flow_attr.num_of_specs) 3828 kfree(kern_flow_attr); 3829 return 0; 3830 err_copy: 3831 idr_remove_uobj(&ib_uverbs_rule_idr, uobj); 3832 destroy_flow: 3833 ib_destroy_flow(flow_id); 3834 err_free: 3835 kfree(flow_attr); 3836 err_put: 3837 put_qp_read(qp); 3838 err_uobj: 3839 put_uobj_write(uobj); 3840 err_free_attr: 3841 if (cmd.flow_attr.num_of_specs) 3842 kfree(kern_flow_attr); 3843 return err; 3844 } 3845 3846 int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file, 3847 struct ib_device *ib_dev, 3848 struct ib_udata *ucore, 3849 struct ib_udata *uhw) 3850 { 3851 struct ib_uverbs_destroy_flow cmd; 3852 struct ib_flow *flow_id; 3853 struct ib_uobject *uobj; 3854 int ret; 3855 3856 if (ucore->inlen < sizeof(cmd)) 3857 return -EINVAL; 3858 3859 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 3860 if (ret) 3861 return ret; 3862 3863 if (cmd.comp_mask) 3864 return -EINVAL; 3865 3866 uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle, 3867 file->ucontext); 3868 if (!uobj) 3869 return -EINVAL; 3870 flow_id = uobj->object; 3871 3872 ret = ib_destroy_flow(flow_id); 3873 if (!ret) 3874 uobj->live = 0; 3875 3876 put_uobj_write(uobj); 3877 3878 idr_remove_uobj(&ib_uverbs_rule_idr, uobj); 3879 3880 mutex_lock(&file->mutex); 3881 list_del(&uobj->list); 3882 mutex_unlock(&file->mutex); 3883 3884 put_uobj(uobj); 3885 3886 return ret; 3887 } 3888 3889 static int __uverbs_create_xsrq(struct ib_uverbs_file *file, 3890 struct ib_device *ib_dev, 3891 struct ib_uverbs_create_xsrq *cmd, 3892 struct ib_udata *udata) 3893 { 3894 struct ib_uverbs_create_srq_resp resp; 3895 struct ib_usrq_object *obj; 3896 struct ib_pd *pd; 3897 struct ib_srq *srq; 3898 struct ib_uobject *uninitialized_var(xrcd_uobj); 3899 struct ib_srq_init_attr attr; 3900 int ret; 3901 3902 obj = kmalloc(sizeof *obj, GFP_KERNEL); 3903 if (!obj) 3904 return -ENOMEM; 3905 3906 init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class); 3907 down_write(&obj->uevent.uobject.mutex); 3908 3909 if (cmd->srq_type == IB_SRQT_XRC) { 3910 attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj); 3911 if (!attr.ext.xrc.xrcd) { 3912 ret = -EINVAL; 3913 goto err; 3914 } 3915 3916 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject); 3917 atomic_inc(&obj->uxrcd->refcnt); 3918 3919 attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0); 3920 if (!attr.ext.xrc.cq) { 3921 ret = -EINVAL; 3922 goto err_put_xrcd; 3923 } 3924 } 3925 3926 pd = idr_read_pd(cmd->pd_handle, file->ucontext); 3927 if (!pd) { 3928 ret = -EINVAL; 3929 goto err_put_cq; 3930 } 3931 3932 attr.event_handler = ib_uverbs_srq_event_handler; 3933 attr.srq_context = file; 3934 attr.srq_type = cmd->srq_type; 3935 attr.attr.max_wr = cmd->max_wr; 3936 attr.attr.max_sge = cmd->max_sge; 3937 attr.attr.srq_limit = cmd->srq_limit; 3938 3939 obj->uevent.events_reported = 0; 3940 INIT_LIST_HEAD(&obj->uevent.event_list); 3941 3942 srq = pd->device->create_srq(pd, &attr, udata); 3943 if (IS_ERR(srq)) { 3944 ret = PTR_ERR(srq); 3945 goto err_put; 3946 } 3947 3948 srq->device = pd->device; 3949 srq->pd = pd; 3950 srq->srq_type = cmd->srq_type; 3951 srq->uobject = &obj->uevent.uobject; 3952 srq->event_handler = attr.event_handler; 3953 srq->srq_context = attr.srq_context; 3954 3955 if (cmd->srq_type == IB_SRQT_XRC) { 3956 srq->ext.xrc.cq = attr.ext.xrc.cq; 3957 
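/* An XRC SRQ keeps its CQ and XRCD pinned via their usecnt for its lifetime. */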
srq->ext.xrc.xrcd = attr.ext.xrc.xrcd; 3958 atomic_inc(&attr.ext.xrc.cq->usecnt); 3959 atomic_inc(&attr.ext.xrc.xrcd->usecnt); 3960 } 3961 3962 atomic_inc(&pd->usecnt); 3963 atomic_set(&srq->usecnt, 0); 3964 3965 obj->uevent.uobject.object = srq; 3966 ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject); 3967 if (ret) 3968 goto err_destroy; 3969 3970 memset(&resp, 0, sizeof resp); 3971 resp.srq_handle = obj->uevent.uobject.id; 3972 resp.max_wr = attr.attr.max_wr; 3973 resp.max_sge = attr.attr.max_sge; 3974 if (cmd->srq_type == IB_SRQT_XRC) 3975 resp.srqn = srq->ext.xrc.srq_num; 3976 3977 if (copy_to_user((void __user *) (unsigned long) cmd->response, 3978 &resp, sizeof resp)) { 3979 ret = -EFAULT; 3980 goto err_copy; 3981 } 3982 3983 if (cmd->srq_type == IB_SRQT_XRC) { 3984 put_uobj_read(xrcd_uobj); 3985 put_cq_read(attr.ext.xrc.cq); 3986 } 3987 put_pd_read(pd); 3988 3989 mutex_lock(&file->mutex); 3990 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list); 3991 mutex_unlock(&file->mutex); 3992 3993 obj->uevent.uobject.live = 1; 3994 3995 up_write(&obj->uevent.uobject.mutex); 3996 3997 return 0; 3998 3999 err_copy: 4000 idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject); 4001 4002 err_destroy: 4003 ib_destroy_srq(srq); 4004 4005 err_put: 4006 put_pd_read(pd); 4007 4008 err_put_cq: 4009 if (cmd->srq_type == IB_SRQT_XRC) 4010 put_cq_read(attr.ext.xrc.cq); 4011 4012 err_put_xrcd: 4013 if (cmd->srq_type == IB_SRQT_XRC) { 4014 atomic_dec(&obj->uxrcd->refcnt); 4015 put_uobj_read(xrcd_uobj); 4016 } 4017 4018 err: 4019 put_uobj_write(&obj->uevent.uobject); 4020 return ret; 4021 } 4022 4023 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, 4024 struct ib_device *ib_dev, 4025 const char __user *buf, int in_len, 4026 int out_len) 4027 { 4028 struct ib_uverbs_create_srq cmd; 4029 struct ib_uverbs_create_xsrq xcmd; 4030 struct ib_uverbs_create_srq_resp resp; 4031 struct ib_udata udata; 4032 int ret; 4033 4034 if (out_len < sizeof resp) 4035 return -ENOSPC; 4036 4037 if (copy_from_user(&cmd, buf, sizeof cmd)) 4038 return -EFAULT; 4039 4040 xcmd.response = cmd.response; 4041 xcmd.user_handle = cmd.user_handle; 4042 xcmd.srq_type = IB_SRQT_BASIC; 4043 xcmd.pd_handle = cmd.pd_handle; 4044 xcmd.max_wr = cmd.max_wr; 4045 xcmd.max_sge = cmd.max_sge; 4046 xcmd.srq_limit = cmd.srq_limit; 4047 4048 INIT_UDATA(&udata, buf + sizeof cmd, 4049 (unsigned long) cmd.response + sizeof resp, 4050 in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr), 4051 out_len - sizeof resp); 4052 4053 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata); 4054 if (ret) 4055 return ret; 4056 4057 return in_len; 4058 } 4059 4060 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file, 4061 struct ib_device *ib_dev, 4062 const char __user *buf, int in_len, int out_len) 4063 { 4064 struct ib_uverbs_create_xsrq cmd; 4065 struct ib_uverbs_create_srq_resp resp; 4066 struct ib_udata udata; 4067 int ret; 4068 4069 if (out_len < sizeof resp) 4070 return -ENOSPC; 4071 4072 if (copy_from_user(&cmd, buf, sizeof cmd)) 4073 return -EFAULT; 4074 4075 INIT_UDATA(&udata, buf + sizeof cmd, 4076 (unsigned long) cmd.response + sizeof resp, 4077 in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr), 4078 out_len - sizeof resp); 4079 4080 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata); 4081 if (ret) 4082 return ret; 4083 4084 return in_len; 4085 } 4086 4087 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, 4088 struct ib_device *ib_dev, 4089 const char __user *buf, int in_len, 4090 int out_len) 4091 
{ 4092 struct ib_uverbs_modify_srq cmd; 4093 struct ib_udata udata; 4094 struct ib_srq *srq; 4095 struct ib_srq_attr attr; 4096 int ret; 4097 4098 if (copy_from_user(&cmd, buf, sizeof cmd)) 4099 return -EFAULT; 4100 4101 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, 4102 out_len); 4103 4104 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 4105 if (!srq) 4106 return -EINVAL; 4107 4108 attr.max_wr = cmd.max_wr; 4109 attr.srq_limit = cmd.srq_limit; 4110 4111 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata); 4112 4113 put_srq_read(srq); 4114 4115 return ret ? ret : in_len; 4116 } 4117 4118 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file, 4119 struct ib_device *ib_dev, 4120 const char __user *buf, 4121 int in_len, int out_len) 4122 { 4123 struct ib_uverbs_query_srq cmd; 4124 struct ib_uverbs_query_srq_resp resp; 4125 struct ib_srq_attr attr; 4126 struct ib_srq *srq; 4127 int ret; 4128 4129 if (out_len < sizeof resp) 4130 return -ENOSPC; 4131 4132 if (copy_from_user(&cmd, buf, sizeof cmd)) 4133 return -EFAULT; 4134 4135 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 4136 if (!srq) 4137 return -EINVAL; 4138 4139 ret = ib_query_srq(srq, &attr); 4140 4141 put_srq_read(srq); 4142 4143 if (ret) 4144 return ret; 4145 4146 memset(&resp, 0, sizeof resp); 4147 4148 resp.max_wr = attr.max_wr; 4149 resp.max_sge = attr.max_sge; 4150 resp.srq_limit = attr.srq_limit; 4151 4152 if (copy_to_user((void __user *) (unsigned long) cmd.response, 4153 &resp, sizeof resp)) 4154 return -EFAULT; 4155 4156 return in_len; 4157 } 4158 4159 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, 4160 struct ib_device *ib_dev, 4161 const char __user *buf, int in_len, 4162 int out_len) 4163 { 4164 struct ib_uverbs_destroy_srq cmd; 4165 struct ib_uverbs_destroy_srq_resp resp; 4166 struct ib_uobject *uobj; 4167 struct ib_srq *srq; 4168 struct ib_uevent_object *obj; 4169 int ret = -EINVAL; 4170 struct ib_usrq_object *us; 4171 enum ib_srq_type srq_type; 4172 4173 if (copy_from_user(&cmd, buf, sizeof cmd)) 4174 return -EFAULT; 4175 4176 uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext); 4177 if (!uobj) 4178 return -EINVAL; 4179 srq = uobj->object; 4180 obj = container_of(uobj, struct ib_uevent_object, uobject); 4181 srq_type = srq->srq_type; 4182 4183 ret = ib_destroy_srq(srq); 4184 if (!ret) 4185 uobj->live = 0; 4186 4187 put_uobj_write(uobj); 4188 4189 if (ret) 4190 return ret; 4191 4192 if (srq_type == IB_SRQT_XRC) { 4193 us = container_of(obj, struct ib_usrq_object, uevent); 4194 atomic_dec(&us->uxrcd->refcnt); 4195 } 4196 4197 idr_remove_uobj(&ib_uverbs_srq_idr, uobj); 4198 4199 mutex_lock(&file->mutex); 4200 list_del(&uobj->list); 4201 mutex_unlock(&file->mutex); 4202 4203 ib_uverbs_release_uevent(file, obj); 4204 4205 memset(&resp, 0, sizeof resp); 4206 resp.events_reported = obj->events_reported; 4207 4208 put_uobj(uobj); 4209 4210 if (copy_to_user((void __user *) (unsigned long) cmd.response, 4211 &resp, sizeof resp)) 4212 ret = -EFAULT; 4213 4214 return ret ? 
ret : in_len; 4215 } 4216 4217 int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, 4218 struct ib_device *ib_dev, 4219 struct ib_udata *ucore, 4220 struct ib_udata *uhw) 4221 { 4222 struct ib_uverbs_ex_query_device_resp resp = { {0} }; 4223 struct ib_uverbs_ex_query_device cmd; 4224 struct ib_device_attr attr = {0}; 4225 int err; 4226 4227 if (ucore->inlen < sizeof(cmd)) 4228 return -EINVAL; 4229 4230 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 4231 if (err) 4232 return err; 4233 4234 if (cmd.comp_mask) 4235 return -EINVAL; 4236 4237 if (cmd.reserved) 4238 return -EINVAL; 4239 4240 resp.response_length = offsetof(typeof(resp), odp_caps); 4241 4242 if (ucore->outlen < resp.response_length) 4243 return -ENOSPC; 4244 4245 err = ib_dev->query_device(ib_dev, &attr, uhw); 4246 if (err) 4247 return err; 4248 4249 copy_query_dev_fields(file, ib_dev, &resp.base, &attr); 4250 4251 if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps)) 4252 goto end; 4253 4254 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 4255 resp.odp_caps.general_caps = attr.odp_caps.general_caps; 4256 resp.odp_caps.per_transport_caps.rc_odp_caps = 4257 attr.odp_caps.per_transport_caps.rc_odp_caps; 4258 resp.odp_caps.per_transport_caps.uc_odp_caps = 4259 attr.odp_caps.per_transport_caps.uc_odp_caps; 4260 resp.odp_caps.per_transport_caps.ud_odp_caps = 4261 attr.odp_caps.per_transport_caps.ud_odp_caps; 4262 #endif 4263 resp.response_length += sizeof(resp.odp_caps); 4264 4265 if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask)) 4266 goto end; 4267 4268 resp.timestamp_mask = attr.timestamp_mask; 4269 resp.response_length += sizeof(resp.timestamp_mask); 4270 4271 if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock)) 4272 goto end; 4273 4274 resp.hca_core_clock = attr.hca_core_clock; 4275 resp.response_length += sizeof(resp.hca_core_clock); 4276 4277 if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex)) 4278 goto end; 4279 4280 resp.device_cap_flags_ex = attr.device_cap_flags; 4281 resp.response_length += sizeof(resp.device_cap_flags_ex); 4282 4283 if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps)) 4284 goto end; 4285 4286 resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts; 4287 resp.rss_caps.max_rwq_indirection_tables = 4288 attr.rss_caps.max_rwq_indirection_tables; 4289 resp.rss_caps.max_rwq_indirection_table_size = 4290 attr.rss_caps.max_rwq_indirection_table_size; 4291 4292 resp.response_length += sizeof(resp.rss_caps); 4293 4294 if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq)) 4295 goto end; 4296 4297 resp.max_wq_type_rq = attr.max_wq_type_rq; 4298 resp.response_length += sizeof(resp.max_wq_type_rq); 4299 end: 4300 err = ib_copy_to_udata(ucore, &resp, resp.response_length); 4301 return err; 4302 } 4303