/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $FreeBSD$
 */

#define LINUXKPI_PARAM_PREFIX ibcore_

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rbtree.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"

#include <sys/priv.h>

struct uverbs_lock_class {
	char name[16];
};

static struct uverbs_lock_class pd_lock_class = { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class = { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class = { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class = { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
static struct uverbs_lock_class wq_lock_class = { .name = "WQ-uobj" };
static struct uverbs_lock_class rwq_ind_table_lock_class = { .name = "IND_TBL-uobj" };

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr write operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.  For read operations, rcu_read_lock() is used
 *   instead, but similarly the kref reference is grabbed before the
 *   rcu_read_unlock().
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
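/*
 * Illustrative sketch only (not called anywhere): the read-side and
 * destroy-side patterns the rules above imply, slightly simplified
 * from the idr_read_uobj()/idr_write_uobj() helpers below:
 *
 *   reader:                            destroyer:
 *     rcu_read_lock();                   down_write(&uobj->mutex);
 *     uobj = idr_find(idr, id);          ...destroy hardware object...
 *     kref_get(&uobj->ref);              uobj->live = 0;
 *     rcu_read_unlock();                 up_write(&uobj->mutex);
 *     down_read(&uobj->mutex);           idr_remove_uobj(idr, uobj);
 *     if (!uobj->live)                   put_uobj(uobj);
 *             bail out;
 *     ...use the object...
 *     up_read(&uobj->mutex);
 *     put_uobj(uobj);
 */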
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	uobj->live = 0;
}

static void release_uobj(struct kref *kref)
{
	kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
}

static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}
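/*
 * Note on the allocation pattern below: idr_alloc() is called with
 * GFP_NOWAIT because ib_uverbs_idr_lock is a spinlock.  The preceding
 * idr_preload(GFP_KERNEL) pre-allocates idr nodes while sleeping is
 * still allowed, so the GFP_NOWAIT allocation under the lock will
 * normally succeed without reaching into the allocator.
 */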
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}

static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	rcu_read_lock();
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	rcu_read_unlock();

	return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}

static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}

static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_wq *idr_read_wq(int wq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_wq_idr, wq_handle, context, 0);
}

static void put_wq_read(struct ib_wq *wq)
{
	put_uobj_read(wq->uobject);
}

static struct ib_rwq_ind_table *idr_read_rwq_indirection_table(int ind_table_handle,
							       struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_rwq_ind_tbl_idr, ind_table_handle, context, 0);
}

static void put_rwq_indirection_table_read(struct ib_rwq_ind_table *ind_table)
{
	put_uobj_read(ind_table->uobject);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}
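/*
 * Command-handler calling convention, as used throughout this file:
 * each handler copies a fixed-size command struct out of "buf" and
 * writes its reply through the user pointer carried in cmd.response.
 * On success a handler returns in_len, so the uverbs write() path
 * reports the whole command as consumed; on failure it returns a
 * negative errno.  Note in ib_uverbs_get_context() below that the
 * response is copied out *before* fd_install(): once an fd has been
 * installed it cannot be taken back, so all failure paths must come
 * first.
 */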
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ib_dev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->wq_list);
	INIT_LIST_HEAD(&ucontext->rwq_ind_tbl_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	rcu_read_lock();
	ucontext->tgid = get_pid(task_pid_group_leader(current));
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = (u32)(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;
	resp.lid = attr.lid;
	resp.sm_lid = attr.sm_lid;
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	pd = uobj->object;

	if (atomic_read(&pd->usecnt)) {
		ret = -EBUSY;
		goto err_put;
	}

	ret = pd->device->dealloc_pd(uobj->object);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
	if (ret)
		goto err_put;

	uobj->live = 0;
	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;

err_put:
	put_uobj_write(uobj);
	return ret;
}
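/*
 * XRC domains are bound to a file so that cooperating processes can
 * share one: the rbtree below maps an inode to the ib_xrcd created
 * for it, keyed by inode pointer.  A process opening the same file
 * gets the existing XRCD back instead of a new one, and each table
 * entry holds an inode reference (igrab()/iput()) for its lifetime.
 */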
struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}

ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = f.file->f_dentry->d_inode;
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no XRCD for this inode; need the O_CREAT flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	struct ib_xrcd *xrcd = NULL;
	struct inode *inode = NULL;
	struct ib_uxrcd_object *obj;
	int live;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd = uobj->object;
	inode = xrcd->inode;
	obj = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}

void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
			      file->ucontext);

	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		put_pd_read(pd);

put_uobjs:
	put_uobj_write(mr->uobject);

	return ret;
}
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr *mr;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	uverbs_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw *mw;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = uverbs_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}
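/*
 * create_cq() below is shared by the legacy and the extended
 * (ib_uverbs_ex_*) command paths.  Extended commands grow only by
 * appending fields, so optional input is gated on cmd_sz covering the
 * field (offsetof + sizeof checks), and the response carries
 * response_length so older userspace is handed only the prefix it
 * understands.  The same scheme is used by create_qp() further down.
 */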
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return obj;

err_cb:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);

	return ERR_PTR(ret);
}
static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));

	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}
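/*
 * Wire format of the poll_cq reply: a struct ib_uverbs_poll_cq_resp
 * header (holding the completion count) followed immediately by
 * resp.count packed struct ib_uverbs_wc entries.  copy_wc_to_user()
 * converts one kernel ib_wc into that fixed userspace layout.
 */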
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	tmp.slid = wc->slid;
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq = uobj->object;
	ev_file = cq->cq_context;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
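/*
 * create_qp() handles both command paths (see the note above
 * create_cq()).  Resource lookup depends on the QP type: an XRC_TGT QP
 * takes only an XRCD (no PD, CQs or SRQ of its own), an XRC_INI QP has
 * no receive side, and a QP built on an RX indirection table may not
 * name receive resources at all.  Raw packet QPs require privilege
 * (PRIV_NET_RAW) since they give raw access to the wire.
 */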
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	char *buf;
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && priv_check(curthread, PRIV_NET_RAW) != 0)
		return -EPERM;

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext,
		  &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);
	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
	    (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = idr_read_rwq_indirection_table(cmd->rwq_ind_tbl_handle,
							 file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if ((cmd_sz >= offsetof(typeof(*cmd), reserved1) +
	     sizeof(cmd->reserved1)) && cmd->reserved1) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd = idr_read_xrcd(cmd->pd_handle, file->ucontext,
				     &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = idr_read_srq(cmd->srq_handle,
						   file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = idr_read_cq(cmd->recv_cq_handle,
							  file->ucontext, 0);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = idr_read_pd(cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS)) {
		ret = -EINVAL;
		goto err_put;
	}

	buf = (char *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, uhw);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp = qp;
		qp->device = device;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->rwq_ind_tbl = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);
	if (ind_tbl)
		put_rwq_indirection_table_read(ind_tbl);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;
err_cb:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);
	if (ind_tbl)
		put_rwq_indirection_table_read(ind_tbl);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata ucore;
	struct ib_udata uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
		   resp_size);
	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}
static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}

ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid = attr->ah_attr.dlid;
	resp.dest.sl = attr->ah_attr.sl;
	resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
	resp.dest.static_rate = attr->ah_attr.static_rate;
	resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num = attr->ah_attr.port_num;

	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num = attr->alt_ah_attr.port_num;

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}
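
/*
 * Modify a QP's attributes from a userspace command.  The attribute
 * mask is first filtered through modify_qp_mask() above; a QP that is
 * shared with other processes (qp->real_qp != qp) is routed through
 * ib_modify_qp(), while a QP owned by this process calls the driver's
 * modify_qp verb directly with the user data.
 */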
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata udata;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state = cmd.qp_state;
	attr->cur_qp_state = cmd.cur_qp_state;
	attr->path_mtu = cmd.path_mtu;
	attr->path_mig_state = cmd.path_mig_state;
	attr->qkey = cmd.qkey;
	attr->rq_psn = cmd.rq_psn;
	attr->sq_psn = cmd.sq_psn;
	attr->dest_qp_num = cmd.dest_qp_num;
	attr->qp_access_flags = cmd.qp_access_flags;
	attr->pkey_index = cmd.pkey_index;
	attr->alt_pkey_index = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd.min_rnr_timer;
	attr->port_num = cmd.port_num;
	attr->timeout = cmd.timeout;
	attr->retry_cnt = cmd.retry_cnt;
	attr->rnr_retry = cmd.rnr_retry;
	attr->alt_port_num = cmd.alt_port_num;
	attr->alt_timeout = cmd.alt_timeout;

	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
	attr->ah_attr.dlid = cmd.dest.dlid;
	attr->ah_attr.sl = cmd.dest.sl;
	attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate = cmd.dest.static_rate;
	attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num = cmd.dest.port_num;

	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		if (cmd.attr_mask & IB_QP_AV) {
			ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
			if (ret)
				goto release_qp;
		}
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	if (ret)
		goto release_qp;

	ret = in_len;

release_qp:
	put_qp_read(qp);

out:
	kfree(attr);

	return ret;
}
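
/*
 * Destroy a QP.  The request is rejected with -EBUSY while multicast
 * groups are still attached; on success the handle is removed from the
 * idr and the count of asynchronous events reported for this QP is
 * returned to userspace.
 */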
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}
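
/*
 * Post a chain of send work requests to a QP.  The command buffer
 * holds the header, then wr_count work requests of wqe_size bytes
 * each, then the scatter/gather entries for all requests.  Each user
 * WR is converted into the matching kernel WR type (UD, RDMA, atomic
 * or plain send) before the chain is handed to the driver's post_send
 * verb; on failure resp.bad_wr reports how far posting got.
 */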
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp *qp;
	int i, sg_ind;
	int is_ud;
	ssize_t ret = -EINVAL;
	size_t next_size;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			ud->ah = idr_read_ah(user_wr->wr.ud.ah, file->ucontext);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
				(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;
		next->opcode = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			next->sg_list = (void *)((char *)next +
				ALIGN(next_size, sizeof(struct ib_sge)));
			if (copy_from_user(next->sg_list,
					   (const char *)buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			put_ah_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
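
/*
 * Unmarshal a userspace receive work request list into a chain of
 * kernel ib_recv_wr structures, shared by the QP and SRQ post-receive
 * paths below.  Returns the head of the chain or an ERR_PTR; the
 * caller frees the chain.
 */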
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr *wr = NULL, *last, *next;
	int sg_ind;
	int i;
	int ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *)((char *)next +
				ALIGN(sizeof *next, sizeof (struct ib_sge)));
			if (copy_from_user(next->sg_list,
					   (const char *)buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}
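
/*
 * Post receive work requests to a QP and, below, to an SRQ.  On
 * failure resp.bad_wr carries the 1-based index of the request on
 * which posting stopped.
 */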
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_qp *qp;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_srq *srq;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
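
/*
 * Create an address handle.  The user-supplied address vector is
 * converted to a kernel ib_ah_attr (the GRH fields are honored only
 * when is_global is set) and the new AH is linked into the context's
 * AH list.
 */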
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_ah *ah;
	struct ib_ah_attr attr;
	int ret;
	struct ib_udata udata;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid = cmd.attr.dlid;
	attr.sl = cmd.attr.sl;
	attr.src_path_bits = cmd.attr.src_path_bits;
	attr.static_rate = cmd.attr.static_rate;
	attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num = cmd.attr.port_num;
	attr.grh.flow_label = cmd.attr.grh.flow_label;
	attr.grh.sgid_index = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memset(&attr.dmac, 0, sizeof(attr.dmac));
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = pd->device->create_ah(pd, &attr, &udata);

	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->device = pd->device;
	ah->pd = pd;
	atomic_inc(&pd->usecnt);
	ah->uobject = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah *ah;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
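
/*
 * Attach a QP to a multicast group.  Attachments are also recorded on
 * the QP's uobject so that destroy_qp can refuse with -EBUSY while
 * they exist, and a duplicate <GID, MLID> attach request simply
 * succeeds.
 */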
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object *obj;
	struct ib_qp *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}
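
/*
 * Helpers for converting userspace flow specifications into kernel
 * ib_flow_spec structures.  Each user spec carries a value/mask pair
 * after its header; any trailing bytes beyond what the kernel
 * structure defines must be zero, so the kernel never silently
 * ignores attributes a newer userspace asked for.
 */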
static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
{
	/* Returns user space filter size, includes padding */
	return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}

static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
				u16 ib_real_filter_sz)
{
	/*
	 * User space filter structures must be 64 bit aligned, otherwise this
	 * may pass, but we won't handle additional new attributes.
	 */

	if (kern_filter_size > ib_real_filter_sz) {
		if (memchr_inv((char *)kern_spec_filter +
			       ib_real_filter_sz, 0,
			       kern_filter_size - ib_real_filter_sz))
			return -EINVAL;
		return ib_real_filter_sz;
	}
	return kern_filter_size;
}

static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	ssize_t actual_filter_sz;
	ssize_t kern_filter_sz;
	ssize_t ib_filter_sz;
	void *kern_spec_mask;
	void *kern_spec_val;

	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
	/* User flow spec size must be aligned to 4 bytes */
	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
		return -EINVAL;

	kern_spec_val = (char *)kern_spec +
		sizeof(struct ib_uverbs_flow_spec_hdr);
	kern_spec_mask = (char *)kern_spec_val + kern_filter_sz;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_eth);
		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV6:
		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
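
/*
 * Extended command: create a work queue (WQ) against a PD and CQ.
 * The variable-size command and response are validated against the
 * minimum sizes this kernel understands, and any trailing input the
 * kernel does not know about must be zeroed.
 */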
int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_wq cmd = {};
	struct ib_uverbs_ex_create_wq_resp resp = {};
	struct ib_uwq_object *obj;
	int err = 0;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_wq *wq;
	struct ib_wq_init_attr wq_init_attr = {};
	size_t required_cmd_sz;
	size_t required_resp_len;

	required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
	required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext,
		  &wq_lock_class);
	down_write(&obj->uevent.uobject.mutex);
	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		err = -EINVAL;
		goto err_uobj;
	}

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq) {
		err = -EINVAL;
		goto err_put_pd;
	}

	wq_init_attr.cq = cq;
	wq_init_attr.max_sge = cmd.max_sge;
	wq_init_attr.max_wr = cmd.max_wr;
	wq_init_attr.wq_context = file;
	wq_init_attr.wq_type = cmd.wq_type;
	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
		goto err_put_cq;
	}

	wq->uobject = &obj->uevent.uobject;
	obj->uevent.uobject.object = wq;
	wq->wq_type = wq_init_attr.wq_type;
	wq->cq = cq;
	wq->pd = pd;
	wq->device = pd->device;
	wq->wq_context = wq_init_attr.wq_context;
	atomic_set(&wq->usecnt, 0);
	atomic_inc(&pd->usecnt);
	atomic_inc(&cq->usecnt);
	err = idr_add_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject);
	if (err)
		goto destroy_wq;

	memset(&resp, 0, sizeof(resp));
	resp.wq_handle = obj->uevent.uobject.id;
	resp.max_sge = wq_init_attr.max_sge;
	resp.max_wr = wq_init_attr.max_wr;
	resp.wqn = wq->wq_num;
	resp.response_length = required_resp_len;
	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	put_pd_read(pd);
	put_cq_read(cq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->wq_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;
	up_write(&obj->uevent.uobject.mutex);
	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject);
destroy_wq:
	ib_destroy_wq(wq);
err_put_cq:
	put_cq_read(cq);
err_put_pd:
	put_pd_read(pd);
err_uobj:
	put_uobj_write(&obj->uevent.uobject);

	return err;
}

int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    struct ib_udata *ucore,
			    struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_wq cmd = {};
	struct ib_uverbs_ex_destroy_wq_resp resp = {};
	struct ib_wq *wq;
	struct ib_uobject *uobj;
	struct ib_uwq_object *obj;
	size_t required_cmd_sz;
	size_t required_resp_len;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
	required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	resp.response_length = required_resp_len;
	uobj = idr_write_uobj(&ib_uverbs_wq_idr, cmd.wq_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;

	wq = uobj->object;
	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
	ret = ib_destroy_wq(wq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);
	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_wq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);
	resp.events_reported = obj->uevent.events_reported;
	put_uobj(uobj);

	ret = ib_copy_to_udata(ucore, &resp, resp.response_length);
	if (ret)
		return ret;

	return 0;
}
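
/*
 * Extended command: modify the state of a WQ.  Only IB_WQ_STATE and
 * IB_WQ_CUR_STATE may appear in attr_mask.
 */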
int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_wq cmd = {};
	struct ib_wq *wq;
	struct ib_wq_attr wq_attr = {};
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask)
		return -EINVAL;

	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE))
		return -EINVAL;

	wq = idr_read_wq(cmd.wq_handle, file->ucontext);
	if (!wq)
		return -EINVAL;

	wq_attr.curr_wq_state = cmd.curr_wq_state;
	wq_attr.wq_state = cmd.wq_state;
	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
	put_wq_read(wq);
	return ret;
}
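
/*
 * Extended command: create an RSS receive-WQ indirection table from
 * an array of 2^log_ind_tbl_size WQ handles.  Every referenced WQ is
 * looked up and held for the duration of the call; note that the
 * handle array is u64-aligned on the wire, so a single-entry table
 * still carries one padding __u32.
 */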
int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      struct ib_udata *ucore,
				      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
	struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
	struct ib_uobject *uobj;
	int err = 0;
	struct ib_rwq_ind_table_init_attr init_attr = {};
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_wq **wqs = NULL;
	u32 *wqs_handles = NULL;
	struct ib_wq *wq = NULL;
	int i, j, num_read_wqs;
	u32 num_wq_handles;
	u32 expected_in_size;
	size_t required_cmd_sz_header;
	size_t required_resp_len;

	required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
	required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);

	if (ucore->inlen < required_cmd_sz_header)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
	if (err)
		return err;

	ucore->inbuf = (const char *)ucore->inbuf + required_cmd_sz_header;
	ucore->inlen -= required_cmd_sz_header;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
		return -EINVAL;

	num_wq_handles = 1 << cmd.log_ind_tbl_size;
	expected_in_size = num_wq_handles * sizeof(__u32);
	if (num_wq_handles == 1)
		/* input size for wq handles is u64 aligned */
		expected_in_size += sizeof(__u32);

	if (ucore->inlen < expected_in_size)
		return -EINVAL;

	if (ucore->inlen > expected_in_size &&
	    !ib_is_udata_cleared(ucore, expected_in_size,
				 ucore->inlen - expected_in_size))
		return -EOPNOTSUPP;

	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
			      GFP_KERNEL);
	if (!wqs_handles)
		return -ENOMEM;

	err = ib_copy_from_udata(wqs_handles, ucore,
				 num_wq_handles * sizeof(__u32));
	if (err)
		goto err_free;

	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
	if (!wqs) {
		err = -ENOMEM;
		goto err_free;
	}

	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
	     num_read_wqs++) {
		wq = idr_read_wq(wqs_handles[num_read_wqs], file->ucontext);
		if (!wq) {
			err = -EINVAL;
			goto put_wqs;
		}

		wqs[num_read_wqs] = wq;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto put_wqs;
	}

	init_uobj(uobj, 0, file->ucontext, &rwq_ind_table_lock_class);
	down_write(&uobj->mutex);
	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;
	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);

	if (IS_ERR(rwq_ind_tbl)) {
		err = PTR_ERR(rwq_ind_tbl);
		goto err_uobj;
	}

	rwq_ind_tbl->ind_tbl = wqs;
	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
	rwq_ind_tbl->uobject = uobj;
	uobj->object = rwq_ind_tbl;
	rwq_ind_tbl->device = ib_dev;
	atomic_set(&rwq_ind_tbl->usecnt, 0);

	for (i = 0; i < num_wq_handles; i++)
		atomic_inc(&wqs[i]->usecnt);

	err = idr_add_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
	if (err)
		goto destroy_ind_tbl;

	resp.ind_tbl_handle = uobj->id;
	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
	resp.response_length = required_resp_len;

	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	kfree(wqs_handles);

	for (j = 0; j < num_read_wqs; j++)
		put_wq_read(wqs[j]);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rwq_ind_tbl_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);
	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
destroy_ind_tbl:
	ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
	put_uobj_write(uobj);
put_wqs:
	for (j = 0; j < num_read_wqs; j++)
		put_wq_read(wqs[j]);
err_free:
	kfree(wqs_handles);
	kfree(wqs);
	return err;
}

int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_uobject *uobj;
	int ret;
	struct ib_wq **ind_tbl;
	size_t required_cmd_sz;

	required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	uobj = idr_write_uobj(&ib_uverbs_rwq_ind_tbl_idr, cmd.ind_tbl_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	rwq_ind_tbl = uobj->object;
	ind_tbl = rwq_ind_tbl->ind_tbl;

	ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	kfree(ind_tbl);
	return ret;
}
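
/*
 * Extended command: attach a flow steering rule to a QP.  Creating a
 * rule requires PRIV_NET_RAW.  The user's spec list is converted one
 * spec at a time by kern_spec_to_ib_spec(), and the advertised sizes
 * must add up exactly or the whole command is rejected.
 */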
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject *uobj;
	struct ib_flow *flow_id;
	struct ib_uverbs_flow_attr *kern_flow_attr;
	struct ib_flow_attr *flow_attr;
	struct ib_qp *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf = (const char *)ucore->inbuf + sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if (priv_check(curthread, PRIV_NET_RAW) != 0)
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto err_free_attr;
	}
	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
	down_write(&uobj->mutex);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
			    sizeof(union ib_flow_spec), GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec = (char *)kern_spec + ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		ib_spec = (char *)ib_spec + ((union ib_flow_spec *)ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->qp = qp;
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
	if (err)
		goto destroy_flow;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	put_qp_read(qp);
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rule_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	put_qp_read(qp);
err_uobj:
	put_uobj_write(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}

int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow cmd;
	struct ib_flow *flow_id;
	struct ib_uobject *uobj;
	int ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	flow_id = uobj->object;

	ret = ib_destroy_flow(flow_id);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return ret;
}
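
/*
 * Common worker for the create_srq and create_xsrq commands.  XRC
 * SRQs additionally look up and reference the XRC domain and the
 * completion queue.
 */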
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object *obj;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler = ib_uverbs_srq_event_handler;
	attr.srq_context = file;
	attr.srq_type = cmd->srq_type;
	attr.attr.max_wr = cmd->max_wr;
	attr.attr.max_sge = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device = pd->device;
	srq->pd = pd;
	srq->srq_type = cmd->srq_type;
	srq->uobject = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr = attr.attr.max_wr;
	resp.max_sge = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
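
/*
 * The legacy create_srq command is handled by rewriting it as a
 * basic-type ib_uverbs_create_xsrq command and forwarding it to
 * __uverbs_create_xsrq() above.
 */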
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq cmd;
	struct ib_uverbs_create_xsrq xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	xcmd.response = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type = IB_SRQT_BASIC;
	xcmd.pd_handle = cmd.pd_handle;
	xcmd.max_wr = cmd.max_wr;
	xcmd.max_sge = cmd.max_sge;
	xcmd.srq_limit = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata udata;
	struct ib_srq *srq;
	struct ib_srq_attr attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr attr;
	struct ib_srq *srq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr = attr.max_wr;
	resp.max_sge = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
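
/*
 * Destroy an SRQ.  For XRC SRQs the XRCD reference taken at creation
 * is dropped, and the count of asynchronous events reported for the
 * SRQ is handed back to userspace.
 */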
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject *uobj;
	struct ib_srq *srq;
	struct ib_uevent_object *obj;
	int ret = -EINVAL;
	struct ib_usrq_object *us;
	enum ib_srq_type srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (srq_type == IB_SRQT_XRC) {
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}
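
/*
 * Extended query_device.  The response is built incrementally: each
 * optional capability block is appended only if the caller's output
 * buffer can hold it, and resp.response_length tells userspace how
 * much of the structure was actually filled in.
 */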
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

	if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
		goto end;

	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.response_length += sizeof(resp.device_cap_flags_ex);

	if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
		goto end;

	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;

	resp.response_length += sizeof(resp.rss_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
		goto end;

	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.response_length += sizeof(resp.max_wq_type_rq);
end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}