1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
3  *
4  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
5  * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
6  * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
7  * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
8  *
9  * This software is available to you under a choice of one of two
10  * licenses.  You may choose to be licensed under the terms of the GNU
11  * General Public License (GPL) Version 2, available from the file
12  * COPYING in the main directory of this source tree, or the
13  * OpenIB.org BSD license below:
14  *
15  *     Redistribution and use in source and binary forms, with or
16  *     without modification, are permitted provided that the following
17  *     conditions are met:
18  *
19  *      - Redistributions of source code must retain the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer.
22  *
23  *      - Redistributions in binary form must reproduce the above
24  *        copyright notice, this list of conditions and the following
25  *        disclaimer in the documentation and/or other materials
26  *        provided with the distribution.
27  *
28  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
29  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
30  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
31  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
32  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
33  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35  * SOFTWARE.
36  */
37 
38 #include <sys/cdefs.h>
39 #define	LINUXKPI_PARAM_PREFIX ibcore_
40 
41 #include <sys/priv.h>
42 
43 #include <linux/file.h>
44 #include <linux/fs.h>
45 #include <linux/slab.h>
46 #include <linux/sched.h>
47 
48 #include <linux/uaccess.h>
49 
50 #include <rdma/uverbs_types.h>
51 #include <rdma/uverbs_std_types.h>
52 #include "rdma_core.h"
53 
54 #include "uverbs.h"
55 #include "core_priv.h"
56 
57 /*
58  * Copy a response to userspace. If the provided 'resp' is larger than the
59  * user buffer it is silently truncated. If the user provided a larger buffer
60  * then the trailing portion is zero filled.
61  *
62  * These semantics are intended to support future extension of the output
63  * structures.
64  */
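/*
 * For example (an illustrative sketch, not an additional rule): if the
 * kernel fills a 16 byte 'resp' but userspace supplied a 24 byte buffer,
 * the 16 bytes are copied and the trailing 8 bytes are cleared, so newer
 * userspace can detect unsupported fields by checking for zero.
 */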
65 static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp,
66 			   size_t resp_len)
67 {
68 	int ret;
69 
70 	if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
71 		return uverbs_copy_to_struct_or_zero(
72 			attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len);
73 
74 	if (copy_to_user(attrs->ucore.outbuf, resp,
75 			 min(attrs->ucore.outlen, resp_len)))
76 		return -EFAULT;
77 
78 	if (resp_len < attrs->ucore.outlen) {
79 		/*
80 		 * Zero fill any extra memory that user
81 		 * space might have provided.
82 		 */
83 		ret = clear_user(attrs->ucore.outbuf + resp_len,
84 				 attrs->ucore.outlen - resp_len);
85 		if (ret)
86 			return -EFAULT;
87 	}
88 
89 	return 0;
90 }
91 
92 /*
93  * Copy a request from userspace. If the provided 'req' is larger than the
94  * user buffer then the user buffer is zero extended into the 'req'. If 'req'
95  * is smaller than the user buffer then the uncopied bytes in the user buffer
96  * must be zero.
97  */
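/*
 * For example (illustrative only): an old userspace passing a 16 byte
 * command into a handler whose 'req' has grown to 24 bytes sees the last
 * 8 bytes of 'req' zeroed, while a newer userspace passing extra bytes the
 * kernel does not understand must leave them zero or the command fails
 * with -EOPNOTSUPP.
 */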
98 static int uverbs_request(struct uverbs_attr_bundle *attrs, void *req,
99 			  size_t req_len)
100 {
101 	if (copy_from_user(req, attrs->ucore.inbuf,
102 			   min(attrs->ucore.inlen, req_len)))
103 		return -EFAULT;
104 
105 	if (attrs->ucore.inlen < req_len) {
106 		memset((u8 *)req + attrs->ucore.inlen, 0,
107 		       req_len - attrs->ucore.inlen);
108 	} else if (attrs->ucore.inlen > req_len) {
109 		if (!ib_is_buffer_cleared(attrs->ucore.inbuf + req_len,
110 					  attrs->ucore.inlen - req_len))
111 			return -EOPNOTSUPP;
112 	}
113 	return 0;
114 }
115 
116 /*
117  * Generate the value for the 'response_length' protocol used by write_ex.
118  * This is the number of bytes the kernel actually wrote. Userspace can use
119  * this to detect what structure members in the response the kernel
120  * understood.
121  */
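/*
 * For example, a 32 byte kernel response written into a 24 byte user buffer
 * reports a response_length of 24, so the value never exceeds the buffer
 * that userspace actually provided.
 */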
122 static u32 uverbs_response_length(struct uverbs_attr_bundle *attrs,
123 				  size_t resp_len)
124 {
125 	return min_t(size_t, attrs->ucore.outlen, resp_len);
126 }
127 
128 /*
129  * The iterator version of the request interface is for handlers that need to
130  * step over a flex array at the end of a command header.
131  */
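/*
 * Typical use (an illustrative sketch only): a handler consumes the fixed
 * header with uverbs_request_start(), walks the trailing flex array with
 * uverbs_request_next() or uverbs_request_next_ptr(), and then calls
 * uverbs_request_finish() to insist that any remaining tail bytes are zero:
 *
 *	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
 *	for (i = 0; !ret && i < cmd.count; i++)
 *		ret = uverbs_request_next(&iter, &elem, sizeof(elem));
 *	if (!ret)
 *		ret = uverbs_request_finish(&iter);
 */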
132 struct uverbs_req_iter {
133 	const u8 __user *cur;
134 	const u8 __user *end;
135 };
136 
137 static int uverbs_request_start(struct uverbs_attr_bundle *attrs,
138 				struct uverbs_req_iter *iter,
139 				void *req,
140 				size_t req_len)
141 {
142 	if (attrs->ucore.inlen < req_len)
143 		return -ENOSPC;
144 
145 	if (copy_from_user(req, attrs->ucore.inbuf, req_len))
146 		return -EFAULT;
147 
148 	iter->cur = attrs->ucore.inbuf + req_len;
149 	iter->end = attrs->ucore.inbuf + attrs->ucore.inlen;
150 	return 0;
151 }
152 
153 static int uverbs_request_next(struct uverbs_req_iter *iter, void *val,
154 			       size_t len)
155 {
156 	if (iter->cur + len > iter->end)
157 		return -ENOSPC;
158 
159 	if (copy_from_user(val, iter->cur, len))
160 		return -EFAULT;
161 
162 	iter->cur += len;
163 	return 0;
164 }
165 
166 static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
167 						  size_t len)
168 {
169 	const void __user *res = iter->cur;
170 
171 	if (iter->cur + len > iter->end)
172 		return (void __force __user *)ERR_PTR(-ENOSPC);
173 	iter->cur += len;
174 	return res;
175 }
176 
177 static int uverbs_request_finish(struct uverbs_req_iter *iter)
178 {
179 	if (!ib_is_buffer_cleared(iter->cur, iter->end - iter->cur))
180 		return -EOPNOTSUPP;
181 	return 0;
182 }
183 
184 /*
185  * When calling a destroy function during an error unwind we need to pass in
186  * the udata that is sanitized of all user arguments, i.e. from the driver's
187  * perspective it looks like no udata was passed.
188  */
189 struct ib_udata *uverbs_get_cleared_udata(struct uverbs_attr_bundle *attrs)
190 {
191 	attrs->driver_udata = (struct ib_udata){};
192 	return &attrs->driver_udata;
193 }
194 
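/*
 * Resolve a completion channel fd to its event file. An extra uobject
 * reference is taken before the fd read lock is dropped, so the returned
 * file stays pinned until the caller releases that reference.
 */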
195 static struct ib_uverbs_completion_event_file *
196 _ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
197 {
198 	struct ib_uobject *uobj = ufd_get_read(UVERBS_OBJECT_COMP_CHANNEL,
199 					       fd, attrs);
200 
201 	if (IS_ERR(uobj))
202 		return (void *)uobj;
203 
204 	uverbs_uobject_get(uobj);
205 	uobj_put_read(uobj);
206 
207 	return container_of(uobj, struct ib_uverbs_completion_event_file,
208 			    uobj);
209 }
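/* Wrapper that adds a compile-time check that callers pass a 32-bit fd. */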
210 #define ib_uverbs_lookup_comp_file(_fd, _ufile) ({			\
211 	CTASSERT(sizeof(_fd) == sizeof(s32));				\
212 	_ib_uverbs_lookup_comp_file(_fd, _ufile);			\
213 })
214 
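/*
 * ucontext creation is split in two steps: ib_alloc_ucontext() allocates the
 * driver object and attaches it to the attribute bundle, and
 * ib_init_ucontext() below invokes the driver and publishes the context on
 * the file only once setup has fully completed.
 */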
215 int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs)
216 {
217 	struct ib_uverbs_file *ufile = attrs->ufile;
218 	struct ib_ucontext *ucontext;
219 	struct ib_device *ib_dev;
220 
221 	ib_dev = srcu_dereference(ufile->device->ib_dev,
222 				  &ufile->device->disassociate_srcu);
223 	if (!ib_dev)
224 		return -EIO;
225 
226 	ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
227 	if (!ucontext)
228 		return -ENOMEM;
229 
230 	ucontext->device = ib_dev;
231 	ucontext->ufile = ufile;
232 	xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);
233 	attrs->context = ucontext;
234 	return 0;
235 }
236 
237 int ib_init_ucontext(struct uverbs_attr_bundle *attrs)
238 {
239 	struct ib_ucontext *ucontext = attrs->context;
240 	struct ib_uverbs_file *file = attrs->ufile;
241 	int ret;
242 
243 	if (!down_read_trylock(&file->hw_destroy_rwsem))
244 		return -EIO;
245 	mutex_lock(&file->ucontext_lock);
246 	if (file->ucontext) {
247 		ret = -EINVAL;
248 		goto err;
249 	}
250 
251 	ret = ucontext->device->alloc_ucontext(ucontext,
252 						   &attrs->driver_udata);
253 	if (ret)
254 		goto err_uncharge;
255 
256 	/*
257 	 * Make sure that ib_uverbs_get_ucontext() sees the pointer update
258 	 * only after all writes to setup the ucontext have completed
259 	 */
260 	atomic_store_rel_ptr((uintptr_t *)&file->ucontext, (uintptr_t)ucontext);
261 
262 	mutex_unlock(&file->ucontext_lock);
263 	up_read(&file->hw_destroy_rwsem);
264 	return 0;
265 
266 err_uncharge:
267 err:
268 	mutex_unlock(&file->ucontext_lock);
269 	up_read(&file->hw_destroy_rwsem);
270 	return ret;
271 }
272 
273 static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
274 {
275 	struct ib_uverbs_get_context_resp resp;
276 	struct ib_uverbs_get_context cmd;
277 	struct ib_device *ib_dev;
278 	struct ib_uobject *uobj;
279 	int ret;
280 
281 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
282 	if (ret)
283 		return ret;
284 
285 	ret = ib_alloc_ucontext(attrs);
286 	if (ret)
287 		return ret;
288 
289 	uobj = uobj_alloc(UVERBS_OBJECT_ASYNC_EVENT, attrs, &ib_dev);
290 	if (IS_ERR(uobj)) {
291 		ret = PTR_ERR(uobj);
292 		goto err_ucontext;
293 	}
294 
295 	resp = (struct ib_uverbs_get_context_resp){
296 		.num_comp_vectors = attrs->ufile->device->num_comp_vectors,
297 		.async_fd = uobj->id,
298 	};
299 	ret = uverbs_response(attrs, &resp, sizeof(resp));
300 	if (ret)
301 		goto err_uobj;
302 
303 	ret = ib_init_ucontext(attrs);
304 	if (ret)
305 		goto err_uobj;
306 
307 	ib_uverbs_init_async_event_file(
308 		container_of(uobj, struct ib_uverbs_async_event_file, uobj));
309 	rdma_alloc_commit_uobject(uobj, attrs);
310 	return 0;
311 
312 err_uobj:
313 	rdma_alloc_abort_uobject(uobj, attrs);
314 err_ucontext:
315 	kfree(attrs->context);
316 	attrs->context = NULL;
317 	return ret;
318 }
319 
320 static void copy_query_dev_fields(struct ib_ucontext *ucontext,
321 				  struct ib_uverbs_query_device_resp *resp,
322 				  struct ib_device_attr *attr)
323 {
324 	struct ib_device *ib_dev = ucontext->device;
325 
326 	resp->fw_ver		= attr->fw_ver;
327 	resp->node_guid		= ib_dev->node_guid;
328 	resp->sys_image_guid	= attr->sys_image_guid;
329 	resp->max_mr_size	= attr->max_mr_size;
330 	resp->page_size_cap	= attr->page_size_cap;
331 	resp->vendor_id		= attr->vendor_id;
332 	resp->vendor_part_id	= attr->vendor_part_id;
333 	resp->hw_ver		= attr->hw_ver;
334 	resp->max_qp		= attr->max_qp;
335 	resp->max_qp_wr		= attr->max_qp_wr;
336 	resp->device_cap_flags	= (u32)attr->device_cap_flags;
337 	resp->max_sge		= min(attr->max_send_sge, attr->max_recv_sge);
338 	resp->max_sge_rd	= attr->max_sge_rd;
339 	resp->max_cq		= attr->max_cq;
340 	resp->max_cqe		= attr->max_cqe;
341 	resp->max_mr		= attr->max_mr;
342 	resp->max_pd		= attr->max_pd;
343 	resp->max_qp_rd_atom	= attr->max_qp_rd_atom;
344 	resp->max_ee_rd_atom	= attr->max_ee_rd_atom;
345 	resp->max_res_rd_atom	= attr->max_res_rd_atom;
346 	resp->max_qp_init_rd_atom	= attr->max_qp_init_rd_atom;
347 	resp->max_ee_init_rd_atom	= attr->max_ee_init_rd_atom;
348 	resp->atomic_cap		= attr->atomic_cap;
349 	resp->max_ee			= attr->max_ee;
350 	resp->max_rdd			= attr->max_rdd;
351 	resp->max_mw			= attr->max_mw;
352 	resp->max_raw_ipv6_qp		= attr->max_raw_ipv6_qp;
353 	resp->max_raw_ethy_qp		= attr->max_raw_ethy_qp;
354 	resp->max_mcast_grp		= attr->max_mcast_grp;
355 	resp->max_mcast_qp_attach	= attr->max_mcast_qp_attach;
356 	resp->max_total_mcast_qp_attach	= attr->max_total_mcast_qp_attach;
357 	resp->max_ah			= attr->max_ah;
358 	resp->max_fmr			= attr->max_fmr;
359 	resp->max_map_per_fmr		= attr->max_map_per_fmr;
360 	resp->max_srq			= attr->max_srq;
361 	resp->max_srq_wr		= attr->max_srq_wr;
362 	resp->max_srq_sge		= attr->max_srq_sge;
363 	resp->max_pkeys			= attr->max_pkeys;
364 	resp->local_ca_ack_delay	= attr->local_ca_ack_delay;
365 	resp->phys_port_cnt		= ib_dev->phys_port_cnt;
366 }
367 
368 static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs)
369 {
370 	struct ib_uverbs_query_device      cmd;
371 	struct ib_uverbs_query_device_resp resp;
372 	struct ib_ucontext *ucontext;
373 	int ret;
374 
375 	ucontext = ib_uverbs_get_ucontext(attrs);
376 	if (IS_ERR(ucontext))
377 		return PTR_ERR(ucontext);
378 
379 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
380 	if (ret)
381 		return ret;
382 
383 	memset(&resp, 0, sizeof resp);
384 	copy_query_dev_fields(ucontext, &resp, &ucontext->device->attrs);
385 
386 	return uverbs_response(attrs, &resp, sizeof(resp));
387 }
388 
389 static int ib_uverbs_query_port(struct uverbs_attr_bundle *attrs)
390 {
391 	struct ib_uverbs_query_port      cmd;
392 	struct ib_uverbs_query_port_resp resp;
393 	struct ib_port_attr              attr;
394 	int                              ret;
395 	struct ib_ucontext *ucontext;
396 	struct ib_device *ib_dev;
397 
398 	ucontext = ib_uverbs_get_ucontext(attrs);
399 	if (IS_ERR(ucontext))
400 		return PTR_ERR(ucontext);
401 	ib_dev = ucontext->device;
402 
403 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
404 	if (ret)
405 		return ret;
406 
407 	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
408 	if (ret)
409 		return ret;
410 
411 	memset(&resp, 0, sizeof resp);
412 	copy_port_attr_to_resp(&attr, &resp, ib_dev, cmd.port_num);
413 
414 	return uverbs_response(attrs, &resp, sizeof(resp));
415 }
416 
417 static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
418 {
419 	struct ib_uverbs_alloc_pd      cmd;
420 	struct ib_uverbs_alloc_pd_resp resp;
421 	struct ib_uobject             *uobj;
422 	struct ib_pd                  *pd;
423 	int                            ret;
424 	struct ib_device *ib_dev;
425 
426 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
427 	if (ret)
428 		return ret;
429 
430 	uobj = uobj_alloc(UVERBS_OBJECT_PD, attrs, &ib_dev);
431 	if (IS_ERR(uobj))
432 		return PTR_ERR(uobj);
433 
434 	pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
435 	if (!pd) {
436 		ret = -ENOMEM;
437 		goto err;
438 	}
439 
440 	pd->device  = ib_dev;
441 	pd->uobject = uobj;
442 	pd->__internal_mr = NULL;
443 	atomic_set(&pd->usecnt, 0);
444 
445 	ret = ib_dev->alloc_pd(pd, &attrs->driver_udata);
446 	if (ret)
447 		goto err_alloc;
448 
449 	uobj->object = pd;
450 	memset(&resp, 0, sizeof resp);
451 	resp.pd_handle = uobj->id;
452 
453 	ret = uverbs_response(attrs, &resp, sizeof(resp));
454 	if (ret)
455 		goto err_copy;
456 
457 	rdma_alloc_commit_uobject(uobj, attrs);
458 	return 0;
459 
460 err_copy:
461 	ib_dealloc_pd_user(pd, uverbs_get_cleared_udata(attrs));
462 	pd = NULL;
463 err_alloc:
464 	kfree(pd);
465 err:
466 	uobj_alloc_abort(uobj, attrs);
467 	return ret;
468 }
469 
470 static int ib_uverbs_dealloc_pd(struct uverbs_attr_bundle *attrs)
471 {
472 	struct ib_uverbs_dealloc_pd cmd;
473 	int ret;
474 
475 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
476 	if (ret)
477 		return ret;
478 
479 	return uobj_perform_destroy(UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
480 }
481 
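/*
 * XRC domains opened through a file descriptor are shared between processes:
 * the rb-tree below maps the backing vnode to its ib_xrcd so every open of
 * the same file resolves to the same XRC domain. Each entry holds a vnode
 * reference until it is deleted.
 */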
482 struct xrcd_table_entry {
483 	struct rb_node  node;
484 	struct ib_xrcd *xrcd;
485 	struct vnode   *vnode;
486 };
487 
488 static int xrcd_table_insert(struct ib_uverbs_device *dev,
489 			    struct vnode *vnode,
490 			    struct ib_xrcd *xrcd)
491 {
492 	struct xrcd_table_entry *entry, *scan;
493 	struct rb_node **p = &dev->xrcd_tree.rb_node;
494 	struct rb_node *parent = NULL;
495 
496 	entry = kmalloc(sizeof *entry, GFP_KERNEL);
497 	if (!entry)
498 		return -ENOMEM;
499 
500 	entry->xrcd  = xrcd;
501 	entry->vnode = vnode;
502 
503 	while (*p) {
504 		parent = *p;
505 		scan = rb_entry(parent, struct xrcd_table_entry, node);
506 
507 		if ((uintptr_t)vnode < (uintptr_t)scan->vnode) {
508 			p = &(*p)->rb_left;
509 		} else if ((uintptr_t)vnode > (uintptr_t)scan->vnode) {
510 			p = &(*p)->rb_right;
511 		} else {
512 			kfree(entry);
513 			return -EEXIST;
514 		}
515 	}
516 
517 	rb_link_node(&entry->node, parent, p);
518 	rb_insert_color(&entry->node, &dev->xrcd_tree);
519 	vrefact(vnode);
520 	return 0;
521 }
522 
523 static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
524 						  struct vnode *vnode)
525 {
526 	struct xrcd_table_entry *entry;
527 	struct rb_node *p = dev->xrcd_tree.rb_node;
528 
529 	while (p) {
530 		entry = rb_entry(p, struct xrcd_table_entry, node);
531 
532 		if ((uintptr_t)vnode < (uintptr_t)entry->vnode)
533 			p = p->rb_left;
534 		else if ((uintptr_t)vnode > (uintptr_t)entry->vnode)
535 			p = p->rb_right;
536 		else
537 			return entry;
538 	}
539 
540 	return NULL;
541 }
542 
543 static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct vnode *vnode)
544 {
545 	struct xrcd_table_entry *entry;
546 
547 	entry = xrcd_table_search(dev, vnode);
548 	if (!entry)
549 		return NULL;
550 
551 	return entry->xrcd;
552 }
553 
554 static void xrcd_table_delete(struct ib_uverbs_device *dev,
555 			      struct vnode *vnode)
556 {
557 	struct xrcd_table_entry *entry;
558 
559 	entry = xrcd_table_search(dev, vnode);
560 	if (entry) {
561 		vrele(vnode);
562 		rb_erase(&entry->node, &dev->xrcd_tree);
563 		kfree(entry);
564 	}
565 }
566 
567 static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
568 {
569 	struct ib_uverbs_device *ibudev = attrs->ufile->device;
570 	struct ib_uverbs_open_xrcd	cmd;
571 	struct ib_uverbs_open_xrcd_resp	resp;
572 	struct ib_uxrcd_object         *obj;
573 	struct ib_xrcd                 *xrcd = NULL;
574 	struct vnode                   *vnode = NULL;
575 	int				ret = 0;
576 	int				new_xrcd = 0;
577 	struct ib_device *ib_dev;
578 
579 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
580 	if (ret)
581 		return ret;
582 
583 	mutex_lock(&ibudev->xrcd_tree_mutex);
584 
585 	if (cmd.fd != -1) {
586 		/* search for file descriptor */
587 		ret = -fgetvp(curthread, cmd.fd, &cap_no_rights, &vnode);
588 		if (ret != 0)
589 			goto err_tree_mutex_unlock;
590 
591 		xrcd = find_xrcd(ibudev, vnode);
592 		if (!xrcd && !(cmd.oflags & O_CREAT)) {
593 			/* no file descriptor. Need CREATE flag */
594 			ret = -EAGAIN;
595 			goto err_tree_mutex_unlock;
596 		}
597 
598 		if (xrcd && cmd.oflags & O_EXCL) {
599 			ret = -EINVAL;
600 			goto err_tree_mutex_unlock;
601 		}
602 	}
603 
604 	obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD, attrs,
605 						   &ib_dev);
606 	if (IS_ERR(obj)) {
607 		ret = PTR_ERR(obj);
608 		goto err_tree_mutex_unlock;
609 	}
610 
611 	if (!xrcd) {
612 		xrcd = ib_dev->alloc_xrcd(ib_dev, &attrs->driver_udata);
613 		if (IS_ERR(xrcd)) {
614 			ret = PTR_ERR(xrcd);
615 			goto err;
616 		}
617 
618 		xrcd->vnode   = vnode;
619 		xrcd->device  = ib_dev;
620 		atomic_set(&xrcd->usecnt, 0);
621 		mutex_init(&xrcd->tgt_qp_mutex);
622 		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
623 		new_xrcd = 1;
624 	}
625 
626 	atomic_set(&obj->refcnt, 0);
627 	obj->uobject.object = xrcd;
628 	memset(&resp, 0, sizeof resp);
629 	resp.xrcd_handle = obj->uobject.id;
630 
631 	if (vnode != NULL) {
632 		if (new_xrcd) {
633 			/* create new vnode/xrcd table entry */
634 			ret = xrcd_table_insert(ibudev, vnode, xrcd);
635 			if (ret)
636 				goto err_dealloc_xrcd;
637 		}
638 		atomic_inc(&xrcd->usecnt);
639 	}
640 
641 	ret = uverbs_response(attrs, &resp, sizeof(resp));
642 	if (ret)
643 		goto err_copy;
644 
645 	if (vnode != NULL)
646 		vrele(vnode);
647 
648 	mutex_unlock(&ibudev->xrcd_tree_mutex);
649 
650 	rdma_alloc_commit_uobject(&obj->uobject, attrs);
651 	return 0;
652 
653 err_copy:
654 	if (vnode != NULL) {
655 		if (new_xrcd)
656 			xrcd_table_delete(ibudev, vnode);
657 		atomic_dec(&xrcd->usecnt);
658 	}
659 
660 err_dealloc_xrcd:
661 	ib_dealloc_xrcd(xrcd, uverbs_get_cleared_udata(attrs));
662 
663 err:
664 	uobj_alloc_abort(&obj->uobject, attrs);
665 
666 err_tree_mutex_unlock:
667 	if (vnode != NULL)
668 		vrele(vnode);
669 
670 	mutex_unlock(&ibudev->xrcd_tree_mutex);
671 
672 	return ret;
673 }
674 
675 static int ib_uverbs_close_xrcd(struct uverbs_attr_bundle *attrs)
676 {
677 	struct ib_uverbs_close_xrcd cmd;
678 	int ret;
679 
680 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
681 	if (ret)
682 		return ret;
683 
684 	return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, attrs);
685 }
686 
687 int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
688 			   enum rdma_remove_reason why,
689 			   struct uverbs_attr_bundle *attrs)
690 {
691 	struct vnode *vnode;
692 	int ret;
693 	struct ib_uverbs_device *dev = attrs->ufile->device;
694 
695 	vnode = xrcd->vnode;
696 	if (vnode && !atomic_dec_and_test(&xrcd->usecnt))
697 		return 0;
698 
699 	ret = ib_dealloc_xrcd(xrcd, &attrs->driver_udata);
700 
701 	if (ib_is_destroy_retryable(ret, why, uobject)) {
702 		atomic_inc(&xrcd->usecnt);
703 		return ret;
704 	}
705 
706 	if (vnode)
707 		xrcd_table_delete(dev, vnode);
708 
709 	return ret;
710 }
711 
712 static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
713 {
714 	struct ib_uverbs_reg_mr      cmd;
715 	struct ib_uverbs_reg_mr_resp resp;
716 	struct ib_uobject           *uobj;
717 	struct ib_pd                *pd;
718 	struct ib_mr                *mr;
719 	int                          ret;
720 	struct ib_device *ib_dev;
721 
722 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
723 	if (ret)
724 		return ret;
725 
726 	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
727 		return -EINVAL;
728 
729 	ret = ib_check_mr_access(cmd.access_flags);
730 	if (ret)
731 		return ret;
732 
733 	uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev);
734 	if (IS_ERR(uobj))
735 		return PTR_ERR(uobj);
736 
737 	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
738 	if (!pd) {
739 		ret = -EINVAL;
740 		goto err_free;
741 	}
742 
743 	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
744 		if (!(pd->device->attrs.device_cap_flags &
745 		      IB_DEVICE_ON_DEMAND_PAGING)) {
746 			pr_debug("ODP support not available\n");
747 			ret = -EINVAL;
748 			goto err_put;
749 		}
750 	}
751 
752 	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
753 					 cmd.access_flags,
754 					 &attrs->driver_udata);
755 	if (IS_ERR(mr)) {
756 		ret = PTR_ERR(mr);
757 		goto err_put;
758 	}
759 
760 	mr->device  = pd->device;
761 	mr->pd      = pd;
762 	mr->type    = IB_MR_TYPE_USER;
763 	mr->dm	    = NULL;
764 	mr->sig_attrs = NULL;
765 	mr->uobject = uobj;
766 	atomic_inc(&pd->usecnt);
767 
768 	uobj->object = mr;
769 
770 	memset(&resp, 0, sizeof resp);
771 	resp.lkey      = mr->lkey;
772 	resp.rkey      = mr->rkey;
773 	resp.mr_handle = uobj->id;
774 
775 	ret = uverbs_response(attrs, &resp, sizeof(resp));
776 	if (ret)
777 		goto err_copy;
778 
779 	uobj_put_obj_read(pd);
780 
781 	rdma_alloc_commit_uobject(uobj, attrs);
782 	return 0;
783 
784 err_copy:
785 	ib_dereg_mr_user(mr, uverbs_get_cleared_udata(attrs));
786 
787 err_put:
788 	uobj_put_obj_read(pd);
789 
790 err_free:
791 	uobj_alloc_abort(uobj, attrs);
792 	return ret;
793 }
794 
795 static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
796 {
797 	struct ib_uverbs_rereg_mr      cmd;
798 	struct ib_uverbs_rereg_mr_resp resp;
799 	struct ib_pd                *pd = NULL;
800 	struct ib_mr                *mr;
801 	struct ib_pd		    *old_pd;
802 	int                          ret;
803 	struct ib_uobject	    *uobj;
804 
805 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
806 	if (ret)
807 		return ret;
808 
809 	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
810 		return -EINVAL;
811 
812 	if ((cmd.flags & IB_MR_REREG_TRANS) &&
813 	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
814 	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
815 			return -EINVAL;
816 
817 	uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
818 	if (IS_ERR(uobj))
819 		return PTR_ERR(uobj);
820 
821 	mr = uobj->object;
822 
823 	if (mr->dm) {
824 		ret = -EINVAL;
825 		goto put_uobjs;
826 	}
827 
828 	if (cmd.flags & IB_MR_REREG_ACCESS) {
829 		ret = ib_check_mr_access(cmd.access_flags);
830 		if (ret)
831 			goto put_uobjs;
832 	}
833 
834 	if (cmd.flags & IB_MR_REREG_PD) {
835 		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
836 				       attrs);
837 		if (!pd) {
838 			ret = -EINVAL;
839 			goto put_uobjs;
840 		}
841 	}
842 
843 	old_pd = mr->pd;
844 	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
845 					    cmd.length, cmd.hca_va,
846 					    cmd.access_flags, pd,
847 					    &attrs->driver_udata);
848 	if (ret)
849 		goto put_uobj_pd;
850 
851 	if (cmd.flags & IB_MR_REREG_PD) {
852 		atomic_inc(&pd->usecnt);
853 		mr->pd = pd;
854 		atomic_dec(&old_pd->usecnt);
855 	}
856 
857 	memset(&resp, 0, sizeof(resp));
858 	resp.lkey      = mr->lkey;
859 	resp.rkey      = mr->rkey;
860 
861 	ret = uverbs_response(attrs, &resp, sizeof(resp));
862 
863 put_uobj_pd:
864 	if (cmd.flags & IB_MR_REREG_PD)
865 		uobj_put_obj_read(pd);
866 
867 put_uobjs:
868 	uobj_put_write(uobj);
869 
870 	return ret;
871 }
872 
873 static int ib_uverbs_dereg_mr(struct uverbs_attr_bundle *attrs)
874 {
875 	struct ib_uverbs_dereg_mr cmd;
876 	int ret;
877 
878 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
879 	if (ret)
880 		return ret;
881 
882 	return uobj_perform_destroy(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
883 }
884 
885 static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
886 {
887 	struct ib_uverbs_alloc_mw      cmd;
888 	struct ib_uverbs_alloc_mw_resp resp;
889 	struct ib_uobject             *uobj;
890 	struct ib_pd                  *pd;
891 	struct ib_mw                  *mw;
892 	int                            ret;
893 	struct ib_device *ib_dev;
894 
895 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
896 	if (ret)
897 		return ret;
898 
899 	uobj = uobj_alloc(UVERBS_OBJECT_MW, attrs, &ib_dev);
900 	if (IS_ERR(uobj))
901 		return PTR_ERR(uobj);
902 
903 	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
904 	if (!pd) {
905 		ret = -EINVAL;
906 		goto err_free;
907 	}
908 
909 	if (cmd.mw_type != IB_MW_TYPE_1 && cmd.mw_type != IB_MW_TYPE_2) {
910 		ret = -EINVAL;
911 		goto err_put;
912 	}
913 
914 	mw = pd->device->alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
915 	if (IS_ERR(mw)) {
916 		ret = PTR_ERR(mw);
917 		goto err_put;
918 	}
919 
920 	mw->device  = pd->device;
921 	mw->pd      = pd;
922 	mw->uobject = uobj;
923 	atomic_inc(&pd->usecnt);
924 
925 	uobj->object = mw;
926 
927 	memset(&resp, 0, sizeof(resp));
928 	resp.rkey      = mw->rkey;
929 	resp.mw_handle = uobj->id;
930 
931 	ret = uverbs_response(attrs, &resp, sizeof(resp));
932 	if (ret)
933 		goto err_copy;
934 
935 	uobj_put_obj_read(pd);
936 	rdma_alloc_commit_uobject(uobj, attrs);
937 	return 0;
938 
939 err_copy:
940 	uverbs_dealloc_mw(mw);
941 err_put:
942 	uobj_put_obj_read(pd);
943 err_free:
944 	uobj_alloc_abort(uobj, attrs);
945 	return ret;
946 }
947 
948 static int ib_uverbs_dealloc_mw(struct uverbs_attr_bundle *attrs)
949 {
950 	struct ib_uverbs_dealloc_mw cmd;
951 	int ret;
952 
953 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
954 	if (ret)
955 		return ret;
956 
957 	return uobj_perform_destroy(UVERBS_OBJECT_MW, cmd.mw_handle, attrs);
958 }
959 
960 static int ib_uverbs_create_comp_channel(struct uverbs_attr_bundle *attrs)
961 {
962 	struct ib_uverbs_create_comp_channel	   cmd;
963 	struct ib_uverbs_create_comp_channel_resp  resp;
964 	struct ib_uobject			  *uobj;
965 	struct ib_uverbs_completion_event_file	  *ev_file;
966 	struct ib_device *ib_dev;
967 	int ret;
968 
969 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
970 	if (ret)
971 		return ret;
972 
973 	uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, attrs, &ib_dev);
974 	if (IS_ERR(uobj))
975 		return PTR_ERR(uobj);
976 
977 	resp.fd = uobj->id;
978 
979 	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
980 			       uobj);
981 	ib_uverbs_init_event_queue(&ev_file->ev_queue);
982 
983 	ret = uverbs_response(attrs, &resp, sizeof(resp));
984 	if (ret) {
985 		uobj_alloc_abort(uobj, attrs);
986 		return ret;
987 	}
988 
989 	rdma_alloc_commit_uobject(uobj, attrs);
990 	return 0;
991 }
992 
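/*
 * Common implementation behind the CREATE_CQ and EX_CREATE_CQ commands; the
 * wrappers below convert the legacy request into the extended form first.
 */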
993 static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
994 				       struct ib_uverbs_ex_create_cq *cmd)
995 {
996 	struct ib_ucq_object           *obj;
997 	struct ib_uverbs_completion_event_file    *ev_file = NULL;
998 	struct ib_cq                   *cq;
999 	int                             ret;
1000 	struct ib_uverbs_ex_create_cq_resp resp;
1001 	struct ib_cq_init_attr attr = {};
1002 	struct ib_device *ib_dev;
1003 
1004 	if (cmd->comp_vector >= attrs->ufile->device->num_comp_vectors)
1005 		return ERR_PTR(-EINVAL);
1006 
1007 	obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ, attrs,
1008 						 &ib_dev);
1009 	if (IS_ERR(obj))
1010 		return obj;
1011 
1012 	if (cmd->comp_channel >= 0) {
1013 		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel, attrs);
1014 		if (IS_ERR(ev_file)) {
1015 			ret = PTR_ERR(ev_file);
1016 			goto err;
1017 		}
1018 	}
1019 
1020 	obj->uevent.uobject.user_handle = cmd->user_handle;
1021 	INIT_LIST_HEAD(&obj->comp_list);
1022 	INIT_LIST_HEAD(&obj->uevent.event_list);
1023 
1024 	attr.cqe = cmd->cqe;
1025 	attr.comp_vector = cmd->comp_vector;
1026 	attr.flags = cmd->flags;
1027 
1028 	cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);
1029 	if (!cq) {
1030 		ret = -ENOMEM;
1031 		goto err_file;
1032 	}
1033 	cq->device        = ib_dev;
1034 	cq->uobject       = obj;
1035 	cq->comp_handler  = ib_uverbs_comp_handler;
1036 	cq->event_handler = ib_uverbs_cq_event_handler;
1037 	cq->cq_context    = ev_file ? &ev_file->ev_queue : NULL;
1038 	atomic_set(&cq->usecnt, 0);
1039 
1040 	ret = ib_dev->create_cq(cq, &attr, &attrs->driver_udata);
1041 	if (ret)
1042 		goto err_free;
1043 
1044 	obj->uevent.uobject.object = cq;
1045 	memset(&resp, 0, sizeof resp);
1046 	resp.base.cq_handle = obj->uevent.uobject.id;
1047 	resp.base.cqe       = cq->cqe;
1048 	resp.response_length = uverbs_response_length(attrs, sizeof(resp));
1049 
1050 	ret = uverbs_response(attrs, &resp, sizeof(resp));
1051 	if (ret)
1052 		goto err_cb;
1053 
1054 	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
1055 	return obj;
1056 
1057 err_cb:
1058 	ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
1059 	cq = NULL;
1060 err_free:
1061 	kfree(cq);
1062 err_file:
1063 	if (ev_file)
1064 		ib_uverbs_release_ucq(ev_file, obj);
1065 
1066 err:
1067 	uobj_alloc_abort(&obj->uevent.uobject, attrs);
1068 
1069 	return ERR_PTR(ret);
1070 }
1071 
1072 static int ib_uverbs_create_cq(struct uverbs_attr_bundle *attrs)
1073 {
1074 	struct ib_uverbs_create_cq      cmd;
1075 	struct ib_uverbs_ex_create_cq	cmd_ex;
1076 	struct ib_ucq_object           *obj;
1077 	int ret;
1078 
1079 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1080 	if (ret)
1081 		return ret;
1082 
1083 	memset(&cmd_ex, 0, sizeof(cmd_ex));
1084 	cmd_ex.user_handle = cmd.user_handle;
1085 	cmd_ex.cqe = cmd.cqe;
1086 	cmd_ex.comp_vector = cmd.comp_vector;
1087 	cmd_ex.comp_channel = cmd.comp_channel;
1088 
1089 	obj = create_cq(attrs, &cmd_ex);
1090 	return PTR_ERR_OR_ZERO(obj);
1091 }
1092 
1093 static int ib_uverbs_ex_create_cq(struct uverbs_attr_bundle *attrs)
1094 {
1095 	struct ib_uverbs_ex_create_cq  cmd;
1096 	struct ib_ucq_object           *obj;
1097 	int ret;
1098 
1099 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1100 	if (ret)
1101 		return ret;
1102 
1103 	if (cmd.comp_mask)
1104 		return -EINVAL;
1105 
1106 	if (cmd.reserved)
1107 		return -EINVAL;
1108 
1109 	obj = create_cq(attrs, &cmd);
1110 	return PTR_ERR_OR_ZERO(obj);
1111 }
1112 
1113 static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
1114 {
1115 	struct ib_uverbs_resize_cq	cmd;
1116 	struct ib_uverbs_resize_cq_resp	resp = {};
1117 	struct ib_cq			*cq;
1118 	int				ret = -EINVAL;
1119 
1120 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1121 	if (ret)
1122 		return ret;
1123 
1124 	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
1125 	if (!cq)
1126 		return -EINVAL;
1127 
1128 	ret = cq->device->resize_cq(cq, cmd.cqe, &attrs->driver_udata);
1129 	if (ret)
1130 		goto out;
1131 
1132 	resp.cqe = cq->cqe;
1133 
1134 	ret = uverbs_response(attrs, &resp, sizeof(resp));
1135 out:
1136 	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
1137 				UVERBS_LOOKUP_READ);
1138 
1139 	return ret;
1140 }
1141 
1142 static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
1143 			   struct ib_wc *wc)
1144 {
1145 	struct ib_uverbs_wc tmp;
1146 
1147 	tmp.wr_id		= wc->wr_id;
1148 	tmp.status		= wc->status;
1149 	tmp.opcode		= wc->opcode;
1150 	tmp.vendor_err		= wc->vendor_err;
1151 	tmp.byte_len		= wc->byte_len;
1152 	tmp.ex.imm_data		= wc->ex.imm_data;
1153 	tmp.qp_num		= wc->qp->qp_num;
1154 	tmp.src_qp		= wc->src_qp;
1155 	tmp.wc_flags		= wc->wc_flags;
1156 	tmp.pkey_index		= wc->pkey_index;
1157 	tmp.slid		= wc->slid;
1158 	tmp.sl			= wc->sl;
1159 	tmp.dlid_path_bits	= wc->dlid_path_bits;
1160 	tmp.port_num		= wc->port_num;
1161 	tmp.reserved		= 0;
1162 
1163 	if (copy_to_user(dest, &tmp, sizeof tmp))
1164 		return -EFAULT;
1165 
1166 	return 0;
1167 }
1168 
1169 static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
1170 {
1171 	struct ib_uverbs_poll_cq       cmd;
1172 	struct ib_uverbs_poll_cq_resp  resp;
1173 	u8 __user                     *header_ptr;
1174 	u8 __user                     *data_ptr;
1175 	struct ib_cq                  *cq;
1176 	struct ib_wc                   wc;
1177 	int                            ret;
1178 
1179 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1180 	if (ret)
1181 		return ret;
1182 
1183 	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
1184 	if (!cq)
1185 		return -EINVAL;
1186 
1187 	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
1188 	header_ptr = attrs->ucore.outbuf;
1189 	data_ptr = header_ptr + sizeof resp;
1190 
1191 	memset(&resp, 0, sizeof resp);
1192 	while (resp.count < cmd.ne) {
1193 		ret = ib_poll_cq(cq, 1, &wc);
1194 		if (ret < 0)
1195 			goto out_put;
1196 		if (!ret)
1197 			break;
1198 
1199 		ret = copy_wc_to_user(cq->device, data_ptr, &wc);
1200 		if (ret)
1201 			goto out_put;
1202 
1203 		data_ptr += sizeof(struct ib_uverbs_wc);
1204 		++resp.count;
1205 	}
1206 
1207 	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
1208 		ret = -EFAULT;
1209 		goto out_put;
1210 	}
1211 	ret = 0;
1212 
1213 	if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
1214 		ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);
1215 
1216 out_put:
1217 	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
1218 				UVERBS_LOOKUP_READ);
1219 	return ret;
1220 }
1221 
1222 static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs)
1223 {
1224 	struct ib_uverbs_req_notify_cq cmd;
1225 	struct ib_cq                  *cq;
1226 	int ret;
1227 
1228 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1229 	if (ret)
1230 		return ret;
1231 
1232 	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
1233 	if (!cq)
1234 		return -EINVAL;
1235 
1236 	ib_req_notify_cq(cq, cmd.solicited_only ?
1237 			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
1238 
1239 	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
1240 				UVERBS_LOOKUP_READ);
1241 	return 0;
1242 }
1243 
1244 static int ib_uverbs_destroy_cq(struct uverbs_attr_bundle *attrs)
1245 {
1246 	struct ib_uverbs_destroy_cq      cmd;
1247 	struct ib_uverbs_destroy_cq_resp resp;
1248 	struct ib_uobject		*uobj;
1249 	struct ib_ucq_object        	*obj;
1250 	int ret;
1251 
1252 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1253 	if (ret)
1254 		return ret;
1255 
1256 	uobj = uobj_get_destroy(UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
1257 	if (IS_ERR(uobj))
1258 		return PTR_ERR(uobj);
1259 
1260 	obj = container_of(uobj, struct ib_ucq_object, uevent.uobject);
1261 	memset(&resp, 0, sizeof(resp));
1262 	resp.comp_events_reported  = obj->comp_events_reported;
1263 	resp.async_events_reported = obj->uevent.events_reported;
1264 
1265 	uobj_put_destroy(uobj);
1266 
1267 	return uverbs_response(attrs, &resp, sizeof(resp));
1268 }
1269 
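/*
 * Common implementation behind the CREATE_QP and EX_CREATE_QP commands. Note
 * that for XRC target QPs the pd_handle field actually carries an XRCD
 * handle, which is why it is looked up as an XRCD object in that path.
 */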
1270 static int create_qp(struct uverbs_attr_bundle *attrs,
1271 		     struct ib_uverbs_ex_create_qp *cmd)
1272 {
1273 	struct ib_uqp_object		*obj;
1274 	struct ib_device		*device;
1275 	struct ib_pd			*pd = NULL;
1276 	struct ib_xrcd			*xrcd = NULL;
1277 	struct ib_uobject		*xrcd_uobj = ERR_PTR(-ENOENT);
1278 	struct ib_cq			*scq = NULL, *rcq = NULL;
1279 	struct ib_srq			*srq = NULL;
1280 	struct ib_qp			*qp;
1281 	struct ib_qp_init_attr		attr = {};
1282 	struct ib_uverbs_ex_create_qp_resp resp;
1283 	int				ret;
1284 	struct ib_rwq_ind_table *ind_tbl = NULL;
1285 	bool has_sq = true;
1286 	struct ib_device *ib_dev;
1287 
1288 	if (cmd->qp_type == IB_QPT_RAW_PACKET && priv_check(curthread, PRIV_NET_RAW) != 0)
1289 		return -EPERM;
1290 
1291 	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
1292 						 &ib_dev);
1293 	if (IS_ERR(obj))
1294 		return PTR_ERR(obj);
1295 	obj->uxrcd = NULL;
1296 	obj->uevent.uobject.user_handle = cmd->user_handle;
1297 	mutex_init(&obj->mcast_lock);
1298 
1299 	if (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE) {
1300 		ind_tbl = uobj_get_obj_read(rwq_ind_table,
1301 					    UVERBS_OBJECT_RWQ_IND_TBL,
1302 					    cmd->rwq_ind_tbl_handle, attrs);
1303 		if (!ind_tbl) {
1304 			ret = -EINVAL;
1305 			goto err_put;
1306 		}
1307 
1308 		attr.rwq_ind_tbl = ind_tbl;
1309 	}
1310 
1311 	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
1312 		ret = -EINVAL;
1313 		goto err_put;
1314 	}
1315 
1316 	if (ind_tbl && !cmd->max_send_wr)
1317 		has_sq = false;
1318 
1319 	if (cmd->qp_type == IB_QPT_XRC_TGT) {
1320 		xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle,
1321 					  attrs);
1322 
1323 		if (IS_ERR(xrcd_uobj)) {
1324 			ret = -EINVAL;
1325 			goto err_put;
1326 		}
1327 
1328 		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
1329 		if (!xrcd) {
1330 			ret = -EINVAL;
1331 			goto err_put;
1332 		}
1333 		device = xrcd->device;
1334 	} else {
1335 		if (cmd->qp_type == IB_QPT_XRC_INI) {
1336 			cmd->max_recv_wr = 0;
1337 			cmd->max_recv_sge = 0;
1338 		} else {
1339 			if (cmd->is_srq) {
1340 				srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ,
1341 							cmd->srq_handle, attrs);
1342 				if (!srq || srq->srq_type == IB_SRQT_XRC) {
1343 					ret = -EINVAL;
1344 					goto err_put;
1345 				}
1346 			}
1347 
1348 			if (!ind_tbl) {
1349 				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
1350 					rcq = uobj_get_obj_read(
1351 						cq, UVERBS_OBJECT_CQ,
1352 						cmd->recv_cq_handle, attrs);
1353 					if (!rcq) {
1354 						ret = -EINVAL;
1355 						goto err_put;
1356 					}
1357 				}
1358 			}
1359 		}
1360 
1361 		if (has_sq)
1362 			scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
1363 						cmd->send_cq_handle, attrs);
1364 		if (!ind_tbl)
1365 			rcq = rcq ?: scq;
1366 		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
1367 				       attrs);
1368 		if (!pd || (!scq && has_sq)) {
1369 			ret = -EINVAL;
1370 			goto err_put;
1371 		}
1372 
1373 		device = pd->device;
1374 	}
1375 
1376 	attr.event_handler = ib_uverbs_qp_event_handler;
1377 	attr.send_cq       = scq;
1378 	attr.recv_cq       = rcq;
1379 	attr.srq           = srq;
1380 	attr.xrcd	   = xrcd;
1381 	attr.sq_sig_type   = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
1382 					      IB_SIGNAL_REQ_WR;
1383 	attr.qp_type       = cmd->qp_type;
1384 	attr.create_flags  = 0;
1385 
1386 	attr.cap.max_send_wr     = cmd->max_send_wr;
1387 	attr.cap.max_recv_wr     = cmd->max_recv_wr;
1388 	attr.cap.max_send_sge    = cmd->max_send_sge;
1389 	attr.cap.max_recv_sge    = cmd->max_recv_sge;
1390 	attr.cap.max_inline_data = cmd->max_inline_data;
1391 
1392 	INIT_LIST_HEAD(&obj->uevent.event_list);
1393 	INIT_LIST_HEAD(&obj->mcast_list);
1394 
1395 	attr.create_flags = cmd->create_flags;
1396 	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
1397 				IB_QP_CREATE_CROSS_CHANNEL |
1398 				IB_QP_CREATE_MANAGED_SEND |
1399 				IB_QP_CREATE_MANAGED_RECV |
1400 				IB_QP_CREATE_SCATTER_FCS |
1401 				IB_QP_CREATE_CVLAN_STRIPPING |
1402 				IB_QP_CREATE_SOURCE_QPN |
1403 				IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
1404 		ret = -EINVAL;
1405 		goto err_put;
1406 	}
1407 
1408 	if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
1409 		if (priv_check(curthread, PRIV_NET_RAW)) {
1410 			ret = -EPERM;
1411 			goto err_put;
1412 		}
1413 
1414 		attr.source_qpn = cmd->source_qpn;
1415 	}
1416 
1417 	if (cmd->qp_type == IB_QPT_XRC_TGT)
1418 		qp = ib_create_qp(pd, &attr);
1419 	else
1420 		qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata,
1421 				   obj);
1422 
1423 	if (IS_ERR(qp)) {
1424 		ret = PTR_ERR(qp);
1425 		goto err_put;
1426 	}
1427 
1428 	if (cmd->qp_type != IB_QPT_XRC_TGT) {
1429 		atomic_inc(&pd->usecnt);
1430 		if (attr.send_cq)
1431 			atomic_inc(&attr.send_cq->usecnt);
1432 		if (attr.recv_cq)
1433 			atomic_inc(&attr.recv_cq->usecnt);
1434 		if (attr.srq)
1435 			atomic_inc(&attr.srq->usecnt);
1436 		if (ind_tbl)
1437 			atomic_inc(&ind_tbl->usecnt);
1438 	} else {
1439 		/* It is done in _ib_create_qp for other QP types */
1440 		qp->uobject = obj;
1441 	}
1442 
1443 	obj->uevent.uobject.object = qp;
1444 
1445 	memset(&resp, 0, sizeof resp);
1446 	resp.base.qpn             = qp->qp_num;
1447 	resp.base.qp_handle       = obj->uevent.uobject.id;
1448 	resp.base.max_recv_sge    = attr.cap.max_recv_sge;
1449 	resp.base.max_send_sge    = attr.cap.max_send_sge;
1450 	resp.base.max_recv_wr     = attr.cap.max_recv_wr;
1451 	resp.base.max_send_wr     = attr.cap.max_send_wr;
1452 	resp.base.max_inline_data = attr.cap.max_inline_data;
1453 	resp.response_length = uverbs_response_length(attrs, sizeof(resp));
1454 
1455 	ret = uverbs_response(attrs, &resp, sizeof(resp));
1456 	if (ret)
1457 		goto err_cb;
1458 
1459 	if (xrcd) {
1460 		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
1461 					  uobject);
1462 		atomic_inc(&obj->uxrcd->refcnt);
1463 		uobj_put_read(xrcd_uobj);
1464 	}
1465 
1466 	if (pd)
1467 		uobj_put_obj_read(pd);
1468 	if (scq)
1469 		rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
1470 					UVERBS_LOOKUP_READ);
1471 	if (rcq && rcq != scq)
1472 		rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
1473 					UVERBS_LOOKUP_READ);
1474 	if (srq)
1475 		rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
1476 					UVERBS_LOOKUP_READ);
1477 	if (ind_tbl)
1478 		uobj_put_obj_read(ind_tbl);
1479 
1480 	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
1481 	return 0;
1482 err_cb:
1483 	ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
1484 
1485 err_put:
1486 	if (!IS_ERR(xrcd_uobj))
1487 		uobj_put_read(xrcd_uobj);
1488 	if (pd)
1489 		uobj_put_obj_read(pd);
1490 	if (scq)
1491 		rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
1492 					UVERBS_LOOKUP_READ);
1493 	if (rcq && rcq != scq)
1494 		rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
1495 					UVERBS_LOOKUP_READ);
1496 	if (srq)
1497 		rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
1498 					UVERBS_LOOKUP_READ);
1499 	if (ind_tbl)
1500 		uobj_put_obj_read(ind_tbl);
1501 
1502 	uobj_alloc_abort(&obj->uevent.uobject, attrs);
1503 	return ret;
1504 }
1505 
1506 static int ib_uverbs_create_qp(struct uverbs_attr_bundle *attrs)
1507 {
1508 	struct ib_uverbs_create_qp      cmd;
1509 	struct ib_uverbs_ex_create_qp	cmd_ex;
1510 	int ret;
1511 
1512 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1513 	if (ret)
1514 		return ret;
1515 
1516 	memset(&cmd_ex, 0, sizeof(cmd_ex));
1517 	cmd_ex.user_handle = cmd.user_handle;
1518 	cmd_ex.pd_handle = cmd.pd_handle;
1519 	cmd_ex.send_cq_handle = cmd.send_cq_handle;
1520 	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
1521 	cmd_ex.srq_handle = cmd.srq_handle;
1522 	cmd_ex.max_send_wr = cmd.max_send_wr;
1523 	cmd_ex.max_recv_wr = cmd.max_recv_wr;
1524 	cmd_ex.max_send_sge = cmd.max_send_sge;
1525 	cmd_ex.max_recv_sge = cmd.max_recv_sge;
1526 	cmd_ex.max_inline_data = cmd.max_inline_data;
1527 	cmd_ex.sq_sig_all = cmd.sq_sig_all;
1528 	cmd_ex.qp_type = cmd.qp_type;
1529 	cmd_ex.is_srq = cmd.is_srq;
1530 
1531 	return create_qp(attrs, &cmd_ex);
1532 }
1533 
1534 static int ib_uverbs_ex_create_qp(struct uverbs_attr_bundle *attrs)
1535 {
1536 	struct ib_uverbs_ex_create_qp cmd;
1537 	int ret;
1538 
1539 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1540 	if (ret)
1541 		return ret;
1542 
1543 	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
1544 		return -EINVAL;
1545 
1546 	if (cmd.reserved)
1547 		return -EINVAL;
1548 
1549 	return create_qp(attrs, &cmd);
1550 }
1551 
1552 static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
1553 {
1554 	struct ib_uverbs_open_qp        cmd;
1555 	struct ib_uverbs_create_qp_resp resp;
1556 	struct ib_uqp_object           *obj;
1557 	struct ib_xrcd		       *xrcd;
1558 	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
1559 	struct ib_qp                   *qp;
1560 	struct ib_qp_open_attr          attr = {};
1561 	int ret;
1562 	struct ib_device *ib_dev;
1563 
1564 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1565 	if (ret)
1566 		return ret;
1567 
1568 	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
1569 						 &ib_dev);
1570 	if (IS_ERR(obj))
1571 		return PTR_ERR(obj);
1572 
1573 	xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle, attrs);
1574 	if (IS_ERR(xrcd_uobj)) {
1575 		ret = -EINVAL;
1576 		goto err_put;
1577 	}
1578 
1579 	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
1580 	if (!xrcd) {
1581 		ret = -EINVAL;
1582 		goto err_xrcd;
1583 	}
1584 
1585 	attr.event_handler = ib_uverbs_qp_event_handler;
1586 	attr.qp_num        = cmd.qpn;
1587 	attr.qp_type       = cmd.qp_type;
1588 
1589 	INIT_LIST_HEAD(&obj->uevent.event_list);
1590 	INIT_LIST_HEAD(&obj->mcast_list);
1591 
1592 	qp = ib_open_qp(xrcd, &attr);
1593 	if (IS_ERR(qp)) {
1594 		ret = PTR_ERR(qp);
1595 		goto err_xrcd;
1596 	}
1597 
1598 	obj->uevent.uobject.object = qp;
1599 	obj->uevent.uobject.user_handle = cmd.user_handle;
1600 
1601 	memset(&resp, 0, sizeof resp);
1602 	resp.qpn       = qp->qp_num;
1603 	resp.qp_handle = obj->uevent.uobject.id;
1604 
1605 	ret = uverbs_response(attrs, &resp, sizeof(resp));
1606 	if (ret)
1607 		goto err_destroy;
1608 
1609 	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
1610 	atomic_inc(&obj->uxrcd->refcnt);
1611 	qp->uobject = obj;
1612 	uobj_put_read(xrcd_uobj);
1613 
1614 	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
1615 	return 0;
1616 
1617 err_destroy:
1618 	ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
1619 err_xrcd:
1620 	uobj_put_read(xrcd_uobj);
1621 err_put:
1622 	uobj_alloc_abort(&obj->uevent.uobject, attrs);
1623 	return ret;
1624 }
1625 
1626 static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
1627 				   struct ib_ah_attr *rdma_attr)
1628 {
1629 	uverb_attr->dlid              = rdma_attr->dlid;
1630 	uverb_attr->sl                = rdma_attr->sl;
1631 	uverb_attr->src_path_bits     = rdma_attr->src_path_bits;
1632 	uverb_attr->static_rate       = rdma_attr->static_rate;
1633 	uverb_attr->is_global         = !!(rdma_attr->ah_flags & IB_AH_GRH);
1634 	if (uverb_attr->is_global) {
1635 		const struct ib_global_route *grh = &rdma_attr->grh;
1636 
1637 		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
1638 		uverb_attr->flow_label        = grh->flow_label;
1639 		uverb_attr->sgid_index        = grh->sgid_index;
1640 		uverb_attr->hop_limit         = grh->hop_limit;
1641 		uverb_attr->traffic_class     = grh->traffic_class;
1642 	}
1643 	uverb_attr->port_num          = rdma_attr->port_num;
1644 }
1645 
1646 static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs)
1647 {
1648 	struct ib_uverbs_query_qp      cmd;
1649 	struct ib_uverbs_query_qp_resp resp;
1650 	struct ib_qp                   *qp;
1651 	struct ib_qp_attr              *attr;
1652 	struct ib_qp_init_attr         *init_attr;
1653 	int                            ret;
1654 
1655 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1656 	if (ret)
1657 		return ret;
1658 
1659 	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
1660 	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
1661 	if (!attr || !init_attr) {
1662 		ret = -ENOMEM;
1663 		goto out;
1664 	}
1665 
1666 	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
1667 	if (!qp) {
1668 		ret = -EINVAL;
1669 		goto out;
1670 	}
1671 
1672 	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
1673 
1674 	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
1675 				UVERBS_LOOKUP_READ);
1676 
1677 	if (ret)
1678 		goto out;
1679 
1680 	memset(&resp, 0, sizeof resp);
1681 
1682 	resp.qp_state               = attr->qp_state;
1683 	resp.cur_qp_state           = attr->cur_qp_state;
1684 	resp.path_mtu               = attr->path_mtu;
1685 	resp.path_mig_state         = attr->path_mig_state;
1686 	resp.qkey                   = attr->qkey;
1687 	resp.rq_psn                 = attr->rq_psn;
1688 	resp.sq_psn                 = attr->sq_psn;
1689 	resp.dest_qp_num            = attr->dest_qp_num;
1690 	resp.qp_access_flags        = attr->qp_access_flags;
1691 	resp.pkey_index             = attr->pkey_index;
1692 	resp.alt_pkey_index         = attr->alt_pkey_index;
1693 	resp.sq_draining            = attr->sq_draining;
1694 	resp.max_rd_atomic          = attr->max_rd_atomic;
1695 	resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
1696 	resp.min_rnr_timer          = attr->min_rnr_timer;
1697 	resp.port_num               = attr->port_num;
1698 	resp.timeout                = attr->timeout;
1699 	resp.retry_cnt              = attr->retry_cnt;
1700 	resp.rnr_retry              = attr->rnr_retry;
1701 	resp.alt_port_num           = attr->alt_port_num;
1702 	resp.alt_timeout            = attr->alt_timeout;
1703 
1704 	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
1705 	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);
1706 
1707 	resp.max_send_wr            = init_attr->cap.max_send_wr;
1708 	resp.max_recv_wr            = init_attr->cap.max_recv_wr;
1709 	resp.max_send_sge           = init_attr->cap.max_send_sge;
1710 	resp.max_recv_sge           = init_attr->cap.max_recv_sge;
1711 	resp.max_inline_data        = init_attr->cap.max_inline_data;
1712 	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1713 
1714 	ret = uverbs_response(attrs, &resp, sizeof(resp));
1715 
1716 out:
1717 	kfree(attr);
1718 	kfree(init_attr);
1719 
1720 	return ret;
1721 }
1722 
1723 /* Remove ignored fields set in the attribute mask */
1724 static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
1725 {
1726 	switch (qp_type) {
1727 	case IB_QPT_XRC_INI:
1728 		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
1729 	case IB_QPT_XRC_TGT:
1730 		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
1731 				IB_QP_RNR_RETRY);
1732 	default:
1733 		return mask;
1734 	}
1735 }
1736 
1737 static void copy_ah_attr_from_uverbs(struct ib_device *dev,
1738 				     struct ib_ah_attr *rdma_attr,
1739 				     struct ib_uverbs_qp_dest *uverb_attr)
1740 {
1741 	if (uverb_attr->is_global) {
1742 		struct ib_global_route *grh = &rdma_attr->grh;
1743 
1744 		grh->flow_label = uverb_attr->flow_label;
1745 		grh->sgid_index = uverb_attr->sgid_index;
1746 		grh->hop_limit = uverb_attr->hop_limit;
1747 		grh->traffic_class = uverb_attr->traffic_class;
1748 		memcpy(grh->dgid.raw, uverb_attr->dgid, sizeof(grh->dgid));
1749 		rdma_attr->ah_flags = IB_AH_GRH;
1750 	} else {
1751 		rdma_attr->ah_flags = 0;
1752 	}
1753 	rdma_attr->dlid = uverb_attr->dlid;
1754 	rdma_attr->sl = uverb_attr->sl;
1755 	rdma_attr->src_path_bits = uverb_attr->src_path_bits;
1756 	rdma_attr->static_rate = uverb_attr->static_rate;
1757 	rdma_attr->port_num = uverb_attr->port_num;
1758 }
1759 
1760 static int modify_qp(struct uverbs_attr_bundle *attrs,
1761 		     struct ib_uverbs_ex_modify_qp *cmd)
1762 {
1763 	struct ib_qp_attr *attr;
1764 	struct ib_qp *qp;
1765 	int ret;
1766 
1767 	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
1768 	if (!attr)
1769 		return -ENOMEM;
1770 
1771 	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle,
1772 			       attrs);
1773 	if (!qp) {
1774 		ret = -EINVAL;
1775 		goto out;
1776 	}
1777 
1778 	if ((cmd->base.attr_mask & IB_QP_PORT) &&
1779 	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
1780 		ret = -EINVAL;
1781 		goto release_qp;
1782 	}
1783 
1784 	if ((cmd->base.attr_mask & IB_QP_AV)) {
1785 		if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
1786 			ret = -EINVAL;
1787 			goto release_qp;
1788 		}
1789 
1790 		if (cmd->base.attr_mask & IB_QP_STATE &&
1791 		    cmd->base.qp_state == IB_QPS_RTR) {
1792 		/* We are in INIT->RTR TRANSITION (if we are not,
1793 		 * this transition will be rejected in subsequent checks).
1794 		 * In the INIT->RTR transition, we cannot have IB_QP_PORT set,
1795 		 * but the IB_QP_STATE flag is required.
1796 		 *
1797 		 * Since kernel 3.14 (commit dbf727de7440), the uverbs driver,
1798 		 * when IB_QP_AV is set, has required inclusion of a valid
1799 		 * port number in the primary AV. (AVs are created and handled
1800 		 * differently for infiniband and ethernet (RoCE) ports).
1801 		 *
1802 		 * Check the port number included in the primary AV against
1803 		 * the port number in the qp struct, which was set (and saved)
1804 		 * in the RST->INIT transition.
1805 		 */
1806 			if (cmd->base.dest.port_num != qp->real_qp->port) {
1807 				ret = -EINVAL;
1808 				goto release_qp;
1809 			}
1810 		} else {
1811 		/* We are in SQD->SQD. (If we are not, this transition will
1812 		 * be rejected later in the verbs layer checks).
1813 		 * Check for both IB_QP_PORT and IB_QP_AV, these can be set
1814 		 * together in the SQD->SQD transition.
1815 		 *
1816 		 * If only IB_QP_AV was set, add in IB_QP_PORT as well (the
1817 		 * verbs layer driver does not track primary port changes
1818 		 * resulting from path migration. Thus, in SQD, if the primary
1819 		 * AV is modified, the primary port should also be modified).
1820 		 *
1821 		 * Note that in this transition, the IB_QP_STATE flag
1822 		 * is not allowed.
1823 		 */
1824 			if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
1825 			     == (IB_QP_AV | IB_QP_PORT)) &&
1826 			    cmd->base.port_num != cmd->base.dest.port_num) {
1827 				ret = -EINVAL;
1828 				goto release_qp;
1829 			}
1830 			if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
1831 			    == IB_QP_AV) {
1832 				cmd->base.attr_mask |= IB_QP_PORT;
1833 				cmd->base.port_num = cmd->base.dest.port_num;
1834 			}
1835 		}
1836 	}
1837 
1838 	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
1839 	    (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
1840 	    !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) ||
1841 	    cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) {
1842 		ret = -EINVAL;
1843 		goto release_qp;
1844 	}
1845 
1846 	if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
1847 	    cmd->base.cur_qp_state > IB_QPS_ERR) ||
1848 	    (cmd->base.attr_mask & IB_QP_STATE &&
1849 	    cmd->base.qp_state > IB_QPS_ERR)) {
1850 		ret = -EINVAL;
1851 		goto release_qp;
1852 	}
1853 
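	/* Copy only the attributes selected by attr_mask into the kernel attr. */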
1854 	if (cmd->base.attr_mask & IB_QP_STATE)
1855 		attr->qp_state = cmd->base.qp_state;
1856 	if (cmd->base.attr_mask & IB_QP_CUR_STATE)
1857 		attr->cur_qp_state = cmd->base.cur_qp_state;
1858 	if (cmd->base.attr_mask & IB_QP_PATH_MTU)
1859 		attr->path_mtu = cmd->base.path_mtu;
1860 	if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
1861 		attr->path_mig_state = cmd->base.path_mig_state;
1862 	if (cmd->base.attr_mask & IB_QP_QKEY)
1863 		attr->qkey = cmd->base.qkey;
1864 	if (cmd->base.attr_mask & IB_QP_RQ_PSN)
1865 		attr->rq_psn = cmd->base.rq_psn;
1866 	if (cmd->base.attr_mask & IB_QP_SQ_PSN)
1867 		attr->sq_psn = cmd->base.sq_psn;
1868 	if (cmd->base.attr_mask & IB_QP_DEST_QPN)
1869 		attr->dest_qp_num = cmd->base.dest_qp_num;
1870 	if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
1871 		attr->qp_access_flags = cmd->base.qp_access_flags;
1872 	if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
1873 		attr->pkey_index = cmd->base.pkey_index;
1874 	if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1875 		attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
1876 	if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
1877 		attr->max_rd_atomic = cmd->base.max_rd_atomic;
1878 	if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1879 		attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
1880 	if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
1881 		attr->min_rnr_timer = cmd->base.min_rnr_timer;
1882 	if (cmd->base.attr_mask & IB_QP_PORT)
1883 		attr->port_num = cmd->base.port_num;
1884 	if (cmd->base.attr_mask & IB_QP_TIMEOUT)
1885 		attr->timeout = cmd->base.timeout;
1886 	if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
1887 		attr->retry_cnt = cmd->base.retry_cnt;
1888 	if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
1889 		attr->rnr_retry = cmd->base.rnr_retry;
1890 	if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
1891 		attr->alt_port_num = cmd->base.alt_port_num;
1892 		attr->alt_timeout = cmd->base.alt_timeout;
1893 		attr->alt_pkey_index = cmd->base.alt_pkey_index;
1894 	}
1895 	if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
1896 		attr->rate_limit = cmd->rate_limit;
1897 
1898 	if (cmd->base.attr_mask & IB_QP_AV)
1899 		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
1900 					 &cmd->base.dest);
1901 
1902 	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
1903 		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
1904 					 &cmd->base.alt_dest);
1905 
1906 	ret = ib_modify_qp_with_udata(qp, attr,
1907 				      modify_qp_mask(qp->qp_type,
1908 						     cmd->base.attr_mask),
1909 				      &attrs->driver_udata);
1910 
1911 release_qp:
1912 	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
1913 				UVERBS_LOOKUP_READ);
1914 out:
1915 	kfree(attr);
1916 
1917 	return ret;
1918 }
1919 
1920 static int ib_uverbs_modify_qp(struct uverbs_attr_bundle *attrs)
1921 {
1922 	struct ib_uverbs_ex_modify_qp cmd;
1923 	int ret;
1924 
1925 	ret = uverbs_request(attrs, &cmd.base, sizeof(cmd.base));
1926 	if (ret)
1927 		return ret;
1928 
1929 	if (cmd.base.attr_mask &
1930 	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
1931 		return -EOPNOTSUPP;
1932 
1933 	return modify_qp(attrs, &cmd);
1934 }
1935 
1936 static int ib_uverbs_ex_modify_qp(struct uverbs_attr_bundle *attrs)
1937 {
1938 	struct ib_uverbs_ex_modify_qp cmd;
1939 	struct ib_uverbs_ex_modify_qp_resp resp = {
1940 		.response_length = uverbs_response_length(attrs, sizeof(resp))
1941 	};
1942 	int ret;
1943 
1944 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1945 	if (ret)
1946 		return ret;
1947 
1948 	/*
1949 	 * Last bit is reserved for extending the attr_mask by
1950 	 * using another field.
1951 	 */
1952 	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));
1953 
1954 	if (cmd.base.attr_mask &
1955 	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
1956 		return -EOPNOTSUPP;
1957 
1958 	ret = modify_qp(attrs, &cmd);
1959 	if (ret)
1960 		return ret;
1961 
1962 	return uverbs_response(attrs, &resp, sizeof(resp));
1963 }
1964 
1965 static int ib_uverbs_destroy_qp(struct uverbs_attr_bundle *attrs)
1966 {
1967 	struct ib_uverbs_destroy_qp      cmd;
1968 	struct ib_uverbs_destroy_qp_resp resp;
1969 	struct ib_uobject		*uobj;
1970 	struct ib_uqp_object        	*obj;
1971 	int ret;
1972 
1973 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1974 	if (ret)
1975 		return ret;
1976 
1977 	uobj = uobj_get_destroy(UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
1978 	if (IS_ERR(uobj))
1979 		return PTR_ERR(uobj);
1980 
1981 	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
1982 	memset(&resp, 0, sizeof(resp));
1983 	resp.events_reported = obj->uevent.events_reported;
1984 
1985 	uobj_put_destroy(uobj);
1986 
1987 	return uverbs_response(attrs, &resp, sizeof(resp));
1988 }
1989 
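/*
 * Allocate a work request together with its scatter/gather list in a single
 * allocation:
 *
 *   [ WR header (wr_size, padded to ib_sge alignment) | num_sge * ib_sge ]
 *
 * Returns NULL if the total size would overflow.
 */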
1990 static void *alloc_wr(size_t wr_size, __u32 num_sge)
1991 {
1992 	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
1993 		       sizeof (struct ib_sge))
1994 		return NULL;
1995 
1996 	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
1997 			 num_sge * sizeof (struct ib_sge), GFP_KERNEL);
1998 }
1999 
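/*
 * Unmarshal a POST_SEND command. The request stream is laid out as the fixed
 * command header, followed by wr_count work requests of wqe_size bytes each,
 * followed by sge_count scatter/gather entries. Each user WR is converted
 * into the matching kernel ib_*_wr (UD, RDMA, atomic or plain send) before
 * the chain is handed to the driver's post_send().
 */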
2000 static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
2001 {
2002 	struct ib_uverbs_post_send      cmd;
2003 	struct ib_uverbs_post_send_resp resp;
2004 	struct ib_uverbs_send_wr       *user_wr;
2005 	struct ib_send_wr              *wr = NULL, *last, *next;
2006 	const struct ib_send_wr        *bad_wr;
2007 	struct ib_qp                   *qp;
2008 	int                             i, sg_ind;
2009 	int				is_ud;
2010 	int ret, ret2;
2011 	size_t                          next_size;
2012 	const struct ib_sge __user *sgls;
2013 	const void __user *wqes;
2014 	struct uverbs_req_iter iter;
2015 
2016 	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
2017 	if (ret)
2018 		return ret;
2019 	wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
2020 	if (IS_ERR(wqes))
2021 		return PTR_ERR(wqes);
2022 	sgls = uverbs_request_next_ptr(
2023 		&iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
2024 	if (IS_ERR(sgls))
2025 		return PTR_ERR(sgls);
2026 	ret = uverbs_request_finish(&iter);
2027 	if (ret)
2028 		return ret;
2029 
2030 	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2031 	if (!user_wr)
2032 		return -ENOMEM;
2033 
2034 	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
2035 	if (!qp) {
2036 		ret = -EINVAL;
2037 		goto out;
2038 	}
2039 
2040 	is_ud = qp->qp_type == IB_QPT_UD;
2041 	sg_ind = 0;
2042 	last = NULL;
2043 	for (i = 0; i < cmd.wr_count; ++i) {
2044 		if (copy_from_user(user_wr, (const u8 *)wqes + i * cmd.wqe_size,
2045 				   cmd.wqe_size)) {
2046 			ret = -EFAULT;
2047 			goto out_put;
2048 		}
2049 
2050 		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2051 			ret = -EINVAL;
2052 			goto out_put;
2053 		}
2054 
2055 		if (is_ud) {
2056 			struct ib_ud_wr *ud;
2057 
2058 			if (user_wr->opcode != IB_WR_SEND &&
2059 			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
2060 				ret = -EINVAL;
2061 				goto out_put;
2062 			}
2063 
2064 			next_size = sizeof(*ud);
2065 			ud = alloc_wr(next_size, user_wr->num_sge);
2066 			if (!ud) {
2067 				ret = -ENOMEM;
2068 				goto out_put;
2069 			}
2070 
2071 			ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
2072 						   user_wr->wr.ud.ah, attrs);
2073 			if (!ud->ah) {
2074 				kfree(ud);
2075 				ret = -EINVAL;
2076 				goto out_put;
2077 			}
2078 			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
2079 			ud->remote_qkey = user_wr->wr.ud.remote_qkey;
2080 
2081 			next = &ud->wr;
2082 		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2083 			   user_wr->opcode == IB_WR_RDMA_WRITE ||
2084 			   user_wr->opcode == IB_WR_RDMA_READ) {
2085 			struct ib_rdma_wr *rdma;
2086 
2087 			next_size = sizeof(*rdma);
2088 			rdma = alloc_wr(next_size, user_wr->num_sge);
2089 			if (!rdma) {
2090 				ret = -ENOMEM;
2091 				goto out_put;
2092 			}
2093 
2094 			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
2095 			rdma->rkey = user_wr->wr.rdma.rkey;
2096 
2097 			next = &rdma->wr;
2098 		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2099 			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2100 			struct ib_atomic_wr *atomic;
2101 
2102 			next_size = sizeof(*atomic);
2103 			atomic = alloc_wr(next_size, user_wr->num_sge);
2104 			if (!atomic) {
2105 				ret = -ENOMEM;
2106 				goto out_put;
2107 			}
2108 
2109 			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
2110 			atomic->compare_add = user_wr->wr.atomic.compare_add;
2111 			atomic->swap = user_wr->wr.atomic.swap;
2112 			atomic->rkey = user_wr->wr.atomic.rkey;
2113 
2114 			next = &atomic->wr;
2115 		} else if (user_wr->opcode == IB_WR_SEND ||
2116 			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2117 			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
2118 			next_size = sizeof(*next);
2119 			next = alloc_wr(next_size, user_wr->num_sge);
2120 			if (!next) {
2121 				ret = -ENOMEM;
2122 				goto out_put;
2123 			}
2124 		} else {
2125 			ret = -EINVAL;
2126 			goto out_put;
2127 		}
2128 
2129 		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2130 		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
2131 			next->ex.imm_data =
2132 					(__be32 __force) user_wr->ex.imm_data;
2133 		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
2134 			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
2135 		}
2136 
2137 		if (!last)
2138 			wr = next;
2139 		else
2140 			last->next = next;
2141 		last = next;
2142 
2143 		next->next       = NULL;
2144 		next->wr_id      = user_wr->wr_id;
2145 		next->num_sge    = user_wr->num_sge;
2146 		next->opcode     = user_wr->opcode;
2147 		next->send_flags = user_wr->send_flags;
2148 
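		/*
		 * The scatter/gather list lives in the same allocation as the
		 * WR, immediately after the ib_sge-aligned header set up by
		 * alloc_wr() above.
		 */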
2149 		if (next->num_sge) {
2150 			next->sg_list = (void *)((char *)next +
2151 				ALIGN(next_size, sizeof(struct ib_sge)));
2152 			if (copy_from_user(next->sg_list, sgls + sg_ind,
2153 					   next->num_sge *
2154 						   sizeof(struct ib_sge))) {
2155 				ret = -EFAULT;
2156 				goto out_put;
2157 			}
2158 			sg_ind += next->num_sge;
2159 		} else
2160 			next->sg_list = NULL;
2161 	}
2162 
2163 	resp.bad_wr = 0;
2164 	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
2165 	if (ret)
2166 		for (next = wr; next; next = next->next) {
2167 			++resp.bad_wr;
2168 			if (next == bad_wr)
2169 				break;
2170 		}
2171 
2172 	ret2 = uverbs_response(attrs, &resp, sizeof(resp));
2173 	if (ret2)
2174 		ret = ret2;
2175 
2176 out_put:
2177 	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
2178 				UVERBS_LOOKUP_READ);
2179 
2180 	while (wr) {
2181 		if (is_ud && ud_wr(wr)->ah)
2182 			uobj_put_obj_read(ud_wr(wr)->ah);
2183 		next = wr->next;
2184 		kfree(wr);
2185 		wr = next;
2186 	}
2187 
2188 out:
2189 	kfree(user_wr);
2190 
2191 	return ret;
2192 }
2193 
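/*
 * Build a kernel ib_recv_wr chain from the userspace request stream. Used by
 * both post_recv and post_srq_recv; the caller frees the returned chain.
 */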
2194 static struct ib_recv_wr *
2195 ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
2196 			  u32 wqe_size, u32 sge_count)
2197 {
2198 	struct ib_uverbs_recv_wr *user_wr;
2199 	struct ib_recv_wr        *wr = NULL, *last, *next;
2200 	int                       sg_ind;
2201 	int                       i;
2202 	int                       ret;
2203 	const struct ib_sge __user *sgls;
2204 	const void __user *wqes;
2205 
2206 	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2207 		return ERR_PTR(-EINVAL);
2208 
2209 	wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
2210 	if (IS_ERR(wqes))
2211 		return ERR_CAST(wqes);
2212 	sgls = uverbs_request_next_ptr(
2213 		iter, sge_count * sizeof(struct ib_uverbs_sge));
2214 	if (IS_ERR(sgls))
2215 		return ERR_CAST(sgls);
2216 	ret = uverbs_request_finish(iter);
2217 	if (ret)
2218 		return ERR_PTR(ret);
2219 
2220 	user_wr = kmalloc(wqe_size, GFP_KERNEL);
2221 	if (!user_wr)
2222 		return ERR_PTR(-ENOMEM);
2223 
2224 	sg_ind = 0;
2225 	last = NULL;
2226 	for (i = 0; i < wr_count; ++i) {
2227 		if (copy_from_user(user_wr, (const char *)wqes + i * wqe_size,
2228 				   wqe_size)) {
2229 			ret = -EFAULT;
2230 			goto err;
2231 		}
2232 
2233 		if (user_wr->num_sge + sg_ind > sge_count) {
2234 			ret = -EINVAL;
2235 			goto err;
2236 		}
2237 
2238 		if (user_wr->num_sge >=
2239 		    (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
2240 		    sizeof (struct ib_sge)) {
2241 			ret = -EINVAL;
2242 			goto err;
2243 		}
2244 
2245 		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2246 			       user_wr->num_sge * sizeof (struct ib_sge),
2247 			       GFP_KERNEL);
2248 		if (!next) {
2249 			ret = -ENOMEM;
2250 			goto err;
2251 		}
2252 
2253 		if (!last)
2254 			wr = next;
2255 		else
2256 			last->next = next;
2257 		last = next;
2258 
2259 		next->next       = NULL;
2260 		next->wr_id      = user_wr->wr_id;
2261 		next->num_sge    = user_wr->num_sge;
2262 
2263 		if (next->num_sge) {
2264 			next->sg_list = (void *)((char *)next +
2265 				ALIGN(sizeof *next, sizeof (struct ib_sge)));
2266 			if (copy_from_user(next->sg_list, sgls + sg_ind,
2267 					   next->num_sge *
2268 						   sizeof(struct ib_sge))) {
2269 				ret = -EFAULT;
2270 				goto err;
2271 			}
2272 			sg_ind += next->num_sge;
2273 		} else
2274 			next->sg_list = NULL;
2275 	}
2276 
2277 	kfree(user_wr);
2278 	return wr;
2279 
2280 err:
2281 	kfree(user_wr);
2282 
2283 	while (wr) {
2284 		next = wr->next;
2285 		kfree(wr);
2286 		wr = next;
2287 	}
2288 
2289 	return ERR_PTR(ret);
2290 }
2291 
2292 static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
2293 {
2294 	struct ib_uverbs_post_recv      cmd;
2295 	struct ib_uverbs_post_recv_resp resp;
2296 	struct ib_recv_wr              *wr, *next;
2297 	const struct ib_recv_wr        *bad_wr;
2298 	struct ib_qp                   *qp;
2299 	int ret, ret2;
2300 	struct uverbs_req_iter iter;
2301 
2302 	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
2303 	if (ret)
2304 		return ret;
2305 
2306 	wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
2307 				       cmd.sge_count);
2308 	if (IS_ERR(wr))
2309 		return PTR_ERR(wr);
2310 
2311 	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
2312 	if (!qp) {
2313 		ret = -EINVAL;
2314 		goto out;
2315 	}
2316 
2317 	resp.bad_wr = 0;
2318 	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
2319 
2320 	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
2321 				UVERBS_LOOKUP_READ);
2322 	if (ret) {
2323 		for (next = wr; next; next = next->next) {
2324 			++resp.bad_wr;
2325 			if (next == bad_wr)
2326 				break;
2327 		}
2328 	}
2329 
2330 	ret2 = uverbs_response(attrs, &resp, sizeof(resp));
2331 	if (ret2)
2332 		ret = ret2;
2333 out:
2334 	while (wr) {
2335 		next = wr->next;
2336 		kfree(wr);
2337 		wr = next;
2338 	}
2339 
2340 	return ret;
2341 }
2342 
2343 static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
2344 {
2345 	struct ib_uverbs_post_srq_recv      cmd;
2346 	struct ib_uverbs_post_srq_recv_resp resp;
2347 	struct ib_recv_wr                  *wr, *next;
2348 	const struct ib_recv_wr		   *bad_wr;
2349 	struct ib_srq                      *srq;
2350 	int ret, ret2;
2351 	struct uverbs_req_iter iter;
2352 
2353 	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
2354 	if (ret)
2355 		return ret;
2356 
2357 	wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
2358 				       cmd.sge_count);
2359 	if (IS_ERR(wr))
2360 		return PTR_ERR(wr);
2361 
2362 	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
2363 	if (!srq) {
2364 		ret = -EINVAL;
2365 		goto out;
2366 	}
2367 
2368 	resp.bad_wr = 0;
2369 	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
2370 
2371 	rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
2372 				UVERBS_LOOKUP_READ);
2373 
2374 	if (ret)
2375 		for (next = wr; next; next = next->next) {
2376 			++resp.bad_wr;
2377 			if (next == bad_wr)
2378 				break;
2379 		}
2380 
2381 	ret2 = uverbs_response(attrs, &resp, sizeof(resp));
2382 	if (ret2)
2383 		ret = ret2;
2384 
2385 out:
2386 	while (wr) {
2387 		next = wr->next;
2388 		kfree(wr);
2389 		wr = next;
2390 	}
2391 
2392 	return ret;
2393 }
2394 
2395 static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
2396 {
2397 	struct ib_uverbs_create_ah	 cmd;
2398 	struct ib_uverbs_create_ah_resp	 resp;
2399 	struct ib_uobject		*uobj;
2400 	struct ib_pd			*pd;
2401 	struct ib_ah			*ah;
2402 	struct ib_ah_attr		attr = {};
2403 	int ret;
2404 	struct ib_device *ib_dev;
2405 
2406 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2407 	if (ret)
2408 		return ret;
2409 
2410 	uobj = uobj_alloc(UVERBS_OBJECT_AH, attrs, &ib_dev);
2411 	if (IS_ERR(uobj))
2412 		return PTR_ERR(uobj);
2413 
2414 	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) {
2415 		ret = -EINVAL;
2416 		goto err;
2417 	}
2418 
2419 	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
2420 	if (!pd) {
2421 		ret = -EINVAL;
2422 		goto err;
2423 	}
2424 
2425 	attr.dlid = cmd.attr.dlid;
2426 	attr.sl = cmd.attr.sl;
2427 	attr.src_path_bits = cmd.attr.src_path_bits;
2428 	attr.static_rate = cmd.attr.static_rate;
2429 	attr.port_num = cmd.attr.port_num;
2430 
2431 	if (cmd.attr.is_global) {
2432 		struct ib_global_route *grh = &attr.grh;
2433 
2434 		grh->flow_label = cmd.attr.grh.flow_label;
2435 		grh->sgid_index = cmd.attr.grh.sgid_index;
2436 		grh->hop_limit = cmd.attr.grh.hop_limit;
2437 		grh->traffic_class = cmd.attr.grh.traffic_class;
2438 		memcpy(grh->dgid.raw, cmd.attr.grh.dgid, sizeof(grh->dgid));
2439 		attr.ah_flags = IB_AH_GRH;
2440 	} else {
2441 		attr.ah_flags = 0;
2442 	}
2443 
2444 	ah = ib_create_user_ah(pd, &attr, &attrs->driver_udata);
2445 	if (IS_ERR(ah)) {
2446 		ret = PTR_ERR(ah);
2447 		goto err_put;
2448 	}
2449 
2450 	ah->uobject  = uobj;
2451 	uobj->user_handle = cmd.user_handle;
2452 	uobj->object = ah;
2453 
2454 	resp.ah_handle = uobj->id;
2455 
2456 	ret = uverbs_response(attrs, &resp, sizeof(resp));
2457 	if (ret)
2458 		goto err_copy;
2459 
2460 	uobj_put_obj_read(pd);
2461 	rdma_alloc_commit_uobject(uobj, attrs);
2462 	return 0;
2463 
2464 err_copy:
2465 	ib_destroy_ah_user(ah, RDMA_DESTROY_AH_SLEEPABLE,
2466 			   uverbs_get_cleared_udata(attrs));
2467 
2468 err_put:
2469 	uobj_put_obj_read(pd);
2470 
2471 err:
2472 	uobj_alloc_abort(uobj, attrs);
2473 	return ret;
2474 }
2475 
2476 static int ib_uverbs_destroy_ah(struct uverbs_attr_bundle *attrs)
2477 {
2478 	struct ib_uverbs_destroy_ah cmd;
2479 	int ret;
2480 
2481 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2482 	if (ret)
2483 		return ret;
2484 
2485 	return uobj_perform_destroy(UVERBS_OBJECT_AH, cmd.ah_handle, attrs);
2486 }
2487 
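/*
 * Attach a QP to a multicast group. The (gid, lid) pair is tracked on the
 * QP's uobject so that a duplicate attach is a no-op and so the entry can be
 * found again by detach_mcast().
 */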
2488 static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs)
2489 {
2490 	struct ib_uverbs_attach_mcast cmd;
2491 	struct ib_qp                 *qp;
2492 	struct ib_uqp_object         *obj;
2493 	struct ib_uverbs_mcast_entry *mcast;
2494 	int                           ret;
2495 
2496 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2497 	if (ret)
2498 		return ret;
2499 
2500 	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
2501 	if (!qp)
2502 		return -EINVAL;
2503 
2504 	obj = qp->uobject;
2505 
2506 	mutex_lock(&obj->mcast_lock);
2507 	list_for_each_entry(mcast, &obj->mcast_list, list)
2508 		if (cmd.mlid == mcast->lid &&
2509 		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2510 			ret = 0;
2511 			goto out_put;
2512 		}
2513 
2514 	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2515 	if (!mcast) {
2516 		ret = -ENOMEM;
2517 		goto out_put;
2518 	}
2519 
2520 	mcast->lid = cmd.mlid;
2521 	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
2522 
2523 	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
2524 	if (!ret)
2525 		list_add_tail(&mcast->list, &obj->mcast_list);
2526 	else
2527 		kfree(mcast);
2528 
2529 out_put:
2530 	mutex_unlock(&obj->mcast_lock);
2531 	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
2532 				UVERBS_LOOKUP_READ);
2533 
2534 	return ret;
2535 }
2536 
2537 static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs)
2538 {
2539 	struct ib_uverbs_detach_mcast cmd;
2540 	struct ib_uqp_object         *obj;
2541 	struct ib_qp                 *qp;
2542 	struct ib_uverbs_mcast_entry *mcast;
2543 	int                           ret;
2544 	bool                          found = false;
2545 
2546 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2547 	if (ret)
2548 		return ret;
2549 
2550 	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
2551 	if (!qp)
2552 		return -EINVAL;
2553 
2554 	obj = qp->uobject;
2555 	mutex_lock(&obj->mcast_lock);
2556 
2557 	list_for_each_entry(mcast, &obj->mcast_list, list)
2558 		if (cmd.mlid == mcast->lid &&
2559 		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2560 			list_del(&mcast->list);
2561 			kfree(mcast);
2562 			found = true;
2563 			break;
2564 		}
2565 
2566 	if (!found) {
2567 		ret = -EINVAL;
2568 		goto out_put;
2569 	}
2570 
2571 	ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);
2572 
2573 out_put:
2574 	mutex_unlock(&obj->mcast_lock);
2575 	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
2576 				UVERBS_LOOKUP_READ);
2577 	return ret;
2578 }
2579 
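/*
 * Allocate bookkeeping for the counters and flow-action objects referenced by
 * up to 'num_specs' flow specifications, so that their reference counts can
 * be dropped again when the flow is destroyed.
 */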
2580 struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
2581 {
2582 	struct ib_uflow_resources *resources;
2583 
2584 	resources = kzalloc(sizeof(*resources), GFP_KERNEL);
2585 
2586 	if (!resources)
2587 		return NULL;
2588 
2589 	if (!num_specs)
2590 		goto out;
2591 
2592 	resources->counters =
2593 		kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL);
2594 	resources->collection =
2595 		kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL);
2596 
2597 	if (!resources->counters || !resources->collection)
2598 		goto err;
2599 
2600 out:
2601 	resources->max = num_specs;
2602 	return resources;
2603 
2604 err:
2605 	kfree(resources->counters);
2606 	kfree(resources);
2607 
2608 	return NULL;
2609 }
2610 EXPORT_SYMBOL(flow_resources_alloc);
2611 
2612 void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
2613 {
2614 	unsigned int i;
2615 
2616 	if (!uflow_res)
2617 		return;
2618 
2619 	for (i = 0; i < uflow_res->collection_num; i++)
2620 		atomic_dec(&uflow_res->collection[i]->usecnt);
2621 
2622 	for (i = 0; i < uflow_res->counters_num; i++)
2623 		atomic_dec(&uflow_res->counters[i]->usecnt);
2624 
2625 	kfree(uflow_res->collection);
2626 	kfree(uflow_res->counters);
2627 	kfree(uflow_res);
2628 }
2629 EXPORT_SYMBOL(ib_uverbs_flow_resources_free);
2630 
2631 void flow_resources_add(struct ib_uflow_resources *uflow_res,
2632 			enum ib_flow_spec_type type,
2633 			void *ibobj)
2634 {
2635 	WARN_ON(uflow_res->num >= uflow_res->max);
2636 
2637 	switch (type) {
2638 	case IB_FLOW_SPEC_ACTION_HANDLE:
2639 		atomic_inc(&((struct ib_flow_action *)ibobj)->usecnt);
2640 		uflow_res->collection[uflow_res->collection_num++] =
2641 			(struct ib_flow_action *)ibobj;
2642 		break;
2643 	case IB_FLOW_SPEC_ACTION_COUNT:
2644 		atomic_inc(&((struct ib_counters *)ibobj)->usecnt);
2645 		uflow_res->counters[uflow_res->counters_num++] =
2646 			(struct ib_counters *)ibobj;
2647 		break;
2648 	default:
2649 		WARN_ON(1);
2650 	}
2651 
2652 	uflow_res->num++;
2653 }
2654 EXPORT_SYMBOL(flow_resources_add);
2655 
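/*
 * Translate a userspace action spec (tag, drop, flow-action handle or
 * counters handle) into the kernel ib_flow_spec representation, taking a
 * reference on any uobject the spec points at via flow_resources_add().
 */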
2656 static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
2657 				       struct ib_uverbs_flow_spec *kern_spec,
2658 				       union ib_flow_spec *ib_spec,
2659 				       struct ib_uflow_resources *uflow_res)
2660 {
2661 	ib_spec->type = kern_spec->type;
2662 	switch (ib_spec->type) {
2663 	case IB_FLOW_SPEC_ACTION_TAG:
2664 		if (kern_spec->flow_tag.size !=
2665 		    sizeof(struct ib_uverbs_flow_spec_action_tag))
2666 			return -EINVAL;
2667 
2668 		ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
2669 		ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
2670 		break;
2671 	case IB_FLOW_SPEC_ACTION_DROP:
2672 		if (kern_spec->drop.size !=
2673 		    sizeof(struct ib_uverbs_flow_spec_action_drop))
2674 			return -EINVAL;
2675 
2676 		ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
2677 		break;
2678 	case IB_FLOW_SPEC_ACTION_HANDLE:
2679 		if (kern_spec->action.size !=
2680 		    sizeof(struct ib_uverbs_flow_spec_action_handle))
2681 			return -EOPNOTSUPP;
2682 		ib_spec->action.act = uobj_get_obj_read(flow_action,
2683 							UVERBS_OBJECT_FLOW_ACTION,
2684 							kern_spec->action.handle,
2685 							attrs);
2686 		if (!ib_spec->action.act)
2687 			return -EINVAL;
2688 		ib_spec->action.size =
2689 			sizeof(struct ib_flow_spec_action_handle);
2690 		flow_resources_add(uflow_res,
2691 				   IB_FLOW_SPEC_ACTION_HANDLE,
2692 				   ib_spec->action.act);
2693 		uobj_put_obj_read(ib_spec->action.act);
2694 		break;
2695 	case IB_FLOW_SPEC_ACTION_COUNT:
2696 		if (kern_spec->flow_count.size !=
2697 			sizeof(struct ib_uverbs_flow_spec_action_count))
2698 			return -EINVAL;
2699 		ib_spec->flow_count.counters =
2700 			uobj_get_obj_read(counters,
2701 					  UVERBS_OBJECT_COUNTERS,
2702 					  kern_spec->flow_count.handle,
2703 					  attrs);
2704 		if (!ib_spec->flow_count.counters)
2705 			return -EINVAL;
2706 		ib_spec->flow_count.size =
2707 				sizeof(struct ib_flow_spec_action_count);
2708 		flow_resources_add(uflow_res,
2709 				   IB_FLOW_SPEC_ACTION_COUNT,
2710 				   ib_spec->flow_count.counters);
2711 		uobj_put_obj_read(ib_spec->flow_count.counters);
2712 		break;
2713 	default:
2714 		return -EINVAL;
2715 	}
2716 	return 0;
2717 }
2718 
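/*
 * Return how many filter bytes to copy. A userspace filter that is larger
 * than the kernel structure is accepted only if all trailing bytes are zero,
 * in which case it is truncated to the kernel size.
 */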
2719 static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
2720 				u16 ib_real_filter_sz)
2721 {
2722 	/*
2723 	 * User space filter structures must be 64 bit aligned; otherwise this
2724 	 * check may pass, but any additional new attributes would not be handled.
2725 	 */
2726 
2727 	if (kern_filter_size > ib_real_filter_sz) {
2728 		if (memchr_inv((const char *)kern_spec_filter +
2729 			       ib_real_filter_sz, 0,
2730 			       kern_filter_size - ib_real_filter_sz))
2731 			return -EINVAL;
2732 		return ib_real_filter_sz;
2733 	}
2734 	return kern_filter_size;
2735 }
2736 
2737 int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
2738 					  const void *kern_spec_mask,
2739 					  const void *kern_spec_val,
2740 					  size_t kern_filter_sz,
2741 					  union ib_flow_spec *ib_spec)
2742 {
2743 	ssize_t actual_filter_sz;
2744 	ssize_t ib_filter_sz;
2745 
2746 	/* User flow spec size must be aligned to 4 bytes */
2747 	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
2748 		return -EINVAL;
2749 
2750 	ib_spec->type = type;
2751 
2752 	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
2753 		return -EINVAL;
2754 
2755 	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
2756 	case IB_FLOW_SPEC_ETH:
2757 		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
2758 		actual_filter_sz = spec_filter_size(kern_spec_mask,
2759 						    kern_filter_sz,
2760 						    ib_filter_sz);
2761 		if (actual_filter_sz <= 0)
2762 			return -EINVAL;
2763 		ib_spec->size = sizeof(struct ib_flow_spec_eth);
2764 		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
2765 		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
2766 		break;
2767 	case IB_FLOW_SPEC_IPV4:
2768 		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
2769 		actual_filter_sz = spec_filter_size(kern_spec_mask,
2770 						    kern_filter_sz,
2771 						    ib_filter_sz);
2772 		if (actual_filter_sz <= 0)
2773 			return -EINVAL;
2774 		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
2775 		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
2776 		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
2777 		break;
2778 	case IB_FLOW_SPEC_IPV6:
2779 		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
2780 		actual_filter_sz = spec_filter_size(kern_spec_mask,
2781 						    kern_filter_sz,
2782 						    ib_filter_sz);
2783 		if (actual_filter_sz <= 0)
2784 			return -EINVAL;
2785 		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
2786 		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
2787 		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);
2788 
2789 		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
2790 		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
2791 			return -EINVAL;
2792 		break;
2793 	case IB_FLOW_SPEC_TCP:
2794 	case IB_FLOW_SPEC_UDP:
2795 		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
2796 		actual_filter_sz = spec_filter_size(kern_spec_mask,
2797 						    kern_filter_sz,
2798 						    ib_filter_sz);
2799 		if (actual_filter_sz <= 0)
2800 			return -EINVAL;
2801 		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
2802 		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
2803 		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
2804 		break;
2805 	case IB_FLOW_SPEC_VXLAN_TUNNEL:
2806 		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
2807 		actual_filter_sz = spec_filter_size(kern_spec_mask,
2808 						    kern_filter_sz,
2809 						    ib_filter_sz);
2810 		if (actual_filter_sz <= 0)
2811 			return -EINVAL;
2812 		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
2813 		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
2814 		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);
2815 
2816 		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
2817 		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
2818 			return -EINVAL;
2819 		break;
2820 	case IB_FLOW_SPEC_ESP:
2821 		ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz);
2822 		actual_filter_sz = spec_filter_size(kern_spec_mask,
2823 						    kern_filter_sz,
2824 						    ib_filter_sz);
2825 		if (actual_filter_sz <= 0)
2826 			return -EINVAL;
2827 		ib_spec->esp.size = sizeof(struct ib_flow_spec_esp);
2828 		memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz);
2829 		memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
2830 		break;
2831 	case IB_FLOW_SPEC_GRE:
2832 		ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz);
2833 		actual_filter_sz = spec_filter_size(kern_spec_mask,
2834 						    kern_filter_sz,
2835 						    ib_filter_sz);
2836 		if (actual_filter_sz <= 0)
2837 			return -EINVAL;
2838 		ib_spec->gre.size = sizeof(struct ib_flow_spec_gre);
2839 		memcpy(&ib_spec->gre.val, kern_spec_val, actual_filter_sz);
2840 		memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz);
2841 		break;
2842 	case IB_FLOW_SPEC_MPLS:
2843 		ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz);
2844 		actual_filter_sz = spec_filter_size(kern_spec_mask,
2845 						    kern_filter_sz,
2846 						    ib_filter_sz);
2847 		if (actual_filter_sz <= 0)
2848 			return -EINVAL;
2849 		ib_spec->mpls.size = sizeof(struct ib_flow_spec_mpls);
2850 		memcpy(&ib_spec->mpls.val, kern_spec_val, actual_filter_sz);
2851 		memcpy(&ib_spec->mpls.mask, kern_spec_mask, actual_filter_sz);
2852 		break;
2853 	default:
2854 		return -EINVAL;
2855 	}
2856 	return 0;
2857 }
2858 
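/*
 * A filter spec carries its value and mask back to back after the header;
 * split the payload in half and convert it to the kernel representation.
 */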
2859 static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
2860 				       union ib_flow_spec *ib_spec)
2861 {
2862 	size_t kern_filter_sz;
2863 	void *kern_spec_mask;
2864 	void *kern_spec_val;
2865 
2866 	if (kern_spec->hdr.size < sizeof(struct ib_uverbs_flow_spec_hdr))
2867 		return -EINVAL;
2868 	kern_filter_sz = kern_spec->hdr.size - sizeof(struct ib_uverbs_flow_spec_hdr);
2869 	kern_filter_sz /= 2;
2870 
2871 	kern_spec_val = (u8 *)kern_spec +
2872 		sizeof(struct ib_uverbs_flow_spec_hdr);
2873 	kern_spec_mask = (u8 *)kern_spec_val + kern_filter_sz;
2874 
2875 	return ib_uverbs_kern_spec_to_ib_spec_filter(kern_spec->type,
2876 						     kern_spec_mask,
2877 						     kern_spec_val,
2878 						     kern_filter_sz, ib_spec);
2879 }
2880 
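/*
 * Dispatch a userspace flow spec to the action or filter translation path;
 * specs with reserved bits set are rejected.
 */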
2881 static int kern_spec_to_ib_spec(struct uverbs_attr_bundle *attrs,
2882 				struct ib_uverbs_flow_spec *kern_spec,
2883 				union ib_flow_spec *ib_spec,
2884 				struct ib_uflow_resources *uflow_res)
2885 {
2886 	if (kern_spec->reserved)
2887 		return -EINVAL;
2888 
2889 	if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
2890 		return kern_spec_to_ib_spec_action(attrs, kern_spec, ib_spec,
2891 						   uflow_res);
2892 	else
2893 		return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
2894 }
2895 
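/*
 * Create a work queue (WQ) on behalf of userspace: validate the PD and CQ
 * handles, call the driver's create_wq() and return the new WQ handle and
 * its actual capacities to the caller.
 */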
2896 static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
2897 {
2898 	struct ib_uverbs_ex_create_wq cmd;
2899 	struct ib_uverbs_ex_create_wq_resp resp = {};
2900 	struct ib_uwq_object           *obj;
2901 	int err = 0;
2902 	struct ib_cq *cq;
2903 	struct ib_pd *pd;
2904 	struct ib_wq *wq;
2905 	struct ib_wq_init_attr wq_init_attr = {};
2906 	struct ib_device *ib_dev;
2907 
2908 	err = uverbs_request(attrs, &cmd, sizeof(cmd));
2909 	if (err)
2910 		return err;
2911 
2912 	if (cmd.comp_mask)
2913 		return -EOPNOTSUPP;
2914 
2915 	obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ, attrs,
2916 						 &ib_dev);
2917 	if (IS_ERR(obj))
2918 		return PTR_ERR(obj);
2919 
2920 	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
2921 	if (!pd) {
2922 		err = -EINVAL;
2923 		goto err_uobj;
2924 	}
2925 
2926 	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
2927 	if (!cq) {
2928 		err = -EINVAL;
2929 		goto err_put_pd;
2930 	}
2931 
2932 	wq_init_attr.cq = cq;
2933 	wq_init_attr.max_sge = cmd.max_sge;
2934 	wq_init_attr.max_wr = cmd.max_wr;
2935 	wq_init_attr.wq_context = attrs->ufile;
2936 	wq_init_attr.wq_type = cmd.wq_type;
2937 	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
2938 	wq_init_attr.create_flags = cmd.create_flags;
2939 	INIT_LIST_HEAD(&obj->uevent.event_list);
2940 
2941 	wq = pd->device->create_wq(pd, &wq_init_attr, &attrs->driver_udata);
2942 	if (IS_ERR(wq)) {
2943 		err = PTR_ERR(wq);
2944 		goto err_put_cq;
2945 	}
2946 
2947 	wq->uobject = obj;
2948 	obj->uevent.uobject.object = wq;
2949 	wq->wq_type = wq_init_attr.wq_type;
2950 	wq->cq = cq;
2951 	wq->pd = pd;
2952 	wq->device = pd->device;
2953 	wq->wq_context = wq_init_attr.wq_context;
2954 	atomic_set(&wq->usecnt, 0);
2955 	atomic_inc(&pd->usecnt);
2956 	atomic_inc(&cq->usecnt);
2957 	wq->uobject = obj;
2958 	obj->uevent.uobject.object = wq;
2959 
2960 	memset(&resp, 0, sizeof(resp));
2961 	resp.wq_handle = obj->uevent.uobject.id;
2962 	resp.max_sge = wq_init_attr.max_sge;
2963 	resp.max_wr = wq_init_attr.max_wr;
2964 	resp.wqn = wq->wq_num;
2965 	resp.response_length = uverbs_response_length(attrs, sizeof(resp));
2966 	err = uverbs_response(attrs, &resp, sizeof(resp));
2967 	if (err)
2968 		goto err_copy;
2969 
2970 	uobj_put_obj_read(pd);
2971 	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
2972 				UVERBS_LOOKUP_READ);
2973 	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
2974 	return 0;
2975 
2976 err_copy:
2977 	ib_destroy_wq(wq, uverbs_get_cleared_udata(attrs));
2978 err_put_cq:
2979 	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
2980 				UVERBS_LOOKUP_READ);
2981 err_put_pd:
2982 	uobj_put_obj_read(pd);
2983 err_uobj:
2984 	uobj_alloc_abort(&obj->uevent.uobject, attrs);
2985 
2986 	return err;
2987 }
2988 
2989 static int ib_uverbs_ex_destroy_wq(struct uverbs_attr_bundle *attrs)
2990 {
2991 	struct ib_uverbs_ex_destroy_wq	cmd;
2992 	struct ib_uverbs_ex_destroy_wq_resp	resp = {};
2993 	struct ib_uobject		*uobj;
2994 	struct ib_uwq_object		*obj;
2995 	int				ret;
2996 
2997 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2998 	if (ret)
2999 		return ret;
3000 
3001 	if (cmd.comp_mask)
3002 		return -EOPNOTSUPP;
3003 
3004 	resp.response_length = uverbs_response_length(attrs, sizeof(resp));
3005 	uobj = uobj_get_destroy(UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
3006 	if (IS_ERR(uobj))
3007 		return PTR_ERR(uobj);
3008 
3009 	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
3010 	resp.events_reported = obj->uevent.events_reported;
3011 
3012 	uobj_put_destroy(uobj);
3013 
3014 	return uverbs_response(attrs, &resp, sizeof(resp));
3015 }
3016 
3017 static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
3018 {
3019 	struct ib_uverbs_ex_modify_wq cmd;
3020 	struct ib_wq *wq;
3021 	struct ib_wq_attr wq_attr = {};
3022 	int ret;
3023 
3024 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3025 	if (ret)
3026 		return ret;
3027 
3028 	if (!cmd.attr_mask)
3029 		return -EINVAL;
3030 
3031 	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
3032 		return -EINVAL;
3033 
3034 	wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
3035 	if (!wq)
3036 		return -EINVAL;
3037 
3038 	wq_attr.curr_wq_state = cmd.curr_wq_state;
3039 	wq_attr.wq_state = cmd.wq_state;
3040 	if (cmd.attr_mask & IB_WQ_FLAGS) {
3041 		wq_attr.flags = cmd.flags;
3042 		wq_attr.flags_mask = cmd.flags_mask;
3043 	}
3044 	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask,
3045 					&attrs->driver_udata);
3046 	rdma_lookup_put_uobject(&wq->uobject->uevent.uobject,
3047 				UVERBS_LOOKUP_READ);
3048 	return ret;
3049 }
3050 
3051 static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
3052 {
3053 	struct ib_uverbs_ex_create_rwq_ind_table cmd;
3054 	struct ib_uverbs_ex_create_rwq_ind_table_resp  resp = {};
3055 	struct ib_uobject		  *uobj;
3056 	int err;
3057 	struct ib_rwq_ind_table_init_attr init_attr = {};
3058 	struct ib_rwq_ind_table *rwq_ind_tbl;
3059 	struct ib_wq	**wqs = NULL;
3060 	u32 *wqs_handles = NULL;
3061 	struct ib_wq	*wq = NULL;
3062 	int i, j, num_read_wqs;
3063 	u32 num_wq_handles;
3064 	struct uverbs_req_iter iter;
3065 	struct ib_device *ib_dev;
3066 
3067 	err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
3068 	if (err)
3069 		return err;
3070 
3071 	if (cmd.comp_mask)
3072 		return -EOPNOTSUPP;
3073 
3074 	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
3075 		return -EINVAL;
3076 
3077 	num_wq_handles = 1 << cmd.log_ind_tbl_size;
3078 	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
3079 			      GFP_KERNEL);
3080 	if (!wqs_handles)
3081 		return -ENOMEM;
3082 
3083 	err = uverbs_request_next(&iter, wqs_handles,
3084 				  num_wq_handles * sizeof(__u32));
3085 	if (err)
3086 		goto err_free;
3087 
3088 	err = uverbs_request_finish(&iter);
3089 	if (err)
3090 		goto err_free;
3091 
3092 	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
3093 	if (!wqs) {
3094 		err = -ENOMEM;
3095 		goto  err_free;
3096 	}
3097 
3098 	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
3099 			num_read_wqs++) {
3100 		wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
3101 				       wqs_handles[num_read_wqs], attrs);
3102 		if (!wq) {
3103 			err = -EINVAL;
3104 			goto put_wqs;
3105 		}
3106 
3107 		wqs[num_read_wqs] = wq;
3108 	}
3109 
3110 	uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, attrs, &ib_dev);
3111 	if (IS_ERR(uobj)) {
3112 		err = PTR_ERR(uobj);
3113 		goto put_wqs;
3114 	}
3115 
3116 	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
3117 	init_attr.ind_tbl = wqs;
3118 
3119 	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr,
3120 						       &attrs->driver_udata);
3121 
3122 	if (IS_ERR(rwq_ind_tbl)) {
3123 		err = PTR_ERR(rwq_ind_tbl);
3124 		goto err_uobj;
3125 	}
3126 
3127 	rwq_ind_tbl->ind_tbl = wqs;
3128 	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
3129 	rwq_ind_tbl->uobject = uobj;
3130 	uobj->object = rwq_ind_tbl;
3131 	rwq_ind_tbl->device = ib_dev;
3132 	atomic_set(&rwq_ind_tbl->usecnt, 0);
3133 
3134 	for (i = 0; i < num_wq_handles; i++)
3135 		atomic_inc(&wqs[i]->usecnt);
3136 
3137 	resp.ind_tbl_handle = uobj->id;
3138 	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
3139 	resp.response_length = uverbs_response_length(attrs, sizeof(resp));
3140 
3141 	err = uverbs_response(attrs, &resp, sizeof(resp));
3142 	if (err)
3143 		goto err_copy;
3144 
3145 	kfree(wqs_handles);
3146 
3147 	for (j = 0; j < num_read_wqs; j++)
3148 		rdma_lookup_put_uobject(&wqs[j]->uobject->uevent.uobject,
3149 					UVERBS_LOOKUP_READ);
3150 
3151 	rdma_alloc_commit_uobject(uobj, attrs);
3152 	return 0;
3153 
3154 err_copy:
3155 	ib_destroy_rwq_ind_table(rwq_ind_tbl);
3156 err_uobj:
3157 	uobj_alloc_abort(uobj, attrs);
3158 put_wqs:
3159 	for (j = 0; j < num_read_wqs; j++)
3160 		rdma_lookup_put_uobject(&wqs[j]->uobject->uevent.uobject,
3161 					UVERBS_LOOKUP_READ);
3162 err_free:
3163 	kfree(wqs_handles);
3164 	kfree(wqs);
3165 	return err;
3166 }
3167 
3168 static int ib_uverbs_ex_destroy_rwq_ind_table(struct uverbs_attr_bundle *attrs)
3169 {
3170 	struct ib_uverbs_ex_destroy_rwq_ind_table cmd;
3171 	int ret;
3172 
3173 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3174 	if (ret)
3175 		return ret;
3176 
3177 	if (cmd.comp_mask)
3178 		return -EOPNOTSUPP;
3179 
3180 	return uobj_perform_destroy(UVERBS_OBJECT_RWQ_IND_TBL,
3181 				    cmd.ind_tbl_handle, attrs);
3182 }
3183 
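/*
 * Create a steering flow. The command carries a variable number of flow
 * specifications which are validated and converted one by one into the
 * driver-visible ib_flow_attr before create_flow() is invoked. The operation
 * is gated by a PRIV_NET_RAW privilege check.
 */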
3184 static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
3185 {
3186 	struct ib_uverbs_create_flow	  cmd;
3187 	struct ib_uverbs_create_flow_resp resp;
3188 	struct ib_uobject		  *uobj;
3189 	struct ib_flow			  *flow_id;
3190 	struct ib_uverbs_flow_attr	  *kern_flow_attr;
3191 	struct ib_flow_attr		  *flow_attr;
3192 	struct ib_qp			  *qp;
3193 	struct ib_uflow_resources	  *uflow_res;
3194 	struct ib_uverbs_flow_spec_hdr	  *kern_spec;
3195 	struct uverbs_req_iter iter;
3196 	int err;
3197 	void *ib_spec;
3198 	int i;
3199 	struct ib_device *ib_dev;
3200 
3201 	err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
3202 	if (err)
3203 		return err;
3204 
3205 	if (cmd.comp_mask)
3206 		return -EINVAL;
3207 
3208 	if (priv_check(curthread, PRIV_NET_RAW) != 0)
3209 		return -EPERM;
3210 
3211 	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
3212 		return -EINVAL;
3213 
3214 	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
3215 	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
3216 	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
3217 		return -EINVAL;
3218 
3219 	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
3220 		return -EINVAL;
3221 
3222 	if (cmd.flow_attr.size >
3223 	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
3224 		return -EINVAL;
3225 
3226 	if (cmd.flow_attr.reserved[0] ||
3227 	    cmd.flow_attr.reserved[1])
3228 		return -EINVAL;
3229 
3230 	if (cmd.flow_attr.num_of_specs) {
3231 		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
3232 					 GFP_KERNEL);
3233 		if (!kern_flow_attr)
3234 			return -ENOMEM;
3235 
3236 		*kern_flow_attr = cmd.flow_attr;
3237 		err = uverbs_request_next(&iter, &kern_flow_attr->flow_specs,
3238 					  cmd.flow_attr.size);
3239 		if (err)
3240 			goto err_free_attr;
3241 	} else {
3242 		kern_flow_attr = &cmd.flow_attr;
3243 	}
3244 
3245 	err = uverbs_request_finish(&iter);
3246 	if (err)
3247 		goto err_free_attr;
3248 
3249 	uobj = uobj_alloc(UVERBS_OBJECT_FLOW, attrs, &ib_dev);
3250 	if (IS_ERR(uobj)) {
3251 		err = PTR_ERR(uobj);
3252 		goto err_free_attr;
3253 	}
3254 
3255 	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
3256 	if (!qp) {
3257 		err = -EINVAL;
3258 		goto err_uobj;
3259 	}
3260 
3261 	if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
3262 		err = -EINVAL;
3263 		goto err_put;
3264 	}
3265 
3266 	flow_attr = kzalloc(struct_size(flow_attr, flows,
3267 				cmd.flow_attr.num_of_specs), GFP_KERNEL);
3268 	if (!flow_attr) {
3269 		err = -ENOMEM;
3270 		goto err_put;
3271 	}
3272 	uflow_res = flow_resources_alloc(cmd.flow_attr.num_of_specs);
3273 	if (!uflow_res) {
3274 		err = -ENOMEM;
3275 		goto err_free_flow_attr;
3276 	}
3277 
3278 	flow_attr->type = kern_flow_attr->type;
3279 	flow_attr->priority = kern_flow_attr->priority;
3280 	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
3281 	flow_attr->port = kern_flow_attr->port;
3282 	flow_attr->flags = kern_flow_attr->flags;
3283 	flow_attr->size = sizeof(*flow_attr);
3284 
3285 	kern_spec = kern_flow_attr->flow_specs;
3286 	ib_spec = flow_attr + 1;
3287 	for (i = 0; i < flow_attr->num_of_specs &&
3288 			cmd.flow_attr.size >= sizeof(*kern_spec) &&
3289 			cmd.flow_attr.size >= kern_spec->size;
3290 	     i++) {
3291 		err = kern_spec_to_ib_spec(
3292 				attrs, (struct ib_uverbs_flow_spec *)kern_spec,
3293 				ib_spec, uflow_res);
3294 		if (err)
3295 			goto err_free;
3296 
3297 		flow_attr->size +=
3298 			((union ib_flow_spec *) ib_spec)->size;
3299 		cmd.flow_attr.size -= kern_spec->size;
3300 		kern_spec = (struct ib_uverbs_flow_spec_hdr *)((u8 *)kern_spec + kern_spec->size);
3301 		ib_spec = (u8 *)ib_spec + ((union ib_flow_spec *) ib_spec)->size;
3302 	}
3303 	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
3304 		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
3305 			i, cmd.flow_attr.size);
3306 		err = -EINVAL;
3307 		goto err_free;
3308 	}
3309 
3310 	flow_id = qp->device->create_flow(
3311 		qp, flow_attr, IB_FLOW_DOMAIN_USER, &attrs->driver_udata);
3312 
3313 	if (IS_ERR(flow_id)) {
3314 		err = PTR_ERR(flow_id);
3315 		goto err_free;
3316 	}
3317 
3318 	ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);
3319 
3320 	memset(&resp, 0, sizeof(resp));
3321 	resp.flow_handle = uobj->id;
3322 
3323 	err = uverbs_response(attrs, &resp, sizeof(resp));
3324 	if (err)
3325 		goto err_copy;
3326 
3327 	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
3328 				UVERBS_LOOKUP_READ);
3329 	kfree(flow_attr);
3330 	if (cmd.flow_attr.num_of_specs)
3331 		kfree(kern_flow_attr);
3332 	rdma_alloc_commit_uobject(uobj, attrs);
3333 	return 0;
3334 err_copy:
3335 	if (!qp->device->destroy_flow(flow_id))
3336 		atomic_dec(&qp->usecnt);
3337 err_free:
3338 	ib_uverbs_flow_resources_free(uflow_res);
3339 err_free_flow_attr:
3340 	kfree(flow_attr);
3341 err_put:
3342 	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
3343 				UVERBS_LOOKUP_READ);
3344 err_uobj:
3345 	uobj_alloc_abort(uobj, attrs);
3346 err_free_attr:
3347 	if (cmd.flow_attr.num_of_specs)
3348 		kfree(kern_flow_attr);
3349 	return err;
3350 }
3351 
3352 static int ib_uverbs_ex_destroy_flow(struct uverbs_attr_bundle *attrs)
3353 {
3354 	struct ib_uverbs_destroy_flow	cmd;
3355 	int				ret;
3356 
3357 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3358 	if (ret)
3359 		return ret;
3360 
3361 	if (cmd.comp_mask)
3362 		return -EINVAL;
3363 
3364 	return uobj_perform_destroy(UVERBS_OBJECT_FLOW, cmd.flow_handle, attrs);
3365 }
3366 
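/*
 * Shared implementation for create_srq and create_xsrq: resolve the optional
 * XRCD and CQ, allocate the driver SRQ object and return the new handle
 * together with the actual max_wr/max_sge values.
 */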
3367 static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
3368 				struct ib_uverbs_create_xsrq *cmd,
3369 				struct ib_udata *udata)
3370 {
3371 	struct ib_uverbs_create_srq_resp resp;
3372 	struct ib_usrq_object           *obj;
3373 	struct ib_pd                    *pd;
3374 	struct ib_srq                   *srq;
3375 	struct ib_uobject               *uninitialized_var(xrcd_uobj);
3376 	struct ib_srq_init_attr          attr;
3377 	int ret;
3378 	struct ib_device *ib_dev;
3379 
3380 	obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ, attrs,
3381 						  &ib_dev);
3382 	if (IS_ERR(obj))
3383 		return PTR_ERR(obj);
3384 
3385 	if (cmd->srq_type == IB_SRQT_TM)
3386 		attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;
3387 
3388 	if (cmd->srq_type == IB_SRQT_XRC) {
3389 		xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle,
3390 					  attrs);
3391 		if (IS_ERR(xrcd_uobj)) {
3392 			ret = -EINVAL;
3393 			goto err;
3394 		}
3395 
3396 		attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
3397 		if (!attr.ext.xrc.xrcd) {
3398 			ret = -EINVAL;
3399 			goto err_put_xrcd;
3400 		}
3401 
3402 		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
3403 		atomic_inc(&obj->uxrcd->refcnt);
3404 	}
3405 
3406 	if (ib_srq_has_cq(cmd->srq_type)) {
3407 		attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
3408 						cmd->cq_handle, attrs);
3409 		if (!attr.ext.cq) {
3410 			ret = -EINVAL;
3411 			goto err_put_xrcd;
3412 		}
3413 	}
3414 
3415 	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs);
3416 	if (!pd) {
3417 		ret = -EINVAL;
3418 		goto err_put_cq;
3419 	}
3420 
3421 	attr.event_handler  = ib_uverbs_srq_event_handler;
3422 	attr.srq_context    = attrs->ufile;
3423 	attr.srq_type       = cmd->srq_type;
3424 	attr.attr.max_wr    = cmd->max_wr;
3425 	attr.attr.max_sge   = cmd->max_sge;
3426 	attr.attr.srq_limit = cmd->srq_limit;
3427 
3428 	INIT_LIST_HEAD(&obj->uevent.event_list);
3429 
3430 	srq = rdma_zalloc_drv_obj(ib_dev, ib_srq);
3431 	if (!srq) {
3432 		ret = -ENOMEM;
3433 		goto err_put;
3434 	}
3435 
3436 	srq->device        = pd->device;
3437 	srq->pd            = pd;
3438 	srq->srq_type	   = cmd->srq_type;
3439 	srq->uobject       = obj;
3440 	srq->event_handler = attr.event_handler;
3441 	srq->srq_context   = attr.srq_context;
3442 
3443 	ret = pd->device->create_srq(srq, &attr, udata);
3444 	if (ret)
3445 		goto err_free;
3446 
3447 	if (ib_srq_has_cq(cmd->srq_type)) {
3448 		srq->ext.cq       = attr.ext.cq;
3449 		atomic_inc(&attr.ext.cq->usecnt);
3450 	}
3451 
3452 	if (cmd->srq_type == IB_SRQT_XRC) {
3453 		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
3454 		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
3455 	}
3456 
3457 	atomic_inc(&pd->usecnt);
3458 	atomic_set(&srq->usecnt, 0);
3459 
3460 	obj->uevent.uobject.object = srq;
3461 	obj->uevent.uobject.user_handle = cmd->user_handle;
3462 
3463 	memset(&resp, 0, sizeof resp);
3464 	resp.srq_handle = obj->uevent.uobject.id;
3465 	resp.max_wr     = attr.attr.max_wr;
3466 	resp.max_sge    = attr.attr.max_sge;
3467 	if (cmd->srq_type == IB_SRQT_XRC)
3468 		resp.srqn = srq->ext.xrc.srq_num;
3469 
3470 	ret = uverbs_response(attrs, &resp, sizeof(resp));
3471 	if (ret)
3472 		goto err_copy;
3473 
3474 	if (cmd->srq_type == IB_SRQT_XRC)
3475 		uobj_put_read(xrcd_uobj);
3476 
3477 	if (ib_srq_has_cq(cmd->srq_type))
3478 		rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
3479 					UVERBS_LOOKUP_READ);
3480 
3481 	uobj_put_obj_read(pd);
3482 	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
3483 	return 0;
3484 
3485 err_copy:
3486 	ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs));
3487 	/* It was released in ib_destroy_srq_user */
3488 	srq = NULL;
3489 err_free:
3490 	kfree(srq);
3491 err_put:
3492 	uobj_put_obj_read(pd);
3493 
3494 err_put_cq:
3495 	if (ib_srq_has_cq(cmd->srq_type))
3496 		rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
3497 					UVERBS_LOOKUP_READ);
3498 
3499 err_put_xrcd:
3500 	if (cmd->srq_type == IB_SRQT_XRC) {
3501 		atomic_dec(&obj->uxrcd->refcnt);
3502 		uobj_put_read(xrcd_uobj);
3503 	}
3504 
3505 err:
3506 	uobj_alloc_abort(&obj->uevent.uobject, attrs);
3507 	return ret;
3508 }
3509 
3510 static int ib_uverbs_create_srq(struct uverbs_attr_bundle *attrs)
3511 {
3512 	struct ib_uverbs_create_srq      cmd;
3513 	struct ib_uverbs_create_xsrq     xcmd;
3514 	int ret;
3515 
3516 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3517 	if (ret)
3518 		return ret;
3519 
3520 	memset(&xcmd, 0, sizeof(xcmd));
3521 	xcmd.response	 = cmd.response;
3522 	xcmd.user_handle = cmd.user_handle;
3523 	xcmd.srq_type	 = IB_SRQT_BASIC;
3524 	xcmd.pd_handle	 = cmd.pd_handle;
3525 	xcmd.max_wr	 = cmd.max_wr;
3526 	xcmd.max_sge	 = cmd.max_sge;
3527 	xcmd.srq_limit	 = cmd.srq_limit;
3528 
3529 	return __uverbs_create_xsrq(attrs, &xcmd, &attrs->driver_udata);
3530 }
3531 
3532 static int ib_uverbs_create_xsrq(struct uverbs_attr_bundle *attrs)
3533 {
3534 	struct ib_uverbs_create_xsrq     cmd;
3535 	int ret;
3536 
3537 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3538 	if (ret)
3539 		return ret;
3540 
3541 	return __uverbs_create_xsrq(attrs, &cmd, &attrs->driver_udata);
3542 }
3543 
3544 static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
3545 {
3546 	struct ib_uverbs_modify_srq cmd;
3547 	struct ib_srq              *srq;
3548 	struct ib_srq_attr          attr;
3549 	int                         ret;
3550 
3551 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3552 	if (ret)
3553 		return ret;
3554 
3555 	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
3556 	if (!srq)
3557 		return -EINVAL;
3558 
3559 	attr.max_wr    = cmd.max_wr;
3560 	attr.srq_limit = cmd.srq_limit;
3561 
3562 	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask,
3563 					  &attrs->driver_udata);
3564 
3565 	rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
3566 				UVERBS_LOOKUP_READ);
3567 
3568 	return ret;
3569 }
3570 
3571 static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs)
3572 {
3573 	struct ib_uverbs_query_srq      cmd;
3574 	struct ib_uverbs_query_srq_resp resp;
3575 	struct ib_srq_attr              attr;
3576 	struct ib_srq                   *srq;
3577 	int                             ret;
3578 
3579 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3580 	if (ret)
3581 		return ret;
3582 
3583 	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
3584 	if (!srq)
3585 		return -EINVAL;
3586 
3587 	ret = ib_query_srq(srq, &attr);
3588 
3589 	rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
3590 				UVERBS_LOOKUP_READ);
3591 
3592 	if (ret)
3593 		return ret;
3594 
3595 	memset(&resp, 0, sizeof resp);
3596 
3597 	resp.max_wr    = attr.max_wr;
3598 	resp.max_sge   = attr.max_sge;
3599 	resp.srq_limit = attr.srq_limit;
3600 
3601 	return uverbs_response(attrs, &resp, sizeof(resp));
3602 }
3603 
3604 static int ib_uverbs_destroy_srq(struct uverbs_attr_bundle *attrs)
3605 {
3606 	struct ib_uverbs_destroy_srq      cmd;
3607 	struct ib_uverbs_destroy_srq_resp resp;
3608 	struct ib_uobject		 *uobj;
3609 	struct ib_uevent_object        	 *obj;
3610 	int ret;
3611 
3612 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3613 	if (ret)
3614 		return ret;
3615 
3616 	uobj = uobj_get_destroy(UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
3617 	if (IS_ERR(uobj))
3618 		return PTR_ERR(uobj);
3619 
3620 	obj = container_of(uobj, struct ib_uevent_object, uobject);
3621 	memset(&resp, 0, sizeof(resp));
3622 	resp.events_reported = obj->events_reported;
3623 
3624 	uobj_put_destroy(uobj);
3625 
3626 	return uverbs_response(attrs, &resp, sizeof(resp));
3627 }
3628 
3629 static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs)
3630 {
3631 	struct ib_uverbs_ex_query_device_resp resp = {};
3632 	struct ib_uverbs_ex_query_device  cmd;
3633 	struct ib_device_attr attr = {0};
3634 	struct ib_ucontext *ucontext;
3635 	struct ib_device *ib_dev;
3636 	int err;
3637 
3638 	ucontext = ib_uverbs_get_ucontext(attrs);
3639 	if (IS_ERR(ucontext))
3640 		return PTR_ERR(ucontext);
3641 	ib_dev = ucontext->device;
3642 
3643 	err = uverbs_request(attrs, &cmd, sizeof(cmd));
3644 	if (err)
3645 		return err;
3646 
3647 	if (cmd.comp_mask)
3648 		return -EINVAL;
3649 
3650 	if (cmd.reserved)
3651 		return -EINVAL;
3652 
3653 	err = ib_dev->query_device(ib_dev, &attr, &attrs->driver_udata);
3654 	if (err)
3655 		return err;
3656 
3657 	copy_query_dev_fields(ucontext, &resp.base, &attr);
3658 
3659 	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
3660 	resp.odp_caps.per_transport_caps.rc_odp_caps =
3661 		attr.odp_caps.per_transport_caps.rc_odp_caps;
3662 	resp.odp_caps.per_transport_caps.uc_odp_caps =
3663 		attr.odp_caps.per_transport_caps.uc_odp_caps;
3664 	resp.odp_caps.per_transport_caps.ud_odp_caps =
3665 		attr.odp_caps.per_transport_caps.ud_odp_caps;
3666 	resp.xrc_odp_caps = attr.odp_caps.per_transport_caps.xrc_odp_caps;
3667 
3668 	resp.timestamp_mask = attr.timestamp_mask;
3669 	resp.hca_core_clock = attr.hca_core_clock;
3670 	resp.device_cap_flags_ex = attr.device_cap_flags;
3671 	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
3672 	resp.rss_caps.max_rwq_indirection_tables =
3673 		attr.rss_caps.max_rwq_indirection_tables;
3674 	resp.rss_caps.max_rwq_indirection_table_size =
3675 		attr.rss_caps.max_rwq_indirection_table_size;
3676 	resp.max_wq_type_rq = attr.max_wq_type_rq;
3677 	resp.raw_packet_caps = attr.raw_packet_caps;
3678 	resp.tm_caps.max_rndv_hdr_size	= attr.tm_caps.max_rndv_hdr_size;
3679 	resp.tm_caps.max_num_tags	= attr.tm_caps.max_num_tags;
3680 	resp.tm_caps.max_ops		= attr.tm_caps.max_ops;
3681 	resp.tm_caps.max_sge		= attr.tm_caps.max_sge;
3682 	resp.tm_caps.flags		= attr.tm_caps.flags;
3683 	resp.cq_moderation_caps.max_cq_moderation_count  =
3684 		attr.cq_caps.max_cq_moderation_count;
3685 	resp.cq_moderation_caps.max_cq_moderation_period =
3686 		attr.cq_caps.max_cq_moderation_period;
3687 	resp.max_dm_size = attr.max_dm_size;
3688 	resp.response_length = uverbs_response_length(attrs, sizeof(resp));
3689 
3690 	return uverbs_response(attrs, &resp, sizeof(resp));
3691 }
3692 
3693 static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
3694 {
3695 	struct ib_uverbs_ex_modify_cq cmd;
3696 	struct ib_cq *cq;
3697 	int ret;
3698 
3699 	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3700 	if (ret)
3701 		return ret;
3702 
3703 	if (!cmd.attr_mask || cmd.reserved)
3704 		return -EINVAL;
3705 
3706 	if (cmd.attr_mask > IB_CQ_MODERATE)
3707 		return -EOPNOTSUPP;
3708 
3709 	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
3710 	if (!cq)
3711 		return -EINVAL;
3712 
3713 	ret = ib_modify_cq(cq, cmd.attr.cq_count, cmd.attr.cq_period);
3714 
3715 	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
3716 				UVERBS_LOOKUP_READ);
3717 	return ret;
3718 }
3719 
3720 /*
3721  * Describe the input structs for write(). Some write methods take an input-only
3722  * struct; most take both an input and an output. If the struct has an output,
3723  * the 'response' u64 must be the first field in the request structure.
3724  *
3725  * If udata is present then both the request and response structs have a
3726  * trailing driver_data flex array. In this case the size of the base struct
3727  * cannot be changed.
3728  */
3729 #define UAPI_DEF_WRITE_IO(req, resp)                                           \
3730 	.write.has_resp = 1 +                                                  \
3731 			  BUILD_BUG_ON_ZERO(offsetof(req, response) != 0) +    \
3732 			  BUILD_BUG_ON_ZERO(sizeof(((req *)0)->response) !=    \
3733 					    sizeof(u64)),                      \
3734 	.write.req_size = sizeof(req), .write.resp_size = sizeof(resp)
3735 
3736 #define UAPI_DEF_WRITE_I(req) .write.req_size = sizeof(req)
3737 
3738 #define UAPI_DEF_WRITE_UDATA_IO(req, resp)                                     \
3739 	UAPI_DEF_WRITE_IO(req, resp),                                          \
3740 		.write.has_udata =                                             \
3741 			1 +                                                    \
3742 			BUILD_BUG_ON_ZERO(offsetof(req, driver_data) !=        \
3743 					  sizeof(req)) +                       \
3744 			BUILD_BUG_ON_ZERO(offsetof(resp, driver_data) !=       \
3745 					  sizeof(resp))
3746 
3747 #define UAPI_DEF_WRITE_UDATA_I(req)                                            \
3748 	UAPI_DEF_WRITE_I(req),                                                 \
3749 		.write.has_udata =                                             \
3750 			1 + BUILD_BUG_ON_ZERO(offsetof(req, driver_data) !=    \
3751 					      sizeof(req))
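
/*
 * Illustrative sketch only: 'ib_uverbs_example_cmd' and
 * 'ib_uverbs_example_resp' below are hypothetical structures, not part of
 * the uverbs ABI. They show the layout a command must have before it can be
 * described with UAPI_DEF_WRITE_UDATA_IO(): the 'response' u64 comes first
 * in the request and both structures end in a driver_data flex array, or the
 * BUILD_BUG_ON_ZERO() checks above fail at compile time.
 */
#if 0
struct ib_uverbs_example_cmd {
	__u64 response;		/* userspace address of the response buffer */
	__u32 handle;
	__u32 reserved;
	__u64 driver_data[];	/* trailing driver-specific udata */
};

struct ib_uverbs_example_resp {
	__u32 out_handle;
	__u32 reserved;
	__u64 driver_data[];	/* trailing driver-specific udata */
};

/* The matching table entry would then use:			*/
/*	UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_example_cmd,	*/
/*				struct ib_uverbs_example_resp)	*/
#endif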
3752 
3753 /*
3754  * The _EX versions are for use with WRITE_EX and allow the last struct member
3755  * to be specified. Buffers that do not include that member will be rejected.
3756  */
3757 #define UAPI_DEF_WRITE_IO_EX(req, req_last_member, resp, resp_last_member)     \
3758 	.write.has_resp = 1,                                                   \
3759 	.write.req_size = offsetofend(req, req_last_member),                   \
3760 	.write.resp_size = offsetofend(resp, resp_last_member)
3761 
3762 #define UAPI_DEF_WRITE_I_EX(req, req_last_member)                              \
3763 	.write.req_size = offsetofend(req, req_last_member)
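
/*
 * Illustrative sketch only ('ib_uverbs_example_ex_cmd' and its members are
 * hypothetical): declaring
 *	UAPI_DEF_WRITE_I_EX(struct ib_uverbs_example_ex_cmd, handle)
 * sets .write.req_size = offsetofend(struct ib_uverbs_example_ex_cmd, handle)
 * == 8, so WRITE_EX requests shorter than 8 bytes are rejected, while requests
 * that omit the later 'new_field' extension are still accepted.
 */
#if 0
struct ib_uverbs_example_ex_cmd {
	__u32 comp_mask;
	__u32 handle;
	__u32 new_field;	/* member added by a later ABI extension */
	__u32 reserved;
};
#endif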
3764 
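/*
 * Definition table for the write()/WRITE_EX commands: each entry ties a
 * command number to its handler, its request and response sizes, and the
 * driver methods it requires. The empty entry terminates the table.
 */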
3765 const struct uapi_definition uverbs_def_write_intf[] = {
3766 	DECLARE_UVERBS_OBJECT(
3767 		UVERBS_OBJECT_AH,
3768 		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_AH,
3769 				     ib_uverbs_create_ah,
3770 				     UAPI_DEF_WRITE_UDATA_IO(
3771 					     struct ib_uverbs_create_ah,
3772 					     struct ib_uverbs_create_ah_resp),
3773 				     UAPI_DEF_METHOD_NEEDS_FN(create_ah)),
3774 		DECLARE_UVERBS_WRITE(
3775 			IB_USER_VERBS_CMD_DESTROY_AH,
3776 			ib_uverbs_destroy_ah,
3777 			UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_ah),
3778 			UAPI_DEF_METHOD_NEEDS_FN(destroy_ah))),
3779 
3780 	DECLARE_UVERBS_OBJECT(
3781 		UVERBS_OBJECT_COMP_CHANNEL,
3782 		DECLARE_UVERBS_WRITE(
3783 			IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL,
3784 			ib_uverbs_create_comp_channel,
3785 			UAPI_DEF_WRITE_IO(
3786 				struct ib_uverbs_create_comp_channel,
3787 				struct ib_uverbs_create_comp_channel_resp))),
3788 
3789 	DECLARE_UVERBS_OBJECT(
3790 		UVERBS_OBJECT_CQ,
3791 		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_CQ,
3792 				     ib_uverbs_create_cq,
3793 				     UAPI_DEF_WRITE_UDATA_IO(
3794 					     struct ib_uverbs_create_cq,
3795 					     struct ib_uverbs_create_cq_resp),
3796 				     UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
3797 		DECLARE_UVERBS_WRITE(
3798 			IB_USER_VERBS_CMD_DESTROY_CQ,
3799 			ib_uverbs_destroy_cq,
3800 			UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_cq,
3801 					  struct ib_uverbs_destroy_cq_resp),
3802 			UAPI_DEF_METHOD_NEEDS_FN(destroy_cq)),
3803 		DECLARE_UVERBS_WRITE(
3804 			IB_USER_VERBS_CMD_POLL_CQ,
3805 			ib_uverbs_poll_cq,
3806 			UAPI_DEF_WRITE_IO(struct ib_uverbs_poll_cq,
3807 					  struct ib_uverbs_poll_cq_resp),
3808 			UAPI_DEF_METHOD_NEEDS_FN(poll_cq)),
3809 		DECLARE_UVERBS_WRITE(
3810 			IB_USER_VERBS_CMD_REQ_NOTIFY_CQ,
3811 			ib_uverbs_req_notify_cq,
3812 			UAPI_DEF_WRITE_I(struct ib_uverbs_req_notify_cq),
3813 			UAPI_DEF_METHOD_NEEDS_FN(req_notify_cq)),
3814 		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_RESIZE_CQ,
3815 				     ib_uverbs_resize_cq,
3816 				     UAPI_DEF_WRITE_UDATA_IO(
3817 					     struct ib_uverbs_resize_cq,
3818 					     struct ib_uverbs_resize_cq_resp),
3819 				     UAPI_DEF_METHOD_NEEDS_FN(resize_cq)),
3820 		DECLARE_UVERBS_WRITE_EX(
3821 			IB_USER_VERBS_EX_CMD_CREATE_CQ,
3822 			ib_uverbs_ex_create_cq,
3823 			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_cq,
3824 					     reserved,
3825 					     struct ib_uverbs_ex_create_cq_resp,
3826 					     response_length),
3827 			UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
3828 		DECLARE_UVERBS_WRITE_EX(
3829 			IB_USER_VERBS_EX_CMD_MODIFY_CQ,
3830 			ib_uverbs_ex_modify_cq,
3831 			UAPI_DEF_WRITE_I(struct ib_uverbs_ex_modify_cq),
3832 			UAPI_DEF_METHOD_NEEDS_FN(create_cq))),
3833 
3834 	DECLARE_UVERBS_OBJECT(
3835 		UVERBS_OBJECT_DEVICE,
3836 		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_GET_CONTEXT,
3837 				     ib_uverbs_get_context,
3838 				     UAPI_DEF_WRITE_UDATA_IO(
3839 					     struct ib_uverbs_get_context,
3840 					     struct ib_uverbs_get_context_resp)),
3841 		DECLARE_UVERBS_WRITE(
3842 			IB_USER_VERBS_CMD_QUERY_DEVICE,
3843 			ib_uverbs_query_device,
3844 			UAPI_DEF_WRITE_IO(struct ib_uverbs_query_device,
3845 					  struct ib_uverbs_query_device_resp)),
3846 		DECLARE_UVERBS_WRITE(
3847 			IB_USER_VERBS_CMD_QUERY_PORT,
3848 			ib_uverbs_query_port,
3849 			UAPI_DEF_WRITE_IO(struct ib_uverbs_query_port,
3850 					  struct ib_uverbs_query_port_resp),
3851 			UAPI_DEF_METHOD_NEEDS_FN(query_port)),
3852 		DECLARE_UVERBS_WRITE_EX(
3853 			IB_USER_VERBS_EX_CMD_QUERY_DEVICE,
3854 			ib_uverbs_ex_query_device,
3855 			UAPI_DEF_WRITE_IO_EX(
3856 				struct ib_uverbs_ex_query_device,
3857 				reserved,
3858 				struct ib_uverbs_ex_query_device_resp,
3859 				response_length),
3860 			UAPI_DEF_METHOD_NEEDS_FN(query_device)),
3861 		UAPI_DEF_OBJ_NEEDS_FN(alloc_ucontext),
3862 		UAPI_DEF_OBJ_NEEDS_FN(dealloc_ucontext)),
3863 
3864 	DECLARE_UVERBS_OBJECT(
3865 		UVERBS_OBJECT_FLOW,
3866 		DECLARE_UVERBS_WRITE_EX(
3867 			IB_USER_VERBS_EX_CMD_CREATE_FLOW,
3868 			ib_uverbs_ex_create_flow,
3869 			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_create_flow,
3870 					     flow_attr,
3871 					     struct ib_uverbs_create_flow_resp,
3872 					     flow_handle),
3873 			UAPI_DEF_METHOD_NEEDS_FN(create_flow)),
3874 		DECLARE_UVERBS_WRITE_EX(
3875 			IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
3876 			ib_uverbs_ex_destroy_flow,
3877 			UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_flow),
3878 			UAPI_DEF_METHOD_NEEDS_FN(destroy_flow))),
3879 
3880 	DECLARE_UVERBS_OBJECT(
3881 		UVERBS_OBJECT_MR,
3882 		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_DEREG_MR,
3883 				     ib_uverbs_dereg_mr,
3884 				     UAPI_DEF_WRITE_I(struct ib_uverbs_dereg_mr),
3885 				     UAPI_DEF_METHOD_NEEDS_FN(dereg_mr)),
3886 		DECLARE_UVERBS_WRITE(
3887 			IB_USER_VERBS_CMD_REG_MR,
3888 			ib_uverbs_reg_mr,
3889 			UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_reg_mr,
3890 						struct ib_uverbs_reg_mr_resp),
3891 			UAPI_DEF_METHOD_NEEDS_FN(reg_user_mr)),
3892 		DECLARE_UVERBS_WRITE(
3893 			IB_USER_VERBS_CMD_REREG_MR,
3894 			ib_uverbs_rereg_mr,
3895 			UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_rereg_mr,
3896 						struct ib_uverbs_rereg_mr_resp),
3897 			UAPI_DEF_METHOD_NEEDS_FN(rereg_user_mr))),
3898 
3899 	DECLARE_UVERBS_OBJECT(
3900 		UVERBS_OBJECT_MW,
3901 		DECLARE_UVERBS_WRITE(
3902 			IB_USER_VERBS_CMD_ALLOC_MW,
3903 			ib_uverbs_alloc_mw,
3904 			UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_mw,
3905 						struct ib_uverbs_alloc_mw_resp),
3906 			UAPI_DEF_METHOD_NEEDS_FN(alloc_mw)),
3907 		DECLARE_UVERBS_WRITE(
3908 			IB_USER_VERBS_CMD_DEALLOC_MW,
3909 			ib_uverbs_dealloc_mw,
3910 			UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_mw),
3911 			UAPI_DEF_METHOD_NEEDS_FN(dealloc_mw))),
3912 
3913 	DECLARE_UVERBS_OBJECT(
3914 		UVERBS_OBJECT_PD,
3915 		DECLARE_UVERBS_WRITE(
3916 			IB_USER_VERBS_CMD_ALLOC_PD,
3917 			ib_uverbs_alloc_pd,
3918 			UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_pd,
3919 						struct ib_uverbs_alloc_pd_resp),
3920 			UAPI_DEF_METHOD_NEEDS_FN(alloc_pd)),
3921 		DECLARE_UVERBS_WRITE(
3922 			IB_USER_VERBS_CMD_DEALLOC_PD,
3923 			ib_uverbs_dealloc_pd,
3924 			UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_pd),
3925 			UAPI_DEF_METHOD_NEEDS_FN(dealloc_pd))),
3926 
3927 	DECLARE_UVERBS_OBJECT(
3928 		UVERBS_OBJECT_QP,
3929 		DECLARE_UVERBS_WRITE(
3930 			IB_USER_VERBS_CMD_ATTACH_MCAST,
3931 			ib_uverbs_attach_mcast,
3932 			UAPI_DEF_WRITE_I(struct ib_uverbs_attach_mcast),
3933 			UAPI_DEF_METHOD_NEEDS_FN(attach_mcast),
3934 			UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
3935 		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_QP,
3936 				     ib_uverbs_create_qp,
3937 				     UAPI_DEF_WRITE_UDATA_IO(
3938 					     struct ib_uverbs_create_qp,
3939 					     struct ib_uverbs_create_qp_resp),
3940 				     UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
3941 		DECLARE_UVERBS_WRITE(
3942 			IB_USER_VERBS_CMD_DESTROY_QP,
3943 			ib_uverbs_destroy_qp,
3944 			UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_qp,
3945 					  struct ib_uverbs_destroy_qp_resp),
3946 			UAPI_DEF_METHOD_NEEDS_FN(destroy_qp)),
3947 		DECLARE_UVERBS_WRITE(
3948 			IB_USER_VERBS_CMD_DETACH_MCAST,
3949 			ib_uverbs_detach_mcast,
3950 			UAPI_DEF_WRITE_I(struct ib_uverbs_detach_mcast),
3951 			UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
3952 		DECLARE_UVERBS_WRITE(
3953 			IB_USER_VERBS_CMD_MODIFY_QP,
3954 			ib_uverbs_modify_qp,
3955 			UAPI_DEF_WRITE_I(struct ib_uverbs_modify_qp),
3956 			UAPI_DEF_METHOD_NEEDS_FN(modify_qp)),
3957 		DECLARE_UVERBS_WRITE(
3958 			IB_USER_VERBS_CMD_POST_RECV,
3959 			ib_uverbs_post_recv,
3960 			UAPI_DEF_WRITE_IO(struct ib_uverbs_post_recv,
3961 					  struct ib_uverbs_post_recv_resp),
3962 			UAPI_DEF_METHOD_NEEDS_FN(post_recv)),
3963 		DECLARE_UVERBS_WRITE(
3964 			IB_USER_VERBS_CMD_POST_SEND,
3965 			ib_uverbs_post_send,
3966 			UAPI_DEF_WRITE_IO(struct ib_uverbs_post_send,
3967 					  struct ib_uverbs_post_send_resp),
3968 			UAPI_DEF_METHOD_NEEDS_FN(post_send)),
3969 		DECLARE_UVERBS_WRITE(
3970 			IB_USER_VERBS_CMD_QUERY_QP,
3971 			ib_uverbs_query_qp,
3972 			UAPI_DEF_WRITE_IO(struct ib_uverbs_query_qp,
3973 					  struct ib_uverbs_query_qp_resp),
3974 			UAPI_DEF_METHOD_NEEDS_FN(query_qp)),
3975 		DECLARE_UVERBS_WRITE_EX(
3976 			IB_USER_VERBS_EX_CMD_CREATE_QP,
3977 			ib_uverbs_ex_create_qp,
3978 			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_qp,
3979 					     comp_mask,
3980 					     struct ib_uverbs_ex_create_qp_resp,
3981 					     response_length),
3982 			UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
3983 		DECLARE_UVERBS_WRITE_EX(
3984 			IB_USER_VERBS_EX_CMD_MODIFY_QP,
3985 			ib_uverbs_ex_modify_qp,
3986 			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_modify_qp,
3987 					     base,
3988 					     struct ib_uverbs_ex_modify_qp_resp,
3989 					     response_length),
3990 			UAPI_DEF_METHOD_NEEDS_FN(modify_qp))),
3991 
3992 	DECLARE_UVERBS_OBJECT(
3993 		UVERBS_OBJECT_RWQ_IND_TBL,
3994 		DECLARE_UVERBS_WRITE_EX(
3995 			IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL,
3996 			ib_uverbs_ex_create_rwq_ind_table,
3997 			UAPI_DEF_WRITE_IO_EX(
3998 				struct ib_uverbs_ex_create_rwq_ind_table,
3999 				log_ind_tbl_size,
4000 				struct ib_uverbs_ex_create_rwq_ind_table_resp,
4001 				ind_tbl_num),
4002 			UAPI_DEF_METHOD_NEEDS_FN(create_rwq_ind_table)),
4003 		DECLARE_UVERBS_WRITE_EX(
4004 			IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL,
4005 			ib_uverbs_ex_destroy_rwq_ind_table,
4006 			UAPI_DEF_WRITE_I(
4007 				struct ib_uverbs_ex_destroy_rwq_ind_table),
4008 			UAPI_DEF_METHOD_NEEDS_FN(destroy_rwq_ind_table))),
4009 
4010 	DECLARE_UVERBS_OBJECT(
4011 		UVERBS_OBJECT_WQ,
4012 		DECLARE_UVERBS_WRITE_EX(
4013 			IB_USER_VERBS_EX_CMD_CREATE_WQ,
4014 			ib_uverbs_ex_create_wq,
4015 			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_wq,
4016 					     max_sge,
4017 					     struct ib_uverbs_ex_create_wq_resp,
4018 					     wqn),
4019 			UAPI_DEF_METHOD_NEEDS_FN(create_wq)),
4020 		DECLARE_UVERBS_WRITE_EX(
4021 			IB_USER_VERBS_EX_CMD_DESTROY_WQ,
4022 			ib_uverbs_ex_destroy_wq,
4023 			UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_destroy_wq,
4024 					     wq_handle,
4025 					     struct ib_uverbs_ex_destroy_wq_resp,
4026 					     reserved),
4027 			UAPI_DEF_METHOD_NEEDS_FN(destroy_wq)),
4028 		DECLARE_UVERBS_WRITE_EX(
4029 			IB_USER_VERBS_EX_CMD_MODIFY_WQ,
4030 			ib_uverbs_ex_modify_wq,
4031 			UAPI_DEF_WRITE_I_EX(struct ib_uverbs_ex_modify_wq,
4032 					    curr_wq_state),
4033 			UAPI_DEF_METHOD_NEEDS_FN(modify_wq))),
4034 
4035 	DECLARE_UVERBS_OBJECT(
4036 		UVERBS_OBJECT_SRQ,
4037 		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_SRQ,
4038 				     ib_uverbs_create_srq,
4039 				     UAPI_DEF_WRITE_UDATA_IO(
4040 					     struct ib_uverbs_create_srq,
4041 					     struct ib_uverbs_create_srq_resp),
4042 				     UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
4043 		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_XSRQ,
4044 				     ib_uverbs_create_xsrq,
4045 				     UAPI_DEF_WRITE_UDATA_IO(
4046 					     struct ib_uverbs_create_xsrq,
4047 					     struct ib_uverbs_create_srq_resp),
4048 				     UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
4049 		DECLARE_UVERBS_WRITE(
4050 			IB_USER_VERBS_CMD_DESTROY_SRQ,
4051 			ib_uverbs_destroy_srq,
4052 			UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_srq,
4053 					  struct ib_uverbs_destroy_srq_resp),
4054 			UAPI_DEF_METHOD_NEEDS_FN(destroy_srq)),
4055 		DECLARE_UVERBS_WRITE(
4056 			IB_USER_VERBS_CMD_MODIFY_SRQ,
4057 			ib_uverbs_modify_srq,
4058 			UAPI_DEF_WRITE_UDATA_I(struct ib_uverbs_modify_srq),
4059 			UAPI_DEF_METHOD_NEEDS_FN(modify_srq)),
4060 		DECLARE_UVERBS_WRITE(
4061 			IB_USER_VERBS_CMD_POST_SRQ_RECV,
4062 			ib_uverbs_post_srq_recv,
4063 			UAPI_DEF_WRITE_IO(struct ib_uverbs_post_srq_recv,
4064 					  struct ib_uverbs_post_srq_recv_resp),
4065 			UAPI_DEF_METHOD_NEEDS_FN(post_srq_recv)),
4066 		DECLARE_UVERBS_WRITE(
4067 			IB_USER_VERBS_CMD_QUERY_SRQ,
4068 			ib_uverbs_query_srq,
4069 			UAPI_DEF_WRITE_IO(struct ib_uverbs_query_srq,
4070 					  struct ib_uverbs_query_srq_resp),
4071 			UAPI_DEF_METHOD_NEEDS_FN(query_srq))),
4072 
4073 	DECLARE_UVERBS_OBJECT(
4074 		UVERBS_OBJECT_XRCD,
4075 		DECLARE_UVERBS_WRITE(
4076 			IB_USER_VERBS_CMD_CLOSE_XRCD,
4077 			ib_uverbs_close_xrcd,
4078 			UAPI_DEF_WRITE_I(struct ib_uverbs_close_xrcd),
4079 			UAPI_DEF_METHOD_NEEDS_FN(dealloc_xrcd)),
4080 		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_QP,
4081 				     ib_uverbs_open_qp,
4082 				     UAPI_DEF_WRITE_UDATA_IO(
4083 					     struct ib_uverbs_open_qp,
4084 					     struct ib_uverbs_create_qp_resp)),
4085 		DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_XRCD,
4086 				     ib_uverbs_open_xrcd,
4087 				     UAPI_DEF_WRITE_UDATA_IO(
4088 					     struct ib_uverbs_open_xrcd,
4089 					     struct ib_uverbs_open_xrcd_resp),
4090 				     UAPI_DEF_METHOD_NEEDS_FN(alloc_xrcd))),
4091 
4092 	{},
4093 };
4094