1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3  * Copyright (c) 2018, Mellanox Technologies inc.  All rights reserved.
4  */
5 
6 #include <linux/overflow.h>
7 #include <rdma/uverbs_std_types.h>
8 #include "rdma_core.h"
9 #include "uverbs.h"
10 #include <rdma/uverbs_ioctl.h>
11 #include <rdma/opa_addr.h>
12 #include <rdma/ib_cache.h>
13 
14 /*
15  * This ioctl method allows calling any defined write or write_ex
16  * handler. This essentially replaces the hdr/ex_hdr system with the ioctl
17  * marshalling, and brings the non-ex path into the same marshalling as the ex
18  * path.
19  */
/*
 * Look up the write-path method selected by the WRITE_CMD attribute, marshal
 * the core in/out buffers into the classic ucore udata, and invoke the
 * handler.  Returns 0 on success or a negative errno.
 */
static int UVERBS_HANDLER(UVERBS_METHOD_INVOKE_WRITE)(
	struct uverbs_attr_bundle *attrs)
{
	struct uverbs_api *uapi = attrs->ufile->device->uapi;
	const struct uverbs_api_write_method *method_elm;
	u32 cmd;
	int rc;

	/* The legacy write-command number selects which handler to run */
	rc = uverbs_get_const(&cmd, attrs, UVERBS_ATTR_WRITE_CMD);
	if (rc)
		return rc;

	method_elm = uapi_get_method(uapi, cmd);
	if (IS_ERR(method_elm))
		return PTR_ERR(method_elm);

	/* Present CORE_IN/CORE_OUT ioctl attributes as the hdr-style udata */
	uverbs_fill_udata(attrs, &attrs->ucore, UVERBS_ATTR_CORE_IN,
			  UVERBS_ATTR_CORE_OUT);

	/* The caller must supply at least the method's minimum req/resp */
	if (attrs->ucore.inlen < method_elm->req_size ||
	    attrs->ucore.outlen < method_elm->resp_size)
		return -ENOSPC;

	attrs->uobject = NULL;
	rc = method_elm->handler(attrs);
	/*
	 * If the handler attached a new uobject, finalize it; the !rc
	 * argument presumably selects commit-on-success vs abort-on-failure
	 * — confirm against uverbs_finalize_object().
	 */
	if (attrs->uobject)
		uverbs_finalize_object(attrs->uobject, UVERBS_ACCESS_NEW, true,
				       !rc, attrs);
	return rc;
}
50 
/*
 * Attribute schema for INVOKE_WRITE: a mandatory command number, optional
 * core input/output buffers, and driver-private (UHW) attributes.
 */
DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_INVOKE_WRITE,
			    UVERBS_ATTR_CONST_IN(UVERBS_ATTR_WRITE_CMD,
						 enum ib_uverbs_write_cmds,
						 UA_MANDATORY),
			    UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CORE_IN,
					       UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
					       UA_OPTIONAL),
			    UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CORE_OUT,
						UVERBS_ATTR_MIN_SIZE(0),
						UA_OPTIONAL),
			    UVERBS_ATTR_UHW());
62 
63 static uint32_t *
64 gather_objects_handle(struct ib_uverbs_file *ufile,
65 		      const struct uverbs_api_object *uapi_object,
66 		      struct uverbs_attr_bundle *attrs,
67 		      ssize_t out_len,
68 		      u64 *total)
69 {
70 	u64 max_count = out_len / sizeof(u32);
71 	struct ib_uobject *obj;
72 	u64 count = 0;
73 	u32 *handles;
74 
75 	/* Allocated memory that cannot page out where we gather
76 	 * all object ids under a spin_lock.
77 	 */
78 	handles = uverbs_zalloc(attrs, out_len);
79 	if (IS_ERR(handles))
80 		return handles;
81 
82 	spin_lock_irq(&ufile->uobjects_lock);
83 	list_for_each_entry(obj, &ufile->uobjects, list) {
84 		u32 obj_id = obj->id;
85 
86 		if (obj->uapi_object != uapi_object)
87 			continue;
88 
89 		if (count >= max_count)
90 			break;
91 
92 		handles[count] = obj_id;
93 		count++;
94 	}
95 	spin_unlock_irq(&ufile->uobjects_lock);
96 
97 	*total = count;
98 	return handles;
99 }
100 
101 static int UVERBS_HANDLER(UVERBS_METHOD_INFO_HANDLES)(
102 	struct uverbs_attr_bundle *attrs)
103 {
104 	const struct uverbs_api_object *uapi_object;
105 	ssize_t out_len;
106 	u64 total = 0;
107 	u16 object_id;
108 	u32 *handles;
109 	int ret;
110 
111 	out_len = uverbs_attr_get_len(attrs, UVERBS_ATTR_INFO_HANDLES_LIST);
112 	if (out_len <= 0 || (out_len % sizeof(u32) != 0))
113 		return -EINVAL;
114 
115 	ret = uverbs_get_const(&object_id, attrs, UVERBS_ATTR_INFO_OBJECT_ID);
116 	if (ret)
117 		return ret;
118 
119 	uapi_object = uapi_get_object(attrs->ufile->device->uapi, object_id);
120 	if (IS_ERR(uapi_object))
121 		return PTR_ERR(uapi_object);
122 
123 	handles = gather_objects_handle(attrs->ufile, uapi_object, attrs,
124 					out_len, &total);
125 	if (IS_ERR(handles))
126 		return PTR_ERR(handles);
127 
128 	ret = uverbs_copy_to(attrs, UVERBS_ATTR_INFO_HANDLES_LIST, handles,
129 			     sizeof(u32) * total);
130 	if (ret)
131 		goto err;
132 
133 	ret = uverbs_copy_to(attrs, UVERBS_ATTR_INFO_TOTAL_HANDLES, &total,
134 			     sizeof(total));
135 err:
136 	return ret;
137 }
138 
/*
 * copy_port_attr_to_resp - translate core port attributes into the uverbs
 * query_port response ABI.
 * @attr: attributes as returned by ib_query_port()
 * @resp: userspace-visible response structure to fill
 * @ib_dev: device owning the port
 * @port_num: port being queried
 */
void copy_port_attr_to_resp(struct ib_port_attr *attr,
			    struct ib_uverbs_query_port_resp *resp,
			    struct ib_device *ib_dev, u8 port_num)
{
	resp->state = attr->state;
	resp->max_mtu = attr->max_mtu;
	resp->active_mtu = attr->active_mtu;
	resp->gid_tbl_len = attr->gid_tbl_len;
	resp->port_cap_flags = make_port_cap_flags(attr);
	resp->max_msg_sz = attr->max_msg_sz;
	resp->bad_pkey_cntr = attr->bad_pkey_cntr;
	resp->qkey_viol_cntr = attr->qkey_viol_cntr;
	resp->pkey_tbl_len = attr->pkey_tbl_len;

	if (rdma_is_grh_required(ib_dev, port_num))
		resp->flags |= IB_UVERBS_QPF_GRH_REQUIRED;

	/* OPA ports carry extended LIDs; fold them into the 16-bit IB fields */
	if (rdma_cap_opa_ah(ib_dev, port_num)) {
		resp->lid = OPA_TO_IB_UCAST_LID(attr->lid);
		resp->sm_lid = OPA_TO_IB_UCAST_LID(attr->sm_lid);
	} else {
		resp->lid = ib_lid_cpu16(attr->lid);
		resp->sm_lid = ib_lid_cpu16(attr->sm_lid);
	}

	resp->lmc = attr->lmc;
	resp->max_vl_num = attr->max_vl_num;
	resp->sm_sl = attr->sm_sl;
	resp->subnet_timeout = attr->subnet_timeout;
	resp->init_type_reply = attr->init_type_reply;
	resp->active_width = attr->active_width;
	/* This ABI needs to be extended to provide any speed more than IB_SPEED_NDR */
	resp->active_speed = min_t(u16, attr->active_speed, IB_SPEED_NDR);
	resp->phys_state = attr->phys_state;
	resp->link_layer = rdma_port_get_link_layer(ib_dev, port_num);
}
175 
/*
 * Query a port and return the extended response: the legacy response layout
 * plus port_cap_flags2 and the untruncated active speed.
 */
static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_device *ib_dev;
	struct ib_port_attr attr = {};
	struct ib_uverbs_query_port_resp_ex resp = {};
	struct ib_ucontext *ucontext;
	int ret;
	u8 port_num;

	/* The device is reached through the caller's ucontext */
	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);
	ib_dev = ucontext->device;

	/* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */
	if (!ib_dev->ops.query_port)
		return -EOPNOTSUPP;

	ret = uverbs_get_const(&port_num, attrs,
			       UVERBS_ATTR_QUERY_PORT_PORT_NUM);
	if (ret)
		return ret;

	ret = ib_query_port(ib_dev, port_num, &attr);
	if (ret)
		return ret;

	copy_port_attr_to_resp(&attr, &resp.legacy_resp, ib_dev, port_num);
	resp.port_cap_flags2 = attr.port_cap_flags2;
	/* Unlike the legacy field, this one is not clamped to IB_SPEED_NDR */
	resp.active_speed_ex = attr.active_speed;

	return uverbs_copy_to_struct_or_zero(attrs, UVERBS_ATTR_QUERY_PORT_RESP,
					     &resp, sizeof(resp));
}
211 
/*
 * Create the ucontext for this ufile.  The completion-vector count and core
 * capability mask are copied out before the context is allocated; if the
 * driver-side init fails the freshly allocated context is rolled back.
 */
static int UVERBS_HANDLER(UVERBS_METHOD_GET_CONTEXT)(
	struct uverbs_attr_bundle *attrs)
{
	u32 num_comp = attrs->ufile->device->num_comp_vectors;
	u64 core_support = IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS;
	int ret;

	ret = uverbs_copy_to(attrs, UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
			     &num_comp, sizeof(num_comp));
	if (IS_UVERBS_COPY_ERR(ret))
		return ret;

	ret = uverbs_copy_to(attrs, UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
			     &core_support, sizeof(core_support));
	if (IS_UVERBS_COPY_ERR(ret))
		return ret;

	ret = ib_alloc_ucontext(attrs);
	if (ret)
		return ret;
	ret = ib_init_ucontext(attrs);
	if (ret) {
		/* Undo ib_alloc_ucontext(); the context was never published */
		kfree(attrs->context);
		attrs->context = NULL;
		return ret;
	}
	return 0;
}
240 
241 static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_CONTEXT)(
242 	struct uverbs_attr_bundle *attrs)
243 {
244 	u64 core_support = IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS;
245 	struct ib_ucontext *ucontext;
246 	struct ib_device *ib_dev;
247 	u32 num_comp;
248 	int ret;
249 
250 	ucontext = ib_uverbs_get_ucontext(attrs);
251 	if (IS_ERR(ucontext))
252 		return PTR_ERR(ucontext);
253 	ib_dev = ucontext->device;
254 
255 	if (!ib_dev->ops.query_ucontext)
256 		return -EOPNOTSUPP;
257 
258 	num_comp = attrs->ufile->device->num_comp_vectors;
259 	ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_CONTEXT_NUM_COMP_VECTORS,
260 			     &num_comp, sizeof(num_comp));
261 	if (IS_UVERBS_COPY_ERR(ret))
262 		return ret;
263 
264 	ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_CONTEXT_CORE_SUPPORT,
265 			     &core_support, sizeof(core_support));
266 	if (IS_UVERBS_COPY_ERR(ret))
267 		return ret;
268 
269 	return ucontext->device->ops.query_ucontext(ucontext, attrs);
270 }
271 
/*
 * Copy @num_entries gid entries to the userspace RESP_ENTRIES buffer,
 * honouring the caller-declared per-entry size so that older or newer
 * userspace ABI layouts interoperate with the kernel's struct.
 */
static int copy_gid_entries_to_user(struct uverbs_attr_bundle *attrs,
				    struct ib_uverbs_gid_entry *entries,
				    size_t num_entries, size_t user_entry_size)
{
	const struct uverbs_attr *attr;
	void __user *user_entries;
	size_t copy_len;
	int ret;
	int i;

	/* Fast path: layouts match, copy the whole array in one shot */
	if (user_entry_size == sizeof(*entries)) {
		ret = uverbs_copy_to(attrs,
				     UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
				     entries, sizeof(*entries) * num_entries);
		return ret;
	}

	/* Mismatched sizes: copy entry-by-entry, bounded by the smaller */
	copy_len = min_t(size_t, user_entry_size, sizeof(*entries));
	attr = uverbs_attr_get(attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES);
	if (IS_ERR(attr))
		return PTR_ERR(attr);

	user_entries = u64_to_user_ptr(attr->ptr_attr.data);
	for (i = 0; i < num_entries; i++) {
		if (copy_to_user(user_entries, entries, copy_len))
			return -EFAULT;

		/* Zero any tail the (larger) user layout expects */
		if (user_entry_size > sizeof(*entries)) {
			if (clear_user(user_entries + sizeof(*entries),
				       user_entry_size - sizeof(*entries)))
				return -EFAULT;
		}

		entries++;
		user_entries += user_entry_size;
	}

	/* Record that the output attribute was produced for this method */
	return uverbs_output_written(attrs,
				     UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES);
}
312 
/*
 * Dump the device's GID table to userspace.  The user declares its per-entry
 * struct size so the ABI can grow; the entry buffer's byte length determines
 * how many entries may be returned.
 */
static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_gid_entry *entries;
	struct ib_ucontext *ucontext;
	struct ib_device *ib_dev;
	size_t user_entry_size;
	ssize_t num_entries;
	int max_entries;
	u32 flags;
	int ret;

	/* No flags are defined yet; a zero mask rejects any set bit */
	ret = uverbs_get_flags32(&flags, attrs,
				 UVERBS_ATTR_QUERY_GID_TABLE_FLAGS, 0);
	if (ret)
		return ret;

	ret = uverbs_get_const(&user_entry_size, attrs,
			       UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE);
	if (ret)
		return ret;

	if (!user_entry_size)
		return -EINVAL;

	/* How many user-sized entries fit in the supplied output buffer */
	max_entries = uverbs_attr_ptr_get_array_size(
		attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
		user_entry_size);
	if (max_entries <= 0)
		return max_entries ?: -EINVAL;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);
	ib_dev = ucontext->device;

	/* Bundle-managed allocation; no explicit kfree on the error paths */
	entries = uverbs_kcalloc(attrs, max_entries, sizeof(*entries));
	if (IS_ERR(entries))
		return PTR_ERR(entries);

	num_entries = rdma_query_gid_table(ib_dev, entries, max_entries);
	/* NOTE(review): the real negative rc is collapsed into -EINVAL here */
	if (num_entries < 0)
		return -EINVAL;

	ret = copy_gid_entries_to_user(attrs, entries, num_entries,
				       user_entry_size);
	if (ret)
		return ret;

	ret = uverbs_copy_to(attrs,
			     UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES,
			     &num_entries, sizeof(num_entries));
	return ret;
}
367 
/*
 * Return a single GID table entry (gid, index, port, type and, when one is
 * attached, the netdev ifindex) for the requested port/index pair.
 */
static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_ENTRY)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_gid_entry entry = {};
	const struct ib_gid_attr *gid_attr;
	struct ib_ucontext *ucontext;
	struct ib_device *ib_dev;
	struct net_device *ndev;
	u32 gid_index;
	u32 port_num;
	u32 flags;
	int ret;

	/* No flags are defined yet; a zero mask rejects any set bit */
	ret = uverbs_get_flags32(&flags, attrs,
				 UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS, 0);
	if (ret)
		return ret;

	ret = uverbs_get_const(&port_num, attrs,
			       UVERBS_ATTR_QUERY_GID_ENTRY_PORT);
	if (ret)
		return ret;

	ret = uverbs_get_const(&gid_index, attrs,
			       UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX);
	if (ret)
		return ret;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);
	ib_dev = ucontext->device;

	if (!rdma_is_port_valid(ib_dev, port_num))
		return -EINVAL;

	/* Takes a reference on the gid_attr; released at "out" below */
	gid_attr = rdma_get_gid_attr(ib_dev, port_num, gid_index);
	if (IS_ERR(gid_attr))
		return PTR_ERR(gid_attr);

	memcpy(&entry.gid, &gid_attr->gid, sizeof(gid_attr->gid));
	entry.gid_index = gid_attr->index;
	entry.port_num = gid_attr->port_num;
	entry.gid_type = gid_attr->gid_type;

	/* The associated netdev is only stable under RCU */
	rcu_read_lock();
	ndev = rdma_read_gid_attr_ndev_rcu(gid_attr);
	if (IS_ERR(ndev)) {
		/* -ENODEV (no netdev) is fine: ifindex stays 0 */
		if (PTR_ERR(ndev) != -ENODEV) {
			ret = PTR_ERR(ndev);
			rcu_read_unlock();
			goto out;
		}
	} else {
		entry.netdev_ifindex = ndev->ifindex;
	}
	rcu_read_unlock();

	ret = uverbs_copy_to_struct_or_zero(
		attrs, UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY, &entry,
		sizeof(entry));
out:
	rdma_put_gid_attr(gid_attr);
	return ret;
}
433 
/* Schema for GET_CONTEXT: both outputs are optional, plus driver UHW attrs */
DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_GET_CONTEXT,
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
			    UVERBS_ATTR_TYPE(u32), UA_OPTIONAL),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
			    UVERBS_ATTR_TYPE(u64), UA_OPTIONAL),
	UVERBS_ATTR_UHW());
441 
/* Schema for QUERY_CONTEXT: same optional outputs as GET_CONTEXT, no UHW */
DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_QUERY_CONTEXT,
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_CONTEXT_NUM_COMP_VECTORS,
			    UVERBS_ATTR_TYPE(u32), UA_OPTIONAL),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_CONTEXT_CORE_SUPPORT,
			    UVERBS_ATTR_TYPE(u64), UA_OPTIONAL));
448 
/* Schema for INFO_HANDLES: object type in, handle list and count out */
DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_INFO_HANDLES,
	/* Also includes any device specific object ids */
	UVERBS_ATTR_CONST_IN(UVERBS_ATTR_INFO_OBJECT_ID,
			     enum uverbs_default_objects, UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_INFO_TOTAL_HANDLES,
			    UVERBS_ATTR_TYPE(u32), UA_OPTIONAL),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_INFO_HANDLES_LIST,
			    UVERBS_ATTR_MIN_SIZE(sizeof(u32)), UA_OPTIONAL));
458 
/* Schema for QUERY_PORT: response is valid up through active_speed_ex */
DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_QUERY_PORT,
	UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_PORT_PORT_NUM, u8, UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(
		UVERBS_ATTR_QUERY_PORT_RESP,
		UVERBS_ATTR_STRUCT(struct ib_uverbs_query_port_resp_ex,
				   active_speed_ex),
		UA_MANDATORY));
467 
/* Schema for QUERY_GID_TABLE: user declares its entry size; flags optional */
DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_QUERY_GID_TABLE,
	UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE, u64,
			     UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_QUERY_GID_TABLE_FLAGS, u32,
			     UA_OPTIONAL),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
			    UVERBS_ATTR_MIN_SIZE(0), UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES,
			    UVERBS_ATTR_TYPE(u64), UA_MANDATORY));
478 
/* Schema for QUERY_GID_ENTRY: response valid up through netdev_ifindex */
DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_QUERY_GID_ENTRY,
	UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_ENTRY_PORT, u32,
			     UA_MANDATORY),
	UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX, u32,
			     UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS, u32,
			     UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY,
			    UVERBS_ATTR_STRUCT(struct ib_uverbs_gid_entry,
					       netdev_ifindex),
			    UA_MANDATORY));
491 
/* All device-wide methods exposed on the DEVICE object */
DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DEVICE,
			      &UVERBS_METHOD(UVERBS_METHOD_GET_CONTEXT),
			      &UVERBS_METHOD(UVERBS_METHOD_INVOKE_WRITE),
			      &UVERBS_METHOD(UVERBS_METHOD_INFO_HANDLES),
			      &UVERBS_METHOD(UVERBS_METHOD_QUERY_PORT),
			      &UVERBS_METHOD(UVERBS_METHOD_QUERY_CONTEXT),
			      &UVERBS_METHOD(UVERBS_METHOD_QUERY_GID_TABLE),
			      &UVERBS_METHOD(UVERBS_METHOD_QUERY_GID_ENTRY));
500 
/* Definition table chaining the DEVICE object tree into the core uapi */
const struct uapi_definition uverbs_def_obj_device[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DEVICE),
	{},
};
505