1 /*
2  * Copyright (c) 2017, Mellanox Technologies inc.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <rdma/uverbs_std_types.h>
34 #include <rdma/ib_user_verbs.h>
35 #include <rdma/ib_verbs.h>
36 #include <linux/file.h>
37 #include "rdma_core.h"
38 #include "uverbs.h"
39 
40 static int uverbs_free_ah(struct ib_uobject *uobject,
41 			  enum rdma_remove_reason why,
42 			  struct uverbs_attr_bundle *attrs)
43 {
44 	return ib_destroy_ah_user((struct ib_ah *)uobject->object,
45 				  RDMA_DESTROY_AH_SLEEPABLE,
46 				  &attrs->driver_udata);
47 }
48 
49 static int uverbs_free_flow(struct ib_uobject *uobject,
50 			    enum rdma_remove_reason why,
51 			    struct uverbs_attr_bundle *attrs)
52 {
53 	struct ib_flow *flow = (struct ib_flow *)uobject->object;
54 	struct ib_uflow_object *uflow =
55 		container_of(uobject, struct ib_uflow_object, uobject);
56 	struct ib_qp *qp = flow->qp;
57 	int ret;
58 
59 	ret = flow->device->destroy_flow(flow);
60 	if (!ret) {
61 		if (qp)
62 			atomic_dec(&qp->usecnt);
63 		ib_uverbs_flow_resources_free(uflow->resources);
64 	}
65 
66 	return ret;
67 }
68 
69 static int uverbs_free_mw(struct ib_uobject *uobject,
70 			  enum rdma_remove_reason why,
71 			  struct uverbs_attr_bundle *attrs)
72 {
73 	return uverbs_dealloc_mw((struct ib_mw *)uobject->object);
74 }
75 
/*
 * Type-cleanup callback for QP uobjects.  Handles multicast detachment,
 * destroys the QP, drops any XRCD reference, and releases queued async
 * events attached to the QP's uevent object.
 */
static int uverbs_free_qp(struct ib_uobject *uobject,
			  enum rdma_remove_reason why,
			  struct uverbs_attr_bundle *attrs)
{
	struct ib_qp *qp = uobject->object;
	struct ib_uqp_object *uqp =
		container_of(uobject, struct ib_uqp_object, uevent.uobject);
	int ret;

	/*
	 * If this is a user triggered destroy then do not allow destruction
	 * until the user cleans up all the mcast bindings. Unlike in other
	 * places we forcibly clean up the mcast attachments for !DESTROY
	 * because the mcast attaches are not uobjects and will not be
	 * destroyed by anything else during cleanup processing.
	 */
	if (why == RDMA_REMOVE_DESTROY) {
		if (!list_empty(&uqp->mcast_list))
			return -EBUSY;
	} else if (qp == qp->real_qp) {
		/* Only the real QP (not a shared handle) owns the attachments. */
		ib_uverbs_detach_umcast(qp, uqp);
	}

	ret = ib_destroy_qp_user(qp, &attrs->driver_udata);
	/* Retryable failure: keep the uobject so cleanup can run again later. */
	if (ib_is_destroy_retryable(ret, why, uobject))
		return ret;

	if (uqp->uxrcd)
		atomic_dec(&uqp->uxrcd->refcnt);

	ib_uverbs_release_uevent(&uqp->uevent);
	return ret;
}
109 
110 static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
111 				   enum rdma_remove_reason why,
112 				   struct uverbs_attr_bundle *attrs)
113 {
114 	struct ib_rwq_ind_table *rwq_ind_tbl = uobject->object;
115 	struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
116 	int ret;
117 
118 	ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
119 	if (ib_is_destroy_retryable(ret, why, uobject))
120 		return ret;
121 
122 	kfree(ind_tbl);
123 	return ret;
124 }
125 
126 static int uverbs_free_wq(struct ib_uobject *uobject,
127 			  enum rdma_remove_reason why,
128 			  struct uverbs_attr_bundle *attrs)
129 {
130 	struct ib_wq *wq = uobject->object;
131 	struct ib_uwq_object *uwq =
132 		container_of(uobject, struct ib_uwq_object, uevent.uobject);
133 	int ret;
134 
135 	ret = ib_destroy_wq(wq, &attrs->driver_udata);
136 	if (ib_is_destroy_retryable(ret, why, uobject))
137 		return ret;
138 
139 	ib_uverbs_release_uevent(&uwq->uevent);
140 	return ret;
141 }
142 
143 static int uverbs_free_srq(struct ib_uobject *uobject,
144 			   enum rdma_remove_reason why,
145 			   struct uverbs_attr_bundle *attrs)
146 {
147 	struct ib_srq *srq = uobject->object;
148 	struct ib_uevent_object *uevent =
149 		container_of(uobject, struct ib_uevent_object, uobject);
150 	enum ib_srq_type  srq_type = srq->srq_type;
151 	int ret;
152 
153 	ret = ib_destroy_srq_user(srq, &attrs->driver_udata);
154 	if (ib_is_destroy_retryable(ret, why, uobject))
155 		return ret;
156 
157 	if (srq_type == IB_SRQT_XRC) {
158 		struct ib_usrq_object *us =
159 			container_of(uevent, struct ib_usrq_object, uevent);
160 
161 		atomic_dec(&us->uxrcd->refcnt);
162 	}
163 
164 	ib_uverbs_release_uevent(uevent);
165 	return ret;
166 }
167 
/*
 * Type-cleanup callback for XRCD uobjects.  Destruction is refused while
 * the refcnt is held; the actual dealloc runs under the per-device
 * xrcd_tree_mutex (NOTE(review): presumably protecting the device's XRCD
 * inode tree that ib_uverbs_dealloc_xrcd updates — confirm in uverbs.h).
 */
static int uverbs_free_xrcd(struct ib_uobject *uobject,
			    enum rdma_remove_reason why,
			    struct uverbs_attr_bundle *attrs)
{
	struct ib_xrcd *xrcd = uobject->object;
	struct ib_uxrcd_object *uxrcd =
		container_of(uobject, struct ib_uxrcd_object, uobject);
	int ret;

	/* Fails (possibly retryably) while QPs/SRQs still reference the XRCD. */
	ret = ib_destroy_usecnt(&uxrcd->refcnt, why, uobject);
	if (ret)
		return ret;

	mutex_lock(&attrs->ufile->device->xrcd_tree_mutex);
	ret = ib_uverbs_dealloc_xrcd(uobject, xrcd, why, attrs);
	mutex_unlock(&attrs->ufile->device->xrcd_tree_mutex);

	return ret;
}
187 
188 static int uverbs_free_pd(struct ib_uobject *uobject,
189 			  enum rdma_remove_reason why,
190 			  struct uverbs_attr_bundle *attrs)
191 {
192 	struct ib_pd *pd = uobject->object;
193 	int ret;
194 
195 	ret = ib_destroy_usecnt(&pd->usecnt, why, uobject);
196 	if (ret)
197 		return ret;
198 
199 	ib_dealloc_pd_user(pd, &attrs->driver_udata);
200 	return 0;
201 }
202 
/*
 * Tear down an event queue: mark it closed, wake all sleepers, then free
 * every queued event.  is_closed is set under the queue lock so producers
 * that honour the contract below cannot enqueue after we start draining;
 * the wakeup and SIGIO notifications are issued after dropping the lock.
 */
void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue)
{
	struct ib_uverbs_event *entry, *tmp;

	spin_lock_irq(&event_queue->lock);
	/*
	 * The user must ensure that no new items are added to the event_list
	 * once is_closed is set.
	 */
	event_queue->is_closed = 1;
	spin_unlock_irq(&event_queue->lock);
	/* Wake poll()/read() waiters and fasync (SIGIO) subscribers. */
	wake_up_interruptible(&event_queue->poll_wait);
	kill_fasync(&event_queue->async_queue, SIGIO, POLL_IN);

	spin_lock_irq(&event_queue->lock);
	list_for_each_entry_safe(entry, tmp, &event_queue->event_list, list) {
		/* Entries with a counter are also linked on a per-object list. */
		if (entry->counter)
			list_del(&entry->obj_list);
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_irq(&event_queue->lock);
}
226 
227 static int
228 uverbs_completion_event_file_destroy_uobj(struct ib_uobject *uobj,
229 					  enum rdma_remove_reason why)
230 {
231 	struct ib_uverbs_completion_event_file *file =
232 		container_of(uobj, struct ib_uverbs_completion_event_file,
233 			     uobj);
234 
235 	ib_uverbs_free_event_queue(&file->ev_queue);
236 	return 0;
237 }
238 
/*
 * Default no-op method handler, exported for use by methods (such as the
 * *_DESTROY declarations below) whose entire effect comes from their
 * declared attributes — NOTE(review): presumably the UVERBS_ACCESS_DESTROY
 * attribute processing performs the actual removal; confirm in rdma_core.
 */
int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs)
{
	return 0;
}
EXPORT_SYMBOL(uverbs_destroy_def_handler);
244 
/*
 * Static declarations of the standard uverbs object types and their ioctl
 * DESTROY methods.  Each DECLARE_UVERBS_NAMED_OBJECT() binds an object to
 * its cleanup callback; the *_SZ allocators embed the uobject inside a
 * larger per-type structure (uqp/usrq/uflow/uwq/uxrcd objects above).
 */

/* Completion channel is an FD-backed object, not an IDR handle. */
DECLARE_UVERBS_NAMED_OBJECT(
	UVERBS_OBJECT_COMP_CHANNEL,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_completion_event_file),
			     uverbs_completion_event_file_destroy_uobj,
			     &uverbs_event_fops,
			     "[infinibandevent]",
			     FMODE_READ));

DECLARE_UVERBS_NAMED_OBJECT(
	UVERBS_OBJECT_QP,
	UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), uverbs_free_qp));

/* Each DESTROY method takes a single mandatory object-handle attribute. */
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	UVERBS_METHOD_MW_DESTROY,
	UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_MW_HANDLE,
			UVERBS_OBJECT_MW,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MW,
			    UVERBS_TYPE_ALLOC_IDR(uverbs_free_mw),
			    &UVERBS_METHOD(UVERBS_METHOD_MW_DESTROY));

DECLARE_UVERBS_NAMED_OBJECT(
	UVERBS_OBJECT_SRQ,
	UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object),
				 uverbs_free_srq));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	UVERBS_METHOD_AH_DESTROY,
	UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_AH_HANDLE,
			UVERBS_OBJECT_AH,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_AH,
			    UVERBS_TYPE_ALLOC_IDR(uverbs_free_ah),
			    &UVERBS_METHOD(UVERBS_METHOD_AH_DESTROY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	UVERBS_METHOD_FLOW_DESTROY,
	UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_HANDLE,
			UVERBS_OBJECT_FLOW,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	UVERBS_OBJECT_FLOW,
	UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uflow_object),
				 uverbs_free_flow),
			    &UVERBS_METHOD(UVERBS_METHOD_FLOW_DESTROY));

DECLARE_UVERBS_NAMED_OBJECT(
	UVERBS_OBJECT_WQ,
	UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), uverbs_free_wq));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	UVERBS_METHOD_RWQ_IND_TBL_DESTROY,
	UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_RWQ_IND_TBL_HANDLE,
			UVERBS_OBJECT_RWQ_IND_TBL,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL,
			    UVERBS_TYPE_ALLOC_IDR(uverbs_free_rwq_ind_tbl),
			    &UVERBS_METHOD(UVERBS_METHOD_RWQ_IND_TBL_DESTROY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	UVERBS_METHOD_XRCD_DESTROY,
	UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_XRCD_HANDLE,
			UVERBS_OBJECT_XRCD,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	UVERBS_OBJECT_XRCD,
	UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object),
				 uverbs_free_xrcd),
			    &UVERBS_METHOD(UVERBS_METHOD_XRCD_DESTROY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	UVERBS_METHOD_PD_DESTROY,
	UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_PD_HANDLE,
			UVERBS_OBJECT_PD,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_PD,
			    UVERBS_TYPE_ALLOC_IDR(uverbs_free_pd),
			    &UVERBS_METHOD(UVERBS_METHOD_PD_DESTROY));
335 
336 const struct uapi_definition uverbs_def_obj_intf[] = {
337 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_PD,
338 				      UAPI_DEF_OBJ_NEEDS_FN(dealloc_pd)),
339 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_COMP_CHANNEL,
340 				      UAPI_DEF_OBJ_NEEDS_FN(dealloc_pd)),
341 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_QP,
342 				      UAPI_DEF_OBJ_NEEDS_FN(destroy_qp)),
343 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_AH,
344 				      UAPI_DEF_OBJ_NEEDS_FN(destroy_ah)),
345 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_MW,
346 				      UAPI_DEF_OBJ_NEEDS_FN(dealloc_mw)),
347 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_SRQ,
348 				      UAPI_DEF_OBJ_NEEDS_FN(destroy_srq)),
349 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_FLOW,
350 				      UAPI_DEF_OBJ_NEEDS_FN(destroy_flow)),
351 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_WQ,
352 				      UAPI_DEF_OBJ_NEEDS_FN(destroy_wq)),
353 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
354 		UVERBS_OBJECT_RWQ_IND_TBL,
355 		UAPI_DEF_OBJ_NEEDS_FN(destroy_rwq_ind_table)),
356 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_XRCD,
357 				      UAPI_DEF_OBJ_NEEDS_FN(dealloc_xrcd)),
358 	{}
359 };
360