/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/gfp.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/transobj.h>

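/*
 * Look up the resource registered under "rsn" in the QP table and take
 * a reference on it.  The caller is responsible for dropping the
 * reference with mlx5_core_put_rsc().  Returns NULL (with a warning)
 * when no resource is registered under that number.
 */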
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
						 u32 rsn)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	struct mlx5_core_rsc_common *common;

	spin_lock(&table->lock);

	common = radix_tree_lookup(&table->tree, rsn);
	if (common)
		atomic_inc(&common->refcount);

	spin_unlock(&table->lock);

	if (!common) {
		mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
			       rsn);
		return NULL;
	}
	return common;
}

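/*
 * Drop a reference on a resource; the last reference signals the
 * "free" completion that the destroy paths wait on.
 */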
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
	if (atomic_dec_and_test(&common->refcount))
		complete(&common->free);
}

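/*
 * Dispatch an asynchronous hardware event to the resource identified
 * by "rsn".  Only QP events are forwarded here; other resource types
 * do not take events through this path.
 */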
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
	struct mlx5_core_qp *qp;

	if (!common)
		return;

	switch (common->res) {
	case MLX5_RES_QP:
		qp = (struct mlx5_core_qp *)common;
		qp->event(qp, event_type);
		break;

	default:
		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
	}

	mlx5_core_put_rsc(common);
}

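/*
 * Register a QP, RQ or SQ in the shared resource table.  The resource
 * type is folded into the upper bits of the radix tree key so that QP,
 * RQ and SQ numbers can coexist in one tree.  The initial reference is
 * dropped by destroy_qprqsq_common().
 */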
static int create_qprqsq_common(struct mlx5_core_dev *dev,
				struct mlx5_core_qp *qp, int rsc_type)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	int err;

	qp->common.res = rsc_type;

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, qp->qpn | (rsc_type << 24), qp);
	spin_unlock_irq(&table->lock);
	if (err)
		return err;

	atomic_set(&qp->common.refcount, 1);
	init_completion(&qp->common.free);
	qp->pid = curthread->td_proc->p_pid;

	return 0;
}

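/*
 * Unregister a QP, RQ or SQ from the resource table and wait until
 * every outstanding reference (e.g. an event handler that looked the
 * resource up) has been dropped.
 */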
static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *qp, int rsc_type)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree, qp->qpn | (rsc_type << 24));
	spin_unlock_irqrestore(&table->lock, flags);

	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
	wait_for_completion(&qp->common.free);
}

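/*
 * Execute the CREATE_QP command and register the new QP in the
 * resource table.  If registration fails, the QP is torn down again
 * with DESTROY_QP so no firmware object is leaked.
 */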
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
	int err;

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	qp->uid = MLX5_GET(create_qp_in, in, uid);
	qp->qpn = MLX5_GET(create_qp_out, out, qpn);
	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

	err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
	if (err)
		goto err_cmd;

	atomic_inc(&dev->num_qps);

	return 0;

err_cmd:
	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, din, uid, qp->uid);
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);

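/*
 * Unregister the QP first so that no new references can be taken,
 * then issue the DESTROY_QP command to firmware.
 */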
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp)
{
	u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)]   = {0};
	int err;

	destroy_qprqsq_common(dev, qp, MLX5_RES_QP);

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, in, uid, qp->uid);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	atomic_dec(&dev->num_qps);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);

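/* Input/output command buffers for a single firmware command. */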
struct mbox_info {
	u32 *in;
	u32 *out;
	int inlen;
	int outlen;
};

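/*
 * Allocate both command buffers.  If either allocation fails, both are
 * freed (kfree() tolerates NULL) and -ENOMEM is returned.
 */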
static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
{
	mbox->inlen  = inlen;
	mbox->outlen = outlen;
	mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
	mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
	if (!mbox->in || !mbox->out) {
		kfree(mbox->in);
		kfree(mbox->out);
		return -ENOMEM;
	}

	return 0;
}

static void mbox_free(struct mbox_info *mbox)
{
	kfree(mbox->in);
	kfree(mbox->out);
}

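/*
 * Allocate and fill the command mailbox for a QP state transition.
 * Each transition opcode has its own input/output layout, so the
 * buffers are sized per opcode; transitions to RESET and ERROR take no
 * QP context, while the remaining transitions copy in the caller's QPC
 * and optional-parameter mask.
 */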
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
				u32 opt_param_mask, void *qpc,
				struct mbox_info *mbox, u16 uid)
{
	mbox->out = NULL;
	mbox->in = NULL;

#define MBOX_ALLOC(mbox, typ)  \
	mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

#define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid)                            \
	do {                                                                   \
		MLX5_SET(typ##_in, in, opcode, _opcode);                       \
		MLX5_SET(typ##_in, in, qpn, _qpn);                             \
		MLX5_SET(typ##_in, in, uid, _uid);                             \
	} while (0)

#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid)          \
	do {                                                                   \
		MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid);                   \
		MLX5_SET(typ##_in, in, opt_param_mask, _opt_p);                \
		memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc,                  \
		       MLX5_ST_SZ_BYTES(qpc));                                 \
	} while (0)

	switch (opcode) {
	/* 2RST & 2ERR */
	case MLX5_CMD_OP_2RST_QP:
		if (MBOX_ALLOC(mbox, qp_2rst))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		if (MBOX_ALLOC(mbox, qp_2err))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid);
		break;

	/* MODIFY with QPC */
	case MLX5_CMD_OP_RST2INIT_QP:
		if (MBOX_ALLOC(mbox, rst2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (MBOX_ALLOC(mbox, init2rtr_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		if (MBOX_ALLOC(mbox, rtr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (MBOX_ALLOC(mbox, rts2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		if (MBOX_ALLOC(mbox, init2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	default:
		mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
			opcode, qpn);
		return -EINVAL;
	}

	return 0;
}

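/*
 * Drive a QP through a state transition.  The mailbox is sized and
 * filled according to the transition opcode, the command is executed,
 * and the mailbox is freed regardless of the outcome.
 */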
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
			u32 opt_param_mask, void *qpc,
			struct mlx5_core_qp *qp)
{
	struct mbox_info mbox;
	int err;

	err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
				   opt_param_mask, qpc, &mbox, qp->uid);
	if (err)
		return err;

	err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
	mbox_free(&mbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);

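/* Initialize the per-device table that tracks QPs, RQs, SQs and DCTs. */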
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}

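/*
 * Nothing to tear down here: table entries are removed as the
 * individual resources are destroyed.
 */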
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
}

int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};

	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
	MLX5_SET(query_qp_in, in, qpn, qp->qpn);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
	int err;

	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);

int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};

	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);

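/*
 * Execute the CREATE_DCT command and register the new DCT in the
 * resource table.  On a registration failure the DCT is destroyed
 * again so no firmware object is leaked.
 */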
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
			 struct mlx5_core_dct *dct,
			 u32 *in, int inlen,
			 u32 *out, int outlen)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
	u32 din[MLX5_ST_SZ_DW(destroy_dct_in)]	 = {0};
	int err;

	init_completion(&dct->drained);
	MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);

	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
	if (err) {
		mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
		return err;
	}

	dct->dctn = MLX5_GET(create_dct_out, out, dctn);
	dct->uid = MLX5_GET(create_dct_in, in, uid);

	dct->common.res = MLX5_RES_DCT;
	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, dct->dctn, dct);
	spin_unlock_irq(&table->lock);
	if (err) {
		mlx5_core_warn(dev, "failed to insert DCT 0x%x, err %d\n",
			       dct->dctn, err);
		goto err_cmd;
	}

	dct->pid = curthread->td_proc->p_pid;
	atomic_set(&dct->common.refcount, 1);
	init_completion(&dct->common.free);

	return 0;

err_cmd:
	MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
	MLX5_SET(destroy_dct_in, din, dctn, dct->dctn);
	MLX5_SET(destroy_dct_in, din, uid, dct->uid);
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_dct);

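/*
 * Ask firmware to stop accepting new connections on the DCT; the
 * "drained" completion is signaled once the drain finishes.
 */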
static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
			       struct mlx5_core_dct *dct)
{
	u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(drain_dct_in)]   = {0};

	MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
	MLX5_SET(drain_dct_in, in, dctn, dct->dctn);
	MLX5_SET(drain_dct_in, in, uid, dct->uid);
	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
			     (void *)&out, sizeof(out));
}

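/*
 * Destroy a DCT: drain it, wait until the drain completes, remove it
 * from the resource table, wait for the last reference to go away and
 * finally issue DESTROY_DCT.  If the drain command fails while the
 * device is in internal error state, teardown proceeds anyway.
 */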
int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
			  struct mlx5_core_dct *dct)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_dct_in)]	= {0};
	unsigned long flags;
	int err;

	err = mlx5_core_drain_dct(dev, dct);
	if (err) {
		if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
			goto free_dct;
		} else {
			mlx5_core_warn(dev, "failed drain DCT 0x%x\n", dct->dctn);
			return err;
		}
	}

	wait_for_completion(&dct->drained);

free_dct:
	spin_lock_irqsave(&table->lock, flags);
	if (radix_tree_delete(&table->tree, dct->dctn) != dct)
		mlx5_core_warn(dev, "dct delete differs\n");
	spin_unlock_irqrestore(&table->lock, flags);

	if (atomic_dec_and_test(&dct->common.refcount))
		complete(&dct->common.free);
	wait_for_completion(&dct->common.free);

	MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
	MLX5_SET(destroy_dct_in, in, dctn, dct->dctn);
	MLX5_SET(destroy_dct_in, in, uid, dct->uid);

	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
			     (void *)&out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);

int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
			u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};

	MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
	MLX5_SET(query_dct_in, in, dctn, dct->dctn);

	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
			     (void *)out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_dct_query);

int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct)
{
	u32 out[MLX5_ST_SZ_DW(arm_dct_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(arm_dct_in)]   = {0};

	MLX5_SET(arm_dct_in, in, opcode, MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_SET(arm_dct_in, in, dctn, dct->dctn);

	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
			     (void *)&out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_arm_dct);

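/* Issue DESTROY_RQ for an RQ created through the tracked path. */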
static void destroy_rq_tracked(struct mlx5_core_dev *dev, u32 rqn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rq_in)]   = {};
	u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {};

	MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
	MLX5_SET(destroy_rq_in, in, rqn, rqn);
	MLX5_SET(destroy_rq_in, in, uid, uid);
	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

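/*
 * Create an RQ and register it in the resource table ("tracked").  On
 * registration failure the RQ is destroyed again.
 */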
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq)
{
	int err;

	err = mlx5_core_create_rq(dev, in, inlen, &rq->qpn);
	if (err)
		return err;

	rq->uid = MLX5_GET(create_rq_in, in, uid);

	err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
	if (err)
		destroy_rq_tracked(dev, rq->qpn, rq->uid);

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_rq_tracked);

void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *rq)
{
	destroy_qprqsq_common(dev, rq, MLX5_RES_RQ);
	destroy_rq_tracked(dev, rq->qpn, rq->uid);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);

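/* SQ counterparts of the tracked RQ helpers above. */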
static void destroy_sq_tracked(struct mlx5_core_dev *dev, u32 sqn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_sq_in)]   = {};
	u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {};

	MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
	MLX5_SET(destroy_sq_in, in, sqn, sqn);
	MLX5_SET(destroy_sq_in, in, uid, uid);
	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq)
{
	int err;

	err = mlx5_core_create_sq(dev, in, inlen, &sq->qpn);
	if (err)
		return err;

	sq->uid = MLX5_GET(create_sq_in, in, uid);

	err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
	if (err)
		destroy_sq_tracked(dev, sq->qpn, sq->uid);

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_sq_tracked);

void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *sq)
{
	destroy_qprqsq_common(dev, sq, MLX5_RES_SQ);
	destroy_sq_tracked(dev, sq->qpn, sq->uid);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);