xref: /freebsd/sys/dev/mlx5/mlx5_core/mlx5_qp.c (revision c697fb7f)
1 /*-
2  * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 
29 #include <linux/gfp.h>
30 #include <dev/mlx5/qp.h>
31 #include <dev/mlx5/driver.h>
32 
33 #include "mlx5_core.h"
34 
35 #include "transobj.h"
36 
37 static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
38 						 u32 rsn)
39 {
40 	struct mlx5_qp_table *table = &dev->priv.qp_table;
41 	struct mlx5_core_rsc_common *common;
42 
43 	spin_lock(&table->lock);
44 
45 	common = radix_tree_lookup(&table->tree, rsn);
46 	if (common)
47 		atomic_inc(&common->refcount);
48 
49 	spin_unlock(&table->lock);
50 
51 	if (!common) {
52 		mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
53 			       rsn);
54 		return NULL;
55 	}
56 	return common;
57 }
58 
59 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
60 {
61 	if (atomic_dec_and_test(&common->refcount))
62 		complete(&common->free);
63 }
64 
65 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
66 {
67 	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
68 	struct mlx5_core_qp *qp;
69 
70 	if (!common)
71 		return;
72 
73 	switch (common->res) {
74 	case MLX5_RES_QP:
75 		qp = (struct mlx5_core_qp *)common;
76 		qp->event(qp, event_type);
77 		break;
78 
79 	default:
80 		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
81 	}
82 
83 	mlx5_core_put_rsc(common);
84 }
85 
/*
 * Register a freshly created QP/RQ/SQ in the device resource table so
 * async events can be routed to it (see mlx5_rsc_event()).  The radix
 * key encodes the resource type in the top byte alongside the number.
 * Returns 0 on success or the radix_tree_insert() error.
 *
 * NOTE(review): the refcount/completion are initialized only after the
 * entry is visible in the tree; this matches the upstream driver, but
 * presumably relies on no async event arriving for a resource before
 * its create command completes — confirm against the firmware contract.
 */
static int create_qprqsq_common(struct mlx5_core_dev *dev,
				struct mlx5_core_qp *qp, int rsc_type)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	int err;

	qp->common.res = rsc_type;

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, qp->qpn | (rsc_type << 24), qp);
	spin_unlock_irq(&table->lock);
	if (err)
		return err;

	/* One reference owned by the creator; released in
	 * destroy_qprqsq_common(). */
	atomic_set(&qp->common.refcount, 1);
	init_completion(&qp->common.free);
	/* Record the creating process for diagnostics. */
	qp->pid = curthread->td_proc->p_pid;

	return 0;
}
106 
/*
 * Unregister a QP/RQ/SQ from the resource table and wait until every
 * outstanding reference (taken by concurrent event dispatch) has been
 * dropped.  After removal no new references can be taken, so dropping
 * the creator's reference and waiting on "free" guarantees the object
 * is quiescent when this returns.
 */
static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *qp, int rsc_type)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree, qp->qpn | (rsc_type << 24));
	spin_unlock_irqrestore(&table->lock, flags);

	/* Drop the creator's reference; complete() fires when the last
	 * event-side reference is also gone. */
	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
	wait_for_completion(&qp->common.free);
}
120 
121 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
122 			struct mlx5_core_qp *qp,
123 			u32 *in, int inlen)
124 {
125 	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
126 	u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
127 	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
128 	int err;
129 
130 	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
131 
132 	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
133 	if (err)
134 		return err;
135 
136 	qp->qpn = MLX5_GET(create_qp_out, out, qpn);
137 	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
138 
139 	err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
140 	if (err)
141 		goto err_cmd;
142 
143 	atomic_inc(&dev->num_qps);
144 
145 	return 0;
146 
147 err_cmd:
148 	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
149 	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
150 	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
151 	return err;
152 }
153 EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
154 
155 int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
156 			 struct mlx5_core_qp *qp)
157 {
158 	u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
159 	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)]   = {0};
160 	int err;
161 
162 
163 	destroy_qprqsq_common(dev, qp, MLX5_RES_QP);
164 
165 	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
166 	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
167 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
168 	if (err)
169 		return err;
170 
171 	atomic_dec(&dev->num_qps);
172 	return 0;
173 }
174 EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
175 
/*
 * Paired input/output mailboxes for one firmware command, together with
 * their allocated byte lengths.  Filled by mbox_alloc()/
 * modify_qp_mbox_alloc() and released with mbox_free().
 */
struct mbox_info {
	u32 *in;	/* command input mailbox (kzalloc'ed) */
	u32 *out;	/* command output mailbox (kzalloc'ed) */
	int inlen;	/* size of "in" in bytes */
	int outlen;	/* size of "out" in bytes */
};
182 
183 static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
184 {
185 	mbox->inlen  = inlen;
186 	mbox->outlen = outlen;
187 	mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
188 	mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
189 	if (!mbox->in || !mbox->out) {
190 		kfree(mbox->in);
191 		kfree(mbox->out);
192 		return -ENOMEM;
193 	}
194 
195 	return 0;
196 }
197 
/* Release both mailboxes; kfree(NULL) is a no-op, so partially
 * initialized structs are fine. */
static void mbox_free(struct mbox_info *mbox)
{
	kfree(mbox->in);
	kfree(mbox->out);
}
203 
/*
 * Allocate and populate the command mailboxes for a modify-QP state
 * transition.  The transition opcode selects the per-transition mailbox
 * layout; 2RST/2ERR carry only the qpn, while the other transitions
 * also embed the optional-parameter mask and a full QP context copied
 * from "qpc".  Returns 0 on success, -ENOMEM on allocation failure, or
 * -EINVAL for an unknown opcode.  On success the caller owns the
 * mailboxes and must release them with mbox_free().
 */
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
				u32 opt_param_mask, void *qpc,
				struct mbox_info *mbox)
{
	mbox->out = NULL;
	mbox->in = NULL;

	/* Allocate in/out mailboxes sized for the given command type. */
	#define MBOX_ALLOC(mbox, typ)  \
		mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

	/* Fill the fields common to all transitions: opcode and qpn. */
	#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
		MLX5_SET(typ##_in, in, opcode, _opcode); \
		MLX5_SET(typ##_in, in, qpn, _qpn)
	/* As above, plus the optional-parameter mask and the QP context. */
	#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
		MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
		MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
		memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))

	switch (opcode) {
	/* 2RST & 2ERR */
	case MLX5_CMD_OP_2RST_QP:
		if (MBOX_ALLOC(mbox, qp_2rst))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		if (MBOX_ALLOC(mbox, qp_2err))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
		break;

	/* MODIFY with QPC */
	case MLX5_CMD_OP_RST2INIT_QP:
		if (MBOX_ALLOC(mbox, rst2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (MBOX_ALLOC(mbox, init2rtr_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		if (MBOX_ALLOC(mbox, rtr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (MBOX_ALLOC(mbox, rts2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		if (MBOX_ALLOC(mbox, init2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	default:
		mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
			opcode, qpn);
		return -EINVAL;
	}

	return 0;
}
280 
281 
282 int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
283 			u32 opt_param_mask, void *qpc,
284 			struct mlx5_core_qp *qp)
285 {
286 	struct mbox_info mbox;
287 	int err;
288 
289 	err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
290 				   opt_param_mask, qpc, &mbox);
291 	if (err)
292 		return err;
293 
294 	err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
295 	mbox_free(&mbox);
296 	return err;
297 }
298 EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
299 
/*
 * Initialize the per-device QP resource table: zero it, set up its
 * lock and the radix tree used for rsn -> resource lookups.  Called
 * once during device bring-up.
 */
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	/* GFP_ATOMIC: insertions happen under the table spinlock. */
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
308 
/*
 * Counterpart of mlx5_init_qp_table().  Intentionally empty: all table
 * entries are removed as their owners are destroyed, so there is
 * nothing left to tear down here.
 */
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
}
312 
313 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
314 		       u32 *out, int outlen)
315 {
316 	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
317 
318 	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
319 	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
320 
321 	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
322 }
323 EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
324 
325 int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
326 {
327 	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
328 	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
329 	int err;
330 
331 	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
332 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
333 	if (!err)
334 		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
335 	return err;
336 }
337 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
338 
339 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
340 {
341 	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
342 	u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
343 
344 	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
345 	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
346 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
347 }
348 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
349 
350 int mlx5_core_create_dct(struct mlx5_core_dev *dev,
351 			 struct mlx5_core_dct *dct,
352 			 u32 *in)
353 {
354 	struct mlx5_qp_table *table = &dev->priv.qp_table;
355 	u32 out[MLX5_ST_SZ_DW(create_dct_out)]	 = {0};
356 	u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
357 	u32 din[MLX5_ST_SZ_DW(destroy_dct_in)]	 = {0};
358 	int inlen = MLX5_ST_SZ_BYTES(create_dct_in);
359 	int err;
360 
361 	init_completion(&dct->drained);
362 	MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
363 
364 	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
365 	if (err) {
366 		mlx5_core_warn(dev, "create DCT failed, ret %d", err);
367 		return err;
368 	}
369 
370 	dct->dctn = MLX5_GET(create_dct_out, out, dctn);
371 
372 	dct->common.res = MLX5_RES_DCT;
373 	spin_lock_irq(&table->lock);
374 	err = radix_tree_insert(&table->tree, dct->dctn, dct);
375 	spin_unlock_irq(&table->lock);
376 	if (err) {
377 		mlx5_core_warn(dev, "err %d", err);
378 		goto err_cmd;
379 	}
380 
381 	dct->pid = curthread->td_proc->p_pid;
382 	atomic_set(&dct->common.refcount, 1);
383 	init_completion(&dct->common.free);
384 
385 	return 0;
386 
387 err_cmd:
388 	MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
389 	MLX5_SET(destroy_dct_in, din, dctn, dct->dctn);
390 	mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
391 
392 	return err;
393 }
394 EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
395 
396 static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
397 			       struct mlx5_core_dct *dct)
398 {
399 	u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
400 	u32 in[MLX5_ST_SZ_DW(drain_dct_in)]   = {0};
401 
402 	MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
403 	MLX5_SET(drain_dct_in, in, dctn, dct->dctn);
404 	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
405 			     (void *)&out, sizeof(out));
406 }
407 
/*
 * Tear down a DCT: drain it, wait for the drained event, remove it from
 * the resource table, wait for outstanding references, then issue
 * DESTROY_DCT.  When the device is in internal-error state the drain
 * command is expected to fail; in that case the drain/wait is skipped
 * and the teardown proceeds so resources are still reclaimed.
 * Returns 0 on success or a command error.
 */
int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
			  struct mlx5_core_dct *dct)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_dct_in)]	= {0};
	unsigned long flags;
	int err;

	err = mlx5_core_drain_dct(dev, dct);
	if (err) {
		if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
			/* Firmware is gone; skip the drain handshake but
			 * still release software-side state. */
			goto free_dct;
		} else {
			mlx5_core_warn(dev, "failed drain DCT 0x%x\n", dct->dctn);
			return err;
		}
	}

	/* Signalled by the async DCT-drained event handler. */
	wait_for_completion(&dct->drained);

free_dct:
	spin_lock_irqsave(&table->lock, flags);
	if (radix_tree_delete(&table->tree, dct->dctn) != dct)
		mlx5_core_warn(dev, "dct delete differs\n");
	spin_unlock_irqrestore(&table->lock, flags);

	/* Drop the creator's reference and wait for any event-side
	 * references before destroying the firmware object. */
	if (atomic_dec_and_test(&dct->common.refcount))
		complete(&dct->common.free);
	wait_for_completion(&dct->common.free);

	MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
	MLX5_SET(destroy_dct_in, in, dctn, dct->dctn);

	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
			     (void *)&out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
446 
447 int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
448 			u32 *out, int outlen)
449 {
450 	u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
451 
452 	MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
453 	MLX5_SET(query_dct_in, in, dctn, dct->dctn);
454 
455 	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
456 			     (void *)out, outlen);
457 }
458 EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
459 
460 int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct)
461 {
462 	u32 out[MLX5_ST_SZ_DW(arm_dct_out)] = {0};
463 	u32 in[MLX5_ST_SZ_DW(arm_dct_in)]   = {0};
464 
465 	MLX5_SET(arm_dct_in, in, opcode, MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION);
466 	MLX5_SET(arm_dct_in, in, dctn, dct->dctn);
467 
468 	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
469 			     (void *)&out, sizeof(out));
470 }
471 EXPORT_SYMBOL_GPL(mlx5_core_arm_dct);
472 
473 int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
474 				struct mlx5_core_qp *rq)
475 {
476 	int err;
477 
478 	err = mlx5_core_create_rq(dev, in, inlen, &rq->qpn);
479 	if (err)
480 		return err;
481 
482 	err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
483 	if (err)
484 		mlx5_core_destroy_rq(dev, rq->qpn);
485 
486 	return err;
487 }
488 EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
489 
490 void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
491 				  struct mlx5_core_qp *rq)
492 {
493 	destroy_qprqsq_common(dev, rq, MLX5_RES_RQ);
494 	mlx5_core_destroy_rq(dev, rq->qpn);
495 }
496 EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
497 
498 int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
499 				struct mlx5_core_qp *sq)
500 {
501 	int err;
502 
503 	err = mlx5_core_create_sq(dev, in, inlen, &sq->qpn);
504 	if (err)
505 		return err;
506 
507 	err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
508 	if (err)
509 		mlx5_core_destroy_sq(dev, sq->qpn);
510 
511 	return err;
512 }
513 EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
514 
515 void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
516 				  struct mlx5_core_qp *sq)
517 {
518 	destroy_qprqsq_common(dev, sq, MLX5_RES_SQ);
519 	mlx5_core_destroy_sq(dev, sq->qpn);
520 }
521 EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
522