/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/srq.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
#include "transobj.h"

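/*
 * Dispatch an asynchronous hardware event to the SRQ it names.  The
 * reference taken under the table lock keeps the SRQ alive across the
 * callback; dropping the last reference completes &srq->free so that
 * mlx5_core_destroy_srq() can finish.
 */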
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *srq;

	spin_lock(&table->lock);

	srq = radix_tree_lookup(&table->tree, srqn);
	if (srq)
		atomic_inc(&srq->refcount);

	spin_unlock(&table->lock);

	if (!srq) {
		mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}

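/*
 * Translate the generic SRQ attributes into the WQ context used by the
 * RMP commands.  Note the stride conversion: the WQ context takes log2
 * of the stride in bytes, while mlx5_srq_attr::wqe_shift is relative to
 * the 16-byte basic stride, hence the +4 here and the -4 in get_wq().
 */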
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
	MLX5_SET(wq,   wq, wq_signature,  !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(wq,   wq, log_wq_pg_sz,  in->log_page_size);
	MLX5_SET(wq,   wq, log_wq_stride, in->wqe_shift + 4);
	MLX5_SET(wq,   wq, log_wq_sz,	  in->log_size);
	MLX5_SET(wq,   wq, page_offset,	  in->page_offset);
	MLX5_SET(wq,   wq, lwm,		  in->lwm);
	MLX5_SET(wq,   wq, pd,		  in->pd);
	MLX5_SET64(wq, wq, dbr_addr,	  in->db_record);
}

static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	MLX5_SET(srqc,	 srqc, wq_signature,  !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(srqc,	 srqc, log_page_size, in->log_page_size);
	MLX5_SET(srqc,	 srqc, log_rq_stride, in->wqe_shift);
	MLX5_SET(srqc,	 srqc, log_srq_size,  in->log_size);
	MLX5_SET(srqc,	 srqc, page_offset,   in->page_offset);
	MLX5_SET(srqc,	 srqc, lwm,	      in->lwm);
	MLX5_SET(srqc,	 srqc, pd,	      in->pd);
	MLX5_SET64(srqc, srqc, dbr_addr,      in->db_record);
	MLX5_SET(srqc,	 srqc, xrcd,	      in->xrcd);
	MLX5_SET(srqc,	 srqc, cqn,	      in->cqn);
}

static void get_wq(void *wq, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(wq, wq, wq_signature))
		in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(wq,   wq, log_wq_pg_sz);
	in->wqe_shift	  = MLX5_GET(wq,   wq, log_wq_stride) - 4;
	in->log_size	  = MLX5_GET(wq,   wq, log_wq_sz);
	in->page_offset	  = MLX5_GET(wq,   wq, page_offset);
	in->lwm		  = MLX5_GET(wq,   wq, lwm);
	in->pd		  = MLX5_GET(wq,   wq, pd);
	in->db_record	  = MLX5_GET64(wq, wq, dbr_addr);
}

static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(srqc, srqc, wq_signature))
		in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(srqc,   srqc, log_page_size);
	in->wqe_shift	  = MLX5_GET(srqc,   srqc, log_rq_stride);
	in->log_size	  = MLX5_GET(srqc,   srqc, log_srq_size);
	in->page_offset	  = MLX5_GET(srqc,   srqc, page_offset);
	in->lwm		  = MLX5_GET(srqc,   srqc, lwm);
	in->pd		  = MLX5_GET(srqc,   srqc, pd);
	in->db_record	  = MLX5_GET64(srqc, srqc, dbr_addr);
}

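/* Look up an SRQ by number, taking a reference on success. */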
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *srq;

	spin_lock(&table->lock);

	srq = radix_tree_lookup(&table->tree, srqn);
	if (srq)
		atomic_inc(&srq->refcount);

	spin_unlock(&table->lock);

	return srq;
}
EXPORT_SYMBOL(mlx5_core_get_srq);

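/*
 * Size in bytes of the PAS (page address) list needed to describe the
 * queue buffer: log_page_size is biased by 12 (units of 4KB pages), the
 * total ring size is (1 << log_srq_size) WQEs of 16 << log_rq_stride
 * bytes each, and page_offset appears to be measured in 64ths of a page
 * (po_quanta).  One 64-bit address is needed per page.
 */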
static int get_pas_size(struct mlx5_srq_attr *in)
{
	u32 log_page_size = in->log_page_size + 12;
	u32 log_srq_size  = in->log_size;
	u32 log_rq_stride = in->wqe_shift;
	u32 page_offset	  = in->page_offset;
	u32 po_quanta	  = 1 << (log_page_size - 6);
	u32 rq_sz	  = 1 << (log_srq_size + 4 + log_rq_stride);
	u32 page_size	  = 1 << log_page_size;
	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
	u32 rq_num_pas	  = (rq_sz_po + page_size - 1) / page_size;

	return rq_num_pas * sizeof(u64);
}

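/*
 * RMP (receive memory pool) commands.  On ISSI > 0 devices a plain SRQ
 * is implemented as an RMP object, created directly in the ready state.
 */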
static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	void *create_in;
	void *rmpc;
	void *wq;
	int pas_size;
	int inlen;
	int err;

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
	create_in = mlx5_vzalloc(inlen);
	if (!create_in)
		return -ENOMEM;

	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
	set_wq(wq, in);
	memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);

	err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);

	kvfree(create_in);
	return err;
}

static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
			   struct mlx5_core_srq *srq)
{
	return mlx5_core_destroy_rmp(dev, srq->srqn);
}

static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 *rmp_out;
	void *rmpc;
	int err;

	rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
	if (!rmp_out)
		return -ENOMEM;

	err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
	if (err)
		goto out;

	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
	get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
	if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(rmp_out);
	return err;
}

static int arm_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm)
{
	return mlx5_core_arm_rmp(dev, srq->srqn, lwm);
}

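/* XRC SRQ commands, used for IB_SRQT_XRC SRQs when ISSI > 0. */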
static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
			      struct mlx5_core_srq *srq,
			      struct mlx5_srq_attr *in)
{
	void *create_in;
	void *xrc_srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	pas_size  = get_pas_size(in);
	inlen	  = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
	create_in = mlx5_vzalloc(inlen);
	if (!create_in)
		return -ENOMEM;

	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in, xrc_srq_context_entry);
	pas	 = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);

	set_srqc(xrc_srqc, in);
	MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
	memcpy(pas, in->pas, pas_size);

	err = mlx5_core_create_xsrq(dev, create_in, inlen, &srq->srqn);

	kvfree(create_in);
	return err;
}

static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
			       struct mlx5_core_srq *srq)
{
	return mlx5_core_destroy_xsrq(dev, srq->srqn);
}

static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
			     struct mlx5_core_srq *srq,
			     struct mlx5_srq_attr *out)
{
	u32 *xrcsrq_out;
	void *xrc_srqc;
	int err;

	xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
	if (!xrcsrq_out)
		return -ENOMEM;

	err = mlx5_core_query_xsrq(dev, srq->srqn, xrcsrq_out);
	if (err)
		goto out;

	xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
				xrc_srq_context_entry);
	get_srqc(xrc_srqc, out);
	if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(xrcsrq_out);
	return err;
}

static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
			   struct mlx5_core_srq *srq, u16 lwm)
{
	return mlx5_core_arm_xsrq(dev, srq->srqn, lwm);
}

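/*
 * Legacy SRQ commands, used when the device reports ISSI 0.  These go
 * straight through mlx5_cmd_exec() rather than the transobj API.
 */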
static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
	void *create_in;
	void *srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	pas_size  = get_pas_size(in);
	inlen	  = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
	create_in = mlx5_vzalloc(inlen);
	if (!create_in)
		return -ENOMEM;

	srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
	pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);

	set_srqc(srqc, in);
	memcpy(pas, in->pas, pas_size);

	MLX5_SET(create_srq_in, create_in, opcode, MLX5_CMD_OP_CREATE_SRQ);
	err = mlx5_cmd_exec(dev, create_in, inlen, create_out, sizeof(create_out));
	kvfree(create_in);
	if (!err)
		srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);

	return err;
}

static int destroy_srq_cmd(struct mlx5_core_dev *dev,
			   struct mlx5_core_srq *srq)
{
	u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
	u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};

	MLX5_SET(destroy_srq_in, srq_in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
	MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);

	return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, sizeof(srq_out));
}

static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
	u32 *srq_out;
	void *srqc;
	int outlen = MLX5_ST_SZ_BYTES(query_srq_out);
	int err;

	srq_out = mlx5_vzalloc(outlen);
	if (!srq_out)
		return -ENOMEM;

	MLX5_SET(query_srq_in, srq_in, opcode, MLX5_CMD_OP_QUERY_SRQ);
	MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
	err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, outlen);
	if (err)
		goto out;

	srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
	get_srqc(srqc, out);
	if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;
out:
	kvfree(srq_out);
	return err;
}

static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm, int is_srq)
{
	/*
	 * No arm_srq command layout is defined; the arm_xrc_srq layout is
	 * identical, so reuse it here.
	 */
	u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
	u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};

	MLX5_SET(arm_xrc_srq_in, srq_in, opcode,   MLX5_CMD_OP_ARM_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
	MLX5_SET(arm_xrc_srq_in, srq_in, lwm,	   lwm);

	return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, sizeof(srq_out));
}

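/*
 * ISSI (interface step sequence ID) dispatch: ISSI 0 devices use the
 * legacy SRQ commands; on newer interfaces an SRQ is backed by either an
 * XRC SRQ or an RMP, chosen by the resource type set at creation time.
 */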
static int create_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			    struct mlx5_srq_attr *in)
{
	if (!dev->issi)
		return create_srq_cmd(dev, srq, in);
	else if (srq->common.res == MLX5_RES_XSRQ)
		return create_xrc_srq_cmd(dev, srq, in);
	else
		return create_rmp_cmd(dev, srq, in);
}

static int destroy_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
	if (!dev->issi)
		return destroy_srq_cmd(dev, srq);
	else if (srq->common.res == MLX5_RES_XSRQ)
		return destroy_xrc_srq_cmd(dev, srq);
	else
		return destroy_rmp_cmd(dev, srq);
}

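/*
 * Create an SRQ and make it visible in the radix tree.  The initial
 * reference is dropped by mlx5_core_destroy_srq(); srq->free completes
 * once the last reference is gone.
 */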
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *in)
{
	int err;
	struct mlx5_srq_table *table = &dev->priv.srq_table;

	if (in->type == IB_SRQT_XRC)
		srq->common.res = MLX5_RES_XSRQ;
	else
		srq->common.res = MLX5_RES_SRQ;

	err = create_srq_split(dev, srq, in);
	if (err)
		return err;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, srq->srqn, srq);
	spin_unlock_irq(&table->lock);
	if (err) {
		mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
		goto err_destroy_srq_split;
	}

	return 0;

err_destroy_srq_split:
	destroy_srq_split(dev, srq);

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);

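/*
 * Unpublish the SRQ, destroy the hardware object, then wait for any
 * in-flight event handlers (which hold references) to drain.
 */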
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *tmp;
	int err;

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, srq->srqn);
	spin_unlock_irq(&table->lock);
	if (!tmp) {
		mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
		return -EINVAL;
	}
	if (tmp != srq) {
		mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
		return -EINVAL;
	}

	err = destroy_srq_split(dev, srq);
	if (err)
		return err;

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_srq);

int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *out)
{
	if (!dev->issi)
		return query_srq_cmd(dev, srq, out);
	else if (srq->common.res == MLX5_RES_XSRQ)
		return query_xrc_srq_cmd(dev, srq, out);
	else
		return query_rmp_cmd(dev, srq, out);
}
EXPORT_SYMBOL(mlx5_core_query_srq);

int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq)
{
	if (!dev->issi)
		return arm_srq_cmd(dev, srq, lwm, is_srq);
	else if (srq->common.res == MLX5_RES_XSRQ)
		return arm_xrc_srq_cmd(dev, srq, lwm);
	else
		return arm_rmp_cmd(dev, srq, lwm);
}
EXPORT_SYMBOL(mlx5_core_arm_srq);

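/*
 * The tree is keyed by SRQ number and protected by table->lock;
 * GFP_ATOMIC because insertions happen under that spinlock.
 */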
void mlx5_init_srq_table(struct mlx5_core_dev *dev)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}

void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
{
	/* nothing */
}