/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/kernel.h>
#include <linux/module.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/srq.h>
#include <rdma/ib_verbs.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/transobj.h>

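/*
 * Dispatch an asynchronous event to the SRQ it targets. A reference
 * is held on the SRQ for the duration of the handler so the SRQ
 * cannot be freed underneath it.
 */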
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *srq;

	spin_lock(&table->lock);

	srq = radix_tree_lookup(&table->tree, srqn);
	if (srq)
		atomic_inc(&srq->refcount);

	spin_unlock(&table->lock);

	if (!srq) {
		mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}

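/* Fill a firmware WQ context (used by RMPs) from generic SRQ attributes. */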
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
	MLX5_SET(wq,   wq, wq_signature,  !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(wq,   wq, log_wq_pg_sz,  in->log_page_size);
	MLX5_SET(wq,   wq, log_wq_stride, in->wqe_shift + 4);
	MLX5_SET(wq,   wq, log_wq_sz,	  in->log_size);
	MLX5_SET(wq,   wq, page_offset,	  in->page_offset);
	MLX5_SET(wq,   wq, lwm,		  in->lwm);
	MLX5_SET(wq,   wq, pd,		  in->pd);
	MLX5_SET64(wq, wq, dbr_addr,	  in->db_record);
}

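/* Fill a firmware SRQ context from generic SRQ attributes. */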
static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	MLX5_SET(srqc,	 srqc, wq_signature,  !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(srqc,	 srqc, log_page_size, in->log_page_size);
	MLX5_SET(srqc,	 srqc, log_rq_stride, in->wqe_shift);
	MLX5_SET(srqc,	 srqc, log_srq_size,  in->log_size);
	MLX5_SET(srqc,	 srqc, page_offset,   in->page_offset);
	MLX5_SET(srqc,	 srqc, lwm,	      in->lwm);
	MLX5_SET(srqc,	 srqc, pd,	      in->pd);
	MLX5_SET64(srqc, srqc, dbr_addr,      in->db_record);
	MLX5_SET(srqc,	 srqc, xrcd,	      in->xrcd);
	MLX5_SET(srqc,	 srqc, cqn,	      in->cqn);
}

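/* Inverse of set_wq(): recover generic SRQ attributes from a WQ context. */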
static void get_wq(void *wq, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(wq, wq, wq_signature))
		in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(wq,   wq, log_wq_pg_sz);
	in->wqe_shift	  = MLX5_GET(wq,   wq, log_wq_stride) - 4;
	in->log_size	  = MLX5_GET(wq,   wq, log_wq_sz);
	in->page_offset	  = MLX5_GET(wq,   wq, page_offset);
	in->lwm		  = MLX5_GET(wq,   wq, lwm);
	in->pd		  = MLX5_GET(wq,   wq, pd);
	in->db_record	  = MLX5_GET64(wq, wq, dbr_addr);
}

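/* Inverse of set_srqc(): recover generic SRQ attributes from an SRQ context. */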
static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(srqc, srqc, wq_signature))
		in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(srqc,   srqc, log_page_size);
	in->wqe_shift	  = MLX5_GET(srqc,   srqc, log_rq_stride);
	in->log_size	  = MLX5_GET(srqc,   srqc, log_srq_size);
	in->page_offset	  = MLX5_GET(srqc,   srqc, page_offset);
	in->lwm		  = MLX5_GET(srqc,   srqc, lwm);
	in->pd		  = MLX5_GET(srqc,   srqc, pd);
	in->db_record	  = MLX5_GET64(srqc, srqc, dbr_addr);
}

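/*
 * Look up an SRQ by number, taking a reference on it. Returns NULL if
 * no such SRQ exists; otherwise the caller owns the reference.
 */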
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *srq;

	spin_lock(&table->lock);

	srq = radix_tree_lookup(&table->tree, srqn);
	if (srq)
		atomic_inc(&srq->refcount);

	spin_unlock(&table->lock);

	return srq;
}
EXPORT_SYMBOL(mlx5_core_get_srq);

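/*
 * Size in bytes of the physical address (PAS) list needed to map the
 * receive queue: the WQ size plus the page offset (expressed in
 * 64ths of a page), rounded up to whole pages, one u64 per page.
 */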
static int get_pas_size(struct mlx5_srq_attr *in)
{
	u32 log_page_size = in->log_page_size + 12;
	u32 log_srq_size  = in->log_size;
	u32 log_rq_stride = in->wqe_shift;
	u32 page_offset	  = in->page_offset;
	u32 po_quanta	  = 1 << (log_page_size - 6);
	u32 rq_sz	  = 1 << (log_srq_size + 4 + log_rq_stride);
	u32 page_size	  = 1 << log_page_size;
	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
	u32 rq_num_pas	  = (rq_sz_po + page_size - 1) / page_size;

	return rq_num_pas * sizeof(u64);
}

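/* RMP commands: on ISSI > 0, plain SRQs are implemented on top of RMPs. */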
static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	void *create_in;
	void *rmpc;
	void *wq;
	int pas_size;
	int inlen;
	int err;

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
	create_in = mlx5_vzalloc(inlen);
	if (!create_in)
		return -ENOMEM;

	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
	set_wq(wq, in);
	memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);

	err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);

	kvfree(create_in);
	return err;
}

static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
			    struct mlx5_core_srq *srq)
{
	return mlx5_core_destroy_rmp(dev, srq->srqn);
}

static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 *rmp_out;
	void *rmpc;
	int err;

	rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
	if (!rmp_out)
		return -ENOMEM;

	err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
	if (err)
		goto out;

	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
	get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
	if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(rmp_out);
	return err;
}

static int arm_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm)
{
	return mlx5_core_arm_rmp(dev, srq->srqn, lwm);
}

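/* XRC SRQ commands, issued through the transport object interface. */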
static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
			      struct mlx5_core_srq *srq,
			      struct mlx5_srq_attr *in)
{
	void *create_in;
	void *xrc_srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	pas_size  = get_pas_size(in);
	inlen	  = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
	create_in = mlx5_vzalloc(inlen);
	if (!create_in)
		return -ENOMEM;

	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in, xrc_srq_context_entry);
	pas	 = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);

	set_srqc(xrc_srqc, in);
	MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
	memcpy(pas, in->pas, pas_size);

	err = mlx5_core_create_xsrq(dev, create_in, inlen, &srq->srqn);

	kvfree(create_in);
	return err;
}

static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
			       struct mlx5_core_srq *srq)
{
	return mlx5_core_destroy_xsrq(dev, srq->srqn);
}

static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
			     struct mlx5_core_srq *srq,
			     struct mlx5_srq_attr *out)
{
	u32 *xrcsrq_out;
	void *xrc_srqc;
	int err;

	xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
	if (!xrcsrq_out)
		return -ENOMEM;

	err = mlx5_core_query_xsrq(dev, srq->srqn, xrcsrq_out);
	if (err)
		goto out;

	xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
				xrc_srq_context_entry);
	get_srqc(xrc_srqc, out);
	if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(xrcsrq_out);
	return err;
}

static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
			   struct mlx5_core_srq *srq, u16 lwm)
{
	return mlx5_core_arm_xsrq(dev, srq->srqn, lwm);
}

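/* Legacy SRQ commands (ISSI 0), issued directly via mlx5_cmd_exec(). */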
static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
	void *create_in;
	void *srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	pas_size  = get_pas_size(in);
	inlen	  = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
	create_in = mlx5_vzalloc(inlen);
	if (!create_in)
		return -ENOMEM;

	srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
	pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);

	set_srqc(srqc, in);
	memcpy(pas, in->pas, pas_size);

	MLX5_SET(create_srq_in, create_in, opcode, MLX5_CMD_OP_CREATE_SRQ);
	err = mlx5_cmd_exec(dev, create_in, inlen, create_out, sizeof(create_out));
	kvfree(create_in);
	if (!err)
		srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);

	return err;
}

static int destroy_srq_cmd(struct mlx5_core_dev *dev,
			   struct mlx5_core_srq *srq)
{
	u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
	u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};

	MLX5_SET(destroy_srq_in, srq_in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
	MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);

	return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, sizeof(srq_out));
}

static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
	u32 *srq_out;
	void *srqc;
	int outlen = MLX5_ST_SZ_BYTES(query_srq_out);
	int err;

	srq_out = mlx5_vzalloc(outlen);
	if (!srq_out)
		return -ENOMEM;

	MLX5_SET(query_srq_in, srq_in, opcode, MLX5_CMD_OP_QUERY_SRQ);
	MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
	err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, outlen);
	if (err)
		goto out;

	srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
	get_srqc(srqc, out);
	if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;
out:
	kvfree(srq_out);
	return err;
}

static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm, int is_srq)
{
	/* The arm_srq layouts are missing; the identical xrc_srq ones are used instead. */
	u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
	u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};

	MLX5_SET(arm_xrc_srq_in, srq_in, opcode,   MLX5_CMD_OP_ARM_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
	MLX5_SET(arm_xrc_srq_in, srq_in, lwm,	   lwm);

	return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, sizeof(srq_out));
}

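/*
 * Dispatch on the device ISSI (interface step sequence ID): ISSI 0
 * uses the legacy SRQ commands; later interface versions use the XRC
 * SRQ commands for XRC SRQs and emulate plain SRQs with RMPs.
 */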
static int create_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			    struct mlx5_srq_attr *in)
{
	if (!dev->issi)
		return create_srq_cmd(dev, srq, in);
	else if (srq->common.res == MLX5_RES_XSRQ)
		return create_xrc_srq_cmd(dev, srq, in);
	else
		return create_rmp_cmd(dev, srq, in);
}

static int destroy_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
	if (!dev->issi)
		return destroy_srq_cmd(dev, srq);
	else if (srq->common.res == MLX5_RES_XSRQ)
		return destroy_xrc_srq_cmd(dev, srq);
	else
		return destroy_rmp_cmd(dev, srq);
}

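/*
 * Create an SRQ and make it visible in the SRQ table so that async
 * events can be dispatched to it. On insertion failure the hardware
 * object is torn down again.
 */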
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *in)
{
	int err;
	struct mlx5_srq_table *table = &dev->priv.srq_table;

	if (in->type == IB_SRQT_XRC)
		srq->common.res = MLX5_RES_XSRQ;
	else
		srq->common.res = MLX5_RES_SRQ;

	err = create_srq_split(dev, srq, in);
	if (err)
		return err;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, srq->srqn, srq);
	spin_unlock_irq(&table->lock);
	if (err) {
		mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
		goto err_destroy_srq_split;
	}

	return 0;

err_destroy_srq_split:
	destroy_srq_split(dev, srq);

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);

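/*
 * Remove an SRQ from the table and destroy the hardware object, then
 * wait for all outstanding event handlers to drop their references.
 */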
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *tmp;
	int err;

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, srq->srqn);
	spin_unlock_irq(&table->lock);
	if (!tmp) {
		mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
		return -EINVAL;
	}
	if (tmp != srq) {
		mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
		return -EINVAL;
	}

	err = destroy_srq_split(dev, srq);
	if (err)
		return err;

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_srq);

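/*
 * Query the current SRQ attributes. MLX5_SRQ_FLAG_ERR is set in
 * out->flags if the queue is no longer in the ready/good state.
 */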
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *out)
{
	if (!dev->issi)
		return query_srq_cmd(dev, srq, out);
	else if (srq->common.res == MLX5_RES_XSRQ)
		return query_xrc_srq_cmd(dev, srq, out);
	else
		return query_rmp_cmd(dev, srq, out);
}
EXPORT_SYMBOL(mlx5_core_query_srq);

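/* Re-arm the SRQ limit event with a new limit water mark (lwm). */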
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq)
{
	if (!dev->issi)
		return arm_srq_cmd(dev, srq, lwm, is_srq);
	else if (srq->common.res == MLX5_RES_XSRQ)
		return arm_xrc_srq_cmd(dev, srq, lwm);
	else
		return arm_rmp_cmd(dev, srq, lwm);
}
EXPORT_SYMBOL(mlx5_core_arm_srq);

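/* The SRQ table is a spinlock-protected radix tree keyed by SRQ number. */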
void mlx5_init_srq_table(struct mlx5_core_dev *dev)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}

void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
{
	/* nothing */
}