xref: /linux/drivers/infiniband/hw/mlx5/qp.c (revision 52338415)
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/module.h>
34 #include <rdma/ib_umem.h>
35 #include <rdma/ib_cache.h>
36 #include <rdma/ib_user_verbs.h>
37 #include <rdma/rdma_counter.h>
38 #include <linux/mlx5/fs.h>
39 #include "mlx5_ib.h"
40 #include "ib_rep.h"
41 #include "cmd.h"
42 
43 /* not supported currently */
44 static int wq_signature;
45 
46 enum {
47 	MLX5_IB_ACK_REQ_FREQ	= 8,
48 };
49 
50 enum {
51 	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
52 	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
53 	MLX5_IB_LINK_TYPE_IB		= 0,
54 	MLX5_IB_LINK_TYPE_ETH		= 1
55 };
56 
57 enum {
58 	MLX5_IB_SQ_STRIDE	= 6,
59 	MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
60 };
61 
62 static const u32 mlx5_ib_opcode[] = {
63 	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
64 	[IB_WR_LSO]				= MLX5_OPCODE_LSO,
65 	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
66 	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
67 	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
68 	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
69 	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
70 	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
71 	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
72 	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
73 	[IB_WR_REG_MR]				= MLX5_OPCODE_UMR,
74 	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
75 	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
76 	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
77 };
78 
79 struct mlx5_wqe_eth_pad {
80 	u8 rsvd0[16];
81 };
82 
83 enum raw_qp_set_mask_map {
84 	MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID		= 1UL << 0,
85 	MLX5_RAW_QP_RATE_LIMIT			= 1UL << 1,
86 };
87 
88 struct mlx5_modify_raw_qp_param {
89 	u16 operation;
90 
91 	u32 set_mask; /* raw_qp_set_mask_map */
92 
93 	struct mlx5_rate_limit rl;
94 
95 	u8 rq_q_ctr_id;
96 	u16 port;
97 };
98 
99 static void get_cqs(enum ib_qp_type qp_type,
100 		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
101 		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
102 
103 static int is_qp0(enum ib_qp_type qp_type)
104 {
105 	return qp_type == IB_QPT_SMI;
106 }
107 
108 static int is_sqp(enum ib_qp_type qp_type)
109 {
110 	return is_qp0(qp_type) || is_qp1(qp_type);
111 }
112 
113 /**
114  * mlx5_ib_read_user_wqe_common() - Copy a WQE (or part of it) from a user
115  * WQ to a kernel buffer
116  *
117  * @umem: User space memory where the WQ is
118  * @buffer: buffer to copy to
119  * @buflen: buffer length
120  * @wqe_index: index of WQE to copy from
121  * @wq_offset: offset to start of WQ
122  * @wq_wqe_cnt: number of WQEs in WQ
123  * @wq_wqe_shift: log2 of WQE size
124  * @bcnt: number of bytes to copy
125  * @bytes_copied: number of bytes actually copied (return value)
126  *
127  * Copies bcnt bytes or fewer, starting from the start of the WQE.
128  * Does not guarantee to copy the entire WQE.
129  *
130  * Return: zero on success, or an error code.
131  */
132 static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem,
133 					void *buffer,
134 					u32 buflen,
135 					int wqe_index,
136 					int wq_offset,
137 					int wq_wqe_cnt,
138 					int wq_wqe_shift,
139 					int bcnt,
140 					size_t *bytes_copied)
141 {
142 	size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);
143 	size_t wq_end = wq_offset + (wq_wqe_cnt << wq_wqe_shift);
144 	size_t copy_length;
145 	int ret;
146 
147 	/* don't copy more than requested, more than the buffer length,
148 	 * or beyond the WQ end
149 	 */
150 	copy_length = min_t(u32, buflen, wq_end - offset);
151 	copy_length = min_t(u32, copy_length, bcnt);
152 
153 	ret = ib_umem_copy_from(buffer, umem, offset, copy_length);
154 	if (ret)
155 		return ret;
156 
157 	if (bytes_copied)
158 		*bytes_copied = copy_length;
159 
160 	return 0;
161 }
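
/*
 * Worked example of the clamping above (illustrative only; all numbers are
 * hypothetical): with wq_offset = 0x1000, wq_wqe_cnt = 8 and wq_wqe_shift = 6
 * (64-byte WQEs), wqe_index = 10 wraps to slot 10 % 8 = 2, giving
 * offset = 0x1000 + (2 << 6) = 0x1080 and wq_end = 0x1000 + (8 << 6) = 0x1200,
 * so at most 0x180 bytes can be copied before hitting the WQ end. A minimal
 * stand-alone sketch of the same window computation:
 */
#if 0	/* documentation sketch, not compiled */
static size_t wqe_copy_window(size_t wq_offset, int wqe_index, int wq_wqe_cnt,
			      int wq_wqe_shift, u32 buflen, int bcnt)
{
	size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);
	size_t wq_end = wq_offset + ((size_t)wq_wqe_cnt << wq_wqe_shift);

	/* never copy past the caller's buffer, the request or the WQ end */
	return min3((size_t)buflen, (size_t)bcnt, wq_end - offset);
}
#endif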
162 
163 int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
164 			     int wqe_index,
165 			     void *buffer,
166 			     int buflen,
167 			     size_t *bc)
168 {
169 	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
170 	struct ib_umem *umem = base->ubuffer.umem;
171 	struct mlx5_ib_wq *wq = &qp->sq;
172 	struct mlx5_wqe_ctrl_seg *ctrl;
173 	size_t bytes_copied;
174 	size_t bytes_copied2;
175 	size_t wqe_length;
176 	int ret;
177 	int ds;
178 
179 	if (buflen < sizeof(*ctrl))
180 		return -EINVAL;
181 
182 	/* first, read as much as possible */
183 	ret = mlx5_ib_read_user_wqe_common(umem,
184 					   buffer,
185 					   buflen,
186 					   wqe_index,
187 					   wq->offset,
188 					   wq->wqe_cnt,
189 					   wq->wqe_shift,
190 					   buflen,
191 					   &bytes_copied);
192 	if (ret)
193 		return ret;
194 
195 	/* we need at least the control segment size to proceed */
196 	if (bytes_copied < sizeof(*ctrl))
197 		return -EINVAL;
198 
199 	ctrl = buffer;
200 	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
201 	wqe_length = ds * MLX5_WQE_DS_UNITS;
202 
203 	/* if we copied enough then we are done */
204 	if (bytes_copied >= wqe_length) {
205 		*bc = bytes_copied;
206 		return 0;
207 	}
208 
209 	/* otherwise this is a wrapped-around WQE,
210 	 * so read the remaining bytes starting
211 	 * from wqe_index 0
212 	 */
213 	ret = mlx5_ib_read_user_wqe_common(umem,
214 					   buffer + bytes_copied,
215 					   buflen - bytes_copied,
216 					   0,
217 					   wq->offset,
218 					   wq->wqe_cnt,
219 					   wq->wqe_shift,
220 					   wqe_length - bytes_copied,
221 					   &bytes_copied2);
222 
223 	if (ret)
224 		return ret;
225 	*bc = bytes_copied + bytes_copied2;
226 	return 0;
227 }
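
/*
 * Worked example of the wrap-around read above (hypothetical numbers): the
 * "ds" field counts 16-byte units (MLX5_WQE_DS_UNITS), so ds = 8 describes a
 * 128-byte WQE. Posted at the last slot of an 8-entry SQ with 64-byte
 * strides, its first 64 bytes end the ring and the remaining 64 bytes sit at
 * wqe_index 0:
 */
#if 0	/* documentation sketch, not compiled */
	size_t wqe_length  = 8 * MLX5_WQE_DS_UNITS;	/* 128 bytes */
	size_t first_part  = 1 << 6;			/* bytes up to WQ end */
	size_t second_part = wqe_length - first_part;	/* re-read from idx 0 */
#endif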
228 
229 int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp,
230 			     int wqe_index,
231 			     void *buffer,
232 			     int buflen,
233 			     size_t *bc)
234 {
235 	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
236 	struct ib_umem *umem = base->ubuffer.umem;
237 	struct mlx5_ib_wq *wq = &qp->rq;
238 	size_t bytes_copied;
239 	int ret;
240 
241 	ret = mlx5_ib_read_user_wqe_common(umem,
242 					   buffer,
243 					   buflen,
244 					   wqe_index,
245 					   wq->offset,
246 					   wq->wqe_cnt,
247 					   wq->wqe_shift,
248 					   buflen,
249 					   &bytes_copied);
250 
251 	if (ret)
252 		return ret;
253 	*bc = bytes_copied;
254 	return 0;
255 }
256 
257 int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq,
258 			      int wqe_index,
259 			      void *buffer,
260 			      int buflen,
261 			      size_t *bc)
262 {
263 	struct ib_umem *umem = srq->umem;
264 	size_t bytes_copied;
265 	int ret;
266 
267 	ret = mlx5_ib_read_user_wqe_common(umem,
268 					   buffer,
269 					   buflen,
270 					   wqe_index,
271 					   0,
272 					   srq->msrq.max,
273 					   srq->msrq.wqe_shift,
274 					   buflen,
275 					   &bytes_copied);
276 
277 	if (ret)
278 		return ret;
279 	*bc = bytes_copied;
280 	return 0;
281 }
282 
283 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
284 {
285 	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
286 	struct ib_event event;
287 
288 	if (type == MLX5_EVENT_TYPE_PATH_MIG) {
289 		/* This event is only valid for trans_qps */
290 		to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
291 	}
292 
293 	if (ibqp->event_handler) {
294 		event.device     = ibqp->device;
295 		event.element.qp = ibqp;
296 		switch (type) {
297 		case MLX5_EVENT_TYPE_PATH_MIG:
298 			event.event = IB_EVENT_PATH_MIG;
299 			break;
300 		case MLX5_EVENT_TYPE_COMM_EST:
301 			event.event = IB_EVENT_COMM_EST;
302 			break;
303 		case MLX5_EVENT_TYPE_SQ_DRAINED:
304 			event.event = IB_EVENT_SQ_DRAINED;
305 			break;
306 		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
307 			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
308 			break;
309 		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
310 			event.event = IB_EVENT_QP_FATAL;
311 			break;
312 		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
313 			event.event = IB_EVENT_PATH_MIG_ERR;
314 			break;
315 		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
316 			event.event = IB_EVENT_QP_REQ_ERR;
317 			break;
318 		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
319 			event.event = IB_EVENT_QP_ACCESS_ERR;
320 			break;
321 		default:
322 			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
323 			return;
324 		}
325 
326 		ibqp->event_handler(&event, ibqp->qp_context);
327 	}
328 }
329 
330 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
331 		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
332 {
333 	int wqe_size;
334 	int wq_size;
335 
336 	/* Sanity check RQ size before proceeding */
337 	if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
338 		return -EINVAL;
339 
340 	if (!has_rq) {
341 		qp->rq.max_gs = 0;
342 		qp->rq.wqe_cnt = 0;
343 		qp->rq.wqe_shift = 0;
344 		cap->max_recv_wr = 0;
345 		cap->max_recv_sge = 0;
346 	} else {
347 		if (ucmd) {
348 			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
349 			if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
350 				return -EINVAL;
351 			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
352 			if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
353 				return -EINVAL;
354 			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
355 			qp->rq.max_post = qp->rq.wqe_cnt;
356 		} else {
357 			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
358 			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
359 			wqe_size = roundup_pow_of_two(wqe_size);
360 			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
361 			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
362 			qp->rq.wqe_cnt = wq_size / wqe_size;
363 			if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
364 				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
365 					    wqe_size,
366 					    MLX5_CAP_GEN(dev->mdev,
367 							 max_wqe_sz_rq));
368 				return -EINVAL;
369 			}
370 			qp->rq.wqe_shift = ilog2(wqe_size);
371 			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
372 			qp->rq.max_post = qp->rq.wqe_cnt;
373 		}
374 	}
375 
376 	return 0;
377 }
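
/*
 * Worked example of the kernel-owned RQ sizing above (hypothetical caps):
 * max_recv_sge = 3 and no WQ signature give wqe_size = 3 * 16 = 48, rounded
 * up to 64; max_recv_wr = 100 gives wq_size = roundup_pow_of_two(100) * 64 =
 * 8192, hence wqe_cnt = 128, wqe_shift = 6 and max_gs = 64 / 16 = 4.
 */
#if 0	/* documentation sketch, not compiled */
	int wqe_size = roundup_pow_of_two(3 * sizeof(struct mlx5_wqe_data_seg));
	int wq_size  = roundup_pow_of_two(100) * wqe_size;	    /* 8192 */
	int wqe_cnt  = wq_size / wqe_size;			    /* 128  */
	int max_gs   = wqe_size / sizeof(struct mlx5_wqe_data_seg); /* 4    */
#endif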
378 
379 static int sq_overhead(struct ib_qp_init_attr *attr)
380 {
381 	int size = 0;
382 
383 	switch (attr->qp_type) {
384 	case IB_QPT_XRC_INI:
385 		size += sizeof(struct mlx5_wqe_xrc_seg);
386 		/* fall through */
387 	case IB_QPT_RC:
388 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
389 			max(sizeof(struct mlx5_wqe_atomic_seg) +
390 			    sizeof(struct mlx5_wqe_raddr_seg),
391 			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
392 			    sizeof(struct mlx5_mkey_seg) +
393 			    MLX5_IB_SQ_UMR_INLINE_THRESHOLD /
394 			    MLX5_IB_UMR_OCTOWORD);
395 		break;
396 
397 	case IB_QPT_XRC_TGT:
398 		return 0;
399 
400 	case IB_QPT_UC:
401 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
402 			max(sizeof(struct mlx5_wqe_raddr_seg),
403 			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
404 			    sizeof(struct mlx5_mkey_seg));
405 		break;
406 
407 	case IB_QPT_UD:
408 		if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
409 			size += sizeof(struct mlx5_wqe_eth_pad) +
410 				sizeof(struct mlx5_wqe_eth_seg);
411 		/* fall through */
412 	case IB_QPT_SMI:
413 	case MLX5_IB_QPT_HW_GSI:
414 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
415 			sizeof(struct mlx5_wqe_datagram_seg);
416 		break;
417 
418 	case MLX5_IB_QPT_REG_UMR:
419 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
420 			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
421 			sizeof(struct mlx5_mkey_seg);
422 		break;
423 
424 	default:
425 		return -EINVAL;
426 	}
427 
428 	return size;
429 }
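
/*
 * Example (a hedged illustration; exact segment sizes depend on the mlx5
 * headers): a UD send WQE reserves ctrl + datagram segments, while an RC WQE
 * must also fit the larger of an atomic + raddr pair and an inline UMR
 * descriptor, which is why its max() term dominates the overhead.
 */
#if 0	/* documentation sketch, not compiled */
	int ud_overhead = sizeof(struct mlx5_wqe_ctrl_seg) +
			  sizeof(struct mlx5_wqe_datagram_seg);
#endif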
430 
431 static int calc_send_wqe(struct ib_qp_init_attr *attr)
432 {
433 	int inl_size = 0;
434 	int size;
435 
436 	size = sq_overhead(attr);
437 	if (size < 0)
438 		return size;
439 
440 	if (attr->cap.max_inline_data) {
441 		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
442 			attr->cap.max_inline_data;
443 	}
444 
445 	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
446 	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN &&
447 	    ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
448 		return MLX5_SIG_WQE_SIZE;
449 	else
450 		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
451 }
452 
453 static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
454 {
455 	int max_sge;
456 
457 	if (attr->qp_type == IB_QPT_RC)
458 		max_sge = (min_t(int, wqe_size, 512) -
459 			   sizeof(struct mlx5_wqe_ctrl_seg) -
460 			   sizeof(struct mlx5_wqe_raddr_seg)) /
461 			sizeof(struct mlx5_wqe_data_seg);
462 	else if (attr->qp_type == IB_QPT_XRC_INI)
463 		max_sge = (min_t(int, wqe_size, 512) -
464 			   sizeof(struct mlx5_wqe_ctrl_seg) -
465 			   sizeof(struct mlx5_wqe_xrc_seg) -
466 			   sizeof(struct mlx5_wqe_raddr_seg)) /
467 			sizeof(struct mlx5_wqe_data_seg);
468 	else
469 		max_sge = (wqe_size - sq_overhead(attr)) /
470 			sizeof(struct mlx5_wqe_data_seg);
471 
472 	return min_t(int, max_sge, (wqe_size - sq_overhead(attr)) /
473 		     sizeof(struct mlx5_wqe_data_seg));
474 }
475 
476 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
477 			struct mlx5_ib_qp *qp)
478 {
479 	int wqe_size;
480 	int wq_size;
481 
482 	if (!attr->cap.max_send_wr)
483 		return 0;
484 
485 	wqe_size = calc_send_wqe(attr);
486 	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
487 	if (wqe_size < 0)
488 		return wqe_size;
489 
490 	if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
491 		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
492 			    wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
493 		return -EINVAL;
494 	}
495 
496 	qp->max_inline_data = wqe_size - sq_overhead(attr) -
497 			      sizeof(struct mlx5_wqe_inline_seg);
498 	attr->cap.max_inline_data = qp->max_inline_data;
499 
500 	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
501 	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
502 	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
503 		mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
504 			    attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
505 			    qp->sq.wqe_cnt,
506 			    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
507 		return -ENOMEM;
508 	}
509 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
510 	qp->sq.max_gs = get_send_sge(attr, wqe_size);
511 	if (qp->sq.max_gs < attr->cap.max_send_sge)
512 		return -ENOMEM;
513 
514 	attr->cap.max_send_sge = qp->sq.max_gs;
515 	qp->sq.max_post = wq_size / wqe_size;
516 	attr->cap.max_send_wr = qp->sq.max_post;
517 
518 	return wq_size;
519 }
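
/*
 * Worked example of the SQ sizing above (hypothetical values): with
 * wqe_size = 192 and max_send_wr = 100, wq_size =
 * roundup_pow_of_two(100 * 192) = 32768, so wqe_cnt = 32768 / 64 = 512
 * basic blocks, while max_post = 32768 / 192 = 170 full-size WQEs.
 */
#if 0	/* documentation sketch, not compiled */
	int wq_size  = roundup_pow_of_two(100 * 192);	/* 32768 */
	int wqe_cnt  = wq_size / MLX5_SEND_WQE_BB;	/* 512   */
	int max_post = wq_size / 192;			/* 170   */
#endif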
520 
521 static int set_user_buf_size(struct mlx5_ib_dev *dev,
522 			    struct mlx5_ib_qp *qp,
523 			    struct mlx5_ib_create_qp *ucmd,
524 			    struct mlx5_ib_qp_base *base,
525 			    struct ib_qp_init_attr *attr)
526 {
527 	int desc_sz = 1 << qp->sq.wqe_shift;
528 
529 	if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
530 		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
531 			     desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
532 		return -EINVAL;
533 	}
534 
535 	if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) {
536 		mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
537 			     ucmd->sq_wqe_count);
538 		return -EINVAL;
539 	}
540 
541 	qp->sq.wqe_cnt = ucmd->sq_wqe_count;
542 
543 	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
544 		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
545 			     qp->sq.wqe_cnt,
546 			     1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
547 		return -EINVAL;
548 	}
549 
550 	if (attr->qp_type == IB_QPT_RAW_PACKET ||
551 	    qp->flags & MLX5_IB_QP_UNDERLAY) {
552 		base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
553 		qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
554 	} else {
555 		base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
556 					 (qp->sq.wqe_cnt << 6);
557 	}
558 
559 	return 0;
560 }
561 
562 static int qp_has_rq(struct ib_qp_init_attr *attr)
563 {
564 	if (attr->qp_type == IB_QPT_XRC_INI ||
565 	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
566 	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
567 	    !attr->cap.max_recv_wr)
568 		return 0;
569 
570 	return 1;
571 }
572 
573 enum {
574 	/* this is the first blue flame register in the array of bfregs assigned
575 	 * to a process. Since we do not use it for blue flame but rather
576 	 * regular 64 bit doorbells, we do not need a lock for maintaining
577 	 * "odd/even" order
578 	 */
579 	NUM_NON_BLUE_FLAME_BFREGS = 1,
580 };
581 
582 static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
583 {
584 	return get_num_static_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
585 }
586 
587 static int num_med_bfreg(struct mlx5_ib_dev *dev,
588 			 struct mlx5_bfreg_info *bfregi)
589 {
590 	int n;
591 
592 	n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs -
593 	    NUM_NON_BLUE_FLAME_BFREGS;
594 
595 	return n >= 0 ? n : 0;
596 }
597 
598 static int first_med_bfreg(struct mlx5_ib_dev *dev,
599 			   struct mlx5_bfreg_info *bfregi)
600 {
601 	return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM;
602 }
603 
604 static int first_hi_bfreg(struct mlx5_ib_dev *dev,
605 			  struct mlx5_bfreg_info *bfregi)
606 {
607 	int med;
608 
609 	med = num_med_bfreg(dev, bfregi);
610 	return ++med;
611 }
612 
613 static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
614 				  struct mlx5_bfreg_info *bfregi)
615 {
616 	int i;
617 
618 	for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) {
619 		if (!bfregi->count[i]) {
620 			bfregi->count[i]++;
621 			return i;
622 		}
623 	}
624 
625 	return -ENOMEM;
626 }
627 
628 static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
629 				 struct mlx5_bfreg_info *bfregi)
630 {
631 	int minidx = first_med_bfreg(dev, bfregi);
632 	int i;
633 
634 	if (minidx < 0)
635 		return minidx;
636 
637 	for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) {
638 		if (bfregi->count[i] < bfregi->count[minidx])
639 			minidx = i;
640 		if (!bfregi->count[minidx])
641 			break;
642 	}
643 
644 	bfregi->count[minidx]++;
645 	return minidx;
646 }
647 
648 static int alloc_bfreg(struct mlx5_ib_dev *dev,
649 		       struct mlx5_bfreg_info *bfregi)
650 {
651 	int bfregn = -ENOMEM;
652 
653 	mutex_lock(&bfregi->lock);
654 	if (bfregi->ver >= 2) {
655 		bfregn = alloc_high_class_bfreg(dev, bfregi);
656 		if (bfregn < 0)
657 			bfregn = alloc_med_class_bfreg(dev, bfregi);
658 	}
659 
660 	if (bfregn < 0) {
661 		BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
662 		bfregn = 0;
663 		bfregi->count[bfregn]++;
664 	}
665 	mutex_unlock(&bfregi->lock);
666 
667 	return bfregn;
668 }
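
/*
 * Index space carved out by the allocators above (a descriptive sketch of
 * this driver's policy, not an ABI guarantee):
 *
 *   0                              - shared; plain 64-bit doorbells only
 *   1 .. num_med_bfreg()           - medium class, shared by least use
 *   first_hi_bfreg() .. max - 1    - high class, exclusively owned
 *
 * alloc_bfreg() prefers an exclusive high-class register, then the least
 * used medium-class one, and finally falls back to the shared bfreg 0.
 */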
669 
670 void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
671 {
672 	mutex_lock(&bfregi->lock);
673 	bfregi->count[bfregn]--;
674 	mutex_unlock(&bfregi->lock);
675 }
676 
677 static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
678 {
679 	switch (state) {
680 	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
681 	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
682 	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
683 	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
684 	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
685 	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
686 	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
687 	default:		return -1;
688 	}
689 }
690 
691 static int to_mlx5_st(enum ib_qp_type type)
692 {
693 	switch (type) {
694 	case IB_QPT_RC:			return MLX5_QP_ST_RC;
695 	case IB_QPT_UC:			return MLX5_QP_ST_UC;
696 	case IB_QPT_UD:			return MLX5_QP_ST_UD;
697 	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
698 	case IB_QPT_XRC_INI:
699 	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
700 	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
701 	case MLX5_IB_QPT_HW_GSI:	return MLX5_QP_ST_QP1;
702 	case MLX5_IB_QPT_DCI:		return MLX5_QP_ST_DCI;
703 	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
704 	case IB_QPT_RAW_PACKET:
705 	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
706 	case IB_QPT_MAX:
707 	default:		return -EINVAL;
708 	}
709 }
710 
711 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
712 			     struct mlx5_ib_cq *recv_cq);
713 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
714 			       struct mlx5_ib_cq *recv_cq);
715 
716 int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
717 			struct mlx5_bfreg_info *bfregi, u32 bfregn,
718 			bool dyn_bfreg)
719 {
720 	unsigned int bfregs_per_sys_page;
721 	u32 index_of_sys_page;
722 	u32 offset;
723 
724 	bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
725 				MLX5_NON_FP_BFREGS_PER_UAR;
726 	index_of_sys_page = bfregn / bfregs_per_sys_page;
727 
728 	if (dyn_bfreg) {
729 		index_of_sys_page += bfregi->num_static_sys_pages;
730 
731 		if (index_of_sys_page >= bfregi->num_sys_pages)
732 			return -EINVAL;
733 
734 		if (bfregn > bfregi->num_dyn_bfregs ||
735 		    bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) {
736 			mlx5_ib_dbg(dev, "Invalid dynamic uar index\n");
737 			return -EINVAL;
738 		}
739 	}
740 
741 	offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
742 	return bfregi->sys_pages[index_of_sys_page] + offset;
743 }
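
/*
 * Worked example of the bfregn -> UAR mapping above (hypothetical values,
 * assuming one UAR per system page and MLX5_NON_FP_BFREGS_PER_UAR == 2):
 * bfregs_per_sys_page = 2, so static bfregn = 5 lands on sys page 5 / 2 = 2
 * at offset 5 % 2 / 2 = 0, i.e. the returned index is bfregi->sys_pages[2].
 */
#if 0	/* documentation sketch, not compiled */
	u32 page = 5 / 2;	/* index_of_sys_page = 2  */
	u32 off  = 5 % 2 / 2;	/* offset within page = 0 */
#endif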
744 
745 static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
746 			    unsigned long addr, size_t size,
747 			    struct ib_umem **umem, int *npages, int *page_shift,
748 			    int *ncont, u32 *offset)
749 {
750 	int err;
751 
752 	*umem = ib_umem_get(udata, addr, size, 0, 0);
753 	if (IS_ERR(*umem)) {
754 		mlx5_ib_dbg(dev, "umem_get failed\n");
755 		return PTR_ERR(*umem);
756 	}
757 
758 	mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);
759 
760 	err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
761 	if (err) {
762 		mlx5_ib_warn(dev, "bad offset\n");
763 		goto err_umem;
764 	}
765 
766 	mlx5_ib_dbg(dev, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
767 		    addr, size, *npages, *page_shift, *ncont, *offset);
768 
769 	return 0;
770 
771 err_umem:
772 	ib_umem_release(*umem);
773 	*umem = NULL;
774 
775 	return err;
776 }
777 
778 static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
779 			    struct mlx5_ib_rwq *rwq, struct ib_udata *udata)
780 {
781 	struct mlx5_ib_ucontext *context =
782 		rdma_udata_to_drv_context(
783 			udata,
784 			struct mlx5_ib_ucontext,
785 			ibucontext);
786 
787 	if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
788 		atomic_dec(&dev->delay_drop.rqs_cnt);
789 
790 	mlx5_ib_db_unmap_user(context, &rwq->db);
791 	ib_umem_release(rwq->umem);
792 }
793 
794 static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
795 			  struct ib_udata *udata, struct mlx5_ib_rwq *rwq,
796 			  struct mlx5_ib_create_wq *ucmd)
797 {
798 	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
799 		udata, struct mlx5_ib_ucontext, ibucontext);
800 	int page_shift = 0;
801 	int npages;
802 	u32 offset = 0;
803 	int ncont = 0;
804 	int err;
805 
806 	if (!ucmd->buf_addr)
807 		return -EINVAL;
808 
809 	rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0, 0);
810 	if (IS_ERR(rwq->umem)) {
811 		mlx5_ib_dbg(dev, "umem_get failed\n");
812 		err = PTR_ERR(rwq->umem);
813 		return err;
814 	}
815 
816 	mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
817 			   &ncont, NULL);
818 	err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
819 				     &rwq->rq_page_offset);
820 	if (err) {
821 		mlx5_ib_warn(dev, "bad offset\n");
822 		goto err_umem;
823 	}
824 
825 	rwq->rq_num_pas = ncont;
826 	rwq->page_shift = page_shift;
827 	rwq->log_page_size =  page_shift - MLX5_ADAPTER_PAGE_SHIFT;
828 	rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);
829 
830 	mlx5_ib_dbg(dev, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n",
831 		    (unsigned long long)ucmd->buf_addr, rwq->buf_size,
832 		    npages, page_shift, ncont, offset);
833 
834 	err = mlx5_ib_db_map_user(ucontext, udata, ucmd->db_addr, &rwq->db);
835 	if (err) {
836 		mlx5_ib_dbg(dev, "map failed\n");
837 		goto err_umem;
838 	}
839 
840 	rwq->create_type = MLX5_WQ_USER;
841 	return 0;
842 
843 err_umem:
844 	ib_umem_release(rwq->umem);
845 	return err;
846 }
847 
848 static int adjust_bfregn(struct mlx5_ib_dev *dev,
849 			 struct mlx5_bfreg_info *bfregi, int bfregn)
850 {
851 	return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR +
852 				bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
853 }
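
/*
 * adjust_bfregn() converts the driver's dense index (which counts only
 * non-fast-path bfregs) into the user-visible index, which also counts the
 * fast-path bfregs in each UAR. E.g., assuming 2 non-FP bfregs out of 4 per
 * UAR, driver bfregn 5 maps to 5 / 2 * 4 + 5 % 2 = 9, skipping the
 * fast-path slots of the first two UARs.
 */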
854 
855 static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
856 			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
857 			  struct ib_qp_init_attr *attr,
858 			  u32 **in,
859 			  struct mlx5_ib_create_qp_resp *resp, int *inlen,
860 			  struct mlx5_ib_qp_base *base)
861 {
862 	struct mlx5_ib_ucontext *context;
863 	struct mlx5_ib_create_qp ucmd;
864 	struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
865 	int page_shift = 0;
866 	int uar_index = 0;
867 	int npages;
868 	u32 offset = 0;
869 	int bfregn;
870 	int ncont = 0;
871 	__be64 *pas;
872 	void *qpc;
873 	int err;
874 	u16 uid;
875 
876 	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
877 	if (err) {
878 		mlx5_ib_dbg(dev, "copy failed\n");
879 		return err;
880 	}
881 
882 	context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext,
883 					    ibucontext);
884 	if (ucmd.flags & MLX5_QP_FLAG_BFREG_INDEX) {
885 		uar_index = bfregn_to_uar_index(dev, &context->bfregi,
886 						ucmd.bfreg_index, true);
887 		if (uar_index < 0)
888 			return uar_index;
889 
890 		bfregn = MLX5_IB_INVALID_BFREG;
891 	} else if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) {
892 		/*
893 		 * TBD: should come from the verbs when we have the API.
894 		 * In CROSS_CHANNEL, the CQ and QP must use the same UAR.
895 		 */
896 		bfregn = MLX5_CROSS_CHANNEL_BFREG;
897 	} else {
899 		bfregn = alloc_bfreg(dev, &context->bfregi);
900 		if (bfregn < 0)
901 			return bfregn;
902 	}
903 
904 	mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
905 	if (bfregn != MLX5_IB_INVALID_BFREG)
906 		uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn,
907 						false);
908 
909 	qp->rq.offset = 0;
910 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
911 	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
912 
913 	err = set_user_buf_size(dev, qp, &ucmd, base, attr);
914 	if (err)
915 		goto err_bfreg;
916 
917 	if (ucmd.buf_addr && ubuffer->buf_size) {
918 		ubuffer->buf_addr = ucmd.buf_addr;
919 		err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr,
920 				       ubuffer->buf_size, &ubuffer->umem,
921 				       &npages, &page_shift, &ncont, &offset);
922 		if (err)
923 			goto err_bfreg;
924 	} else {
925 		ubuffer->umem = NULL;
926 	}
927 
928 	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
929 		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
930 	*in = kvzalloc(*inlen, GFP_KERNEL);
931 	if (!*in) {
932 		err = -ENOMEM;
933 		goto err_umem;
934 	}
935 
936 	uid = (attr->qp_type != IB_QPT_XRC_TGT &&
937 	       attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
938 	MLX5_SET(create_qp_in, *in, uid, uid);
939 	pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
940 	if (ubuffer->umem)
941 		mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);
942 
943 	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
944 
945 	MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
946 	MLX5_SET(qpc, qpc, page_offset, offset);
947 
948 	MLX5_SET(qpc, qpc, uar_page, uar_index);
949 	if (bfregn != MLX5_IB_INVALID_BFREG)
950 		resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
951 	else
952 		resp->bfreg_index = MLX5_IB_INVALID_BFREG;
953 	qp->bfregn = bfregn;
954 
955 	err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &qp->db);
956 	if (err) {
957 		mlx5_ib_dbg(dev, "map failed\n");
958 		goto err_free;
959 	}
960 
961 	err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
962 	if (err) {
963 		mlx5_ib_dbg(dev, "copy failed\n");
964 		goto err_unmap;
965 	}
966 	qp->create_type = MLX5_QP_USER;
967 
968 	return 0;
969 
970 err_unmap:
971 	mlx5_ib_db_unmap_user(context, &qp->db);
972 
973 err_free:
974 	kvfree(*in);
975 
976 err_umem:
977 	ib_umem_release(ubuffer->umem);
978 
979 err_bfreg:
980 	if (bfregn != MLX5_IB_INVALID_BFREG)
981 		mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn);
982 	return err;
983 }
984 
985 static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
986 			    struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base,
987 			    struct ib_udata *udata)
988 {
989 	struct mlx5_ib_ucontext *context =
990 		rdma_udata_to_drv_context(
991 			udata,
992 			struct mlx5_ib_ucontext,
993 			ibucontext);
994 
995 	mlx5_ib_db_unmap_user(context, &qp->db);
996 	ib_umem_release(base->ubuffer.umem);
997 
998 	/*
999 	 * Free only the BFREGs which are handled by the kernel.
1000 	 * BFREGs of UARs allocated dynamically are handled by user.
1001 	 */
1002 	if (qp->bfregn != MLX5_IB_INVALID_BFREG)
1003 		mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
1004 }
1005 
1006 /* get_sq_edge - Get the next nearby edge.
1007  *
1008  * An 'edge' is defined as the first address past the end of the
1009  * fragment or the SQ. Accordingly, WQE construction, which repeatedly
1010  * advances the pointer to write the next data, simply needs to check
1011  * whether it has reached an edge.
1012  *
1013  * @sq: SQ buffer.
1014  * @idx: Stride index in the SQ buffer.
1015  *
1016  * Return:
1017  *	The new edge.
1018  */
1019 static void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
1020 {
1021 	void *fragment_end;
1022 
1023 	fragment_end = mlx5_frag_buf_get_wqe
1024 		(&sq->fbc,
1025 		 mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));
1026 
1027 	return fragment_end + MLX5_SEND_WQE_BB;
1028 }
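
/*
 * Usage sketch (illustrative only; the real pattern lives in the post-send
 * helpers later in this file): a WQE writer advances a segment pointer and,
 * on hitting the edge, re-resolves both pointer and edge for the next
 * fragment. The index arithmetic below is hypothetical.
 */
#if 0	/* documentation sketch, not compiled */
	void *seg  = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
	void *edge = get_sq_edge(sq, idx);

	/* ... write 16-byte chunks, advancing seg ... */
	if (unlikely(seg == edge)) {
		u32 next = (idx + 1) & (sq->wqe_cnt - 1);	/* hypothetical */
		seg  = mlx5_frag_buf_get_wqe(&sq->fbc, next);
		edge = get_sq_edge(sq, next);
	}
#endif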
1029 
1030 static int create_kernel_qp(struct mlx5_ib_dev *dev,
1031 			    struct ib_qp_init_attr *init_attr,
1032 			    struct mlx5_ib_qp *qp,
1033 			    u32 **in, int *inlen,
1034 			    struct mlx5_ib_qp_base *base)
1035 {
1036 	int uar_index;
1037 	void *qpc;
1038 	int err;
1039 
1040 	if (init_attr->create_flags & ~(IB_QP_CREATE_INTEGRITY_EN |
1041 					IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
1042 					IB_QP_CREATE_IPOIB_UD_LSO |
1043 					IB_QP_CREATE_NETIF_QP |
1044 					mlx5_ib_create_qp_sqpn_qp1()))
1045 		return -EINVAL;
1046 
1047 	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
1048 		qp->bf.bfreg = &dev->fp_bfreg;
1049 	else
1050 		qp->bf.bfreg = &dev->bfreg;
1051 
1052 	/* We need to divide by two since each register comprises
1053 	 * two buffers of identical size, namely odd and even
1054 	 */
1055 	qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2;
1056 	uar_index = qp->bf.bfreg->index;
1057 
1058 	err = calc_sq_size(dev, init_attr, qp);
1059 	if (err < 0) {
1060 		mlx5_ib_dbg(dev, "err %d\n", err);
1061 		return err;
1062 	}
1063 
1064 	qp->rq.offset = 0;
1065 	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
1066 	base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
1067 
1068 	err = mlx5_frag_buf_alloc_node(dev->mdev, base->ubuffer.buf_size,
1069 				       &qp->buf, dev->mdev->priv.numa_node);
1070 	if (err) {
1071 		mlx5_ib_dbg(dev, "err %d\n", err);
1072 		return err;
1073 	}
1074 
1075 	if (qp->rq.wqe_cnt)
1076 		mlx5_init_fbc(qp->buf.frags, qp->rq.wqe_shift,
1077 			      ilog2(qp->rq.wqe_cnt), &qp->rq.fbc);
1078 
1079 	if (qp->sq.wqe_cnt) {
1080 		int sq_strides_offset = (qp->sq.offset  & (PAGE_SIZE - 1)) /
1081 					MLX5_SEND_WQE_BB;
1082 		mlx5_init_fbc_offset(qp->buf.frags +
1083 				     (qp->sq.offset / PAGE_SIZE),
1084 				     ilog2(MLX5_SEND_WQE_BB),
1085 				     ilog2(qp->sq.wqe_cnt),
1086 				     sq_strides_offset, &qp->sq.fbc);
1087 
1088 		qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
1089 	}
1090 
1091 	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
1092 		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
1093 	*in = kvzalloc(*inlen, GFP_KERNEL);
1094 	if (!*in) {
1095 		err = -ENOMEM;
1096 		goto err_buf;
1097 	}
1098 
1099 	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
1100 	MLX5_SET(qpc, qpc, uar_page, uar_index);
1101 	MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
1102 
1103 	/* Set "fast registration enabled" for all kernel QPs */
1104 	MLX5_SET(qpc, qpc, fre, 1);
1105 	MLX5_SET(qpc, qpc, rlky, 1);
1106 
1107 	if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
1108 		MLX5_SET(qpc, qpc, deth_sqpn, 1);
1109 		qp->flags |= MLX5_IB_QP_SQPN_QP1;
1110 	}
1111 
1112 	mlx5_fill_page_frag_array(&qp->buf,
1113 				  (__be64 *)MLX5_ADDR_OF(create_qp_in,
1114 							 *in, pas));
1115 
1116 	err = mlx5_db_alloc(dev->mdev, &qp->db);
1117 	if (err) {
1118 		mlx5_ib_dbg(dev, "err %d\n", err);
1119 		goto err_free;
1120 	}
1121 
1122 	qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
1123 				     sizeof(*qp->sq.wrid), GFP_KERNEL);
1124 	qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt,
1125 					sizeof(*qp->sq.wr_data), GFP_KERNEL);
1126 	qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
1127 				     sizeof(*qp->rq.wrid), GFP_KERNEL);
1128 	qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt,
1129 				       sizeof(*qp->sq.w_list), GFP_KERNEL);
1130 	qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt,
1131 					 sizeof(*qp->sq.wqe_head), GFP_KERNEL);
1132 
1133 	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
1134 	    !qp->sq.w_list || !qp->sq.wqe_head) {
1135 		err = -ENOMEM;
1136 		goto err_wrid;
1137 	}
1138 	qp->create_type = MLX5_QP_KERNEL;
1139 
1140 	return 0;
1141 
1142 err_wrid:
1143 	kvfree(qp->sq.wqe_head);
1144 	kvfree(qp->sq.w_list);
1145 	kvfree(qp->sq.wrid);
1146 	kvfree(qp->sq.wr_data);
1147 	kvfree(qp->rq.wrid);
1148 	mlx5_db_free(dev->mdev, &qp->db);
1149 
1150 err_free:
1151 	kvfree(*in);
1152 
1153 err_buf:
1154 	mlx5_frag_buf_free(dev->mdev, &qp->buf);
1155 	return err;
1156 }
1157 
1158 static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1159 {
1160 	kvfree(qp->sq.wqe_head);
1161 	kvfree(qp->sq.w_list);
1162 	kvfree(qp->sq.wrid);
1163 	kvfree(qp->sq.wr_data);
1164 	kvfree(qp->rq.wrid);
1165 	mlx5_db_free(dev->mdev, &qp->db);
1166 	mlx5_frag_buf_free(dev->mdev, &qp->buf);
1167 }
1168 
1169 static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
1170 {
1171 	if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
1172 	    (attr->qp_type == MLX5_IB_QPT_DCI) ||
1173 	    (attr->qp_type == IB_QPT_XRC_INI))
1174 		return MLX5_SRQ_RQ;
1175 	else if (!qp->has_rq)
1176 		return MLX5_ZERO_LEN_RQ;
1177 	else
1178 		return MLX5_NON_ZERO_RQ;
1179 }
1180 
1181 static int is_connected(enum ib_qp_type qp_type)
1182 {
1183 	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC ||
1184 	    qp_type == MLX5_IB_QPT_DCI)
1185 		return 1;
1186 
1187 	return 0;
1188 }
1189 
1190 static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
1191 				    struct mlx5_ib_qp *qp,
1192 				    struct mlx5_ib_sq *sq, u32 tdn,
1193 				    struct ib_pd *pd)
1194 {
1195 	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
1196 	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
1197 
1198 	MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
1199 	MLX5_SET(tisc, tisc, transport_domain, tdn);
1200 	if (qp->flags & MLX5_IB_QP_UNDERLAY)
1201 		MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
1202 
1203 	return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
1204 }
1205 
1206 static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
1207 				      struct mlx5_ib_sq *sq, struct ib_pd *pd)
1208 {
1209 	mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid);
1210 }
1211 
1212 static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
1213 {
1214 	if (sq->flow_rule)
1215 		mlx5_del_flow_rules(sq->flow_rule);
1216 	sq->flow_rule = NULL;
1217 }
1218 
1219 static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
1220 				   struct ib_udata *udata,
1221 				   struct mlx5_ib_sq *sq, void *qpin,
1222 				   struct ib_pd *pd)
1223 {
1224 	struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
1225 	__be64 *pas;
1226 	void *in;
1227 	void *sqc;
1228 	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
1229 	void *wq;
1230 	int inlen;
1231 	int err;
1232 	int page_shift = 0;
1233 	int npages;
1234 	int ncont = 0;
1235 	u32 offset = 0;
1236 
1237 	err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr, ubuffer->buf_size,
1238 			       &sq->ubuffer.umem, &npages, &page_shift, &ncont,
1239 			       &offset);
1240 	if (err)
1241 		return err;
1242 
1243 	inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont;
1244 	in = kvzalloc(inlen, GFP_KERNEL);
1245 	if (!in) {
1246 		err = -ENOMEM;
1247 		goto err_umem;
1248 	}
1249 
1250 	MLX5_SET(create_sq_in, in, uid, to_mpd(pd)->uid);
1251 	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
1252 	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
1253 	if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
1254 		MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
1255 	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
1256 	MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
1257 	MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
1258 	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
1259 	MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);
1260 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
1261 	    MLX5_CAP_ETH(dev->mdev, swp))
1262 		MLX5_SET(sqc, sqc, allow_swp, 1);
1263 
1264 	wq = MLX5_ADDR_OF(sqc, sqc, wq);
1265 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1266 	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
1267 	MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page));
1268 	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
1269 	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
1270 	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size));
1271 	MLX5_SET(wq, wq, log_wq_pg_sz,  page_shift - MLX5_ADAPTER_PAGE_SHIFT);
1272 	MLX5_SET(wq, wq, page_offset, offset);
1273 
1274 	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
1275 	mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);
1276 
1277 	err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp);
1278 
1279 	kvfree(in);
1280 
1281 	if (err)
1282 		goto err_umem;
1283 
1284 	return 0;
1285 
1286 err_umem:
1287 	ib_umem_release(sq->ubuffer.umem);
1288 	sq->ubuffer.umem = NULL;
1289 
1290 	return err;
1291 }
1292 
1293 static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
1294 				     struct mlx5_ib_sq *sq)
1295 {
1296 	destroy_flow_rule_vport_sq(sq);
1297 	mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
1298 	ib_umem_release(sq->ubuffer.umem);
1299 }
1300 
1301 static size_t get_rq_pas_size(void *qpc)
1302 {
1303 	u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
1304 	u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
1305 	u32 log_rq_size   = MLX5_GET(qpc, qpc, log_rq_size);
1306 	u32 page_offset   = MLX5_GET(qpc, qpc, page_offset);
1307 	u32 po_quanta	  = 1 << (log_page_size - 6);
1308 	u32 rq_sz	  = 1 << (log_rq_size + 4 + log_rq_stride);
1309 	u32 page_size	  = 1 << log_page_size;
1310 	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
1311 	u32 rq_num_pas	  = (rq_sz_po + page_size - 1) / page_size;
1312 
1313 	return rq_num_pas * sizeof(u64);
1314 }
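
/*
 * Worked example of the PAS sizing above (hypothetical field values): with
 * log_page_size field = 0 (i.e. 4KB pages), log_rq_stride = 2,
 * log_rq_size = 8 and page_offset = 0: rq_sz = 1 << (8 + 4 + 2) = 16384
 * bytes, so rq_num_pas = 16384 / 4096 = 4 and the function returns
 * 4 * sizeof(u64) = 32 bytes of PAS entries.
 */
#if 0	/* documentation sketch, not compiled */
	u32 rq_sz = 1 << (8 + 4 + 2);		/* 16384 bytes */
	u32 npas  = DIV_ROUND_UP(rq_sz, 4096);	/* 4 pages     */
#endif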
1315 
1316 static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
1317 				   struct mlx5_ib_rq *rq, void *qpin,
1318 				   size_t qpinlen, struct ib_pd *pd)
1319 {
1320 	struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
1321 	__be64 *pas;
1322 	__be64 *qp_pas;
1323 	void *in;
1324 	void *rqc;
1325 	void *wq;
1326 	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
1327 	size_t rq_pas_size = get_rq_pas_size(qpc);
1328 	size_t inlen;
1329 	int err;
1330 
1331 	if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
1332 		return -EINVAL;
1333 
1334 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
1335 	in = kvzalloc(inlen, GFP_KERNEL);
1336 	if (!in)
1337 		return -ENOMEM;
1338 
1339 	MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
1340 	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
1341 	if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING))
1342 		MLX5_SET(rqc, rqc, vsd, 1);
1343 	MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
1344 	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
1345 	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
1346 	MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
1347 	MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));
1348 
1349 	if (mqp->flags & MLX5_IB_QP_CAP_SCATTER_FCS)
1350 		MLX5_SET(rqc, rqc, scatter_fcs, 1);
1351 
1352 	wq = MLX5_ADDR_OF(rqc, rqc, wq);
1353 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1354 	if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING)
1355 		MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
1356 	MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
1357 	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
1358 	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
1359 	MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4);
1360 	MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(qpc, qpc, log_page_size));
1361 	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size));
1362 
1363 	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
1364 	qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas);
1365 	memcpy(pas, qp_pas, rq_pas_size);
1366 
1367 	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp);
1368 
1369 	kvfree(in);
1370 
1371 	return err;
1372 }
1373 
1374 static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
1375 				     struct mlx5_ib_rq *rq)
1376 {
1377 	mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
1378 }
1379 
1380 static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
1381 {
1382 	return  (MLX5_CAP_ETH(dev, tunnel_stateless_vxlan) ||
1383 		 MLX5_CAP_ETH(dev, tunnel_stateless_gre) ||
1384 		 MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
1385 }
1386 
1387 static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
1388 				      struct mlx5_ib_rq *rq,
1389 				      u32 qp_flags_en,
1390 				      struct ib_pd *pd)
1391 {
1392 	if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
1393 			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
1394 		mlx5_ib_disable_lb(dev, false, true);
1395 	mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid);
1396 }
1397 
1398 static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
1399 				    struct mlx5_ib_rq *rq, u32 tdn,
1400 				    u32 *qp_flags_en,
1401 				    struct ib_pd *pd,
1402 				    u32 *out, int outlen)
1403 {
1404 	u8 lb_flag = 0;
1405 	u32 *in;
1406 	void *tirc;
1407 	int inlen;
1408 	int err;
1409 
1410 	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
1411 	in = kvzalloc(inlen, GFP_KERNEL);
1412 	if (!in)
1413 		return -ENOMEM;
1414 
1415 	MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
1416 	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
1417 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
1418 	MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
1419 	MLX5_SET(tirc, tirc, transport_domain, tdn);
1420 	if (*qp_flags_en & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
1421 		MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
1422 
1423 	if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
1424 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
1425 
1426 	if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
1427 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
1428 
1429 	if (dev->is_rep) {
1430 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
1431 		*qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
1432 	}
1433 
1434 	MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
1435 
1436 	err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);
1437 
1438 	rq->tirn = MLX5_GET(create_tir_out, out, tirn);
1439 	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
1440 		err = mlx5_ib_enable_lb(dev, false, true);
1441 
1442 		if (err)
1443 			destroy_raw_packet_qp_tir(dev, rq, 0, pd);
1444 	}
1445 	kvfree(in);
1446 
1447 	return err;
1448 }
1449 
1450 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1451 				u32 *in, size_t inlen,
1452 				struct ib_pd *pd,
1453 				struct ib_udata *udata,
1454 				struct mlx5_ib_create_qp_resp *resp)
1455 {
1456 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
1457 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
1458 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
1459 	struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
1460 		udata, struct mlx5_ib_ucontext, ibucontext);
1461 	int err;
1462 	u32 tdn = mucontext->tdn;
1463 	u16 uid = to_mpd(pd)->uid;
1464 	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
1465 
1466 	if (qp->sq.wqe_cnt) {
1467 		err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
1468 		if (err)
1469 			return err;
1470 
1471 		err = create_raw_packet_qp_sq(dev, udata, sq, in, pd);
1472 		if (err)
1473 			goto err_destroy_tis;
1474 
1475 		if (uid) {
1476 			resp->tisn = sq->tisn;
1477 			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TISN;
1478 			resp->sqn = sq->base.mqp.qpn;
1479 			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_SQN;
1480 		}
1481 
1482 		sq->base.container_mibqp = qp;
1483 		sq->base.mqp.event = mlx5_ib_qp_event;
1484 	}
1485 
1486 	if (qp->rq.wqe_cnt) {
1487 		rq->base.container_mibqp = qp;
1488 
1489 		if (qp->flags & MLX5_IB_QP_CVLAN_STRIPPING)
1490 			rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
1491 		if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
1492 			rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
1493 		err = create_raw_packet_qp_rq(dev, rq, in, inlen, pd);
1494 		if (err)
1495 			goto err_destroy_sq;
1496 
1497 		err = create_raw_packet_qp_tir(
1498 			dev, rq, tdn, &qp->flags_en, pd, out,
1499 			MLX5_ST_SZ_BYTES(create_tir_out));
1500 		if (err)
1501 			goto err_destroy_rq;
1502 
1503 		if (uid) {
1504 			resp->rqn = rq->base.mqp.qpn;
1505 			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN;
1506 			resp->tirn = rq->tirn;
1507 			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
1508 			if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
1509 				resp->tir_icm_addr = MLX5_GET(
1510 					create_tir_out, out, icm_address_31_0);
1511 				resp->tir_icm_addr |=
1512 					(u64)MLX5_GET(create_tir_out, out,
1513 						      icm_address_39_32)
1514 					<< 32;
1515 				resp->tir_icm_addr |=
1516 					(u64)MLX5_GET(create_tir_out, out,
1517 						      icm_address_63_40)
1518 					<< 40;
1519 				resp->comp_mask |=
1520 					MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
1521 			}
1522 		}
1523 	}
1524 
1525 	qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
1526 						     rq->base.mqp.qpn;
1527 	err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
1528 	if (err)
1529 		goto err_destroy_tir;
1530 
1531 	return 0;
1532 
1533 err_destroy_tir:
1534 	destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, pd);
1535 err_destroy_rq:
1536 	destroy_raw_packet_qp_rq(dev, rq);
1537 err_destroy_sq:
1538 	if (!qp->sq.wqe_cnt)
1539 		return err;
1540 	destroy_raw_packet_qp_sq(dev, sq);
1541 err_destroy_tis:
1542 	destroy_raw_packet_qp_tis(dev, sq, pd);
1543 
1544 	return err;
1545 }
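
/*
 * Topology created above, summarized (descriptive, not an API): the send
 * half of a raw packet QP is an SQ attached to a TIS, the receive half an
 * RQ steered through a TIR; the QP number exposed to the user is borrowed
 * from the SQ when it exists, otherwise from the RQ, and teardown runs in
 * the reverse order of creation (TIR, RQ, SQ, TIS).
 */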
1546 
1547 static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
1548 				  struct mlx5_ib_qp *qp)
1549 {
1550 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
1551 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
1552 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
1553 
1554 	if (qp->rq.wqe_cnt) {
1555 		destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd);
1556 		destroy_raw_packet_qp_rq(dev, rq);
1557 	}
1558 
1559 	if (qp->sq.wqe_cnt) {
1560 		destroy_raw_packet_qp_sq(dev, sq);
1561 		destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd);
1562 	}
1563 }
1564 
1565 static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
1566 				    struct mlx5_ib_raw_packet_qp *raw_packet_qp)
1567 {
1568 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
1569 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
1570 
1571 	sq->sq = &qp->sq;
1572 	rq->rq = &qp->rq;
1573 	sq->doorbell = &qp->db;
1574 	rq->doorbell = &qp->db;
1575 }
1576 
1577 static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1578 {
1579 	if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
1580 			    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
1581 		mlx5_ib_disable_lb(dev, false, true);
1582 	mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
1583 			     to_mpd(qp->ibqp.pd)->uid);
1584 }
1585 
1586 static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1587 				 struct ib_pd *pd,
1588 				 struct ib_qp_init_attr *init_attr,
1589 				 struct ib_udata *udata)
1590 {
1591 	struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
1592 		udata, struct mlx5_ib_ucontext, ibucontext);
1593 	struct mlx5_ib_create_qp_resp resp = {};
1594 	int inlen;
1595 	int outlen;
1596 	int err;
1597 	u32 *in;
1598 	u32 *out;
1599 	void *tirc;
1600 	void *hfso;
1601 	u32 selected_fields = 0;
1602 	u32 outer_l4;
1603 	size_t min_resp_len;
1604 	u32 tdn = mucontext->tdn;
1605 	struct mlx5_ib_create_qp_rss ucmd = {};
1606 	size_t required_cmd_sz;
1607 	u8 lb_flag = 0;
1608 
1609 	if (init_attr->qp_type != IB_QPT_RAW_PACKET)
1610 		return -EOPNOTSUPP;
1611 
1612 	if (init_attr->create_flags || init_attr->send_cq)
1613 		return -EINVAL;
1614 
1615 	min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index);
1616 	if (udata->outlen < min_resp_len)
1617 		return -EINVAL;
1618 
1619 	required_cmd_sz = offsetof(typeof(ucmd), flags) + sizeof(ucmd.flags);
1620 	if (udata->inlen < required_cmd_sz) {
1621 		mlx5_ib_dbg(dev, "invalid inlen\n");
1622 		return -EINVAL;
1623 	}
1624 
1625 	if (udata->inlen > sizeof(ucmd) &&
1626 	    !ib_is_udata_cleared(udata, sizeof(ucmd),
1627 				 udata->inlen - sizeof(ucmd))) {
1628 		mlx5_ib_dbg(dev, "inlen is not supported\n");
1629 		return -EOPNOTSUPP;
1630 	}
1631 
1632 	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
1633 		mlx5_ib_dbg(dev, "copy failed\n");
1634 		return -EFAULT;
1635 	}
1636 
1637 	if (ucmd.comp_mask) {
1638 		mlx5_ib_dbg(dev, "invalid comp mask\n");
1639 		return -EOPNOTSUPP;
1640 	}
1641 
1642 	if (ucmd.flags & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
1643 			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
1644 			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)) {
1645 		mlx5_ib_dbg(dev, "invalid flags\n");
1646 		return -EOPNOTSUPP;
1647 	}
1648 
1649 	if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS &&
1650 	    !tunnel_offload_supported(dev->mdev)) {
1651 		mlx5_ib_dbg(dev, "tunnel offloads aren't supported\n");
1652 		return -EOPNOTSUPP;
1653 	}
1654 
1655 	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
1656 	    !(ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
1657 		mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
1658 		return -EOPNOTSUPP;
1659 	}
1660 
1661 	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->is_rep) {
1662 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
1663 		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
1664 	}
1665 
1666 	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
1667 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
1668 		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
1669 	}
1670 
1671 	err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
1672 	if (err) {
1673 		mlx5_ib_dbg(dev, "copy failed\n");
1674 		return -EINVAL;
1675 	}
1676 
1677 	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
1678 	outlen = MLX5_ST_SZ_BYTES(create_tir_out);
1679 	in = kvzalloc(inlen + outlen, GFP_KERNEL);
1680 	if (!in)
1681 		return -ENOMEM;
1682 
1683 	out = in + MLX5_ST_SZ_DW(create_tir_in);
1684 	MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
1685 	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
1686 	MLX5_SET(tirc, tirc, disp_type,
1687 		 MLX5_TIRC_DISP_TYPE_INDIRECT);
1688 	MLX5_SET(tirc, tirc, indirect_table,
1689 		 init_attr->rwq_ind_tbl->ind_tbl_num);
1690 	MLX5_SET(tirc, tirc, transport_domain, tdn);
1691 
1692 	hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
1693 
1694 	if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
1695 		MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
1696 
1697 	MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
1698 
1699 	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER)
1700 		hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
1701 	else
1702 		hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
1703 
1704 	switch (ucmd.rx_hash_function) {
1705 	case MLX5_RX_HASH_FUNC_TOEPLITZ:
1706 	{
1707 		void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
1708 		size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
1709 
1710 		if (len != ucmd.rx_key_len) {
1711 			err = -EINVAL;
1712 			goto err;
1713 		}
1714 
1715 		MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
1716 		memcpy(rss_key, ucmd.rx_hash_key, len);
1717 		break;
1718 	}
1719 	default:
1720 		err = -EOPNOTSUPP;
1721 		goto err;
1722 	}
1723 
1724 	if (!ucmd.rx_hash_fields_mask) {
1725 		/* special case when this TIR serves as a steering entry without hashing */
1726 		if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
1727 			goto create_tir;
1728 		err = -EINVAL;
1729 		goto err;
1730 	}
1731 
1732 	if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1733 	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
1734 	     ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
1735 	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
1736 		err = -EINVAL;
1737 		goto err;
1738 	}
1739 
1740 	/* If none of the IPv4/IPv6 SRC/DST bits is set, this field is ignored */
1741 	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1742 	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
1743 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1744 			 MLX5_L3_PROT_TYPE_IPV4);
1745 	else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
1746 		 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
1747 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1748 			 MLX5_L3_PROT_TYPE_IPV6);
1749 
1750 	outer_l4 = ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1751 		    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) << 0 |
1752 		   ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
1753 		    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) << 1 |
1754 		   (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2;
1755 
1756 	/* Check that only one l4 protocol is set */
1757 	if (outer_l4 & (outer_l4 - 1)) {
1758 		err = -EINVAL;
1759 		goto err;
1760 	}
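
	/* Example: asking for both TCP and UDP ports yields outer_l4 = 0b011,
	 * and 3 & 2 != 0 trips the power-of-two test above; a TCP-only mask
	 * (0b001) passes since 1 & 0 == 0.
	 */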
1761 
1762 	/* If none of the TCP/UDP SRC/DST bits is set, this field is ignored */
1763 	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1764 	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
1765 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
1766 			 MLX5_L4_PROT_TYPE_TCP);
1767 	else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
1768 		 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
1769 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
1770 			 MLX5_L4_PROT_TYPE_UDP);
1771 
1772 	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1773 	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
1774 		selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;
1775 
1776 	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
1777 	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
1778 		selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;
1779 
1780 	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1781 	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
1782 		selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;
1783 
1784 	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
1785 	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
1786 		selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;
1787 
1788 	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI)
1789 		selected_fields |= MLX5_HASH_FIELD_SEL_IPSEC_SPI;
1790 
1791 	MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
1792 
1793 create_tir:
1794 	err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);
1795 
1796 	qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn);
1797 	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
1798 		err = mlx5_ib_enable_lb(dev, false, true);
1799 
1800 		if (err)
1801 			mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
1802 					     to_mpd(pd)->uid);
1803 	}
1804 
1805 	if (err)
1806 		goto err;
1807 
1808 	if (mucontext->devx_uid) {
1809 		resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
1810 		resp.tirn = qp->rss_qp.tirn;
1811 		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
1812 			resp.tir_icm_addr =
1813 				MLX5_GET(create_tir_out, out, icm_address_31_0);
1814 			resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
1815 							   icm_address_39_32)
1816 					     << 32;
1817 			resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
1818 							   icm_address_63_40)
1819 					     << 40;
1820 			resp.comp_mask |=
1821 				MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
1822 		}
1823 	}
1824 
1825 	err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
1826 	if (err)
1827 		goto err_copy;
1828 
1829 	kvfree(in);
1830 	/* qpn is reserved for that QP */
1831 	qp->trans_qp.base.mqp.qpn = 0;
1832 	qp->flags |= MLX5_IB_QP_RSS;
1833 	return 0;
1834 
1835 err_copy:
1836 	mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, mucontext->devx_uid);
1837 err:
1838 	kvfree(in);
1839 	return err;
1840 }
1841 
1842 static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
1843 					 void *qpc)
1844 {
1845 	int rcqe_sz;
1846 
1847 	if (init_attr->qp_type == MLX5_IB_QPT_DCI)
1848 		return;
1849 
1850 	rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
1851 
1852 	if (init_attr->qp_type == MLX5_IB_QPT_DCT) {
1853 		if (rcqe_sz == 128)
1854 			MLX5_SET(dctc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
1855 
1856 		return;
1857 	}
1858 
1859 	MLX5_SET(qpc, qpc, cs_res,
1860 		 rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
1861 				  MLX5_RES_SCAT_DATA32_CQE);
1862 }
1863 
1864 static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
1865 					 struct ib_qp_init_attr *init_attr,
1866 					 struct mlx5_ib_create_qp *ucmd,
1867 					 void *qpc)
1868 {
1869 	enum ib_qp_type qpt = init_attr->qp_type;
1870 	int scqe_sz;
1871 	bool allow_scat_cqe = 0;
1872 
1873 	if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
1874 		return;
1875 
1876 	if (ucmd)
1877 		allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
1878 
1879 	if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
1880 		return;
1881 
1882 	scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);
1883 	if (scqe_sz == 128) {
1884 		MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
1885 		return;
1886 	}
1887 
1888 	if (init_attr->qp_type != MLX5_IB_QPT_DCI ||
1889 	    MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe))
1890 		MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
1891 }
1892 
1893 static int atomic_size_to_mode(int size_mask)
1894 {
1895 	/* driver does not support atomic_size > 256B
1896 	 * and does not know how to translate bigger sizes
1897 	 */
1898 	int supported_size_mask = size_mask & 0x1ff;
1899 	int log_max_size;
1900 
1901 	if (!supported_size_mask)
1902 		return -EOPNOTSUPP;
1903 
1904 	log_max_size = __fls(supported_size_mask);
1905 
1906 	if (log_max_size > 3)
1907 		return log_max_size;
1908 
1909 	return MLX5_ATOMIC_MODE_8B;
1910 }
1911 
1912 static int get_atomic_mode(struct mlx5_ib_dev *dev,
1913 			   enum ib_qp_type qp_type)
1914 {
1915 	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
1916 	u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic);
1917 	int atomic_mode = -EOPNOTSUPP;
1918 	int atomic_size_mask;
1919 
1920 	if (!atomic)
1921 		return -EOPNOTSUPP;
1922 
1923 	if (qp_type == MLX5_IB_QPT_DCT)
1924 		atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
1925 	else
1926 		atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
1927 
1928 	if ((atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP) ||
1929 	    (atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD))
1930 		atomic_mode = atomic_size_to_mode(atomic_size_mask);
1931 
1932 	if (atomic_mode <= 0 &&
1933 	    (atomic_operations & MLX5_ATOMIC_OPS_CMP_SWAP &&
1934 	     atomic_operations & MLX5_ATOMIC_OPS_FETCH_ADD))
1935 		atomic_mode = MLX5_ATOMIC_MODE_IB_COMP;
1936 
1937 	return atomic_mode;
1938 }
1939 
1940 static inline bool check_flags_mask(uint64_t input, uint64_t supported)
1941 {
1942 	return (input & ~supported) == 0;
1943 }
1944 
1945 static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1946 			    struct ib_qp_init_attr *init_attr,
1947 			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
1948 {
1949 	struct mlx5_ib_resources *devr = &dev->devr;
1950 	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
1951 	struct mlx5_core_dev *mdev = dev->mdev;
1952 	struct mlx5_ib_create_qp_resp resp = {};
1953 	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
1954 		udata, struct mlx5_ib_ucontext, ibucontext);
1955 	struct mlx5_ib_cq *send_cq;
1956 	struct mlx5_ib_cq *recv_cq;
1957 	unsigned long flags;
1958 	u32 uidx = MLX5_IB_DEFAULT_UIDX;
1959 	struct mlx5_ib_create_qp ucmd;
1960 	struct mlx5_ib_qp_base *base;
1961 	int mlx5_st;
1962 	void *qpc;
1963 	u32 *in;
1964 	int err;
1965 
1966 	mutex_init(&qp->mutex);
1967 	spin_lock_init(&qp->sq.lock);
1968 	spin_lock_init(&qp->rq.lock);
1969 
1970 	mlx5_st = to_mlx5_st(init_attr->qp_type);
1971 	if (mlx5_st < 0)
1972 		return -EINVAL;
1973 
1974 	if (init_attr->rwq_ind_tbl) {
1975 		if (!udata)
1976 			return -ENOSYS;
1977 
1978 		err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata);
1979 		return err;
1980 	}
1981 
1982 	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
1983 		if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
1984 			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
1985 			return -EINVAL;
1986 		} else {
1987 			qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
1988 		}
1989 	}
1990 
1991 	if (init_attr->create_flags &
1992 			(IB_QP_CREATE_CROSS_CHANNEL |
1993 			 IB_QP_CREATE_MANAGED_SEND |
1994 			 IB_QP_CREATE_MANAGED_RECV)) {
1995 		if (!MLX5_CAP_GEN(mdev, cd)) {
1996 			mlx5_ib_dbg(dev, "cross-channel isn't supported\n");
1997 			return -EINVAL;
1998 		}
1999 		if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
2000 			qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
2001 		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
2002 			qp->flags |= MLX5_IB_QP_MANAGED_SEND;
2003 		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
2004 			qp->flags |= MLX5_IB_QP_MANAGED_RECV;
2005 	}
2006 
2007 	if (init_attr->qp_type == IB_QPT_UD &&
2008 	    (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO))
2009 		if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
2010 			mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n");
2011 			return -EOPNOTSUPP;
2012 		}
2013 
2014 	if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
2015 		if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
2016 			mlx5_ib_dbg(dev, "Scatter FCS is supported only for Raw Packet QPs");
2017 			return -EOPNOTSUPP;
2018 		}
2019 		if (!MLX5_CAP_GEN(dev->mdev, eth_net_offloads) ||
2020 		    !MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
2021 			mlx5_ib_dbg(dev, "Scatter FCS isn't supported\n");
2022 			return -EOPNOTSUPP;
2023 		}
2024 		qp->flags |= MLX5_IB_QP_CAP_SCATTER_FCS;
2025 	}
2026 
2027 	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
2028 		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
2029 
2030 	if (init_attr->create_flags & IB_QP_CREATE_CVLAN_STRIPPING) {
2031 		if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
2032 		      MLX5_CAP_ETH(dev->mdev, vlan_cap)) ||
2033 		    (init_attr->qp_type != IB_QPT_RAW_PACKET))
2034 			return -EOPNOTSUPP;
2035 		qp->flags |= MLX5_IB_QP_CVLAN_STRIPPING;
2036 	}
2037 
2038 	if (udata) {
2039 		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
2040 			mlx5_ib_dbg(dev, "copy failed\n");
2041 			return -EFAULT;
2042 		}
2043 
2044 		if (!check_flags_mask(ucmd.flags,
2045 				      MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
2046 				      MLX5_QP_FLAG_BFREG_INDEX |
2047 				      MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE |
2048 				      MLX5_QP_FLAG_SCATTER_CQE |
2049 				      MLX5_QP_FLAG_SIGNATURE |
2050 				      MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC |
2051 				      MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
2052 				      MLX5_QP_FLAG_TUNNEL_OFFLOADS |
2053 				      MLX5_QP_FLAG_TYPE_DCI |
2054 				      MLX5_QP_FLAG_TYPE_DCT))
2055 			return -EINVAL;
2056 
2057 		err = get_qp_user_index(ucontext, &ucmd, udata->inlen, &uidx);
2058 		if (err)
2059 			return err;
2060 
2061 		qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
2062 		if (MLX5_CAP_GEN(dev->mdev, sctr_data_cqe))
2063 			qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
2064 		if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
2065 			if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
2066 			    !tunnel_offload_supported(mdev)) {
2067 				mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
2068 				return -EOPNOTSUPP;
2069 			}
2070 			qp->flags_en |= MLX5_QP_FLAG_TUNNEL_OFFLOADS;
2071 		}
2072 
2073 		if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) {
2074 			if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
2075 				mlx5_ib_dbg(dev, "Self-LB UC isn't supported\n");
2076 				return -EOPNOTSUPP;
2077 			}
2078 			qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
2079 		}
2080 
2081 		if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
2082 			if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
2083 				mlx5_ib_dbg(dev, "Self-LB UM isn't supported\n");
2084 				return -EOPNOTSUPP;
2085 			}
2086 			qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
2087 		}
2088 
2089 		if (ucmd.flags & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE) {
2090 			if (init_attr->qp_type != IB_QPT_RC ||
2091 				!MLX5_CAP_GEN(dev->mdev, qp_packet_based)) {
2092 				mlx5_ib_dbg(dev, "packet based credit mode isn't supported\n");
2093 				return -EOPNOTSUPP;
2094 			}
2095 			qp->flags |= MLX5_IB_QP_PACKET_BASED_CREDIT;
2096 		}
2097 
2098 		if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
2099 			if (init_attr->qp_type != IB_QPT_UD ||
2100 			    (MLX5_CAP_GEN(dev->mdev, port_type) !=
2101 			     MLX5_CAP_PORT_TYPE_IB) ||
2102 			    !mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) {
2103 				mlx5_ib_dbg(dev, "Source QP option isn't supported\n");
2104 				return -EOPNOTSUPP;
2105 			}
2106 
2107 			qp->flags |= MLX5_IB_QP_UNDERLAY;
2108 			qp->underlay_qpn = init_attr->source_qpn;
2109 		}
2110 	} else {
2111 		qp->wq_sig = !!wq_signature;
2112 	}
2113 
2114 	base = (init_attr->qp_type == IB_QPT_RAW_PACKET ||
2115 		qp->flags & MLX5_IB_QP_UNDERLAY) ?
2116 	       &qp->raw_packet_qp.rq.base :
2117 	       &qp->trans_qp.base;
2118 
2119 	qp->has_rq = qp_has_rq(init_attr);
2120 	err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
2121 			  qp, udata ? &ucmd : NULL);
2122 	if (err) {
2123 		mlx5_ib_dbg(dev, "err %d\n", err);
2124 		return err;
2125 	}
2126 
2127 	if (pd) {
2128 		if (udata) {
2129 			__u32 max_wqes =
2130 				1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
2131 			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
2132 			if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
2133 			    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
2134 				mlx5_ib_dbg(dev, "invalid rq params\n");
2135 				return -EINVAL;
2136 			}
2137 			if (ucmd.sq_wqe_count > max_wqes) {
2138 				mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
2139 					    ucmd.sq_wqe_count, max_wqes);
2140 				return -EINVAL;
2141 			}
2142 			if (init_attr->create_flags &
2143 			    mlx5_ib_create_qp_sqpn_qp1()) {
2144 				mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
2145 				return -EINVAL;
2146 			}
2147 			err = create_user_qp(dev, pd, qp, udata, init_attr, &in,
2148 					     &resp, &inlen, base);
2149 			if (err)
2150 				mlx5_ib_dbg(dev, "err %d\n", err);
2151 		} else {
2152 			err = create_kernel_qp(dev, init_attr, qp, &in, &inlen,
2153 					       base);
2154 			if (err)
2155 				mlx5_ib_dbg(dev, "err %d\n", err);
2156 		}
2157 
2158 		if (err)
2159 			return err;
2160 	} else {
2161 		in = kvzalloc(inlen, GFP_KERNEL);
2162 		if (!in)
2163 			return -ENOMEM;
2164 
2165 		qp->create_type = MLX5_QP_EMPTY;
2166 	}
2167 
2168 	if (is_sqp(init_attr->qp_type))
2169 		qp->port = init_attr->port_num;
2170 
2171 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
2172 
2173 	MLX5_SET(qpc, qpc, st, mlx5_st);
2174 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
2175 
2176 	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
2177 		MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
2178 	else
2179 		MLX5_SET(qpc, qpc, latency_sensitive, 1);
2180 
2181 
2182 	if (qp->wq_sig)
2183 		MLX5_SET(qpc, qpc, wq_signature, 1);
2184 
2185 	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
2186 		MLX5_SET(qpc, qpc, block_lb_mc, 1);
2187 
2188 	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
2189 		MLX5_SET(qpc, qpc, cd_master, 1);
2190 	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
2191 		MLX5_SET(qpc, qpc, cd_slave_send, 1);
2192 	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
2193 		MLX5_SET(qpc, qpc, cd_slave_receive, 1);
2194 	if (qp->flags & MLX5_IB_QP_PACKET_BASED_CREDIT)
2195 		MLX5_SET(qpc, qpc, req_e2e_credit_mode, 1);
2196 	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
2197 		configure_responder_scat_cqe(init_attr, qpc);
2198 		configure_requester_scat_cqe(dev, init_attr,
2199 					     udata ? &ucmd : NULL,
2200 					     qpc);
2201 	}
2202 
2203 	if (qp->rq.wqe_cnt) {
2204 		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
2205 		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
2206 	}
2207 
2208 	MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
2209 
2210 	if (qp->sq.wqe_cnt) {
2211 		MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
2212 	} else {
2213 		MLX5_SET(qpc, qpc, no_sq, 1);
2214 		if (init_attr->srq &&
2215 		    init_attr->srq->srq_type == IB_SRQT_TM)
2216 			MLX5_SET(qpc, qpc, offload_type,
2217 				 MLX5_QPC_OFFLOAD_TYPE_RNDV);
2218 	}
2219 
2220 	/* Set default resources */
2221 	switch (init_attr->qp_type) {
2222 	case IB_QPT_XRC_TGT:
2223 		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
2224 		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
2225 		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
2226 		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn);
2227 		break;
2228 	case IB_QPT_XRC_INI:
2229 		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
2230 		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
2231 		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
2232 		break;
2233 	default:
2234 		if (init_attr->srq) {
2235 			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
2236 			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
2237 		} else {
2238 			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
2239 			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
2240 		}
2241 	}
2242 
2243 	if (init_attr->send_cq)
2244 		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);
2245 
2246 	if (init_attr->recv_cq)
2247 		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);
2248 
2249 	MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
2250 
2251 	/* 0xffffff means we ask to work with cqe version 0 */
2252 	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
2253 		MLX5_SET(qpc, qpc, user_index, uidx);
2254 
2255 	/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
2256 	if (init_attr->qp_type == IB_QPT_UD &&
2257 	    (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
2258 		MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
2259 		qp->flags |= MLX5_IB_QP_LSO;
2260 	}
2261 
2262 	if (init_attr->create_flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
2263 		if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
2264 			mlx5_ib_dbg(dev, "scatter end padding is not supported\n");
2265 			err = -EOPNOTSUPP;
2266 			goto err;
2267 		} else if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
2268 			MLX5_SET(qpc, qpc, end_padding_mode,
2269 				 MLX5_WQ_END_PAD_MODE_ALIGN);
2270 		} else {
2271 			qp->flags |= MLX5_IB_QP_PCI_WRITE_END_PADDING;
2272 		}
2273 	}
2274 
2275 	if (inlen < 0) {
2276 		err = -EINVAL;
2277 		goto err;
2278 	}
2279 
2280 	if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
2281 	    qp->flags & MLX5_IB_QP_UNDERLAY) {
2282 		qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
2283 		raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
2284 		err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
2285 					   &resp);
2286 	} else {
2287 		err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
2288 	}
2289 
2290 	if (err) {
2291 		mlx5_ib_dbg(dev, "create qp failed\n");
2292 		goto err_create;
2293 	}
2294 
2295 	kvfree(in);
2296 
2297 	base->container_mibqp = qp;
2298 	base->mqp.event = mlx5_ib_qp_event;
2299 
2300 	get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq,
2301 		&send_cq, &recv_cq);
2302 	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
2303 	mlx5_ib_lock_cqs(send_cq, recv_cq);
2304 	/* Maintain device to QPs access, needed for further handling via reset
2305 	 * flow
2306 	 */
2307 	list_add_tail(&qp->qps_list, &dev->qp_list);
2308 	/* Maintain CQ to QPs access, needed for further handling via reset flow
2309 	 */
2310 	if (send_cq)
2311 		list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
2312 	if (recv_cq)
2313 		list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
2314 	mlx5_ib_unlock_cqs(send_cq, recv_cq);
2315 	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
2316 
2317 	return 0;
2318 
2319 err_create:
2320 	if (qp->create_type == MLX5_QP_USER)
2321 		destroy_qp_user(dev, pd, qp, base, udata);
2322 	else if (qp->create_type == MLX5_QP_KERNEL)
2323 		destroy_qp_kernel(dev, qp);
2324 
2325 err:
2326 	kvfree(in);
2327 	return err;
2328 }
2329 
2330 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
2331 	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
2332 {
2333 	if (send_cq) {
2334 		if (recv_cq) {
2335 			if (send_cq->mcq.cqn < recv_cq->mcq.cqn)  {
2336 				spin_lock(&send_cq->lock);
2337 				spin_lock_nested(&recv_cq->lock,
2338 						 SINGLE_DEPTH_NESTING);
2339 			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
2340 				spin_lock(&send_cq->lock);
2341 				__acquire(&recv_cq->lock);
2342 			} else {
2343 				spin_lock(&recv_cq->lock);
2344 				spin_lock_nested(&send_cq->lock,
2345 						 SINGLE_DEPTH_NESTING);
2346 			}
2347 		} else {
2348 			spin_lock(&send_cq->lock);
2349 			__acquire(&recv_cq->lock);
2350 		}
2351 	} else if (recv_cq) {
2352 		spin_lock(&recv_cq->lock);
2353 		__acquire(&send_cq->lock);
2354 	} else {
2355 		__acquire(&send_cq->lock);
2356 		__acquire(&recv_cq->lock);
2357 	}
2358 }
2359 
2360 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
2361 	__releases(&send_cq->lock) __releases(&recv_cq->lock)
2362 {
2363 	if (send_cq) {
2364 		if (recv_cq) {
2365 			if (send_cq->mcq.cqn < recv_cq->mcq.cqn)  {
2366 				spin_unlock(&recv_cq->lock);
2367 				spin_unlock(&send_cq->lock);
2368 			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
2369 				__release(&recv_cq->lock);
2370 				spin_unlock(&send_cq->lock);
2371 			} else {
2372 				spin_unlock(&send_cq->lock);
2373 				spin_unlock(&recv_cq->lock);
2374 			}
2375 		} else {
2376 			__release(&recv_cq->lock);
2377 			spin_unlock(&send_cq->lock);
2378 		}
2379 	} else if (recv_cq) {
2380 		__release(&send_cq->lock);
2381 		spin_unlock(&recv_cq->lock);
2382 	} else {
2383 		__release(&recv_cq->lock);
2384 		__release(&send_cq->lock);
2385 	}
2386 }
2387 
2388 static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
2389 {
2390 	return to_mpd(qp->ibqp.pd);
2391 }
2392 
2393 static void get_cqs(enum ib_qp_type qp_type,
2394 		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
2395 		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
2396 {
2397 	switch (qp_type) {
2398 	case IB_QPT_XRC_TGT:
2399 		*send_cq = NULL;
2400 		*recv_cq = NULL;
2401 		break;
2402 	case MLX5_IB_QPT_REG_UMR:
2403 	case IB_QPT_XRC_INI:
2404 		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
2405 		*recv_cq = NULL;
2406 		break;
2407 
2408 	case IB_QPT_SMI:
2409 	case MLX5_IB_QPT_HW_GSI:
2410 	case IB_QPT_RC:
2411 	case IB_QPT_UC:
2412 	case IB_QPT_UD:
2413 	case IB_QPT_RAW_IPV6:
2414 	case IB_QPT_RAW_ETHERTYPE:
2415 	case IB_QPT_RAW_PACKET:
2416 		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
2417 		*recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
2418 		break;
2419 
2420 	case IB_QPT_MAX:
2421 	default:
2422 		*send_cq = NULL;
2423 		*recv_cq = NULL;
2424 		break;
2425 	}
2426 }
2427 
2428 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2429 				const struct mlx5_modify_raw_qp_param *raw_qp_param,
2430 				u8 lag_tx_affinity);
2431 
2432 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2433 			      struct ib_udata *udata)
2434 {
2435 	struct mlx5_ib_cq *send_cq, *recv_cq;
2436 	struct mlx5_ib_qp_base *base;
2437 	unsigned long flags;
2438 	int err;
2439 
2440 	if (qp->ibqp.rwq_ind_tbl) {
2441 		destroy_rss_raw_qp_tir(dev, qp);
2442 		return;
2443 	}
2444 
2445 	base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
2446 		qp->flags & MLX5_IB_QP_UNDERLAY) ?
2447 	       &qp->raw_packet_qp.rq.base :
2448 	       &qp->trans_qp.base;
2449 
2450 	if (qp->state != IB_QPS_RESET) {
2451 		if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
2452 		    !(qp->flags & MLX5_IB_QP_UNDERLAY)) {
2453 			err = mlx5_core_qp_modify(dev->mdev,
2454 						  MLX5_CMD_OP_2RST_QP, 0,
2455 						  NULL, &base->mqp);
2456 		} else {
2457 			struct mlx5_modify_raw_qp_param raw_qp_param = {
2458 				.operation = MLX5_CMD_OP_2RST_QP
2459 			};
2460 
2461 			err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
2462 		}
2463 		if (err)
2464 			mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
2465 				     base->mqp.qpn);
2466 	}
2467 
2468 	get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
2469 		&send_cq, &recv_cq);
2470 
2471 	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
2472 	mlx5_ib_lock_cqs(send_cq, recv_cq);
2473 	/* del from lists under both locks above to protect reset flow paths */
2474 	list_del(&qp->qps_list);
2475 	if (send_cq)
2476 		list_del(&qp->cq_send_list);
2477 
2478 	if (recv_cq)
2479 		list_del(&qp->cq_recv_list);
2480 
2481 	if (qp->create_type == MLX5_QP_KERNEL) {
2482 		__mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
2483 				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
2484 		if (send_cq != recv_cq)
2485 			__mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
2486 					   NULL);
2487 	}
2488 	mlx5_ib_unlock_cqs(send_cq, recv_cq);
2489 	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
2490 
2491 	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
2492 	    qp->flags & MLX5_IB_QP_UNDERLAY) {
2493 		destroy_raw_packet_qp(dev, qp);
2494 	} else {
2495 		err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
2496 		if (err)
2497 			mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
2498 				     base->mqp.qpn);
2499 	}
2500 
2501 	if (qp->create_type == MLX5_QP_KERNEL)
2502 		destroy_qp_kernel(dev, qp);
2503 	else if (qp->create_type == MLX5_QP_USER)
2504 		destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base, udata);
2505 }
2506 
2507 static const char *ib_qp_type_str(enum ib_qp_type type)
2508 {
2509 	switch (type) {
2510 	case IB_QPT_SMI:
2511 		return "IB_QPT_SMI";
2512 	case IB_QPT_GSI:
2513 		return "IB_QPT_GSI";
2514 	case IB_QPT_RC:
2515 		return "IB_QPT_RC";
2516 	case IB_QPT_UC:
2517 		return "IB_QPT_UC";
2518 	case IB_QPT_UD:
2519 		return "IB_QPT_UD";
2520 	case IB_QPT_RAW_IPV6:
2521 		return "IB_QPT_RAW_IPV6";
2522 	case IB_QPT_RAW_ETHERTYPE:
2523 		return "IB_QPT_RAW_ETHERTYPE";
2524 	case IB_QPT_XRC_INI:
2525 		return "IB_QPT_XRC_INI";
2526 	case IB_QPT_XRC_TGT:
2527 		return "IB_QPT_XRC_TGT";
2528 	case IB_QPT_RAW_PACKET:
2529 		return "IB_QPT_RAW_PACKET";
2530 	case MLX5_IB_QPT_REG_UMR:
2531 		return "MLX5_IB_QPT_REG_UMR";
2532 	case IB_QPT_DRIVER:
2533 		return "IB_QPT_DRIVER";
2534 	case IB_QPT_MAX:
2535 	default:
2536 		return "Invalid QP type";
2537 	}
2538 }
2539 
2540 static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
2541 					struct ib_qp_init_attr *attr,
2542 					struct mlx5_ib_create_qp *ucmd,
2543 					struct ib_udata *udata)
2544 {
2545 	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
2546 		udata, struct mlx5_ib_ucontext, ibucontext);
2547 	struct mlx5_ib_qp *qp;
2548 	int err = 0;
2549 	u32 uidx = MLX5_IB_DEFAULT_UIDX;
2550 	void *dctc;
2551 
2552 	if (!attr->srq || !attr->recv_cq)
2553 		return ERR_PTR(-EINVAL);
2554 
2555 	err = get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &uidx);
2556 	if (err)
2557 		return ERR_PTR(err);
2558 
2559 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2560 	if (!qp)
2561 		return ERR_PTR(-ENOMEM);
2562 
2563 	qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
2564 	if (!qp->dct.in) {
2565 		err = -ENOMEM;
2566 		goto err_free;
2567 	}
2568 
2569 	MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
2570 	dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
2571 	qp->qp_sub_type = MLX5_IB_QPT_DCT;
2572 	MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
2573 	MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
2574 	MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
2575 	MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
2576 	MLX5_SET(dctc, dctc, user_index, uidx);
2577 
2578 	if (ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE)
2579 		configure_responder_scat_cqe(attr, dctc);
2580 
2581 	qp->state = IB_QPS_RESET;
2582 
2583 	return &qp->ibqp;
2584 err_free:
2585 	kfree(qp);
2586 	return ERR_PTR(err);
2587 }
2588 
2589 static int set_mlx_qp_type(struct mlx5_ib_dev *dev,
2590 			   struct ib_qp_init_attr *init_attr,
2591 			   struct mlx5_ib_create_qp *ucmd,
2592 			   struct ib_udata *udata)
2593 {
2594 	enum { MLX_QP_FLAGS = MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI };
2595 	int err;
2596 
2597 	if (!udata)
2598 		return -EINVAL;
2599 
2600 	if (udata->inlen < sizeof(*ucmd)) {
2601 		mlx5_ib_dbg(dev, "create_qp user command is smaller than expected\n");
2602 		return -EINVAL;
2603 	}
2604 	err = ib_copy_from_udata(ucmd, udata, sizeof(*ucmd));
2605 	if (err)
2606 		return err;
2607 
2608 	if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCI) {
2609 		init_attr->qp_type = MLX5_IB_QPT_DCI;
2610 	} else {
2611 		if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCT) {
2612 			init_attr->qp_type = MLX5_IB_QPT_DCT;
2613 		} else {
2614 			mlx5_ib_dbg(dev, "Invalid QP flags\n");
2615 			return -EINVAL;
2616 		}
2617 	}
2618 
2619 	if (!MLX5_CAP_GEN(dev->mdev, dct)) {
2620 		mlx5_ib_dbg(dev, "DC transport is not supported\n");
2621 		return -EOPNOTSUPP;
2622 	}
2623 
2624 	return 0;
2625 }
2626 
2627 struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
2628 				struct ib_qp_init_attr *verbs_init_attr,
2629 				struct ib_udata *udata)
2630 {
2631 	struct mlx5_ib_dev *dev;
2632 	struct mlx5_ib_qp *qp;
2633 	u16 xrcdn = 0;
2634 	int err;
2635 	struct ib_qp_init_attr mlx_init_attr;
2636 	struct ib_qp_init_attr *init_attr = verbs_init_attr;
2637 	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
2638 		udata, struct mlx5_ib_ucontext, ibucontext);
2639 
2640 	if (pd) {
2641 		dev = to_mdev(pd->device);
2642 
2643 		if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
2644 			if (!ucontext) {
2645 				mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
2646 				return ERR_PTR(-EINVAL);
2647 			} else if (!ucontext->cqe_version) {
2648 				mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n");
2649 				return ERR_PTR(-EINVAL);
2650 			}
2651 		}
2652 	} else {
2653 		/* being cautious here */
2654 		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
2655 		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
2656 			pr_warn("%s: no PD for transport %s\n", __func__,
2657 				ib_qp_type_str(init_attr->qp_type));
2658 			return ERR_PTR(-EINVAL);
2659 		}
2660 		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
2661 	}
2662 
2663 	if (init_attr->qp_type == IB_QPT_DRIVER) {
2664 		struct mlx5_ib_create_qp ucmd;
2665 
2666 		init_attr = &mlx_init_attr;
2667 		memcpy(init_attr, verbs_init_attr, sizeof(*verbs_init_attr));
2668 		err = set_mlx_qp_type(dev, init_attr, &ucmd, udata);
2669 		if (err)
2670 			return ERR_PTR(err);
2671 
2672 		if (init_attr->qp_type == MLX5_IB_QPT_DCI) {
2673 			if (init_attr->cap.max_recv_wr ||
2674 			    init_attr->cap.max_recv_sge) {
2675 				mlx5_ib_dbg(dev, "DCI QP requires zero size receive queue\n");
2676 				return ERR_PTR(-EINVAL);
2677 			}
2678 		} else {
2679 			return mlx5_ib_create_dct(pd, init_attr, &ucmd, udata);
2680 		}
2681 	}
2682 
2683 	switch (init_attr->qp_type) {
2684 	case IB_QPT_XRC_TGT:
2685 	case IB_QPT_XRC_INI:
2686 		if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
2687 			mlx5_ib_dbg(dev, "XRC not supported\n");
2688 			return ERR_PTR(-ENOSYS);
2689 		}
2690 		init_attr->recv_cq = NULL;
2691 		if (init_attr->qp_type == IB_QPT_XRC_TGT) {
2692 			xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
2693 			init_attr->send_cq = NULL;
2694 		}
2695 
2696 		/* fall through */
2697 	case IB_QPT_RAW_PACKET:
2698 	case IB_QPT_RC:
2699 	case IB_QPT_UC:
2700 	case IB_QPT_UD:
2701 	case IB_QPT_SMI:
2702 	case MLX5_IB_QPT_HW_GSI:
2703 	case MLX5_IB_QPT_REG_UMR:
2704 	case MLX5_IB_QPT_DCI:
2705 		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2706 		if (!qp)
2707 			return ERR_PTR(-ENOMEM);
2708 
2709 		err = create_qp_common(dev, pd, init_attr, udata, qp);
2710 		if (err) {
2711 			mlx5_ib_dbg(dev, "create_qp_common failed\n");
2712 			kfree(qp);
2713 			return ERR_PTR(err);
2714 		}
2715 
2716 		if (is_qp0(init_attr->qp_type))
2717 			qp->ibqp.qp_num = 0;
2718 		else if (is_qp1(init_attr->qp_type))
2719 			qp->ibqp.qp_num = 1;
2720 		else
2721 			qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;
2722 
2723 		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
2724 			    qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
2725 			    init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
2726 			    init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
2727 
2728 		qp->trans_qp.xrcdn = xrcdn;
2729 
2730 		break;
2731 
2732 	case IB_QPT_GSI:
2733 		return mlx5_ib_gsi_create_qp(pd, init_attr);
2734 
2735 	case IB_QPT_RAW_IPV6:
2736 	case IB_QPT_RAW_ETHERTYPE:
2737 	case IB_QPT_MAX:
2738 	default:
2739 		mlx5_ib_dbg(dev, "unsupported qp type %d\n",
2740 			    init_attr->qp_type);
2741 		/* Don't support raw QPs */
2742 		return ERR_PTR(-EINVAL);
2743 	}
2744 
2745 	if (verbs_init_attr->qp_type == IB_QPT_DRIVER)
2746 		qp->qp_sub_type = init_attr->qp_type;
2747 
2748 	return &qp->ibqp;
2749 }
2750 
2751 static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
2752 {
2753 	struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
2754 
2755 	if (mqp->state == IB_QPS_RTR) {
2756 		int err;
2757 
2758 		err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct);
2759 		if (err) {
2760 			mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
2761 			return err;
2762 		}
2763 	}
2764 
2765 	kfree(mqp->dct.in);
2766 	kfree(mqp);
2767 	return 0;
2768 }
2769 
2770 int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
2771 {
2772 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
2773 	struct mlx5_ib_qp *mqp = to_mqp(qp);
2774 
2775 	if (unlikely(qp->qp_type == IB_QPT_GSI))
2776 		return mlx5_ib_gsi_destroy_qp(qp);
2777 
2778 	if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
2779 		return mlx5_ib_destroy_dct(mqp);
2780 
2781 	destroy_qp_common(dev, mqp, udata);
2782 
2783 	kfree(mqp);
2784 
2785 	return 0;
2786 }
2787 
2788 static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
2789 				const struct ib_qp_attr *attr,
2790 				int attr_mask, __be32 *hw_access_flags_be)
2791 {
2792 	u8 dest_rd_atomic;
2793 	u32 access_flags, hw_access_flags = 0;
2794 
2795 	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
2796 
2797 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
2798 		dest_rd_atomic = attr->max_dest_rd_atomic;
2799 	else
2800 		dest_rd_atomic = qp->trans_qp.resp_depth;
2801 
2802 	if (attr_mask & IB_QP_ACCESS_FLAGS)
2803 		access_flags = attr->qp_access_flags;
2804 	else
2805 		access_flags = qp->trans_qp.atomic_rd_en;
2806 
2807 	if (!dest_rd_atomic)
2808 		access_flags &= IB_ACCESS_REMOTE_WRITE;
2809 
2810 	if (access_flags & IB_ACCESS_REMOTE_READ)
2811 		hw_access_flags |= MLX5_QP_BIT_RRE;
2812 	if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
2813 		int atomic_mode;
2814 
2815 		atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
2816 		if (atomic_mode < 0)
2817 			return -EOPNOTSUPP;
2818 
2819 		hw_access_flags |= MLX5_QP_BIT_RAE;
2820 		hw_access_flags |= atomic_mode << MLX5_ATOMIC_MODE_OFFSET;
2821 	}
2822 
2823 	if (access_flags & IB_ACCESS_REMOTE_WRITE)
2824 		hw_access_flags |= MLX5_QP_BIT_RWE;
2825 
2826 	*hw_access_flags_be = cpu_to_be32(hw_access_flags);
2827 
2828 	return 0;
2829 }
2830 
2831 enum {
2832 	MLX5_PATH_FLAG_FL	= 1 << 0,
2833 	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
2834 	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
2835 };
2836 
2837 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
2838 {
2839 	if (rate == IB_RATE_PORT_CURRENT)
2840 		return 0;
2841 
2842 	if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_600_GBPS)
2843 		return -EINVAL;
2844 
2845 	while (rate != IB_RATE_PORT_CURRENT &&
2846 	       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
2847 		 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
2848 		--rate;
2849 
2850 	return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
2851 }
2852 
2853 static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
2854 				      struct mlx5_ib_sq *sq, u8 sl,
2855 				      struct ib_pd *pd)
2856 {
2857 	void *in;
2858 	void *tisc;
2859 	int inlen;
2860 	int err;
2861 
2862 	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
2863 	in = kvzalloc(inlen, GFP_KERNEL);
2864 	if (!in)
2865 		return -ENOMEM;
2866 
2867 	MLX5_SET(modify_tis_in, in, bitmask.prio, 1);
2868 	MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
2869 
2870 	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
2871 	MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));
2872 
2873 	err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
2874 
2875 	kvfree(in);
2876 
2877 	return err;
2878 }
2879 
2880 static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
2881 					 struct mlx5_ib_sq *sq, u8 tx_affinity,
2882 					 struct ib_pd *pd)
2883 {
2884 	void *in;
2885 	void *tisc;
2886 	int inlen;
2887 	int err;
2888 
2889 	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
2890 	in = kvzalloc(inlen, GFP_KERNEL);
2891 	if (!in)
2892 		return -ENOMEM;
2893 
2894 	MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);
2895 	MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
2896 
2897 	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
2898 	MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);
2899 
2900 	err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
2901 
2902 	kvfree(in);
2903 
2904 	return err;
2905 }
2906 
2907 static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2908 			 const struct rdma_ah_attr *ah,
2909 			 struct mlx5_qp_path *path, u8 port, int attr_mask,
2910 			 u32 path_flags, const struct ib_qp_attr *attr,
2911 			 bool alt)
2912 {
2913 	const struct ib_global_route *grh = rdma_ah_read_grh(ah);
2914 	int err;
2915 	enum ib_gid_type gid_type;
2916 	u8 ah_flags = rdma_ah_get_ah_flags(ah);
2917 	u8 sl = rdma_ah_get_sl(ah);
2918 
2919 	if (attr_mask & IB_QP_PKEY_INDEX)
2920 		path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
2921 						     attr->pkey_index);
2922 
2923 	if (ah_flags & IB_AH_GRH) {
2924 		if (grh->sgid_index >=
2925 		    dev->mdev->port_caps[port - 1].gid_table_len) {
2926 			pr_err("sgid_index (%u) too large. max is %d\n",
2927 			       grh->sgid_index,
2928 			       dev->mdev->port_caps[port - 1].gid_table_len);
2929 			return -EINVAL;
2930 		}
2931 	}
2932 
2933 	if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
2934 		if (!(ah_flags & IB_AH_GRH))
2935 			return -EINVAL;
2936 
2937 		memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac));
2938 		if (qp->ibqp.qp_type == IB_QPT_RC ||
2939 		    qp->ibqp.qp_type == IB_QPT_UC ||
2940 		    qp->ibqp.qp_type == IB_QPT_XRC_INI ||
2941 		    qp->ibqp.qp_type == IB_QPT_XRC_TGT)
2942 			path->udp_sport =
2943 				mlx5_get_roce_udp_sport(dev, ah->grh.sgid_attr);
2944 		path->dci_cfi_prio_sl = (sl & 0x7) << 4;
2945 		gid_type = ah->grh.sgid_attr->gid_type;
2946 		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
2947 			path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
2948 	} else {
2949 		path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
2950 		path->fl_free_ar |=
2951 			(path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
2952 		path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
2953 		path->grh_mlid = rdma_ah_get_path_bits(ah) & 0x7f;
2954 		if (ah_flags & IB_AH_GRH)
2955 			path->grh_mlid	|= 1 << 7;
2956 		path->dci_cfi_prio_sl = sl & 0xf;
2957 	}
2958 
2959 	if (ah_flags & IB_AH_GRH) {
2960 		path->mgid_index = grh->sgid_index;
2961 		path->hop_limit  = grh->hop_limit;
2962 		path->tclass_flowlabel =
2963 			cpu_to_be32((grh->traffic_class << 20) |
2964 				    (grh->flow_label));
2965 		memcpy(path->rgid, grh->dgid.raw, 16);
2966 	}
2967 
2968 	err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
2969 	if (err < 0)
2970 		return err;
2971 	path->static_rate = err;
2972 	path->port = port;
2973 
2974 	if (attr_mask & IB_QP_TIMEOUT)
2975 		path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;
2976 
2977 	if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
2978 		return modify_raw_packet_eth_prio(dev->mdev,
2979 						  &qp->raw_packet_qp.sq,
2980 						  sl & 0xf, qp->ibqp.pd);
2981 
2982 	return 0;
2983 }
2984 
2985 static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
2986 	[MLX5_QP_STATE_INIT] = {
2987 		[MLX5_QP_STATE_INIT] = {
2988 			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
2989 					  MLX5_QP_OPTPAR_RAE		|
2990 					  MLX5_QP_OPTPAR_RWE		|
2991 					  MLX5_QP_OPTPAR_PKEY_INDEX	|
2992 					  MLX5_QP_OPTPAR_PRI_PORT,
2993 			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
2994 					  MLX5_QP_OPTPAR_PKEY_INDEX	|
2995 					  MLX5_QP_OPTPAR_PRI_PORT,
2996 			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
2997 					  MLX5_QP_OPTPAR_Q_KEY		|
2998 					  MLX5_QP_OPTPAR_PRI_PORT,
2999 			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE		|
3000 					  MLX5_QP_OPTPAR_RAE		|
3001 					  MLX5_QP_OPTPAR_RWE		|
3002 					  MLX5_QP_OPTPAR_PKEY_INDEX	|
3003 					  MLX5_QP_OPTPAR_PRI_PORT,
3004 		},
3005 		[MLX5_QP_STATE_RTR] = {
3006 			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
3007 					  MLX5_QP_OPTPAR_RRE            |
3008 					  MLX5_QP_OPTPAR_RAE            |
3009 					  MLX5_QP_OPTPAR_RWE            |
3010 					  MLX5_QP_OPTPAR_PKEY_INDEX,
3011 			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
3012 					  MLX5_QP_OPTPAR_RWE            |
3013 					  MLX5_QP_OPTPAR_PKEY_INDEX,
3014 			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX     |
3015 					  MLX5_QP_OPTPAR_Q_KEY,
3016 			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
3017 					   MLX5_QP_OPTPAR_Q_KEY,
3018 			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
3019 					  MLX5_QP_OPTPAR_RRE            |
3020 					  MLX5_QP_OPTPAR_RAE            |
3021 					  MLX5_QP_OPTPAR_RWE            |
3022 					  MLX5_QP_OPTPAR_PKEY_INDEX,
3023 		},
3024 	},
3025 	[MLX5_QP_STATE_RTR] = {
3026 		[MLX5_QP_STATE_RTS] = {
3027 			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
3028 					  MLX5_QP_OPTPAR_RRE		|
3029 					  MLX5_QP_OPTPAR_RAE		|
3030 					  MLX5_QP_OPTPAR_RWE		|
3031 					  MLX5_QP_OPTPAR_PM_STATE	|
3032 					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
3033 			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
3034 					  MLX5_QP_OPTPAR_RWE		|
3035 					  MLX5_QP_OPTPAR_PM_STATE,
3036 			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
3037 			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
3038 					  MLX5_QP_OPTPAR_RRE		|
3039 					  MLX5_QP_OPTPAR_RAE		|
3040 					  MLX5_QP_OPTPAR_RWE		|
3041 					  MLX5_QP_OPTPAR_PM_STATE	|
3042 					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
3043 		},
3044 	},
3045 	[MLX5_QP_STATE_RTS] = {
3046 		[MLX5_QP_STATE_RTS] = {
3047 			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
3048 					  MLX5_QP_OPTPAR_RAE		|
3049 					  MLX5_QP_OPTPAR_RWE		|
3050 					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
3051 					  MLX5_QP_OPTPAR_PM_STATE	|
3052 					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
3053 			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
3054 					  MLX5_QP_OPTPAR_PM_STATE	|
3055 					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
3056 			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY		|
3057 					  MLX5_QP_OPTPAR_SRQN		|
3058 					  MLX5_QP_OPTPAR_CQN_RCV,
3059 			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE		|
3060 					  MLX5_QP_OPTPAR_RAE		|
3061 					  MLX5_QP_OPTPAR_RWE		|
3062 					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
3063 					  MLX5_QP_OPTPAR_PM_STATE	|
3064 					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
3065 		},
3066 	},
3067 	[MLX5_QP_STATE_SQER] = {
3068 		[MLX5_QP_STATE_RTS] = {
3069 			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
3070 			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
3071 			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
3072 			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
3073 					   MLX5_QP_OPTPAR_RWE		|
3074 					   MLX5_QP_OPTPAR_RAE		|
3075 					   MLX5_QP_OPTPAR_RRE,
3076 			[MLX5_QP_ST_XRC]  = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
3077 					   MLX5_QP_OPTPAR_RWE		|
3078 					   MLX5_QP_OPTPAR_RAE		|
3079 					   MLX5_QP_OPTPAR_RRE,
3080 		},
3081 	},
3082 };
3083 
3084 static int ib_nr_to_mlx5_nr(int ib_mask)
3085 {
3086 	switch (ib_mask) {
3087 	case IB_QP_STATE:
3088 		return 0;
3089 	case IB_QP_CUR_STATE:
3090 		return 0;
3091 	case IB_QP_EN_SQD_ASYNC_NOTIFY:
3092 		return 0;
3093 	case IB_QP_ACCESS_FLAGS:
3094 		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
3095 			MLX5_QP_OPTPAR_RAE;
3096 	case IB_QP_PKEY_INDEX:
3097 		return MLX5_QP_OPTPAR_PKEY_INDEX;
3098 	case IB_QP_PORT:
3099 		return MLX5_QP_OPTPAR_PRI_PORT;
3100 	case IB_QP_QKEY:
3101 		return MLX5_QP_OPTPAR_Q_KEY;
3102 	case IB_QP_AV:
3103 		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
3104 			MLX5_QP_OPTPAR_PRI_PORT;
3105 	case IB_QP_PATH_MTU:
3106 		return 0;
3107 	case IB_QP_TIMEOUT:
3108 		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
3109 	case IB_QP_RETRY_CNT:
3110 		return MLX5_QP_OPTPAR_RETRY_COUNT;
3111 	case IB_QP_RNR_RETRY:
3112 		return MLX5_QP_OPTPAR_RNR_RETRY;
3113 	case IB_QP_RQ_PSN:
3114 		return 0;
3115 	case IB_QP_MAX_QP_RD_ATOMIC:
3116 		return MLX5_QP_OPTPAR_SRA_MAX;
3117 	case IB_QP_ALT_PATH:
3118 		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
3119 	case IB_QP_MIN_RNR_TIMER:
3120 		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
3121 	case IB_QP_SQ_PSN:
3122 		return 0;
3123 	case IB_QP_MAX_DEST_RD_ATOMIC:
3124 		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
3125 			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
3126 	case IB_QP_PATH_MIG_STATE:
3127 		return MLX5_QP_OPTPAR_PM_STATE;
3128 	case IB_QP_CAP:
3129 		return 0;
3130 	case IB_QP_DEST_QPN:
3131 		return 0;
3132 	}
3133 	return 0;
3134 }
3135 
3136 static int ib_mask_to_mlx5_opt(int ib_mask)
3137 {
3138 	int result = 0;
3139 	int i;
3140 
3141 	for (i = 0; i < 8 * sizeof(int); i++) {
3142 		if ((1 << i) & ib_mask)
3143 			result |= ib_nr_to_mlx5_nr(1 << i);
3144 	}
3145 
3146 	return result;
3147 }
3148 
3149 static int modify_raw_packet_qp_rq(
3150 	struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state,
3151 	const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
3152 {
3153 	void *in;
3154 	void *rqc;
3155 	int inlen;
3156 	int err;
3157 
3158 	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
3159 	in = kvzalloc(inlen, GFP_KERNEL);
3160 	if (!in)
3161 		return -ENOMEM;
3162 
3163 	MLX5_SET(modify_rq_in, in, rq_state, rq->state);
3164 	MLX5_SET(modify_rq_in, in, uid, to_mpd(pd)->uid);
3165 
3166 	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
3167 	MLX5_SET(rqc, rqc, state, new_state);
3168 
3169 	if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
3170 		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
3171 			MLX5_SET64(modify_rq_in, in, modify_bitmask,
3172 				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
3173 			MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
3174 		} else
3175 			dev_info_once(
3176 				&dev->ib_dev.dev,
3177 				"RAW PACKET QP counters are not supported on current FW\n");
3178 	}
3179 
3180 	err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
3181 	if (err)
3182 		goto out;
3183 
3184 	rq->state = new_state;
3185 
3186 out:
3187 	kvfree(in);
3188 	return err;
3189 }
3190 
3191 static int modify_raw_packet_qp_sq(
3192 	struct mlx5_core_dev *dev, struct mlx5_ib_sq *sq, int new_state,
3193 	const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
3194 {
3195 	struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
3196 	struct mlx5_rate_limit old_rl = ibqp->rl;
3197 	struct mlx5_rate_limit new_rl = old_rl;
3198 	bool new_rate_added = false;
3199 	u16 rl_index = 0;
3200 	void *in;
3201 	void *sqc;
3202 	int inlen;
3203 	int err;
3204 
3205 	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
3206 	in = kvzalloc(inlen, GFP_KERNEL);
3207 	if (!in)
3208 		return -ENOMEM;
3209 
3210 	MLX5_SET(modify_sq_in, in, uid, to_mpd(pd)->uid);
3211 	MLX5_SET(modify_sq_in, in, sq_state, sq->state);
3212 
3213 	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
3214 	MLX5_SET(sqc, sqc, state, new_state);
3215 
3216 	if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
3217 		if (new_state != MLX5_SQC_STATE_RDY)
3218 			pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
3219 				__func__);
3220 		else
3221 			new_rl = raw_qp_param->rl;
3222 	}
3223 
3224 	if (!mlx5_rl_are_equal(&old_rl, &new_rl)) {
3225 		if (new_rl.rate) {
3226 			err = mlx5_rl_add_rate(dev, &rl_index, &new_rl);
3227 			if (err) {
3228 				pr_err("Failed configuring rate limit(err %d): \
3229 				       rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
3230 				       err, new_rl.rate, new_rl.max_burst_sz,
3231 				       new_rl.typical_pkt_sz);
3232 
3233 				goto out;
3234 			}
3235 			new_rate_added = true;
3236 		}
3237 
3238 		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
3239 		/* index 0 means no limit */
3240 		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
3241 	}
3242 
3243 	err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
3244 	if (err) {
3245 		/* Remove new rate from table if failed */
3246 		if (new_rate_added)
3247 			mlx5_rl_remove_rate(dev, &new_rl);
3248 		goto out;
3249 	}
3250 
3251 	/* Only remove the old rate after new rate was set */
3252 	if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
3253 	    (new_state != MLX5_SQC_STATE_RDY)) {
3254 		mlx5_rl_remove_rate(dev, &old_rl);
3255 		if (new_state != MLX5_SQC_STATE_RDY)
3256 			memset(&new_rl, 0, sizeof(new_rl));
3257 	}
3258 
3259 	ibqp->rl = new_rl;
3260 	sq->state = new_state;
3261 
3262 out:
3263 	kvfree(in);
3264 	return err;
3265 }
3266 
3267 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
3268 				const struct mlx5_modify_raw_qp_param *raw_qp_param,
3269 				u8 tx_affinity)
3270 {
3271 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
3272 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
3273 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
3274 	int modify_rq = !!qp->rq.wqe_cnt;
3275 	int modify_sq = !!qp->sq.wqe_cnt;
3276 	int rq_state;
3277 	int sq_state;
3278 	int err;
3279 
3280 	switch (raw_qp_param->operation) {
3281 	case MLX5_CMD_OP_RST2INIT_QP:
3282 		rq_state = MLX5_RQC_STATE_RDY;
3283 		sq_state = MLX5_SQC_STATE_RDY;
3284 		break;
3285 	case MLX5_CMD_OP_2ERR_QP:
3286 		rq_state = MLX5_RQC_STATE_ERR;
3287 		sq_state = MLX5_SQC_STATE_ERR;
3288 		break;
3289 	case MLX5_CMD_OP_2RST_QP:
3290 		rq_state = MLX5_RQC_STATE_RST;
3291 		sq_state = MLX5_SQC_STATE_RST;
3292 		break;
3293 	case MLX5_CMD_OP_RTR2RTS_QP:
3294 	case MLX5_CMD_OP_RTS2RTS_QP:
3295 		if (raw_qp_param->set_mask ==
3296 		    MLX5_RAW_QP_RATE_LIMIT) {
3297 			modify_rq = 0;
3298 			sq_state = sq->state;
3299 		} else {
3300 			return raw_qp_param->set_mask ? -EINVAL : 0;
3301 		}
3302 		break;
3303 	case MLX5_CMD_OP_INIT2INIT_QP:
3304 	case MLX5_CMD_OP_INIT2RTR_QP:
3305 		if (raw_qp_param->set_mask)
3306 			return -EINVAL;
3307 		else
3308 			return 0;
3309 	default:
3310 		WARN_ON(1);
3311 		return -EINVAL;
3312 	}
3313 
3314 	if (modify_rq) {
3315 		err =  modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param,
3316 					       qp->ibqp.pd);
3317 		if (err)
3318 			return err;
3319 	}
3320 
3321 	if (modify_sq) {
3322 		struct mlx5_flow_handle *flow_rule;
3323 
3324 		if (tx_affinity) {
3325 			err = modify_raw_packet_tx_affinity(dev->mdev, sq,
3326 							    tx_affinity,
3327 							    qp->ibqp.pd);
3328 			if (err)
3329 				return err;
3330 		}
3331 
3332 		flow_rule = create_flow_rule_vport_sq(dev, sq,
3333 						      raw_qp_param->port);
3334 		if (IS_ERR(flow_rule))
3335 			return PTR_ERR(flow_rule);
3336 
3337 		err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
3338 					      raw_qp_param, qp->ibqp.pd);
3339 		if (err) {
3340 			if (flow_rule)
3341 				mlx5_del_flow_rules(flow_rule);
3342 			return err;
3343 		}
3344 
3345 		if (flow_rule) {
3346 			destroy_flow_rule_vport_sq(sq);
3347 			sq->flow_rule = flow_rule;
3348 		}
3349 
3350 		return err;
3351 	}
3352 
3353 	return 0;
3354 }
3355 
3356 static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
3357 				    struct mlx5_ib_pd *pd,
3358 				    struct mlx5_ib_qp_base *qp_base,
3359 				    u8 port_num, struct ib_udata *udata)
3360 {
3361 	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
3362 		udata, struct mlx5_ib_ucontext, ibucontext);
3363 	unsigned int tx_port_affinity;
3364 
3365 	if (ucontext) {
3366 		tx_port_affinity = (unsigned int)atomic_add_return(
3367 					   1, &ucontext->tx_port_affinity) %
3368 					   MLX5_MAX_PORTS +
3369 				   1;
3370 		mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
3371 				tx_port_affinity, qp_base->mqp.qpn, ucontext);
3372 	} else {
3373 		tx_port_affinity =
3374 			(unsigned int)atomic_add_return(
3375 				1, &dev->port[port_num].roce.tx_port_affinity) %
3376 				MLX5_MAX_PORTS +
3377 			1;
3378 		mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
3379 				tx_port_affinity, qp_base->mqp.qpn);
3380 	}
3381 
3382 	return tx_port_affinity;
3383 }
3384 
3385 static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
3386 				    struct rdma_counter *counter)
3387 {
3388 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
3389 	struct mlx5_ib_qp *mqp = to_mqp(qp);
3390 	struct mlx5_qp_context context = {};
3391 	struct mlx5_ib_qp_base *base;
3392 	u32 set_id;
3393 
3394 	if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id))
3395 		return 0;
3396 
3397 	if (counter)
3398 		set_id = counter->id;
3399 	else
3400 		set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
3401 
3402 	base = &mqp->trans_qp.base;
3403 	context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff);
3404 	context.qp_counter_set_usr_page |= cpu_to_be32(set_id << 24);
3405 	return mlx5_core_qp_modify(dev->mdev,
3406 				   MLX5_CMD_OP_RTS2RTS_QP,
3407 				   MLX5_QP_OPTPAR_COUNTER_SET_ID,
3408 				   &context, &base->mqp);
3409 }
3410 
3411 static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
3412 			       const struct ib_qp_attr *attr, int attr_mask,
3413 			       enum ib_qp_state cur_state,
3414 			       enum ib_qp_state new_state,
3415 			       const struct mlx5_ib_modify_qp *ucmd,
3416 			       struct ib_udata *udata)
3417 {
3418 	static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
3419 		[MLX5_QP_STATE_RST] = {
3420 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
3421 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
3422 			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_RST2INIT_QP,
3423 		},
3424 		[MLX5_QP_STATE_INIT]  = {
3425 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
3426 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
3427 			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_INIT2INIT_QP,
3428 			[MLX5_QP_STATE_RTR]	= MLX5_CMD_OP_INIT2RTR_QP,
3429 		},
3430 		[MLX5_QP_STATE_RTR]   = {
3431 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
3432 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
3433 			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTR2RTS_QP,
3434 		},
3435 		[MLX5_QP_STATE_RTS]   = {
3436 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
3437 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
3438 			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTS2RTS_QP,
3439 		},
3440 		[MLX5_QP_STATE_SQD] = {
3441 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
3442 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
3443 		},
3444 		[MLX5_QP_STATE_SQER] = {
3445 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
3446 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
3447 			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_SQERR2RTS_QP,
3448 		},
3449 		[MLX5_QP_STATE_ERR] = {
3450 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
3451 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
3452 		}
3453 	};
3454 
3455 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3456 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
3457 	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
3458 	struct mlx5_ib_cq *send_cq, *recv_cq;
3459 	struct mlx5_qp_context *context;
3460 	struct mlx5_ib_pd *pd;
3461 	enum mlx5_qp_state mlx5_cur, mlx5_new;
3462 	enum mlx5_qp_optpar optpar;
3463 	u32 set_id = 0;
3464 	int mlx5_st;
3465 	int err;
3466 	u16 op;
3467 	u8 tx_affinity = 0;
3468 
3469 	mlx5_st = to_mlx5_st(ibqp->qp_type == IB_QPT_DRIVER ?
3470 			     qp->qp_sub_type : ibqp->qp_type);
3471 	if (mlx5_st < 0)
3472 		return -EINVAL;
3473 
3474 	context = kzalloc(sizeof(*context), GFP_KERNEL);
3475 	if (!context)
3476 		return -ENOMEM;
3477 
3478 	pd = get_pd(qp);
3479 	context->flags = cpu_to_be32(mlx5_st << 16);
3480 
3481 	if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
3482 		context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
3483 	} else {
3484 		switch (attr->path_mig_state) {
3485 		case IB_MIG_MIGRATED:
3486 			context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
3487 			break;
3488 		case IB_MIG_REARM:
3489 			context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
3490 			break;
3491 		case IB_MIG_ARMED:
3492 			context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
3493 			break;
3494 		}
3495 	}
3496 
3497 	if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
3498 		if ((ibqp->qp_type == IB_QPT_RC) ||
3499 		    (ibqp->qp_type == IB_QPT_UD &&
3500 		     !(qp->flags & MLX5_IB_QP_SQPN_QP1)) ||
3501 		    (ibqp->qp_type == IB_QPT_UC) ||
3502 		    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
3503 		    (ibqp->qp_type == IB_QPT_XRC_INI) ||
3504 		    (ibqp->qp_type == IB_QPT_XRC_TGT)) {
3505 			if (dev->lag_active) {
3506 				u8 p = mlx5_core_native_port_num(dev->mdev) - 1;
3507 				tx_affinity = get_tx_affinity(dev, pd, base, p,
3508 							      udata);
3509 				context->flags |= cpu_to_be32(tx_affinity << 24);
3510 			}
3511 		}
3512 	}
3513 
3514 	if (is_sqp(ibqp->qp_type)) {
3515 		context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
3516 	} else if ((ibqp->qp_type == IB_QPT_UD &&
3517 		    !(qp->flags & MLX5_IB_QP_UNDERLAY)) ||
3518 		   ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
3519 		context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
3520 	} else if (attr_mask & IB_QP_PATH_MTU) {
3521 		if (attr->path_mtu < IB_MTU_256 ||
3522 		    attr->path_mtu > IB_MTU_4096) {
3523 			mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
3524 			err = -EINVAL;
3525 			goto out;
3526 		}
3527 		context->mtu_msgmax = (attr->path_mtu << 5) |
3528 				      (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
3529 	}
3530 
3531 	if (attr_mask & IB_QP_DEST_QPN)
3532 		context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
3533 
3534 	if (attr_mask & IB_QP_PKEY_INDEX)
3535 		context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);
3536 
3537 	/* todo implement counter_index functionality */
3538 
3539 	if (is_sqp(ibqp->qp_type))
3540 		context->pri_path.port = qp->port;
3541 
3542 	if (attr_mask & IB_QP_PORT)
3543 		context->pri_path.port = attr->port_num;
3544 
3545 	if (attr_mask & IB_QP_AV) {
3546 		err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
3547 				    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
3548 				    attr_mask, 0, attr, false);
3549 		if (err)
3550 			goto out;
3551 	}
3552 
3553 	if (attr_mask & IB_QP_TIMEOUT)
3554 		context->pri_path.ackto_lt |= attr->timeout << 3;
3555 
3556 	if (attr_mask & IB_QP_ALT_PATH) {
3557 		err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
3558 				    &context->alt_path,
3559 				    attr->alt_port_num,
3560 				    attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
3561 				    0, attr, true);
3562 		if (err)
3563 			goto out;
3564 	}
3565 
3566 	get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
3567 		&send_cq, &recv_cq);
3568 
3569 	context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
3570 	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
3571 	context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
3572 	context->params1  = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
3573 
3574 	if (attr_mask & IB_QP_RNR_RETRY)
3575 		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
3576 
3577 	if (attr_mask & IB_QP_RETRY_CNT)
3578 		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
3579 
3580 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
3581 		if (attr->max_rd_atomic)
3582 			context->params1 |=
3583 				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
3584 	}
3585 
3586 	if (attr_mask & IB_QP_SQ_PSN)
3587 		context->next_send_psn = cpu_to_be32(attr->sq_psn);
3588 
3589 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
3590 		if (attr->max_dest_rd_atomic)
3591 			context->params2 |=
3592 				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
3593 	}
3594 
3595 	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
3596 		__be32 access_flags;
3597 
3598 		err = to_mlx5_access_flags(qp, attr, attr_mask, &access_flags);
3599 		if (err)
3600 			goto out;
3601 
3602 		context->params2 |= access_flags;
3603 	}
3604 
3605 	if (attr_mask & IB_QP_MIN_RNR_TIMER)
3606 		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
3607 
3608 	if (attr_mask & IB_QP_RQ_PSN)
3609 		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
3610 
3611 	if (attr_mask & IB_QP_QKEY)
3612 		context->qkey = cpu_to_be32(attr->qkey);
3613 
3614 	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
3615 		context->db_rec_addr = cpu_to_be64(qp->db.dma);
3616 
3617 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
3618 		u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
3619 			       qp->port) - 1;
3620 
3621 		/* Underlay port should be used - index 0 function per port */
3622 		if (qp->flags & MLX5_IB_QP_UNDERLAY)
3623 			port_num = 0;
3624 
3625 		if (ibqp->counter)
3626 			set_id = ibqp->counter->id;
3627 		else
3628 			set_id = mlx5_ib_get_counters_id(dev, port_num);
3629 		context->qp_counter_set_usr_page |=
3630 			cpu_to_be32(set_id << 24);
3631 	}
3632 
3633 	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
3634 		context->sq_crq_size |= cpu_to_be16(1 << 4);
3635 
3636 	if (qp->flags & MLX5_IB_QP_SQPN_QP1)
3637 		context->deth_sqpn = cpu_to_be32(1);
3638 
3639 	mlx5_cur = to_mlx5_state(cur_state);
3640 	mlx5_new = to_mlx5_state(new_state);
3641 
3642 	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
3643 	    !optab[mlx5_cur][mlx5_new]) {
3644 		err = -EINVAL;
3645 		goto out;
3646 	}
3647 
3648 	op = optab[mlx5_cur][mlx5_new];
3649 	optpar = ib_mask_to_mlx5_opt(attr_mask);
3650 	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
3651 
3652 	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
3653 	    qp->flags & MLX5_IB_QP_UNDERLAY) {
3654 		struct mlx5_modify_raw_qp_param raw_qp_param = {};
3655 
3656 		raw_qp_param.operation = op;
3657 		if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
3658 			raw_qp_param.rq_q_ctr_id = set_id;
3659 			raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
3660 		}
3661 
3662 		if (attr_mask & IB_QP_PORT)
3663 			raw_qp_param.port = attr->port_num;
3664 
3665 		if (attr_mask & IB_QP_RATE_LIMIT) {
3666 			raw_qp_param.rl.rate = attr->rate_limit;
3667 
3668 			if (ucmd->burst_info.max_burst_sz) {
3669 				if (attr->rate_limit &&
3670 				    MLX5_CAP_QOS(dev->mdev, packet_pacing_burst_bound)) {
3671 					raw_qp_param.rl.max_burst_sz =
3672 						ucmd->burst_info.max_burst_sz;
3673 				} else {
3674 					err = -EINVAL;
3675 					goto out;
3676 				}
3677 			}
3678 
3679 			if (ucmd->burst_info.typical_pkt_sz) {
3680 				if (attr->rate_limit &&
3681 				    MLX5_CAP_QOS(dev->mdev, packet_pacing_typical_size)) {
3682 					raw_qp_param.rl.typical_pkt_sz =
3683 						ucmd->burst_info.typical_pkt_sz;
3684 				} else {
3685 					err = -EINVAL;
3686 					goto out;
3687 				}
3688 			}
3689 
3690 			raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
3691 		}
3692 
3693 		err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
3694 	} else {
3695 		err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
3696 					  &base->mqp);
3697 	}
3698 
3699 	if (err)
3700 		goto out;
3701 
3702 	qp->state = new_state;
3703 
3704 	if (attr_mask & IB_QP_ACCESS_FLAGS)
3705 		qp->trans_qp.atomic_rd_en = attr->qp_access_flags;
3706 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3707 		qp->trans_qp.resp_depth = attr->max_dest_rd_atomic;
3708 	if (attr_mask & IB_QP_PORT)
3709 		qp->port = attr->port_num;
3710 	if (attr_mask & IB_QP_ALT_PATH)
3711 		qp->trans_qp.alt_port = attr->alt_port_num;
3712 
3713 	/*
3714 	 * If we moved a kernel QP to RESET, clean up all old CQ
3715 	 * entries and reinitialize the QP.
3716 	 */
3717 	if (new_state == IB_QPS_RESET &&
3718 	    !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
3719 		mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
3720 				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
3721 		if (send_cq != recv_cq)
3722 			mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL);
3723 
3724 		qp->rq.head = 0;
3725 		qp->rq.tail = 0;
3726 		qp->sq.head = 0;
3727 		qp->sq.tail = 0;
3728 		qp->sq.cur_post = 0;
3729 		if (qp->sq.wqe_cnt)
3730 			qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
3731 		qp->db.db[MLX5_RCV_DBR] = 0;
3732 		qp->db.db[MLX5_SND_DBR] = 0;
3733 	}
3734 
3735 	if ((new_state == IB_QPS_RTS) && qp->counter_pending) {
3736 		err = __mlx5_ib_qp_set_counter(ibqp, ibqp->counter);
3737 		if (!err)
3738 			qp->counter_pending = 0;
3739 	}
3740 
3741 out:
3742 	kfree(context);
3743 	return err;
3744 }
3745 
3746 static inline bool is_valid_mask(int mask, int req, int opt)
3747 {
3748 	if ((mask & req) != req)
3749 		return false;
3750 
3751 	if (mask & ~(req | opt))
3752 		return false;
3753 
3754 	return true;
3755 }
3756 
3757 /* check valid transition for driver QP types
3758  * for now the only QP type that this function supports is DCI
3759  */
3760 static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new_state,
3761 				enum ib_qp_attr_mask attr_mask)
3762 {
3763 	int req = IB_QP_STATE;
3764 	int opt = 0;
3765 
3766 	if (new_state == IB_QPS_RESET) {
3767 		return is_valid_mask(attr_mask, req, opt);
3768 	} else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
3769 		req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
3770 		return is_valid_mask(attr_mask, req, opt);
3771 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3772 		opt = IB_QP_PKEY_INDEX | IB_QP_PORT;
3773 		return is_valid_mask(attr_mask, req, opt);
3774 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
3775 		req |= IB_QP_PATH_MTU;
3776 		opt = IB_QP_PKEY_INDEX | IB_QP_AV;
3777 		return is_valid_mask(attr_mask, req, opt);
3778 	} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
3779 		req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
3780 		       IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN;
3781 		opt = IB_QP_MIN_RNR_TIMER;
3782 		return is_valid_mask(attr_mask, req, opt);
3783 	} else if (cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) {
3784 		opt = IB_QP_MIN_RNR_TIMER;
3785 		return is_valid_mask(attr_mask, req, opt);
3786 	} else if (cur_state != IB_QPS_RESET && new_state == IB_QPS_ERR) {
3787 		return is_valid_mask(attr_mask, req, opt);
3788 	}
3789 	return false;
3790 }
3791 
3792 /* mlx5_ib_modify_dct: modify a DCT QP
3793  * valid transitions are:
3794  * RESET to INIT: must set access_flags, pkey_index and port
3795  * INIT  to RTR : must set min_rnr_timer, tclass, flow_label,
3796  *			   mtu, gid_index and hop_limit
3797  * Other transitions and attributes are illegal
3798  */
3799 static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3800 			      int attr_mask, struct ib_udata *udata)
3801 {
3802 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
3803 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3804 	enum ib_qp_state cur_state, new_state;
3805 	int err = 0;
3806 	int required = IB_QP_STATE;
3807 	void *dctc;
3808 
3809 	if (!(attr_mask & IB_QP_STATE))
3810 		return -EINVAL;
3811 
3812 	cur_state = qp->state;
3813 	new_state = attr->qp_state;
3814 
3815 	dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
3816 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
3817 		u16 set_id;
3818 
3819 		required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
3820 		if (!is_valid_mask(attr_mask, required, 0))
3821 			return -EINVAL;
3822 
3823 		if (attr->port_num == 0 ||
3824 		    attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) {
3825 			mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
3826 				    attr->port_num, dev->num_ports);
3827 			return -EINVAL;
3828 		}
3829 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
3830 			MLX5_SET(dctc, dctc, rre, 1);
3831 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
3832 			MLX5_SET(dctc, dctc, rwe, 1);
3833 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
3834 			int atomic_mode;
3835 
3836 			atomic_mode = get_atomic_mode(dev, MLX5_IB_QPT_DCT);
3837 			if (atomic_mode < 0)
3838 				return -EOPNOTSUPP;
3839 
3840 			MLX5_SET(dctc, dctc, atomic_mode, atomic_mode);
3841 			MLX5_SET(dctc, dctc, rae, 1);
3842 		}
3843 		MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
3844 		MLX5_SET(dctc, dctc, port, attr->port_num);
3845 
3846 		set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
3847 		MLX5_SET(dctc, dctc, counter_set_id, set_id);
3848 
3849 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
3850 		struct mlx5_ib_modify_qp_resp resp = {};
3851 		u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
3852 		u32 min_resp_len = offsetof(typeof(resp), dctn) +
3853 				   sizeof(resp.dctn);
3854 
3855 		if (udata->outlen < min_resp_len)
3856 			return -EINVAL;
3857 		resp.response_length = min_resp_len;
3858 
3859 		required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU;
3860 		if (!is_valid_mask(attr_mask, required, 0))
3861 			return -EINVAL;
3862 		MLX5_SET(dctc, dctc, min_rnr_nak, attr->min_rnr_timer);
3863 		MLX5_SET(dctc, dctc, tclass, attr->ah_attr.grh.traffic_class);
3864 		MLX5_SET(dctc, dctc, flow_label, attr->ah_attr.grh.flow_label);
3865 		MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
3866 		MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
3867 		MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
3868 
3869 		err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
3870 					   MLX5_ST_SZ_BYTES(create_dct_in), out,
3871 					   sizeof(out));
3872 		if (err)
3873 			return err;
3874 		resp.dctn = qp->dct.mdct.mqp.qpn;
3875 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
3876 		if (err) {
3877 			mlx5_core_destroy_dct(dev->mdev, &qp->dct.mdct);
3878 			return err;
3879 		}
3880 	} else {
3881 		mlx5_ib_warn(dev, "Modify DCT: Invalid transition from %d to %d\n", cur_state, new_state);
3882 		return -EINVAL;
3883 	}
3884 	if (err)
3885 		qp->state = IB_QPS_ERR;
3886 	else
3887 		qp->state = new_state;
3888 	return err;
3889 }
3890 
3891 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3892 		      int attr_mask, struct ib_udata *udata)
3893 {
3894 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3895 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
3896 	struct mlx5_ib_modify_qp ucmd = {};
3897 	enum ib_qp_type qp_type;
3898 	enum ib_qp_state cur_state, new_state;
3899 	size_t required_cmd_sz;
3900 	int err = -EINVAL;
3901 	int port;
3902 
3903 	if (ibqp->rwq_ind_tbl)
3904 		return -ENOSYS;
3905 
3906 	if (udata && udata->inlen) {
3907 		required_cmd_sz = offsetof(typeof(ucmd), reserved) +
3908 			sizeof(ucmd.reserved);
3909 		if (udata->inlen < required_cmd_sz)
3910 			return -EINVAL;
3911 
3912 		if (udata->inlen > sizeof(ucmd) &&
3913 		    !ib_is_udata_cleared(udata, sizeof(ucmd),
3914 					 udata->inlen - sizeof(ucmd)))
3915 			return -EOPNOTSUPP;
3916 
3917 		if (ib_copy_from_udata(&ucmd, udata,
3918 				       min(udata->inlen, sizeof(ucmd))))
3919 			return -EFAULT;
3920 
3921 		if (ucmd.comp_mask ||
3922 		    memchr_inv(&ucmd.reserved, 0, sizeof(ucmd.reserved)) ||
3923 		    memchr_inv(&ucmd.burst_info.reserved, 0,
3924 			       sizeof(ucmd.burst_info.reserved)))
3925 			return -EOPNOTSUPP;
3926 	}
3927 
3928 	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
3929 		return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
3930 
3931 	if (ibqp->qp_type == IB_QPT_DRIVER)
3932 		qp_type = qp->qp_sub_type;
3933 	else
3934 		qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
3935 			IB_QPT_GSI : ibqp->qp_type;
3936 
3937 	if (qp_type == MLX5_IB_QPT_DCT)
3938 		return mlx5_ib_modify_dct(ibqp, attr, attr_mask, udata);
3939 
3940 	mutex_lock(&qp->mutex);
3941 
3942 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
3943 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
3944 
3945 	if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) {
3946 		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
3947 	}
3948 
3949 	if (qp->flags & MLX5_IB_QP_UNDERLAY) {
3950 		if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
3951 			mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
3952 				    attr_mask);
3953 			goto out;
3954 		}
3955 	} else if (qp_type != MLX5_IB_QPT_REG_UMR &&
3956 		   qp_type != MLX5_IB_QPT_DCI &&
3957 		   !ib_modify_qp_is_ok(cur_state, new_state, qp_type,
3958 				       attr_mask)) {
3959 		mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
3960 			    cur_state, new_state, ibqp->qp_type, attr_mask);
3961 		goto out;
3962 	} else if (qp_type == MLX5_IB_QPT_DCI &&
3963 		   !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
3964 		mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
3965 			    cur_state, new_state, qp_type, attr_mask);
3966 		goto out;
3967 	}
3968 
3969 	if ((attr_mask & IB_QP_PORT) &&
3970 	    (attr->port_num == 0 ||
3971 	     attr->port_num > dev->num_ports)) {
3972 		mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
3973 			    attr->port_num, dev->num_ports);
3974 		goto out;
3975 	}
3976 
3977 	if (attr_mask & IB_QP_PKEY_INDEX) {
3978 		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
3979 		if (attr->pkey_index >=
3980 		    dev->mdev->port_caps[port - 1].pkey_table_len) {
3981 			mlx5_ib_dbg(dev, "invalid pkey index %d\n",
3982 				    attr->pkey_index);
3983 			goto out;
3984 		}
3985 	}
3986 
3987 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
3988 	    attr->max_rd_atomic >
3989 	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
3990 		mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
3991 			    attr->max_rd_atomic);
3992 		goto out;
3993 	}
3994 
3995 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
3996 	    attr->max_dest_rd_atomic >
3997 	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
3998 		mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
3999 			    attr->max_dest_rd_atomic);
4000 		goto out;
4001 	}
4002 
4003 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
4004 		err = 0;
4005 		goto out;
4006 	}
4007 
4008 	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
4009 				  new_state, &ucmd, udata);
4010 
4011 out:
4012 	mutex_unlock(&qp->mutex);
4013 	return err;
4014 }
4015 
4016 static void _handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
4017 				   u32 wqe_sz, void **cur_edge)
4018 {
4019 	u32 idx;
4020 
4021 	idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
4022 	*cur_edge = get_sq_edge(sq, idx);
4023 
4024 	*seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
4025 }
4026 
4027 /* handle_post_send_edge - Check if we get to SQ edge. If yes, update to the
4028  * next nearby edge and get new address translation for current WQE position.
4029  * @sq - SQ buffer.
4030  * @seg: Current WQE position (16B aligned).
4031  * @wqe_sz: Total current WQE size [16B].
4032  * @cur_edge: Updated current edge.
4033  */
4034 static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
4035 					 u32 wqe_sz, void **cur_edge)
4036 {
4037 	if (likely(*seg != *cur_edge))
4038 		return;
4039 
4040 	_handle_post_send_edge(sq, seg, wqe_sz, cur_edge);
4041 }
4042 
4043 /* memcpy_send_wqe - copy data from src to WQE and update the relevant WQ's
4044  * pointers. At the end @seg is aligned to 16B regardless the copied size.
4045  * @sq - SQ buffer.
4046  * @cur_edge: Updated current edge.
4047  * @seg: Current WQE position (16B aligned).
4048  * @wqe_sz: Total current WQE size [16B].
4049  * @src: Pointer to copy from.
4050  * @n: Number of bytes to copy.
4051  */
4052 static inline void memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
4053 				   void **seg, u32 *wqe_sz, const void *src,
4054 				   size_t n)
4055 {
4056 	while (likely(n)) {
4057 		size_t leftlen = *cur_edge - *seg;
4058 		size_t copysz = min_t(size_t, leftlen, n);
4059 		size_t stride;
4060 
4061 		memcpy(*seg, src, copysz);
4062 
4063 		n -= copysz;
4064 		src += copysz;
4065 		stride = !n ? ALIGN(copysz, 16) : copysz;
4066 		*seg += stride;
4067 		*wqe_sz += stride >> 4;
4068 		handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
4069 	}
4070 }
4071 
4072 static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
4073 {
4074 	struct mlx5_ib_cq *cq;
4075 	unsigned cur;
4076 
4077 	cur = wq->head - wq->tail;
4078 	if (likely(cur + nreq < wq->max_post))
4079 		return 0;
4080 
4081 	cq = to_mcq(ib_cq);
4082 	spin_lock(&cq->lock);
4083 	cur = wq->head - wq->tail;
4084 	spin_unlock(&cq->lock);
4085 
4086 	return cur + nreq >= wq->max_post;
4087 }
4088 
4089 static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
4090 					  u64 remote_addr, u32 rkey)
4091 {
4092 	rseg->raddr    = cpu_to_be64(remote_addr);
4093 	rseg->rkey     = cpu_to_be32(rkey);
4094 	rseg->reserved = 0;
4095 }
4096 
4097 static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
4098 			void **seg, int *size, void **cur_edge)
4099 {
4100 	struct mlx5_wqe_eth_seg *eseg = *seg;
4101 
4102 	memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));
4103 
4104 	if (wr->send_flags & IB_SEND_IP_CSUM)
4105 		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
4106 				 MLX5_ETH_WQE_L4_CSUM;
4107 
4108 	if (wr->opcode == IB_WR_LSO) {
4109 		struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
4110 		size_t left, copysz;
4111 		void *pdata = ud_wr->header;
4112 		size_t stride;
4113 
4114 		left = ud_wr->hlen;
4115 		eseg->mss = cpu_to_be16(ud_wr->mss);
4116 		eseg->inline_hdr.sz = cpu_to_be16(left);
4117 
4118 		/* memcpy_send_wqe should get a 16B align address. Hence, we
4119 		 * first copy up to the current edge and then, if needed,
4120 		 * fall-through to memcpy_send_wqe.
4121 		 */
4122 		copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
4123 			       left);
4124 		memcpy(eseg->inline_hdr.start, pdata, copysz);
4125 		stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
4126 			       sizeof(eseg->inline_hdr.start) + copysz, 16);
4127 		*size += stride / 16;
4128 		*seg += stride;
4129 
4130 		if (copysz < left) {
4131 			handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4132 			left -= copysz;
4133 			pdata += copysz;
4134 			memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata,
4135 					left);
4136 		}
4137 
4138 		return;
4139 	}
4140 
4141 	*seg += sizeof(struct mlx5_wqe_eth_seg);
4142 	*size += sizeof(struct mlx5_wqe_eth_seg) / 16;
4143 }
4144 
4145 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
4146 			     const struct ib_send_wr *wr)
4147 {
4148 	memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
4149 	dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
4150 	dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
4151 }
4152 
4153 static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
4154 {
4155 	dseg->byte_count = cpu_to_be32(sg->length);
4156 	dseg->lkey       = cpu_to_be32(sg->lkey);
4157 	dseg->addr       = cpu_to_be64(sg->addr);
4158 }
4159 
4160 static u64 get_xlt_octo(u64 bytes)
4161 {
4162 	return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
4163 	       MLX5_IB_UMR_OCTOWORD;
4164 }
4165 
4166 static __be64 frwr_mkey_mask(bool atomic)
4167 {
4168 	u64 result;
4169 
4170 	result = MLX5_MKEY_MASK_LEN		|
4171 		MLX5_MKEY_MASK_PAGE_SIZE	|
4172 		MLX5_MKEY_MASK_START_ADDR	|
4173 		MLX5_MKEY_MASK_EN_RINVAL	|
4174 		MLX5_MKEY_MASK_KEY		|
4175 		MLX5_MKEY_MASK_LR		|
4176 		MLX5_MKEY_MASK_LW		|
4177 		MLX5_MKEY_MASK_RR		|
4178 		MLX5_MKEY_MASK_RW		|
4179 		MLX5_MKEY_MASK_SMALL_FENCE	|
4180 		MLX5_MKEY_MASK_FREE;
4181 
4182 	if (atomic)
4183 		result |= MLX5_MKEY_MASK_A;
4184 
4185 	return cpu_to_be64(result);
4186 }
4187 
4188 static __be64 sig_mkey_mask(void)
4189 {
4190 	u64 result;
4191 
4192 	result = MLX5_MKEY_MASK_LEN		|
4193 		MLX5_MKEY_MASK_PAGE_SIZE	|
4194 		MLX5_MKEY_MASK_START_ADDR	|
4195 		MLX5_MKEY_MASK_EN_SIGERR	|
4196 		MLX5_MKEY_MASK_EN_RINVAL	|
4197 		MLX5_MKEY_MASK_KEY		|
4198 		MLX5_MKEY_MASK_LR		|
4199 		MLX5_MKEY_MASK_LW		|
4200 		MLX5_MKEY_MASK_RR		|
4201 		MLX5_MKEY_MASK_RW		|
4202 		MLX5_MKEY_MASK_SMALL_FENCE	|
4203 		MLX5_MKEY_MASK_FREE		|
4204 		MLX5_MKEY_MASK_BSF_EN;
4205 
4206 	return cpu_to_be64(result);
4207 }
4208 
4209 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
4210 			    struct mlx5_ib_mr *mr, u8 flags, bool atomic)
4211 {
4212 	int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
4213 
4214 	memset(umr, 0, sizeof(*umr));
4215 
4216 	umr->flags = flags;
4217 	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
4218 	umr->mkey_mask = frwr_mkey_mask(atomic);
4219 }
4220 
4221 static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
4222 {
4223 	memset(umr, 0, sizeof(*umr));
4224 	umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
4225 	umr->flags = MLX5_UMR_INLINE;
4226 }
4227 
4228 static __be64 get_umr_enable_mr_mask(void)
4229 {
4230 	u64 result;
4231 
4232 	result = MLX5_MKEY_MASK_KEY |
4233 		 MLX5_MKEY_MASK_FREE;
4234 
4235 	return cpu_to_be64(result);
4236 }
4237 
4238 static __be64 get_umr_disable_mr_mask(void)
4239 {
4240 	u64 result;
4241 
4242 	result = MLX5_MKEY_MASK_FREE;
4243 
4244 	return cpu_to_be64(result);
4245 }
4246 
4247 static __be64 get_umr_update_translation_mask(void)
4248 {
4249 	u64 result;
4250 
4251 	result = MLX5_MKEY_MASK_LEN |
4252 		 MLX5_MKEY_MASK_PAGE_SIZE |
4253 		 MLX5_MKEY_MASK_START_ADDR;
4254 
4255 	return cpu_to_be64(result);
4256 }
4257 
4258 static __be64 get_umr_update_access_mask(int atomic)
4259 {
4260 	u64 result;
4261 
4262 	result = MLX5_MKEY_MASK_LR |
4263 		 MLX5_MKEY_MASK_LW |
4264 		 MLX5_MKEY_MASK_RR |
4265 		 MLX5_MKEY_MASK_RW;
4266 
4267 	if (atomic)
4268 		result |= MLX5_MKEY_MASK_A;
4269 
4270 	return cpu_to_be64(result);
4271 }
4272 
4273 static __be64 get_umr_update_pd_mask(void)
4274 {
4275 	u64 result;
4276 
4277 	result = MLX5_MKEY_MASK_PD;
4278 
4279 	return cpu_to_be64(result);
4280 }
4281 
4282 static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
4283 {
4284 	if ((mask & MLX5_MKEY_MASK_PAGE_SIZE &&
4285 	     MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) ||
4286 	    (mask & MLX5_MKEY_MASK_A &&
4287 	     MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)))
4288 		return -EPERM;
4289 	return 0;
4290 }
4291 
4292 static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
4293 			       struct mlx5_wqe_umr_ctrl_seg *umr,
4294 			       const struct ib_send_wr *wr, int atomic)
4295 {
4296 	const struct mlx5_umr_wr *umrwr = umr_wr(wr);
4297 
4298 	memset(umr, 0, sizeof(*umr));
4299 
4300 	if (!umrwr->ignore_free_state) {
4301 		if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
4302 			 /* fail if free */
4303 			umr->flags = MLX5_UMR_CHECK_FREE;
4304 		else
4305 			/* fail if not free */
4306 			umr->flags = MLX5_UMR_CHECK_NOT_FREE;
4307 	}
4308 
4309 	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
4310 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
4311 		u64 offset = get_xlt_octo(umrwr->offset);
4312 
4313 		umr->xlt_offset = cpu_to_be16(offset & 0xffff);
4314 		umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
4315 		umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
4316 	}
4317 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
4318 		umr->mkey_mask |= get_umr_update_translation_mask();
4319 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
4320 		umr->mkey_mask |= get_umr_update_access_mask(atomic);
4321 		umr->mkey_mask |= get_umr_update_pd_mask();
4322 	}
4323 	if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
4324 		umr->mkey_mask |= get_umr_enable_mr_mask();
4325 	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
4326 		umr->mkey_mask |= get_umr_disable_mr_mask();
4327 
4328 	if (!wr->num_sge)
4329 		umr->flags |= MLX5_UMR_INLINE;
4330 
4331 	return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask));
4332 }
4333 
4334 static u8 get_umr_flags(int acc)
4335 {
4336 	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
4337 	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
4338 	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
4339 	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
4340 		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
4341 }
4342 
4343 static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
4344 			     struct mlx5_ib_mr *mr,
4345 			     u32 key, int access)
4346 {
4347 	int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1;
4348 
4349 	memset(seg, 0, sizeof(*seg));
4350 
4351 	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
4352 		seg->log2_page_size = ilog2(mr->ibmr.page_size);
4353 	else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
4354 		/* KLMs take twice the size of MTTs */
4355 		ndescs *= 2;
4356 
4357 	seg->flags = get_umr_flags(access) | mr->access_mode;
4358 	seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
4359 	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
4360 	seg->start_addr = cpu_to_be64(mr->ibmr.iova);
4361 	seg->len = cpu_to_be64(mr->ibmr.length);
4362 	seg->xlt_oct_size = cpu_to_be32(ndescs);
4363 }
4364 
4365 static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
4366 {
4367 	memset(seg, 0, sizeof(*seg));
4368 	seg->status = MLX5_MKEY_STATUS_FREE;
4369 }
4370 
4371 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
4372 				 const struct ib_send_wr *wr)
4373 {
4374 	const struct mlx5_umr_wr *umrwr = umr_wr(wr);
4375 
4376 	memset(seg, 0, sizeof(*seg));
4377 	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
4378 		seg->status = MLX5_MKEY_STATUS_FREE;
4379 
4380 	seg->flags = convert_access(umrwr->access_flags);
4381 	if (umrwr->pd)
4382 		seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
4383 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
4384 	    !umrwr->length)
4385 		seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);
4386 
4387 	seg->start_addr = cpu_to_be64(umrwr->virt_addr);
4388 	seg->len = cpu_to_be64(umrwr->length);
4389 	seg->log2_page_size = umrwr->page_shift;
4390 	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
4391 				       mlx5_mkey_variant(umrwr->mkey));
4392 }
4393 
4394 static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
4395 			     struct mlx5_ib_mr *mr,
4396 			     struct mlx5_ib_pd *pd)
4397 {
4398 	int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs);
4399 
4400 	dseg->addr = cpu_to_be64(mr->desc_map);
4401 	dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
4402 	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
4403 }
4404 
4405 static __be32 send_ieth(const struct ib_send_wr *wr)
4406 {
4407 	switch (wr->opcode) {
4408 	case IB_WR_SEND_WITH_IMM:
4409 	case IB_WR_RDMA_WRITE_WITH_IMM:
4410 		return wr->ex.imm_data;
4411 
4412 	case IB_WR_SEND_WITH_INV:
4413 		return cpu_to_be32(wr->ex.invalidate_rkey);
4414 
4415 	default:
4416 		return 0;
4417 	}
4418 }
4419 
4420 static u8 calc_sig(void *wqe, int size)
4421 {
4422 	u8 *p = wqe;
4423 	u8 res = 0;
4424 	int i;
4425 
4426 	for (i = 0; i < size; i++)
4427 		res ^= p[i];
4428 
4429 	return ~res;
4430 }
4431 
4432 static u8 wq_sig(void *wqe)
4433 {
4434 	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
4435 }
4436 
4437 static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
4438 			    void **wqe, int *wqe_sz, void **cur_edge)
4439 {
4440 	struct mlx5_wqe_inline_seg *seg;
4441 	size_t offset;
4442 	int inl = 0;
4443 	int i;
4444 
4445 	seg = *wqe;
4446 	*wqe += sizeof(*seg);
4447 	offset = sizeof(*seg);
4448 
4449 	for (i = 0; i < wr->num_sge; i++) {
4450 		size_t len  = wr->sg_list[i].length;
4451 		void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);
4452 
4453 		inl += len;
4454 
4455 		if (unlikely(inl > qp->max_inline_data))
4456 			return -ENOMEM;
4457 
4458 		while (likely(len)) {
4459 			size_t leftlen;
4460 			size_t copysz;
4461 
4462 			handle_post_send_edge(&qp->sq, wqe,
4463 					      *wqe_sz + (offset >> 4),
4464 					      cur_edge);
4465 
4466 			leftlen = *cur_edge - *wqe;
4467 			copysz = min_t(size_t, leftlen, len);
4468 
4469 			memcpy(*wqe, addr, copysz);
4470 			len -= copysz;
4471 			addr += copysz;
4472 			*wqe += copysz;
4473 			offset += copysz;
4474 		}
4475 	}
4476 
4477 	seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
4478 
4479 	*wqe_sz +=  ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
4480 
4481 	return 0;
4482 }
4483 
4484 static u16 prot_field_size(enum ib_signature_type type)
4485 {
4486 	switch (type) {
4487 	case IB_SIG_TYPE_T10_DIF:
4488 		return MLX5_DIF_SIZE;
4489 	default:
4490 		return 0;
4491 	}
4492 }
4493 
4494 static u8 bs_selector(int block_size)
4495 {
4496 	switch (block_size) {
4497 	case 512:	    return 0x1;
4498 	case 520:	    return 0x2;
4499 	case 4096:	    return 0x3;
4500 	case 4160:	    return 0x4;
4501 	case 1073741824:    return 0x5;
4502 	default:	    return 0;
4503 	}
4504 }
4505 
4506 static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
4507 			      struct mlx5_bsf_inl *inl)
4508 {
4509 	/* Valid inline section and allow BSF refresh */
4510 	inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
4511 				       MLX5_BSF_REFRESH_DIF);
4512 	inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
4513 	inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
4514 	/* repeating block */
4515 	inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
4516 	inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
4517 			MLX5_DIF_CRC : MLX5_DIF_IPCS;
4518 
4519 	if (domain->sig.dif.ref_remap)
4520 		inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
4521 
4522 	if (domain->sig.dif.app_escape) {
4523 		if (domain->sig.dif.ref_escape)
4524 			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
4525 		else
4526 			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
4527 	}
4528 
4529 	inl->dif_app_bitmask_check =
4530 		cpu_to_be16(domain->sig.dif.apptag_check_mask);
4531 }
4532 
4533 static int mlx5_set_bsf(struct ib_mr *sig_mr,
4534 			struct ib_sig_attrs *sig_attrs,
4535 			struct mlx5_bsf *bsf, u32 data_size)
4536 {
4537 	struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
4538 	struct mlx5_bsf_basic *basic = &bsf->basic;
4539 	struct ib_sig_domain *mem = &sig_attrs->mem;
4540 	struct ib_sig_domain *wire = &sig_attrs->wire;
4541 
4542 	memset(bsf, 0, sizeof(*bsf));
4543 
4544 	/* Basic + Extended + Inline */
4545 	basic->bsf_size_sbs = 1 << 7;
4546 	/* Input domain check byte mask */
4547 	basic->check_byte_mask = sig_attrs->check_mask;
4548 	basic->raw_data_size = cpu_to_be32(data_size);
4549 
4550 	/* Memory domain */
4551 	switch (sig_attrs->mem.sig_type) {
4552 	case IB_SIG_TYPE_NONE:
4553 		break;
4554 	case IB_SIG_TYPE_T10_DIF:
4555 		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
4556 		basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
4557 		mlx5_fill_inl_bsf(mem, &bsf->m_inl);
4558 		break;
4559 	default:
4560 		return -EINVAL;
4561 	}
4562 
4563 	/* Wire domain */
4564 	switch (sig_attrs->wire.sig_type) {
4565 	case IB_SIG_TYPE_NONE:
4566 		break;
4567 	case IB_SIG_TYPE_T10_DIF:
4568 		if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
4569 		    mem->sig_type == wire->sig_type) {
4570 			/* Same block structure */
4571 			basic->bsf_size_sbs |= 1 << 4;
4572 			if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
4573 				basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
4574 			if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
4575 				basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
4576 			if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
4577 				basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
4578 		} else
4579 			basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
4580 
4581 		basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
4582 		mlx5_fill_inl_bsf(wire, &bsf->w_inl);
4583 		break;
4584 	default:
4585 		return -EINVAL;
4586 	}
4587 
4588 	return 0;
4589 }
4590 
4591 static int set_sig_data_segment(const struct ib_send_wr *send_wr,
4592 				struct ib_mr *sig_mr,
4593 				struct ib_sig_attrs *sig_attrs,
4594 				struct mlx5_ib_qp *qp, void **seg, int *size,
4595 				void **cur_edge)
4596 {
4597 	struct mlx5_bsf *bsf;
4598 	u32 data_len;
4599 	u32 data_key;
4600 	u64 data_va;
4601 	u32 prot_len = 0;
4602 	u32 prot_key = 0;
4603 	u64 prot_va = 0;
4604 	bool prot = false;
4605 	int ret;
4606 	int wqe_size;
4607 	struct mlx5_ib_mr *mr = to_mmr(sig_mr);
4608 	struct mlx5_ib_mr *pi_mr = mr->pi_mr;
4609 
4610 	data_len = pi_mr->data_length;
4611 	data_key = pi_mr->ibmr.lkey;
4612 	data_va = pi_mr->data_iova;
4613 	if (pi_mr->meta_ndescs) {
4614 		prot_len = pi_mr->meta_length;
4615 		prot_key = pi_mr->ibmr.lkey;
4616 		prot_va = pi_mr->pi_iova;
4617 		prot = true;
4618 	}
4619 
4620 	if (!prot || (data_key == prot_key && data_va == prot_va &&
4621 		      data_len == prot_len)) {
4622 		/**
4623 		 * Source domain doesn't contain signature information
4624 		 * or data and protection are interleaved in memory.
4625 		 * So need construct:
4626 		 *                  ------------------
4627 		 *                 |     data_klm     |
4628 		 *                  ------------------
4629 		 *                 |       BSF        |
4630 		 *                  ------------------
4631 		 **/
4632 		struct mlx5_klm *data_klm = *seg;
4633 
4634 		data_klm->bcount = cpu_to_be32(data_len);
4635 		data_klm->key = cpu_to_be32(data_key);
4636 		data_klm->va = cpu_to_be64(data_va);
4637 		wqe_size = ALIGN(sizeof(*data_klm), 64);
4638 	} else {
4639 		/**
4640 		 * Source domain contains signature information
4641 		 * So need construct a strided block format:
4642 		 *               ---------------------------
4643 		 *              |     stride_block_ctrl     |
4644 		 *               ---------------------------
4645 		 *              |          data_klm         |
4646 		 *               ---------------------------
4647 		 *              |          prot_klm         |
4648 		 *               ---------------------------
4649 		 *              |             BSF           |
4650 		 *               ---------------------------
4651 		 **/
4652 		struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
4653 		struct mlx5_stride_block_entry *data_sentry;
4654 		struct mlx5_stride_block_entry *prot_sentry;
4655 		u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
4656 		int prot_size;
4657 
4658 		sblock_ctrl = *seg;
4659 		data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
4660 		prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
4661 
4662 		prot_size = prot_field_size(sig_attrs->mem.sig_type);
4663 		if (!prot_size) {
4664 			pr_err("Bad block size given: %u\n", block_size);
4665 			return -EINVAL;
4666 		}
4667 		sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
4668 							    prot_size);
4669 		sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
4670 		sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
4671 		sblock_ctrl->num_entries = cpu_to_be16(2);
4672 
4673 		data_sentry->bcount = cpu_to_be16(block_size);
4674 		data_sentry->key = cpu_to_be32(data_key);
4675 		data_sentry->va = cpu_to_be64(data_va);
4676 		data_sentry->stride = cpu_to_be16(block_size);
4677 
4678 		prot_sentry->bcount = cpu_to_be16(prot_size);
4679 		prot_sentry->key = cpu_to_be32(prot_key);
4680 		prot_sentry->va = cpu_to_be64(prot_va);
4681 		prot_sentry->stride = cpu_to_be16(prot_size);
4682 
4683 		wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
4684 				 sizeof(*prot_sentry), 64);
4685 	}
4686 
4687 	*seg += wqe_size;
4688 	*size += wqe_size / 16;
4689 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4690 
4691 	bsf = *seg;
4692 	ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
4693 	if (ret)
4694 		return -EINVAL;
4695 
4696 	*seg += sizeof(*bsf);
4697 	*size += sizeof(*bsf) / 16;
4698 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4699 
4700 	return 0;
4701 }
4702 
4703 static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
4704 				 struct ib_mr *sig_mr, int access_flags,
4705 				 u32 size, u32 length, u32 pdn)
4706 {
4707 	u32 sig_key = sig_mr->rkey;
4708 	u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
4709 
4710 	memset(seg, 0, sizeof(*seg));
4711 
4712 	seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS;
4713 	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
4714 	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
4715 				    MLX5_MKEY_BSF_EN | pdn);
4716 	seg->len = cpu_to_be64(length);
4717 	seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
4718 	seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
4719 }
4720 
4721 static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
4722 				u32 size)
4723 {
4724 	memset(umr, 0, sizeof(*umr));
4725 
4726 	umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
4727 	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
4728 	umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
4729 	umr->mkey_mask = sig_mkey_mask();
4730 }
4731 
4732 static int set_pi_umr_wr(const struct ib_send_wr *send_wr,
4733 			 struct mlx5_ib_qp *qp, void **seg, int *size,
4734 			 void **cur_edge)
4735 {
4736 	const struct ib_reg_wr *wr = reg_wr(send_wr);
4737 	struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr);
4738 	struct mlx5_ib_mr *pi_mr = sig_mr->pi_mr;
4739 	struct ib_sig_attrs *sig_attrs = sig_mr->ibmr.sig_attrs;
4740 	u32 pdn = get_pd(qp)->pdn;
4741 	u32 xlt_size;
4742 	int region_len, ret;
4743 
4744 	if (unlikely(send_wr->num_sge != 0) ||
4745 	    unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) ||
4746 	    unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) ||
4747 	    unlikely(!sig_mr->sig->sig_status_checked))
4748 		return -EINVAL;
4749 
4750 	/* length of the protected region, data + protection */
4751 	region_len = pi_mr->ibmr.length;
4752 
4753 	/**
4754 	 * KLM octoword size - if protection was provided
4755 	 * then we use strided block format (3 octowords),
4756 	 * else we use single KLM (1 octoword)
4757 	 **/
4758 	if (sig_attrs->mem.sig_type != IB_SIG_TYPE_NONE)
4759 		xlt_size = 0x30;
4760 	else
4761 		xlt_size = sizeof(struct mlx5_klm);
4762 
4763 	set_sig_umr_segment(*seg, xlt_size);
4764 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
4765 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
4766 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4767 
4768 	set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len,
4769 			     pdn);
4770 	*seg += sizeof(struct mlx5_mkey_seg);
4771 	*size += sizeof(struct mlx5_mkey_seg) / 16;
4772 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4773 
4774 	ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size,
4775 				   cur_edge);
4776 	if (ret)
4777 		return ret;
4778 
4779 	sig_mr->sig->sig_status_checked = false;
4780 	return 0;
4781 }
4782 
4783 static int set_psv_wr(struct ib_sig_domain *domain,
4784 		      u32 psv_idx, void **seg, int *size)
4785 {
4786 	struct mlx5_seg_set_psv *psv_seg = *seg;
4787 
4788 	memset(psv_seg, 0, sizeof(*psv_seg));
4789 	psv_seg->psv_num = cpu_to_be32(psv_idx);
4790 	switch (domain->sig_type) {
4791 	case IB_SIG_TYPE_NONE:
4792 		break;
4793 	case IB_SIG_TYPE_T10_DIF:
4794 		psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
4795 						     domain->sig.dif.app_tag);
4796 		psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
4797 		break;
4798 	default:
4799 		pr_err("Bad signature type (%d) is given.\n",
4800 		       domain->sig_type);
4801 		return -EINVAL;
4802 	}
4803 
4804 	*seg += sizeof(*psv_seg);
4805 	*size += sizeof(*psv_seg) / 16;
4806 
4807 	return 0;
4808 }
4809 
4810 static int set_reg_wr(struct mlx5_ib_qp *qp,
4811 		      const struct ib_reg_wr *wr,
4812 		      void **seg, int *size, void **cur_edge,
4813 		      bool check_not_free)
4814 {
4815 	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
4816 	struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
4817 	struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
4818 	int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
4819 	bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
4820 	bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
4821 	u8 flags = 0;
4822 
4823 	if (!mlx5_ib_can_use_umr(dev, atomic)) {
4824 		mlx5_ib_warn(to_mdev(qp->ibqp.device),
4825 			     "Fast update of %s for MR is disabled\n",
4826 			     (MLX5_CAP_GEN(dev->mdev,
4827 					   umr_modify_entity_size_disabled)) ?
4828 				     "entity size" :
4829 				     "atomic access");
4830 		return -EINVAL;
4831 	}
4832 
4833 	if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
4834 		mlx5_ib_warn(to_mdev(qp->ibqp.device),
4835 			     "Invalid IB_SEND_INLINE send flag\n");
4836 		return -EINVAL;
4837 	}
4838 
4839 	if (check_not_free)
4840 		flags |= MLX5_UMR_CHECK_NOT_FREE;
4841 	if (umr_inline)
4842 		flags |= MLX5_UMR_INLINE;
4843 
4844 	set_reg_umr_seg(*seg, mr, flags, atomic);
4845 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
4846 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
4847 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4848 
4849 	set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
4850 	*seg += sizeof(struct mlx5_mkey_seg);
4851 	*size += sizeof(struct mlx5_mkey_seg) / 16;
4852 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4853 
4854 	if (umr_inline) {
4855 		memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs,
4856 				mr_list_size);
4857 		*size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4);
4858 	} else {
4859 		set_reg_data_seg(*seg, mr, pd);
4860 		*seg += sizeof(struct mlx5_wqe_data_seg);
4861 		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
4862 	}
4863 	return 0;
4864 }
4865 
4866 static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size,
4867 			void **cur_edge)
4868 {
4869 	set_linv_umr_seg(*seg);
4870 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
4871 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
4872 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4873 	set_linv_mkey_seg(*seg);
4874 	*seg += sizeof(struct mlx5_mkey_seg);
4875 	*size += sizeof(struct mlx5_mkey_seg) / 16;
4876 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4877 }
4878 
4879 static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
4880 {
4881 	__be32 *p = NULL;
4882 	int i, j;
4883 
4884 	pr_debug("dump WQE index %u:\n", idx);
4885 	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
4886 		if ((i & 0xf) == 0) {
4887 			p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
4888 			pr_debug("WQBB at %p:\n", (void *)p);
4889 			j = 0;
4890 			idx = (idx + 1) & (qp->sq.wqe_cnt - 1);
4891 		}
4892 		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
4893 			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
4894 			 be32_to_cpu(p[j + 3]));
4895 	}
4896 }
4897 
4898 static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
4899 		       struct mlx5_wqe_ctrl_seg **ctrl,
4900 		       const struct ib_send_wr *wr, unsigned int *idx,
4901 		       int *size, void **cur_edge, int nreq,
4902 		       bool send_signaled, bool solicited)
4903 {
4904 	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
4905 		return -ENOMEM;
4906 
4907 	*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
4908 	*seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx);
4909 	*ctrl = *seg;
4910 	*(uint32_t *)(*seg + 8) = 0;
4911 	(*ctrl)->imm = send_ieth(wr);
4912 	(*ctrl)->fm_ce_se = qp->sq_signal_bits |
4913 		(send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
4914 		(solicited ? MLX5_WQE_CTRL_SOLICITED : 0);
4915 
4916 	*seg += sizeof(**ctrl);
4917 	*size = sizeof(**ctrl) / 16;
4918 	*cur_edge = qp->sq.cur_edge;
4919 
4920 	return 0;
4921 }
4922 
4923 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
4924 		     struct mlx5_wqe_ctrl_seg **ctrl,
4925 		     const struct ib_send_wr *wr, unsigned *idx,
4926 		     int *size, void **cur_edge, int nreq)
4927 {
4928 	return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
4929 			   wr->send_flags & IB_SEND_SIGNALED,
4930 			   wr->send_flags & IB_SEND_SOLICITED);
4931 }
4932 
4933 static void finish_wqe(struct mlx5_ib_qp *qp,
4934 		       struct mlx5_wqe_ctrl_seg *ctrl,
4935 		       void *seg, u8 size, void *cur_edge,
4936 		       unsigned int idx, u64 wr_id, int nreq, u8 fence,
4937 		       u32 mlx5_opcode)
4938 {
4939 	u8 opmod = 0;
4940 
4941 	ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
4942 					     mlx5_opcode | ((u32)opmod << 24));
4943 	ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
4944 	ctrl->fm_ce_se |= fence;
4945 	if (unlikely(qp->wq_sig))
4946 		ctrl->signature = wq_sig(ctrl);
4947 
4948 	qp->sq.wrid[idx] = wr_id;
4949 	qp->sq.w_list[idx].opcode = mlx5_opcode;
4950 	qp->sq.wqe_head[idx] = qp->sq.head + nreq;
4951 	qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
4952 	qp->sq.w_list[idx].next = qp->sq.cur_post;
4953 
4954 	/* We save the edge which was possibly updated during the WQE
4955 	 * construction, into SQ's cache.
4956 	 */
4957 	seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB);
4958 	qp->sq.cur_edge = (unlikely(seg == cur_edge)) ?
4959 			  get_sq_edge(&qp->sq, qp->sq.cur_post &
4960 				      (qp->sq.wqe_cnt - 1)) :
4961 			  cur_edge;
4962 }
4963 
4964 static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
4965 			      const struct ib_send_wr **bad_wr, bool drain)
4966 {
4967 	struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
4968 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4969 	struct mlx5_core_dev *mdev = dev->mdev;
4970 	struct ib_reg_wr reg_pi_wr;
4971 	struct mlx5_ib_qp *qp;
4972 	struct mlx5_ib_mr *mr;
4973 	struct mlx5_ib_mr *pi_mr;
4974 	struct mlx5_ib_mr pa_pi_mr;
4975 	struct ib_sig_attrs *sig_attrs;
4976 	struct mlx5_wqe_xrc_seg *xrc;
4977 	struct mlx5_bf *bf;
4978 	void *cur_edge;
4979 	int uninitialized_var(size);
4980 	unsigned long flags;
4981 	unsigned idx;
4982 	int err = 0;
4983 	int num_sge;
4984 	void *seg;
4985 	int nreq;
4986 	int i;
4987 	u8 next_fence = 0;
4988 	u8 fence;
4989 
4990 	if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
4991 		     !drain)) {
4992 		*bad_wr = wr;
4993 		return -EIO;
4994 	}
4995 
4996 	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
4997 		return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
4998 
4999 	qp = to_mqp(ibqp);
5000 	bf = &qp->bf;
5001 
5002 	spin_lock_irqsave(&qp->sq.lock, flags);
5003 
5004 	for (nreq = 0; wr; nreq++, wr = wr->next) {
5005 		if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
5006 			mlx5_ib_warn(dev, "\n");
5007 			err = -EINVAL;
5008 			*bad_wr = wr;
5009 			goto out;
5010 		}
5011 
5012 		num_sge = wr->num_sge;
5013 		if (unlikely(num_sge > qp->sq.max_gs)) {
5014 			mlx5_ib_warn(dev, "\n");
5015 			err = -EINVAL;
5016 			*bad_wr = wr;
5017 			goto out;
5018 		}
5019 
5020 		err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge,
5021 				nreq);
5022 		if (err) {
5023 			mlx5_ib_warn(dev, "\n");
5024 			err = -ENOMEM;
5025 			*bad_wr = wr;
5026 			goto out;
5027 		}
5028 
5029 		if (wr->opcode == IB_WR_REG_MR ||
5030 		    wr->opcode == IB_WR_REG_MR_INTEGRITY) {
5031 			fence = dev->umr_fence;
5032 			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
5033 		} else  {
5034 			if (wr->send_flags & IB_SEND_FENCE) {
5035 				if (qp->next_fence)
5036 					fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
5037 				else
5038 					fence = MLX5_FENCE_MODE_FENCE;
5039 			} else {
5040 				fence = qp->next_fence;
5041 			}
5042 		}
5043 
5044 		switch (ibqp->qp_type) {
5045 		case IB_QPT_XRC_INI:
5046 			xrc = seg;
5047 			seg += sizeof(*xrc);
5048 			size += sizeof(*xrc) / 16;
5049 			/* fall through */
5050 		case IB_QPT_RC:
5051 			switch (wr->opcode) {
5052 			case IB_WR_RDMA_READ:
5053 			case IB_WR_RDMA_WRITE:
5054 			case IB_WR_RDMA_WRITE_WITH_IMM:
5055 				set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
5056 					      rdma_wr(wr)->rkey);
5057 				seg += sizeof(struct mlx5_wqe_raddr_seg);
5058 				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
5059 				break;
5060 
5061 			case IB_WR_ATOMIC_CMP_AND_SWP:
5062 			case IB_WR_ATOMIC_FETCH_AND_ADD:
5063 			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
5064 				mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
5065 				err = -ENOSYS;
5066 				*bad_wr = wr;
5067 				goto out;
5068 
5069 			case IB_WR_LOCAL_INV:
5070 				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
5071 				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
5072 				set_linv_wr(qp, &seg, &size, &cur_edge);
5073 				num_sge = 0;
5074 				break;
5075 
5076 			case IB_WR_REG_MR:
5077 				qp->sq.wr_data[idx] = IB_WR_REG_MR;
5078 				ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
5079 				err = set_reg_wr(qp, reg_wr(wr), &seg, &size,
5080 						 &cur_edge, true);
5081 				if (err) {
5082 					*bad_wr = wr;
5083 					goto out;
5084 				}
5085 				num_sge = 0;
5086 				break;
5087 
5088 			case IB_WR_REG_MR_INTEGRITY:
5089 				qp->sq.wr_data[idx] = IB_WR_REG_MR_INTEGRITY;
5090 
5091 				mr = to_mmr(reg_wr(wr)->mr);
5092 				pi_mr = mr->pi_mr;
5093 
5094 				if (pi_mr) {
5095 					memset(&reg_pi_wr, 0,
5096 					       sizeof(struct ib_reg_wr));
5097 
5098 					reg_pi_wr.mr = &pi_mr->ibmr;
5099 					reg_pi_wr.access = reg_wr(wr)->access;
5100 					reg_pi_wr.key = pi_mr->ibmr.rkey;
5101 
5102 					ctrl->imm = cpu_to_be32(reg_pi_wr.key);
5103 					/* UMR for data + prot registration */
5104 					err = set_reg_wr(qp, &reg_pi_wr, &seg,
5105 							 &size, &cur_edge,
5106 							 false);
5107 					if (err) {
5108 						*bad_wr = wr;
5109 						goto out;
5110 					}
5111 					finish_wqe(qp, ctrl, seg, size,
5112 						   cur_edge, idx, wr->wr_id,
5113 						   nreq, fence,
5114 						   MLX5_OPCODE_UMR);
5115 
5116 					err = begin_wqe(qp, &seg, &ctrl, wr,
5117 							&idx, &size, &cur_edge,
5118 							nreq);
5119 					if (err) {
5120 						mlx5_ib_warn(dev, "\n");
5121 						err = -ENOMEM;
5122 						*bad_wr = wr;
5123 						goto out;
5124 					}
5125 				} else {
5126 					memset(&pa_pi_mr, 0,
5127 					       sizeof(struct mlx5_ib_mr));
5128 					/* No UMR, use local_dma_lkey */
5129 					pa_pi_mr.ibmr.lkey =
5130 						mr->ibmr.pd->local_dma_lkey;
5131 
5132 					pa_pi_mr.ndescs = mr->ndescs;
5133 					pa_pi_mr.data_length = mr->data_length;
5134 					pa_pi_mr.data_iova = mr->data_iova;
5135 					if (mr->meta_ndescs) {
5136 						pa_pi_mr.meta_ndescs =
5137 							mr->meta_ndescs;
5138 						pa_pi_mr.meta_length =
5139 							mr->meta_length;
5140 						pa_pi_mr.pi_iova = mr->pi_iova;
5141 					}
5142 
5143 					pa_pi_mr.ibmr.length = mr->ibmr.length;
5144 					mr->pi_mr = &pa_pi_mr;
5145 				}
5146 				ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
5147 				/* UMR for sig MR */
5148 				err = set_pi_umr_wr(wr, qp, &seg, &size,
5149 						    &cur_edge);
5150 				if (err) {
5151 					mlx5_ib_warn(dev, "\n");
5152 					*bad_wr = wr;
5153 					goto out;
5154 				}
5155 				finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
5156 					   wr->wr_id, nreq, fence,
5157 					   MLX5_OPCODE_UMR);
5158 
5159 				/*
5160 				 * SET_PSV WQEs are not signaled and solicited
5161 				 * on error
5162 				 */
5163 				sig_attrs = mr->ibmr.sig_attrs;
5164 				err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
5165 						  &size, &cur_edge, nreq, false,
5166 						  true);
5167 				if (err) {
5168 					mlx5_ib_warn(dev, "\n");
5169 					err = -ENOMEM;
5170 					*bad_wr = wr;
5171 					goto out;
5172 				}
5173 				err = set_psv_wr(&sig_attrs->mem,
5174 						 mr->sig->psv_memory.psv_idx,
5175 						 &seg, &size);
5176 				if (err) {
5177 					mlx5_ib_warn(dev, "\n");
5178 					*bad_wr = wr;
5179 					goto out;
5180 				}
5181 				finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
5182 					   wr->wr_id, nreq, next_fence,
5183 					   MLX5_OPCODE_SET_PSV);
5184 
5185 				err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
5186 						  &size, &cur_edge, nreq, false,
5187 						  true);
5188 				if (err) {
5189 					mlx5_ib_warn(dev, "\n");
5190 					err = -ENOMEM;
5191 					*bad_wr = wr;
5192 					goto out;
5193 				}
5194 				err = set_psv_wr(&sig_attrs->wire,
5195 						 mr->sig->psv_wire.psv_idx,
5196 						 &seg, &size);
5197 				if (err) {
5198 					mlx5_ib_warn(dev, "\n");
5199 					*bad_wr = wr;
5200 					goto out;
5201 				}
5202 				finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
5203 					   wr->wr_id, nreq, next_fence,
5204 					   MLX5_OPCODE_SET_PSV);
5205 
5206 				qp->next_fence =
5207 					MLX5_FENCE_MODE_INITIATOR_SMALL;
5208 				num_sge = 0;
5209 				goto skip_psv;
5210 
5211 			default:
5212 				break;
5213 			}
5214 			break;
5215 
5216 		case IB_QPT_UC:
5217 			switch (wr->opcode) {
5218 			case IB_WR_RDMA_WRITE:
5219 			case IB_WR_RDMA_WRITE_WITH_IMM:
5220 				set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
5221 					      rdma_wr(wr)->rkey);
5222 				seg  += sizeof(struct mlx5_wqe_raddr_seg);
5223 				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
5224 				break;
5225 
5226 			default:
5227 				break;
5228 			}
5229 			break;
5230 
5231 		case IB_QPT_SMI:
5232 			if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) {
5233 				mlx5_ib_warn(dev, "Send SMP MADs is not allowed\n");
5234 				err = -EPERM;
5235 				*bad_wr = wr;
5236 				goto out;
5237 			}
5238 			/* fall through */
5239 		case MLX5_IB_QPT_HW_GSI:
5240 			set_datagram_seg(seg, wr);
5241 			seg += sizeof(struct mlx5_wqe_datagram_seg);
5242 			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
5243 			handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
5244 
5245 			break;
5246 		case IB_QPT_UD:
5247 			set_datagram_seg(seg, wr);
5248 			seg += sizeof(struct mlx5_wqe_datagram_seg);
5249 			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
5250 			handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
5251 
5252 			/* handle qp that supports ud offload */
5253 			if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
5254 				struct mlx5_wqe_eth_pad *pad;
5255 
5256 				pad = seg;
5257 				memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
5258 				seg += sizeof(struct mlx5_wqe_eth_pad);
5259 				size += sizeof(struct mlx5_wqe_eth_pad) / 16;
5260 				set_eth_seg(wr, qp, &seg, &size, &cur_edge);
5261 				handle_post_send_edge(&qp->sq, &seg, size,
5262 						      &cur_edge);
5263 			}
5264 			break;
5265 		case MLX5_IB_QPT_REG_UMR:
5266 			if (wr->opcode != MLX5_IB_WR_UMR) {
5267 				err = -EINVAL;
5268 				mlx5_ib_warn(dev, "bad opcode\n");
5269 				goto out;
5270 			}
5271 			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
5272 			ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
5273 			err = set_reg_umr_segment(dev, seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
5274 			if (unlikely(err))
5275 				goto out;
5276 			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
5277 			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
5278 			handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
5279 			set_reg_mkey_segment(seg, wr);
5280 			seg += sizeof(struct mlx5_mkey_seg);
5281 			size += sizeof(struct mlx5_mkey_seg) / 16;
5282 			handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
5283 			break;
5284 
5285 		default:
5286 			break;
5287 		}
5288 
5289 		if (wr->send_flags & IB_SEND_INLINE && num_sge) {
5290 			err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge);
5291 			if (unlikely(err)) {
5292 				mlx5_ib_warn(dev, "\n");
5293 				*bad_wr = wr;
5294 				goto out;
5295 			}
5296 		} else {
5297 			for (i = 0; i < num_sge; i++) {
5298 				handle_post_send_edge(&qp->sq, &seg, size,
5299 						      &cur_edge);
5300 				if (likely(wr->sg_list[i].length)) {
5301 					set_data_ptr_seg
5302 					((struct mlx5_wqe_data_seg *)seg,
5303 					 wr->sg_list + i);
5304 					size += sizeof(struct mlx5_wqe_data_seg) / 16;
5305 					seg += sizeof(struct mlx5_wqe_data_seg);
5306 				}
5307 			}
5308 		}
5309 
5310 		qp->next_fence = next_fence;
5311 		finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq,
5312 			   fence, mlx5_ib_opcode[wr->opcode]);
5313 skip_psv:
5314 		if (0)
5315 			dump_wqe(qp, idx, size);
5316 	}
5317 
5318 out:
5319 	if (likely(nreq)) {
5320 		qp->sq.head += nreq;
5321 
5322 		/* Make sure that descriptors are written before
5323 		 * updating doorbell record and ringing the doorbell
5324 		 */
5325 		wmb();
5326 
5327 		qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
5328 
5329 		/* Make sure doorbell record is visible to the HCA before
5330 		 * we hit doorbell */
5331 		wmb();
5332 
5333 		/* currently we support only regular doorbells */
5334 		mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
5335 		/* Make sure doorbells don't leak out of SQ spinlock
5336 		 * and reach the HCA out of order.
5337 		 */
5338 		bf->offset ^= bf->buf_size;
5339 	}
5340 
5341 	spin_unlock_irqrestore(&qp->sq.lock, flags);
5342 
5343 	return err;
5344 }
5345 
5346 int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
5347 		      const struct ib_send_wr **bad_wr)
5348 {
5349 	return _mlx5_ib_post_send(ibqp, wr, bad_wr, false);
5350 }
5351 
5352 static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
5353 {
5354 	sig->signature = calc_sig(sig, size);
5355 }
5356 
5357 static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
5358 		      const struct ib_recv_wr **bad_wr, bool drain)
5359 {
5360 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
5361 	struct mlx5_wqe_data_seg *scat;
5362 	struct mlx5_rwqe_sig *sig;
5363 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
5364 	struct mlx5_core_dev *mdev = dev->mdev;
5365 	unsigned long flags;
5366 	int err = 0;
5367 	int nreq;
5368 	int ind;
5369 	int i;
5370 
5371 	if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
5372 		     !drain)) {
5373 		*bad_wr = wr;
5374 		return -EIO;
5375 	}
5376 
5377 	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
5378 		return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
5379 
5380 	spin_lock_irqsave(&qp->rq.lock, flags);
5381 
5382 	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
5383 
5384 	for (nreq = 0; wr; nreq++, wr = wr->next) {
5385 		if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
5386 			err = -ENOMEM;
5387 			*bad_wr = wr;
5388 			goto out;
5389 		}
5390 
5391 		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
5392 			err = -EINVAL;
5393 			*bad_wr = wr;
5394 			goto out;
5395 		}
5396 
5397 		scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
5398 		if (qp->wq_sig)
5399 			scat++;
5400 
5401 		for (i = 0; i < wr->num_sge; i++)
5402 			set_data_ptr_seg(scat + i, wr->sg_list + i);
5403 
5404 		if (i < qp->rq.max_gs) {
5405 			scat[i].byte_count = 0;
5406 			scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
5407 			scat[i].addr       = 0;
5408 		}
5409 
5410 		if (qp->wq_sig) {
5411 			sig = (struct mlx5_rwqe_sig *)scat;
5412 			set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
5413 		}
5414 
5415 		qp->rq.wrid[ind] = wr->wr_id;
5416 
5417 		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
5418 	}
5419 
5420 out:
5421 	if (likely(nreq)) {
5422 		qp->rq.head += nreq;
5423 
5424 		/* Make sure that descriptors are written before
5425 		 * doorbell record.
5426 		 */
5427 		wmb();
5428 
5429 		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
5430 	}
5431 
5432 	spin_unlock_irqrestore(&qp->rq.lock, flags);
5433 
5434 	return err;
5435 }
5436 
5437 int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
5438 		      const struct ib_recv_wr **bad_wr)
5439 {
5440 	return _mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
5441 }
5442 
5443 static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
5444 {
5445 	switch (mlx5_state) {
5446 	case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
5447 	case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
5448 	case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
5449 	case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
5450 	case MLX5_QP_STATE_SQ_DRAINING:
5451 	case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
5452 	case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
5453 	case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
5454 	default:		     return -1;
5455 	}
5456 }
5457 
5458 static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
5459 {
5460 	switch (mlx5_mig_state) {
5461 	case MLX5_QP_PM_ARMED:		return IB_MIG_ARMED;
5462 	case MLX5_QP_PM_REARM:		return IB_MIG_REARM;
5463 	case MLX5_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
5464 	default: return -1;
5465 	}
5466 }
5467 
5468 static int to_ib_qp_access_flags(int mlx5_flags)
5469 {
5470 	int ib_flags = 0;
5471 
5472 	if (mlx5_flags & MLX5_QP_BIT_RRE)
5473 		ib_flags |= IB_ACCESS_REMOTE_READ;
5474 	if (mlx5_flags & MLX5_QP_BIT_RWE)
5475 		ib_flags |= IB_ACCESS_REMOTE_WRITE;
5476 	if (mlx5_flags & MLX5_QP_BIT_RAE)
5477 		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
5478 
5479 	return ib_flags;
5480 }
5481 
5482 static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
5483 			    struct rdma_ah_attr *ah_attr,
5484 			    struct mlx5_qp_path *path)
5485 {
5486 
5487 	memset(ah_attr, 0, sizeof(*ah_attr));
5488 
5489 	if (!path->port || path->port > ibdev->num_ports)
5490 		return;
5491 
5492 	ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
5493 
5494 	rdma_ah_set_port_num(ah_attr, path->port);
5495 	rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);
5496 
5497 	rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
5498 	rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
5499 	rdma_ah_set_static_rate(ah_attr,
5500 				path->static_rate ? path->static_rate - 5 : 0);
5501 	if (path->grh_mlid & (1 << 7)) {
5502 		u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);
5503 
5504 		rdma_ah_set_grh(ah_attr, NULL,
5505 				tc_fl & 0xfffff,
5506 				path->mgid_index,
5507 				path->hop_limit,
5508 				(tc_fl >> 20) & 0xff);
5509 		rdma_ah_set_dgid_raw(ah_attr, path->rgid);
5510 	}
5511 }
5512 
5513 static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev,
5514 					struct mlx5_ib_sq *sq,
5515 					u8 *sq_state)
5516 {
5517 	int err;
5518 
5519 	err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state);
5520 	if (err)
5521 		goto out;
5522 	sq->state = *sq_state;
5523 
5524 out:
5525 	return err;
5526 }
5527 
5528 static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev,
5529 					struct mlx5_ib_rq *rq,
5530 					u8 *rq_state)
5531 {
5532 	void *out;
5533 	void *rqc;
5534 	int inlen;
5535 	int err;
5536 
5537 	inlen = MLX5_ST_SZ_BYTES(query_rq_out);
5538 	out = kvzalloc(inlen, GFP_KERNEL);
5539 	if (!out)
5540 		return -ENOMEM;
5541 
5542 	err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
5543 	if (err)
5544 		goto out;
5545 
5546 	rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
5547 	*rq_state = MLX5_GET(rqc, rqc, state);
5548 	rq->state = *rq_state;
5549 
5550 out:
5551 	kvfree(out);
5552 	return err;
5553 }
5554 
5555 static int sqrq_state_to_qp_state(u8 sq_state, u8 rq_state,
5556 				  struct mlx5_ib_qp *qp, u8 *qp_state)
5557 {
5558 	static const u8 sqrq_trans[MLX5_RQ_NUM_STATE][MLX5_SQ_NUM_STATE] = {
5559 		[MLX5_RQC_STATE_RST] = {
5560 			[MLX5_SQC_STATE_RST]	= IB_QPS_RESET,
5561 			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE_BAD,
5562 			[MLX5_SQC_STATE_ERR]	= MLX5_QP_STATE_BAD,
5563 			[MLX5_SQ_STATE_NA]	= IB_QPS_RESET,
5564 		},
5565 		[MLX5_RQC_STATE_RDY] = {
5566 			[MLX5_SQC_STATE_RST]	= MLX5_QP_STATE_BAD,
5567 			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE,
5568 			[MLX5_SQC_STATE_ERR]	= IB_QPS_SQE,
5569 			[MLX5_SQ_STATE_NA]	= MLX5_QP_STATE,
5570 		},
5571 		[MLX5_RQC_STATE_ERR] = {
5572 			[MLX5_SQC_STATE_RST]    = MLX5_QP_STATE_BAD,
5573 			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE_BAD,
5574 			[MLX5_SQC_STATE_ERR]	= IB_QPS_ERR,
5575 			[MLX5_SQ_STATE_NA]	= IB_QPS_ERR,
5576 		},
5577 		[MLX5_RQ_STATE_NA] = {
5578 			[MLX5_SQC_STATE_RST]    = IB_QPS_RESET,
5579 			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE,
5580 			[MLX5_SQC_STATE_ERR]	= MLX5_QP_STATE,
5581 			[MLX5_SQ_STATE_NA]	= MLX5_QP_STATE_BAD,
5582 		},
5583 	};
5584 
5585 	*qp_state = sqrq_trans[rq_state][sq_state];
5586 
5587 	if (*qp_state == MLX5_QP_STATE_BAD) {
5588 		WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x",
5589 		     qp->raw_packet_qp.sq.base.mqp.qpn, sq_state,
5590 		     qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
5591 		return -EINVAL;
5592 	}
5593 
5594 	if (*qp_state == MLX5_QP_STATE)
5595 		*qp_state = qp->state;
5596 
5597 	return 0;
5598 }
5599 
5600 static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev,
5601 				     struct mlx5_ib_qp *qp,
5602 				     u8 *raw_packet_qp_state)
5603 {
5604 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
5605 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
5606 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
5607 	int err;
5608 	u8 sq_state = MLX5_SQ_STATE_NA;
5609 	u8 rq_state = MLX5_RQ_STATE_NA;
5610 
5611 	if (qp->sq.wqe_cnt) {
5612 		err = query_raw_packet_qp_sq_state(dev, sq, &sq_state);
5613 		if (err)
5614 			return err;
5615 	}
5616 
5617 	if (qp->rq.wqe_cnt) {
5618 		err = query_raw_packet_qp_rq_state(dev, rq, &rq_state);
5619 		if (err)
5620 			return err;
5621 	}
5622 
5623 	return sqrq_state_to_qp_state(sq_state, rq_state, qp,
5624 				      raw_packet_qp_state);
5625 }
5626 
5627 static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
5628 			 struct ib_qp_attr *qp_attr)
5629 {
5630 	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
5631 	struct mlx5_qp_context *context;
5632 	int mlx5_state;
5633 	u32 *outb;
5634 	int err = 0;
5635 
5636 	outb = kzalloc(outlen, GFP_KERNEL);
5637 	if (!outb)
5638 		return -ENOMEM;
5639 
5640 	err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
5641 				 outlen);
5642 	if (err)
5643 		goto out;
5644 
5645 	/* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
5646 	context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc);
5647 
5648 	mlx5_state = be32_to_cpu(context->flags) >> 28;
5649 
5650 	qp->state		     = to_ib_qp_state(mlx5_state);
5651 	qp_attr->path_mtu	     = context->mtu_msgmax >> 5;
5652 	qp_attr->path_mig_state	     =
5653 		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
5654 	qp_attr->qkey		     = be32_to_cpu(context->qkey);
5655 	qp_attr->rq_psn		     = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
5656 	qp_attr->sq_psn		     = be32_to_cpu(context->next_send_psn) & 0xffffff;
5657 	qp_attr->dest_qp_num	     = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
5658 	qp_attr->qp_access_flags     =
5659 		to_ib_qp_access_flags(be32_to_cpu(context->params2));
5660 
5661 	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
5662 		to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
5663 		to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
5664 		qp_attr->alt_pkey_index =
5665 			be16_to_cpu(context->alt_path.pkey_index);
5666 		qp_attr->alt_port_num	=
5667 			rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
5668 	}
5669 
5670 	qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
5671 	qp_attr->port_num = context->pri_path.port;
5672 
5673 	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
5674 	qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
5675 
5676 	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
5677 
5678 	qp_attr->max_dest_rd_atomic =
5679 		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
5680 	qp_attr->min_rnr_timer	    =
5681 		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
5682 	qp_attr->timeout	    = context->pri_path.ackto_lt >> 3;
5683 	qp_attr->retry_cnt	    = (be32_to_cpu(context->params1) >> 16) & 0x7;
5684 	qp_attr->rnr_retry	    = (be32_to_cpu(context->params1) >> 13) & 0x7;
5685 	qp_attr->alt_timeout	    = context->alt_path.ackto_lt >> 3;
5686 
5687 out:
5688 	kfree(outb);
5689 	return err;
5690 }
5691 
5692 static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
5693 				struct ib_qp_attr *qp_attr, int qp_attr_mask,
5694 				struct ib_qp_init_attr *qp_init_attr)
5695 {
5696 	struct mlx5_core_dct	*dct = &mqp->dct.mdct;
5697 	u32 *out;
5698 	u32 access_flags = 0;
5699 	int outlen = MLX5_ST_SZ_BYTES(query_dct_out);
5700 	void *dctc;
5701 	int err;
5702 	int supported_mask = IB_QP_STATE |
5703 			     IB_QP_ACCESS_FLAGS |
5704 			     IB_QP_PORT |
5705 			     IB_QP_MIN_RNR_TIMER |
5706 			     IB_QP_AV |
5707 			     IB_QP_PATH_MTU |
5708 			     IB_QP_PKEY_INDEX;
5709 
5710 	if (qp_attr_mask & ~supported_mask)
5711 		return -EINVAL;
5712 	if (mqp->state != IB_QPS_RTR)
5713 		return -EINVAL;
5714 
5715 	out = kzalloc(outlen, GFP_KERNEL);
5716 	if (!out)
5717 		return -ENOMEM;
5718 
5719 	err = mlx5_core_dct_query(dev->mdev, dct, out, outlen);
5720 	if (err)
5721 		goto out;
5722 
5723 	dctc = MLX5_ADDR_OF(query_dct_out, out, dct_context_entry);
5724 
5725 	if (qp_attr_mask & IB_QP_STATE)
5726 		qp_attr->qp_state = IB_QPS_RTR;
5727 
5728 	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
5729 		if (MLX5_GET(dctc, dctc, rre))
5730 			access_flags |= IB_ACCESS_REMOTE_READ;
5731 		if (MLX5_GET(dctc, dctc, rwe))
5732 			access_flags |= IB_ACCESS_REMOTE_WRITE;
5733 		if (MLX5_GET(dctc, dctc, rae))
5734 			access_flags |= IB_ACCESS_REMOTE_ATOMIC;
5735 		qp_attr->qp_access_flags = access_flags;
5736 	}
5737 
5738 	if (qp_attr_mask & IB_QP_PORT)
5739 		qp_attr->port_num = MLX5_GET(dctc, dctc, port);
5740 	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
5741 		qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
5742 	if (qp_attr_mask & IB_QP_AV) {
5743 		qp_attr->ah_attr.grh.traffic_class = MLX5_GET(dctc, dctc, tclass);
5744 		qp_attr->ah_attr.grh.flow_label = MLX5_GET(dctc, dctc, flow_label);
5745 		qp_attr->ah_attr.grh.sgid_index = MLX5_GET(dctc, dctc, my_addr_index);
5746 		qp_attr->ah_attr.grh.hop_limit = MLX5_GET(dctc, dctc, hop_limit);
5747 	}
5748 	if (qp_attr_mask & IB_QP_PATH_MTU)
5749 		qp_attr->path_mtu = MLX5_GET(dctc, dctc, mtu);
5750 	if (qp_attr_mask & IB_QP_PKEY_INDEX)
5751 		qp_attr->pkey_index = MLX5_GET(dctc, dctc, pkey_index);
5752 out:
5753 	kfree(out);
5754 	return err;
5755 }
5756 
5757 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
5758 		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
5759 {
5760 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
5761 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
5762 	int err = 0;
5763 	u8 raw_packet_qp_state;
5764 
5765 	if (ibqp->rwq_ind_tbl)
5766 		return -ENOSYS;
5767 
5768 	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
5769 		return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
5770 					    qp_init_attr);
5771 
5772 	/* Not all of output fields are applicable, make sure to zero them */
5773 	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
5774 	memset(qp_attr, 0, sizeof(*qp_attr));
5775 
5776 	if (unlikely(qp->qp_sub_type == MLX5_IB_QPT_DCT))
5777 		return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
5778 					    qp_attr_mask, qp_init_attr);
5779 
5780 	mutex_lock(&qp->mutex);
5781 
5782 	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
5783 	    qp->flags & MLX5_IB_QP_UNDERLAY) {
5784 		err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
5785 		if (err)
5786 			goto out;
5787 		qp->state = raw_packet_qp_state;
5788 		qp_attr->port_num = 1;
5789 	} else {
5790 		err = query_qp_attr(dev, qp, qp_attr);
5791 		if (err)
5792 			goto out;
5793 	}
5794 
5795 	qp_attr->qp_state	     = qp->state;
5796 	qp_attr->cur_qp_state	     = qp_attr->qp_state;
5797 	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
5798 	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
5799 
5800 	if (!ibqp->uobject) {
5801 		qp_attr->cap.max_send_wr  = qp->sq.max_post;
5802 		qp_attr->cap.max_send_sge = qp->sq.max_gs;
5803 		qp_init_attr->qp_context = ibqp->qp_context;
5804 	} else {
5805 		qp_attr->cap.max_send_wr  = 0;
5806 		qp_attr->cap.max_send_sge = 0;
5807 	}
5808 
5809 	qp_init_attr->qp_type = ibqp->qp_type;
5810 	qp_init_attr->recv_cq = ibqp->recv_cq;
5811 	qp_init_attr->send_cq = ibqp->send_cq;
5812 	qp_init_attr->srq = ibqp->srq;
5813 	qp_attr->cap.max_inline_data = qp->max_inline_data;
5814 
5815 	qp_init_attr->cap	     = qp_attr->cap;
5816 
5817 	qp_init_attr->create_flags = 0;
5818 	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
5819 		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
5820 
5821 	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
5822 		qp_init_attr->create_flags |= IB_QP_CREATE_CROSS_CHANNEL;
5823 	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
5824 		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
5825 	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
5826 		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
5827 	if (qp->flags & MLX5_IB_QP_SQPN_QP1)
5828 		qp_init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();
5829 
5830 	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
5831 		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
5832 
5833 out:
5834 	mutex_unlock(&qp->mutex);
5835 	return err;
5836 }
5837 
5838 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
5839 				   struct ib_udata *udata)
5840 {
5841 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
5842 	struct mlx5_ib_xrcd *xrcd;
5843 	int err;
5844 
5845 	if (!MLX5_CAP_GEN(dev->mdev, xrc))
5846 		return ERR_PTR(-ENOSYS);
5847 
5848 	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
5849 	if (!xrcd)
5850 		return ERR_PTR(-ENOMEM);
5851 
5852 	err = mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
5853 	if (err) {
5854 		kfree(xrcd);
5855 		return ERR_PTR(-ENOMEM);
5856 	}
5857 
5858 	return &xrcd->ibxrcd;
5859 }
5860 
5861 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
5862 {
5863 	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
5864 	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
5865 	int err;
5866 
5867 	err = mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
5868 	if (err)
5869 		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
5870 
5871 	kfree(xrcd);
5872 	return 0;
5873 }
5874 
5875 static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
5876 {
5877 	struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
5878 	struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
5879 	struct ib_event event;
5880 
5881 	if (rwq->ibwq.event_handler) {
5882 		event.device     = rwq->ibwq.device;
5883 		event.element.wq = &rwq->ibwq;
5884 		switch (type) {
5885 		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
5886 			event.event = IB_EVENT_WQ_FATAL;
5887 			break;
5888 		default:
5889 			mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
5890 			return;
5891 		}
5892 
5893 		rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
5894 	}
5895 }
5896 
5897 static int set_delay_drop(struct mlx5_ib_dev *dev)
5898 {
5899 	int err = 0;
5900 
5901 	mutex_lock(&dev->delay_drop.lock);
5902 	if (dev->delay_drop.activate)
5903 		goto out;
5904 
5905 	err = mlx5_core_set_delay_drop(dev->mdev, dev->delay_drop.timeout);
5906 	if (err)
5907 		goto out;
5908 
5909 	dev->delay_drop.activate = true;
5910 out:
5911 	mutex_unlock(&dev->delay_drop.lock);
5912 
5913 	if (!err)
5914 		atomic_inc(&dev->delay_drop.rqs_cnt);
5915 	return err;
5916 }
5917 
5918 static int  create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
5919 		      struct ib_wq_init_attr *init_attr)
5920 {
5921 	struct mlx5_ib_dev *dev;
5922 	int has_net_offloads;
5923 	__be64 *rq_pas0;
5924 	void *in;
5925 	void *rqc;
5926 	void *wq;
5927 	int inlen;
5928 	int err;
5929 
5930 	dev = to_mdev(pd->device);
5931 
5932 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
5933 	in = kvzalloc(inlen, GFP_KERNEL);
5934 	if (!in)
5935 		return -ENOMEM;
5936 
5937 	MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
5938 	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
5939 	MLX5_SET(rqc,  rqc, mem_rq_type,
5940 		 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
5941 	MLX5_SET(rqc, rqc, user_index, rwq->user_index);
5942 	MLX5_SET(rqc,  rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
5943 	MLX5_SET(rqc,  rqc, state, MLX5_RQC_STATE_RST);
5944 	MLX5_SET(rqc,  rqc, flush_in_error_en, 1);
5945 	wq = MLX5_ADDR_OF(rqc, rqc, wq);
5946 	MLX5_SET(wq, wq, wq_type,
5947 		 rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ?
5948 		 MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ : MLX5_WQ_TYPE_CYCLIC);
5949 	if (init_attr->create_flags & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
5950 		if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
5951 			mlx5_ib_dbg(dev, "Scatter end padding is not supported\n");
5952 			err = -EOPNOTSUPP;
5953 			goto out;
5954 		} else {
5955 			MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
5956 		}
5957 	}
5958 	MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
5959 	if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
5960 		MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
5961 		MLX5_SET(wq, wq, log_wqe_stride_size,
5962 			 rwq->single_stride_log_num_of_bytes -
5963 			 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
5964 		MLX5_SET(wq, wq, log_wqe_num_of_strides, rwq->log_num_strides -
5965 			 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES);
5966 	}
5967 	MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
5968 	MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
5969 	MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
5970 	MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
5971 	MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
5972 	MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
5973 	has_net_offloads = MLX5_CAP_GEN(dev->mdev, eth_net_offloads);
5974 	if (init_attr->create_flags & IB_WQ_FLAGS_CVLAN_STRIPPING) {
5975 		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
5976 			mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
5977 			err = -EOPNOTSUPP;
5978 			goto out;
5979 		}
5980 	} else {
5981 		MLX5_SET(rqc, rqc, vsd, 1);
5982 	}
5983 	if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) {
5984 		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, scatter_fcs))) {
5985 			mlx5_ib_dbg(dev, "Scatter FCS is not supported\n");
5986 			err = -EOPNOTSUPP;
5987 			goto out;
5988 		}
5989 		MLX5_SET(rqc, rqc, scatter_fcs, 1);
5990 	}
5991 	if (init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
5992 		if (!(dev->ib_dev.attrs.raw_packet_caps &
5993 		      IB_RAW_PACKET_CAP_DELAY_DROP)) {
5994 			mlx5_ib_dbg(dev, "Delay drop is not supported\n");
5995 			err = -EOPNOTSUPP;
5996 			goto out;
5997 		}
5998 		MLX5_SET(rqc, rqc, delay_drop_en, 1);
5999 	}
6000 	rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
6001 	mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
6002 	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
6003 	if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
6004 		err = set_delay_drop(dev);
6005 		if (err) {
6006 			mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
6007 				     err);
6008 			mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
6009 		} else {
6010 			rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
6011 		}
6012 	}
6013 out:
6014 	kvfree(in);
6015 	return err;
6016 }
6017 
6018 static int set_user_rq_size(struct mlx5_ib_dev *dev,
6019 			    struct ib_wq_init_attr *wq_init_attr,
6020 			    struct mlx5_ib_create_wq *ucmd,
6021 			    struct mlx5_ib_rwq *rwq)
6022 {
6023 	/* Sanity check RQ size before proceeding */
6024 	if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
6025 		return -EINVAL;
6026 
6027 	if (!ucmd->rq_wqe_count)
6028 		return -EINVAL;
6029 
6030 	rwq->wqe_count = ucmd->rq_wqe_count;
6031 	rwq->wqe_shift = ucmd->rq_wqe_shift;
6032 	if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size))
6033 		return -EINVAL;
6034 
6035 	rwq->log_rq_stride = rwq->wqe_shift;
6036 	rwq->log_rq_size = ilog2(rwq->wqe_count);
6037 	return 0;
6038 }
6039 
6040 static int prepare_user_rq(struct ib_pd *pd,
6041 			   struct ib_wq_init_attr *init_attr,
6042 			   struct ib_udata *udata,
6043 			   struct mlx5_ib_rwq *rwq)
6044 {
6045 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
6046 	struct mlx5_ib_create_wq ucmd = {};
6047 	int err;
6048 	size_t required_cmd_sz;
6049 
6050 	required_cmd_sz = offsetof(typeof(ucmd), single_stride_log_num_of_bytes)
6051 		+ sizeof(ucmd.single_stride_log_num_of_bytes);
6052 	if (udata->inlen < required_cmd_sz) {
6053 		mlx5_ib_dbg(dev, "invalid inlen\n");
6054 		return -EINVAL;
6055 	}
6056 
6057 	if (udata->inlen > sizeof(ucmd) &&
6058 	    !ib_is_udata_cleared(udata, sizeof(ucmd),
6059 				 udata->inlen - sizeof(ucmd))) {
6060 		mlx5_ib_dbg(dev, "inlen is not supported\n");
6061 		return -EOPNOTSUPP;
6062 	}
6063 
6064 	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
6065 		mlx5_ib_dbg(dev, "copy failed\n");
6066 		return -EFAULT;
6067 	}
6068 
6069 	if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) {
6070 		mlx5_ib_dbg(dev, "invalid comp mask\n");
6071 		return -EOPNOTSUPP;
6072 	} else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
6073 		if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) {
6074 			mlx5_ib_dbg(dev, "Striding RQ is not supported\n");
6075 			return -EOPNOTSUPP;
6076 		}
6077 		if ((ucmd.single_stride_log_num_of_bytes <
6078 		    MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES) ||
6079 		    (ucmd.single_stride_log_num_of_bytes >
6080 		     MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES)) {
6081 			mlx5_ib_dbg(dev, "Invalid log stride size (%u. Range is %u - %u)\n",
6082 				    ucmd.single_stride_log_num_of_bytes,
6083 				    MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES,
6084 				    MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
6085 			return -EINVAL;
6086 		}
6087 		if ((ucmd.single_wqe_log_num_of_strides >
6088 		    MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
6089 		     (ucmd.single_wqe_log_num_of_strides <
6090 			MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES)) {
6091 			mlx5_ib_dbg(dev, "Invalid log num strides (%u. Range is %u - %u)\n",
6092 				    ucmd.single_wqe_log_num_of_strides,
6093 				    MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
6094 				    MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
6095 			return -EINVAL;
6096 		}
6097 		rwq->single_stride_log_num_of_bytes =
6098 			ucmd.single_stride_log_num_of_bytes;
6099 		rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
6100 		rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
6101 		rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
6102 	}
6103 
6104 	err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
6105 	if (err) {
6106 		mlx5_ib_dbg(dev, "err %d\n", err);
6107 		return err;
6108 	}
6109 
6110 	err = create_user_rq(dev, pd, udata, rwq, &ucmd);
6111 	if (err) {
6112 		mlx5_ib_dbg(dev, "err %d\n", err);
6113 		return err;
6114 	}
6115 
6116 	rwq->user_index = ucmd.user_index;
6117 	return 0;
6118 }
6119 
6120 struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
6121 				struct ib_wq_init_attr *init_attr,
6122 				struct ib_udata *udata)
6123 {
6124 	struct mlx5_ib_dev *dev;
6125 	struct mlx5_ib_rwq *rwq;
6126 	struct mlx5_ib_create_wq_resp resp = {};
6127 	size_t min_resp_len;
6128 	int err;
6129 
6130 	if (!udata)
6131 		return ERR_PTR(-ENOSYS);
6132 
6133 	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
6134 	if (udata->outlen && udata->outlen < min_resp_len)
6135 		return ERR_PTR(-EINVAL);
6136 
6137 	dev = to_mdev(pd->device);
6138 	switch (init_attr->wq_type) {
6139 	case IB_WQT_RQ:
6140 		rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
6141 		if (!rwq)
6142 			return ERR_PTR(-ENOMEM);
6143 		err = prepare_user_rq(pd, init_attr, udata, rwq);
6144 		if (err)
6145 			goto err;
6146 		err = create_rq(rwq, pd, init_attr);
6147 		if (err)
6148 			goto err_user_rq;
6149 		break;
6150 	default:
6151 		mlx5_ib_dbg(dev, "unsupported wq type %d\n",
6152 			    init_attr->wq_type);
6153 		return ERR_PTR(-EINVAL);
6154 	}
6155 
6156 	rwq->ibwq.wq_num = rwq->core_qp.qpn;
6157 	rwq->ibwq.state = IB_WQS_RESET;
6158 	if (udata->outlen) {
6159 		resp.response_length = offsetof(typeof(resp), response_length) +
6160 				sizeof(resp.response_length);
6161 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
6162 		if (err)
6163 			goto err_copy;
6164 	}
6165 
6166 	rwq->core_qp.event = mlx5_ib_wq_event;
6167 	rwq->ibwq.event_handler = init_attr->event_handler;
6168 	return &rwq->ibwq;
6169 
6170 err_copy:
6171 	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
6172 err_user_rq:
6173 	destroy_user_rq(dev, pd, rwq, udata);
6174 err:
6175 	kfree(rwq);
6176 	return ERR_PTR(err);
6177 }
6178 
6179 void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
6180 {
6181 	struct mlx5_ib_dev *dev = to_mdev(wq->device);
6182 	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
6183 
6184 	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
6185 	destroy_user_rq(dev, wq->pd, rwq, udata);
6186 	kfree(rwq);
6187 }
6188 
6189 struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
6190 						      struct ib_rwq_ind_table_init_attr *init_attr,
6191 						      struct ib_udata *udata)
6192 {
6193 	struct mlx5_ib_dev *dev = to_mdev(device);
6194 	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl;
6195 	int sz = 1 << init_attr->log_ind_tbl_size;
6196 	struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
6197 	size_t min_resp_len;
6198 	int inlen;
6199 	int err;
6200 	int i;
6201 	u32 *in;
6202 	void *rqtc;
6203 
6204 	if (udata->inlen > 0 &&
6205 	    !ib_is_udata_cleared(udata, 0,
6206 				 udata->inlen))
6207 		return ERR_PTR(-EOPNOTSUPP);
6208 
6209 	if (init_attr->log_ind_tbl_size >
6210 	    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
6211 		mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
6212 			    init_attr->log_ind_tbl_size,
6213 			    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
6214 		return ERR_PTR(-EINVAL);
6215 	}
6216 
6217 	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
6218 	if (udata->outlen && udata->outlen < min_resp_len)
6219 		return ERR_PTR(-EINVAL);
6220 
6221 	rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL);
6222 	if (!rwq_ind_tbl)
6223 		return ERR_PTR(-ENOMEM);
6224 
6225 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
6226 	in = kvzalloc(inlen, GFP_KERNEL);
6227 	if (!in) {
6228 		err = -ENOMEM;
6229 		goto err;
6230 	}
6231 
6232 	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
6233 
6234 	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
6235 	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
6236 
6237 	for (i = 0; i < sz; i++)
6238 		MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);
6239 
6240 	rwq_ind_tbl->uid = to_mpd(init_attr->ind_tbl[0]->pd)->uid;
6241 	MLX5_SET(create_rqt_in, in, uid, rwq_ind_tbl->uid);
6242 
6243 	err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
6244 	kvfree(in);
6245 
6246 	if (err)
6247 		goto err;
6248 
6249 	rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
6250 	if (udata->outlen) {
6251 		resp.response_length = offsetof(typeof(resp), response_length) +
6252 					sizeof(resp.response_length);
6253 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
6254 		if (err)
6255 			goto err_copy;
6256 	}
6257 
6258 	return &rwq_ind_tbl->ib_rwq_ind_tbl;
6259 
6260 err_copy:
6261 	mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
6262 err:
6263 	kfree(rwq_ind_tbl);
6264 	return ERR_PTR(err);
6265 }
6266 
6267 int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
6268 {
6269 	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
6270 	struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
6271 
6272 	mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
6273 
6274 	kfree(rwq_ind_tbl);
6275 	return 0;
6276 }
6277 
6278 int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
6279 		      u32 wq_attr_mask, struct ib_udata *udata)
6280 {
6281 	struct mlx5_ib_dev *dev = to_mdev(wq->device);
6282 	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
6283 	struct mlx5_ib_modify_wq ucmd = {};
6284 	size_t required_cmd_sz;
6285 	int curr_wq_state;
6286 	int wq_state;
6287 	int inlen;
6288 	int err;
6289 	void *rqc;
6290 	void *in;
6291 
6292 	required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
6293 	if (udata->inlen < required_cmd_sz)
6294 		return -EINVAL;
6295 
6296 	if (udata->inlen > sizeof(ucmd) &&
6297 	    !ib_is_udata_cleared(udata, sizeof(ucmd),
6298 				 udata->inlen - sizeof(ucmd)))
6299 		return -EOPNOTSUPP;
6300 
6301 	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
6302 		return -EFAULT;
6303 
6304 	if (ucmd.comp_mask || ucmd.reserved)
6305 		return -EOPNOTSUPP;
6306 
6307 	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
6308 	in = kvzalloc(inlen, GFP_KERNEL);
6309 	if (!in)
6310 		return -ENOMEM;
6311 
6312 	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
6313 
6314 	curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
6315 		wq_attr->curr_wq_state : wq->state;
6316 	wq_state = (wq_attr_mask & IB_WQ_STATE) ?
6317 		wq_attr->wq_state : curr_wq_state;
6318 	if (curr_wq_state == IB_WQS_ERR)
6319 		curr_wq_state = MLX5_RQC_STATE_ERR;
6320 	if (wq_state == IB_WQS_ERR)
6321 		wq_state = MLX5_RQC_STATE_ERR;
6322 	MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
6323 	MLX5_SET(modify_rq_in, in, uid, to_mpd(wq->pd)->uid);
6324 	MLX5_SET(rqc, rqc, state, wq_state);
6325 
6326 	if (wq_attr_mask & IB_WQ_FLAGS) {
6327 		if (wq_attr->flags_mask & IB_WQ_FLAGS_CVLAN_STRIPPING) {
6328 			if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
6329 			      MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
6330 				mlx5_ib_dbg(dev, "VLAN offloads are not "
6331 					    "supported\n");
6332 				err = -EOPNOTSUPP;
6333 				goto out;
6334 			}
6335 			MLX5_SET64(modify_rq_in, in, modify_bitmask,
6336 				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
6337 			MLX5_SET(rqc, rqc, vsd,
6338 				 (wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ? 0 : 1);
6339 		}
6340 
6341 		if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
6342 			mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n");
6343 			err = -EOPNOTSUPP;
6344 			goto out;
6345 		}
6346 	}
6347 
6348 	if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
6349 		u16 set_id;
6350 
6351 		set_id = mlx5_ib_get_counters_id(dev, 0);
6352 		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
6353 			MLX5_SET64(modify_rq_in, in, modify_bitmask,
6354 				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
6355 			MLX5_SET(rqc, rqc, counter_set_id, set_id);
6356 		} else
6357 			dev_info_once(
6358 				&dev->ib_dev.dev,
6359 				"Receive WQ counters are not supported on current FW\n");
6360 	}
6361 
6362 	err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
6363 	if (!err)
6364 		rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
6365 
6366 out:
6367 	kvfree(in);
6368 	return err;
6369 }
6370 
6371 struct mlx5_ib_drain_cqe {
6372 	struct ib_cqe cqe;
6373 	struct completion done;
6374 };
6375 
6376 static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
6377 {
6378 	struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
6379 						     struct mlx5_ib_drain_cqe,
6380 						     cqe);
6381 
6382 	complete(&cqe->done);
6383 }
6384 
6385 /* This function returns only once the drained WR was completed */
6386 static void handle_drain_completion(struct ib_cq *cq,
6387 				    struct mlx5_ib_drain_cqe *sdrain,
6388 				    struct mlx5_ib_dev *dev)
6389 {
6390 	struct mlx5_core_dev *mdev = dev->mdev;
6391 
6392 	if (cq->poll_ctx == IB_POLL_DIRECT) {
6393 		while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
6394 			ib_process_cq_direct(cq, -1);
6395 		return;
6396 	}
6397 
6398 	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
6399 		struct mlx5_ib_cq *mcq = to_mcq(cq);
6400 		bool triggered = false;
6401 		unsigned long flags;
6402 
6403 		spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
6404 		/* Make sure that the CQ handler won't run if wasn't run yet */
6405 		if (!mcq->mcq.reset_notify_added)
6406 			mcq->mcq.reset_notify_added = 1;
6407 		else
6408 			triggered = true;
6409 		spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
6410 
6411 		if (triggered) {
6412 			/* Wait for any scheduled/running task to be ended */
6413 			switch (cq->poll_ctx) {
6414 			case IB_POLL_SOFTIRQ:
6415 				irq_poll_disable(&cq->iop);
6416 				irq_poll_enable(&cq->iop);
6417 				break;
6418 			case IB_POLL_WORKQUEUE:
6419 				cancel_work_sync(&cq->work);
6420 				break;
6421 			default:
6422 				WARN_ON_ONCE(1);
6423 			}
6424 		}
6425 
6426 		/* Run the CQ handler - this makes sure that the drain WR will
6427 		 * be processed if wasn't processed yet.
6428 		 */
6429 		mcq->mcq.comp(&mcq->mcq, NULL);
6430 	}
6431 
6432 	wait_for_completion(&sdrain->done);
6433 }
6434 
6435 void mlx5_ib_drain_sq(struct ib_qp *qp)
6436 {
6437 	struct ib_cq *cq = qp->send_cq;
6438 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
6439 	struct mlx5_ib_drain_cqe sdrain;
6440 	const struct ib_send_wr *bad_swr;
6441 	struct ib_rdma_wr swr = {
6442 		.wr = {
6443 			.next = NULL,
6444 			{ .wr_cqe	= &sdrain.cqe, },
6445 			.opcode	= IB_WR_RDMA_WRITE,
6446 		},
6447 	};
6448 	int ret;
6449 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
6450 	struct mlx5_core_dev *mdev = dev->mdev;
6451 
6452 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
6453 	if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
6454 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
6455 		return;
6456 	}
6457 
6458 	sdrain.cqe.done = mlx5_ib_drain_qp_done;
6459 	init_completion(&sdrain.done);
6460 
6461 	ret = _mlx5_ib_post_send(qp, &swr.wr, &bad_swr, true);
6462 	if (ret) {
6463 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
6464 		return;
6465 	}
6466 
6467 	handle_drain_completion(cq, &sdrain, dev);
6468 }
6469 
6470 void mlx5_ib_drain_rq(struct ib_qp *qp)
6471 {
6472 	struct ib_cq *cq = qp->recv_cq;
6473 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
6474 	struct mlx5_ib_drain_cqe rdrain;
6475 	struct ib_recv_wr rwr = {};
6476 	const struct ib_recv_wr *bad_rwr;
6477 	int ret;
6478 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
6479 	struct mlx5_core_dev *mdev = dev->mdev;
6480 
6481 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
6482 	if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
6483 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
6484 		return;
6485 	}
6486 
6487 	rwr.wr_cqe = &rdrain.cqe;
6488 	rdrain.cqe.done = mlx5_ib_drain_qp_done;
6489 	init_completion(&rdrain.done);
6490 
6491 	ret = _mlx5_ib_post_recv(qp, &rwr, &bad_rwr, true);
6492 	if (ret) {
6493 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
6494 		return;
6495 	}
6496 
6497 	handle_drain_completion(cq, &rdrain, dev);
6498 }
6499 
6500 /**
6501  * Bind a qp to a counter. If @counter is NULL then bind the qp to
6502  * the default counter
6503  */
6504 int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
6505 {
6506 	struct mlx5_ib_qp *mqp = to_mqp(qp);
6507 	int err = 0;
6508 
6509 	mutex_lock(&mqp->mutex);
6510 	if (mqp->state == IB_QPS_RESET) {
6511 		qp->counter = counter;
6512 		goto out;
6513 	}
6514 
6515 	if (mqp->state == IB_QPS_RTS) {
6516 		err = __mlx5_ib_qp_set_counter(qp, counter);
6517 		if (!err)
6518 			qp->counter = counter;
6519 
6520 		goto out;
6521 	}
6522 
6523 	mqp->counter_pending = 1;
6524 	qp->counter = counter;
6525 
6526 out:
6527 	mutex_unlock(&mqp->mutex);
6528 	return err;
6529 }
6530