xref: /freebsd/sys/dev/mlx4/mlx4_ib/mlx4_ib_qp.c (revision 53b70c86)
1 /*
2  * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/log2.h>
35 #include <linux/slab.h>
36 #include <linux/netdevice.h>
37 #include <linux/bitops.h>
38 #include <linux/rcupdate.h>
39 #include <linux/etherdevice.h>
40 
41 #include <rdma/ib_cache.h>
42 #include <rdma/ib_pack.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_mad.h>
45 #include <rdma/uverbs_ioctl.h>
46 
47 #include <dev/mlx4/cmd.h>
48 #include <dev/mlx4/qp.h>
49 #include <dev/mlx4/driver.h>
50 #include <linux/io.h>
51 
52 #include "mlx4_ib.h"
53 #include <rdma/mlx4-abi.h>
54 
55 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
56 			     struct mlx4_ib_cq *recv_cq);
57 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
58 			       struct mlx4_ib_cq *recv_cq);
59 
60 enum {
61 	MLX4_IB_ACK_REQ_FREQ	= 8,
62 };
63 
64 enum {
65 	MLX4_IB_DEFAULT_SCHED_QUEUE	= 0x83,
66 	MLX4_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
67 	MLX4_IB_LINK_TYPE_IB		= 0,
68 	MLX4_IB_LINK_TYPE_ETH		= 1
69 };
70 
71 enum {
72 	/*
73 	 * Largest possible UD header: send with GRH and immediate
74 	 * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
75 	 * tag.  (LRH would only use 8 bytes, so Ethernet is the
76 	 * biggest case)
77 	 */
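	/*
	 * Illustrative breakdown of the 82-byte figure for the Ethernet case:
	 * 14-byte Ethernet header + 4-byte VLAN tag + 40-byte GRH + 12-byte
	 * BTH + 8-byte DETH + 4 bytes of immediate data = 82 bytes.
	 */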
78 	MLX4_IB_UD_HEADER_SIZE		= 82,
79 	MLX4_IB_LSO_HEADER_SPARE	= 128,
80 };
81 
82 enum {
83 	MLX4_IB_IBOE_ETHERTYPE		= 0x8915
84 };
85 
86 struct mlx4_ib_sqp {
87 	struct mlx4_ib_qp	qp;
88 	int			pkey_index;
89 	u32			qkey;
90 	u32			send_psn;
91 	struct ib_ud_header	ud_header;
92 	u8			header_buf[MLX4_IB_UD_HEADER_SIZE];
93 	struct ib_qp		*roce_v2_gsi;
94 };
95 
96 enum {
97 	MLX4_IB_MIN_SQ_STRIDE	= 6,
98 	MLX4_IB_CACHE_LINE_SIZE	= 64,
99 };
100 
101 enum {
102 	MLX4_RAW_QP_MTU		= 7,
103 	MLX4_RAW_QP_MSGMAX	= 31,
104 };
105 
106 #ifndef ETH_ALEN
107 #define ETH_ALEN        6
108 #endif
109 
110 static const __be32 mlx4_ib_opcode[] = {
111 	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
112 	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
113 	[IB_WR_SEND_WITH_IMM]			= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
114 	[IB_WR_RDMA_WRITE]			= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
115 	[IB_WR_RDMA_WRITE_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
116 	[IB_WR_RDMA_READ]			= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
117 	[IB_WR_ATOMIC_CMP_AND_SWP]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
118 	[IB_WR_ATOMIC_FETCH_AND_ADD]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
119 	[IB_WR_SEND_WITH_INV]			= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
120 	[IB_WR_LOCAL_INV]			= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
121 	[IB_WR_REG_MR]				= cpu_to_be32(MLX4_OPCODE_FMR),
122 	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
123 	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
124 };
125 
126 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
127 {
128 	return container_of(mqp, struct mlx4_ib_sqp, qp);
129 }
130 
131 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
132 {
133 	if (!mlx4_is_master(dev->dev))
134 		return 0;
135 
136 	return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
137 	       qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
138 		8 * MLX4_MFUNC_MAX;
139 }
140 
141 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
142 {
143 	int proxy_sqp = 0;
144 	int real_sqp = 0;
145 	int i;
146 	/* PPF or Native -- real SQP */
147 	real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
148 		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
149 		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
150 	if (real_sqp)
151 		return 1;
152 	/* VF or PF -- proxy SQP */
153 	if (mlx4_is_mfunc(dev->dev)) {
154 		for (i = 0; i < dev->dev->caps.num_ports; i++) {
155 			if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] ||
156 			    qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) {
157 				proxy_sqp = 1;
158 				break;
159 			}
160 		}
161 	}
162 	if (proxy_sqp)
163 		return 1;
164 
165 	return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP);
166 }
167 
168 /* used for INIT/CLOSE port logic */
169 static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
170 {
171 	int proxy_qp0 = 0;
172 	int real_qp0 = 0;
173 	int i;
174 	/* PPF or Native -- real QP0 */
175 	real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
176 		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
177 		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
178 	if (real_qp0)
179 		return 1;
180 	/* VF or PF -- proxy QP0 */
181 	if (mlx4_is_mfunc(dev->dev)) {
182 		for (i = 0; i < dev->dev->caps.num_ports; i++) {
183 			if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) {
184 				proxy_qp0 = 1;
185 				break;
186 			}
187 		}
188 	}
189 	return proxy_qp0;
190 }
191 
192 static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
193 {
194 	return mlx4_buf_offset(&qp->buf, offset);
195 }
196 
197 static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
198 {
199 	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
200 }
201 
202 static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
203 {
204 	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
205 }
206 
207 /*
208  * Stamp a SQ WQE so that it is invalid if prefetched by marking the
209  * first four bytes of every 64 byte chunk with
210  *     0x7FFFFFFF | (invalid_ownership_value << 31).
211  *
212  * When the max work request size is less than or equal to the WQE
213  * basic block size, as an optimization, we can stamp all WQEs with
214  * 0xffffffff, and skip the very first chunk of each WQE.
215  */
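/*
 * Roughly: when a WR can span several WQE basic blocks, the stamp below is
 * chosen so that its top (ownership) bit is the invalid value for that
 * slot's current pass around the ring, so a descriptor prefetched by the
 * HW never looks valid; otherwise every 64-byte chunk after the first is
 * simply stamped with 0xffffffff, as described above.
 */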
216 static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
217 {
218 	__be32 *wqe;
219 	int i;
220 	int s;
221 	int ind;
222 	void *buf;
223 	__be32 stamp;
224 	struct mlx4_wqe_ctrl_seg *ctrl;
225 
226 	if (qp->sq_max_wqes_per_wr > 1) {
227 		s = roundup(size, 1U << qp->sq.wqe_shift);
228 		for (i = 0; i < s; i += 64) {
229 			ind = (i >> qp->sq.wqe_shift) + n;
230 			stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
231 						       cpu_to_be32(0xffffffff);
232 			buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
233 			wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
234 			*wqe = stamp;
235 		}
236 	} else {
237 		ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
238 		s = (ctrl->fence_size & 0x3f) << 4;
239 		for (i = 64; i < s; i += 64) {
240 			wqe = buf + i;
241 			*wqe = cpu_to_be32(0xffffffff);
242 		}
243 	}
244 }
245 
246 static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
247 {
248 	struct mlx4_wqe_ctrl_seg *ctrl;
249 	struct mlx4_wqe_inline_seg *inl;
250 	void *wqe;
251 	int s;
252 
253 	ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
254 	s = sizeof(struct mlx4_wqe_ctrl_seg);
255 
256 	if (qp->ibqp.qp_type == IB_QPT_UD) {
257 		struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
258 		struct mlx4_av *av = (struct mlx4_av *)dgram->av;
259 		memset(dgram, 0, sizeof *dgram);
260 		av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
261 		s += sizeof(struct mlx4_wqe_datagram_seg);
262 	}
263 
264 	/* Pad the remainder of the WQE with an inline data segment. */
265 	if (size > s) {
266 		inl = wqe + s;
267 		inl->byte_count = cpu_to_be32(1U << 31 | (size - s - sizeof *inl));
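		/* Note: bit 31 of byte_count flags the segment as inline; the
		 * low bits give the length of the padding data that follows. */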
268 	}
269 	ctrl->srcrb_flags = 0;
270 	ctrl->fence_size = size / 16;
271 	/*
272 	 * Make sure descriptor is fully written before setting ownership bit
273 	 * (because HW can start executing as soon as we do).
274 	 */
275 	wmb();
276 
277 	ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
278 		(n & qp->sq.wqe_cnt ? cpu_to_be32(1U << 31) : 0);
279 
280 	stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
281 }
282 
283 /* Post a NOP WQE to prevent wrap-around in the middle of a WR */
284 static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
285 {
286 	unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
287 	if (unlikely(s < qp->sq_max_wqes_per_wr)) {
288 		post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
289 		ind += s;
290 	}
291 	return ind;
292 }
293 
294 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
295 {
296 	struct ib_event event;
297 	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
298 
299 	if (type == MLX4_EVENT_TYPE_PATH_MIG)
300 		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
301 
302 	if (ibqp->event_handler) {
303 		event.device     = ibqp->device;
304 		event.element.qp = ibqp;
305 		switch (type) {
306 		case MLX4_EVENT_TYPE_PATH_MIG:
307 			event.event = IB_EVENT_PATH_MIG;
308 			break;
309 		case MLX4_EVENT_TYPE_COMM_EST:
310 			event.event = IB_EVENT_COMM_EST;
311 			break;
312 		case MLX4_EVENT_TYPE_SQ_DRAINED:
313 			event.event = IB_EVENT_SQ_DRAINED;
314 			break;
315 		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
316 			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
317 			break;
318 		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
319 			event.event = IB_EVENT_QP_FATAL;
320 			break;
321 		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
322 			event.event = IB_EVENT_PATH_MIG_ERR;
323 			break;
324 		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
325 			event.event = IB_EVENT_QP_REQ_ERR;
326 			break;
327 		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
328 			event.event = IB_EVENT_QP_ACCESS_ERR;
329 			break;
330 		default:
331 			pr_warn("Unexpected event type %d "
332 			       "on QP %06x\n", type, qp->qpn);
333 			return;
334 		}
335 
336 		ibqp->event_handler(&event, ibqp->qp_context);
337 	}
338 }
339 
340 static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
341 {
342 	/*
343 	 * UD WQEs must have a datagram segment.
344 	 * RC and UC WQEs might have a remote address segment.
345 	 * MLX WQEs need two extra inline data segments (for the UD
346 	 * header and space for the ICRC).
347 	 */
348 	switch (type) {
349 	case MLX4_IB_QPT_UD:
350 		return sizeof (struct mlx4_wqe_ctrl_seg) +
351 			sizeof (struct mlx4_wqe_datagram_seg) +
352 			((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
353 	case MLX4_IB_QPT_PROXY_SMI_OWNER:
354 	case MLX4_IB_QPT_PROXY_SMI:
355 	case MLX4_IB_QPT_PROXY_GSI:
356 		return sizeof (struct mlx4_wqe_ctrl_seg) +
357 			sizeof (struct mlx4_wqe_datagram_seg) + 64;
358 	case MLX4_IB_QPT_TUN_SMI_OWNER:
359 	case MLX4_IB_QPT_TUN_GSI:
360 		return sizeof (struct mlx4_wqe_ctrl_seg) +
361 			sizeof (struct mlx4_wqe_datagram_seg);
362 
363 	case MLX4_IB_QPT_UC:
364 		return sizeof (struct mlx4_wqe_ctrl_seg) +
365 			sizeof (struct mlx4_wqe_raddr_seg);
366 	case MLX4_IB_QPT_RC:
367 		return sizeof (struct mlx4_wqe_ctrl_seg) +
368 			sizeof (struct mlx4_wqe_masked_atomic_seg) +
369 			sizeof (struct mlx4_wqe_raddr_seg);
370 	case MLX4_IB_QPT_SMI:
371 	case MLX4_IB_QPT_GSI:
372 		return sizeof (struct mlx4_wqe_ctrl_seg) +
373 			ALIGN(MLX4_IB_UD_HEADER_SIZE +
374 			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
375 					   MLX4_INLINE_ALIGN) *
376 			      sizeof (struct mlx4_wqe_inline_seg),
377 			      sizeof (struct mlx4_wqe_data_seg)) +
378 			ALIGN(4 +
379 			      sizeof (struct mlx4_wqe_inline_seg),
380 			      sizeof (struct mlx4_wqe_data_seg));
381 	default:
382 		return sizeof (struct mlx4_wqe_ctrl_seg);
383 	}
384 }
385 
386 static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
387 		       int is_user, int has_rq, struct mlx4_ib_qp *qp)
388 {
389 	/* Sanity check RQ size before proceeding */
390 	if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
391 	    cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
392 		return -EINVAL;
393 
394 	if (!has_rq) {
395 		if (cap->max_recv_wr)
396 			return -EINVAL;
397 
398 		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
399 	} else {
400 		/* HW requires >= 1 RQ entry with >= 1 gather entry */
401 		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
402 			return -EINVAL;
403 
404 		qp->rq.wqe_cnt	 = roundup_pow_of_two(max(1U, cap->max_recv_wr));
405 		qp->rq.max_gs	 = roundup_pow_of_two(max(1U, cap->max_recv_sge));
406 		qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
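		/* e.g. (illustrative numbers): max_recv_sge = 3 rounds up to 4
		 * gather entries, and 4 * sizeof(struct mlx4_wqe_data_seg) =
		 * 64 bytes gives wqe_shift = 6 */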
407 	}
408 
409 	/* leave userspace return values as they were, so as not to break ABI */
410 	if (is_user) {
411 		cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
412 		cap->max_recv_sge = qp->rq.max_gs;
413 	} else {
414 		cap->max_recv_wr  = qp->rq.max_post =
415 			min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
416 		cap->max_recv_sge = min(qp->rq.max_gs,
417 					min(dev->dev->caps.max_sq_sg,
418 					    dev->dev->caps.max_rq_sg));
419 	}
420 
421 	return 0;
422 }
423 
424 static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
425 			      enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp,
426 			      bool shrink_wqe)
427 {
428 	int s;
429 
430 	/* Sanity check SQ size before proceeding */
431 	if (cap->max_send_wr  > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
432 	    cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
433 	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
434 	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
435 		return -EINVAL;
436 
437 	/*
438 	 * For MLX transport we need 2 extra S/G entries:
439 	 * one for the header and one for the checksum at the end
440 	 */
441 	if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI ||
442 	     type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) &&
443 	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
444 		return -EINVAL;
445 
446 	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
447 		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
448 		send_wqe_overhead(type, qp->flags);
449 
450 	if (s > dev->dev->caps.max_sq_desc_sz)
451 		return -EINVAL;
452 
453 	/*
454 	 * Hermon supports shrinking WQEs, such that a single work
455 	 * request can include multiple units of 1 << wqe_shift.  This
456 	 * way, work requests can differ in size, and do not have to
457 	 * be a power of 2 in size, saving memory and speeding up send
458 	 * WR posting.  Unfortunately, if we do this then the
459 	 * wqe_index field in CQEs can't be used to look up the WR ID
460 	 * anymore, so we do this only if selective signaling is off.
461 	 *
462 	 * Further, on 32-bit platforms, we can't use vmap() to make
463 	 * the QP buffer virtually contiguous.  Thus we have to use
464 	 * constant-sized WRs to make sure a WR is always fully within
465 	 * a single page-sized chunk.
466 	 *
467 	 * Finally, we use NOP work requests to pad the end of the
468 	 * work queue, to avoid wrap-around in the middle of a WR.  We
469 	 * set NEC bit to avoid getting completions with error for
470 	 * these NOP WRs, but since NEC is only supported starting
471 	 * with firmware 2.2.232, we use constant-sized WRs for older
472 	 * firmware.
473 	 *
474 	 * And, since MLX QPs only support SEND, we use constant-sized
475 	 * WRs in this case.
476 	 *
477 	 * We look for the smallest value of wqe_shift such that the
478 	 * resulting number of wqes does not exceed device
479 	 * capabilities.
480 	 *
481 	 * We set WQE size to at least 64 bytes, this way stamping
482 	 * invalidates each WQE.
483 	 */
484 	if (shrink_wqe && dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
485 	    qp->sq_signal_bits && BITS_PER_LONG == 64 &&
486 	    type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
487 	    !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
488 		      MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER)))
489 		qp->sq.wqe_shift = ilog2(64);
490 	else
491 		qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
492 
493 	for (;;) {
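	/*
	 * Worked example (illustrative numbers only): with s = 200 bytes and
	 * wqe_shift = 6 (64-byte units), sq_max_wqes_per_wr = DIV_ROUND_UP(200, 64)
	 * = 4 and sq_spare_wqes = (2048 >> 6) + 4 = 36, so for max_send_wr = 100
	 * the loop below tries wqe_cnt = roundup_pow_of_two(100 * 4 + 36) = 512;
	 * if that exceeds max_wqes, wqe_shift is bumped and the math is redone.
	 */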
494 		qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);
495 
496 		/*
497 		 * We need to leave 2 KB + 1 WR of headroom in the SQ to
498 		 * allow HW to prefetch.
499 		 */
500 		qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
501 		qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
502 						    qp->sq_max_wqes_per_wr +
503 						    qp->sq_spare_wqes);
504 
505 		if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
506 			break;
507 
508 		if (qp->sq_max_wqes_per_wr <= 1)
509 			return -EINVAL;
510 
511 		++qp->sq.wqe_shift;
512 	}
513 
514 	qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
515 			     (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
516 			 send_wqe_overhead(type, qp->flags)) /
517 		sizeof (struct mlx4_wqe_data_seg);
518 
519 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
520 		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
521 	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
522 		qp->rq.offset = 0;
523 		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
524 	} else {
525 		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
526 		qp->sq.offset = 0;
527 	}
528 
529 	cap->max_send_wr  = qp->sq.max_post =
530 		(qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
531 	cap->max_send_sge = min(qp->sq.max_gs,
532 				min(dev->dev->caps.max_sq_sg,
533 				    dev->dev->caps.max_rq_sg));
534 	/* We don't support inline sends for kernel QPs (yet) */
535 	cap->max_inline_data = 0;
536 
537 	return 0;
538 }
539 
540 static int set_user_sq_size(struct mlx4_ib_dev *dev,
541 			    struct mlx4_ib_qp *qp,
542 			    struct mlx4_ib_create_qp *ucmd)
543 {
544 	/* Sanity check SQ size before proceeding */
545 	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes	 ||
546 	    ucmd->log_sq_stride >
547 		ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
548 	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
549 		return -EINVAL;
550 
551 	qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
552 	qp->sq.wqe_shift = ucmd->log_sq_stride;
553 
554 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
555 		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
556 
557 	return 0;
558 }
559 
560 static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
561 {
562 	int i;
563 
564 	qp->sqp_proxy_rcv =
565 		kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt,
566 			GFP_KERNEL);
567 	if (!qp->sqp_proxy_rcv)
568 		return -ENOMEM;
569 	for (i = 0; i < qp->rq.wqe_cnt; i++) {
570 		qp->sqp_proxy_rcv[i].addr =
571 			kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr),
572 				GFP_KERNEL);
573 		if (!qp->sqp_proxy_rcv[i].addr)
574 			goto err;
575 		qp->sqp_proxy_rcv[i].map =
576 			ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
577 					  sizeof (struct mlx4_ib_proxy_sqp_hdr),
578 					  DMA_FROM_DEVICE);
579 		if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
580 			kfree(qp->sqp_proxy_rcv[i].addr);
581 			goto err;
582 		}
583 	}
584 	return 0;
585 
586 err:
587 	while (i > 0) {
588 		--i;
589 		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
590 				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
591 				    DMA_FROM_DEVICE);
592 		kfree(qp->sqp_proxy_rcv[i].addr);
593 	}
594 	kfree(qp->sqp_proxy_rcv);
595 	qp->sqp_proxy_rcv = NULL;
596 	return -ENOMEM;
597 }
598 
599 static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
600 {
601 	int i;
602 
603 	for (i = 0; i < qp->rq.wqe_cnt; i++) {
604 		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
605 				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
606 				    DMA_FROM_DEVICE);
607 		kfree(qp->sqp_proxy_rcv[i].addr);
608 	}
609 	kfree(qp->sqp_proxy_rcv);
610 }
611 
612 static int qp_has_rq(struct ib_qp_init_attr *attr)
613 {
614 	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
615 		return 0;
616 
617 	return !attr->srq;
618 }
619 
620 static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
621 {
622 	int i;
623 	for (i = 0; i < dev->caps.num_ports; i++) {
624 		if (qpn == dev->caps.qp0_proxy[i])
625 			return !!dev->caps.qp0_qkey[i];
626 	}
627 	return 0;
628 }
629 
630 static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
631 				    struct mlx4_ib_qp *qp)
632 {
633 	mutex_lock(&dev->counters_table[qp->port - 1].mutex);
634 	mlx4_counter_free(dev->dev, qp->counter_index->index);
635 	list_del(&qp->counter_index->list);
636 	mutex_unlock(&dev->counters_table[qp->port - 1].mutex);
637 
638 	kfree(qp->counter_index);
639 	qp->counter_index = NULL;
640 }
641 
642 static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
643 			    struct ib_qp_init_attr *init_attr,
644 			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp,
645 			    gfp_t gfp)
646 {
647 	int qpn;
648 	int err;
649 	struct ib_qp_cap backup_cap;
650 	struct mlx4_ib_sqp *sqp;
651 	struct mlx4_ib_qp *qp;
652 	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
653 	struct mlx4_ib_cq *mcq;
654 	unsigned long flags;
655 
656 	/* When tunneling special qps, we use a plain UD qp */
657 	if (sqpn) {
658 		if (mlx4_is_mfunc(dev->dev) &&
659 		    (!mlx4_is_master(dev->dev) ||
660 		     !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) {
661 			if (init_attr->qp_type == IB_QPT_GSI)
662 				qp_type = MLX4_IB_QPT_PROXY_GSI;
663 			else {
664 				if (mlx4_is_master(dev->dev) ||
665 				    qp0_enabled_vf(dev->dev, sqpn))
666 					qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
667 				else
668 					qp_type = MLX4_IB_QPT_PROXY_SMI;
669 			}
670 		}
671 		qpn = sqpn;
672 		/* add extra sg entry for tunneling */
673 		init_attr->cap.max_recv_sge++;
674 	} else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) {
675 		struct mlx4_ib_qp_tunnel_init_attr *tnl_init =
676 			container_of(init_attr,
677 				     struct mlx4_ib_qp_tunnel_init_attr, init_attr);
678 		if ((tnl_init->proxy_qp_type != IB_QPT_SMI &&
679 		     tnl_init->proxy_qp_type != IB_QPT_GSI)   ||
680 		    !mlx4_is_master(dev->dev))
681 			return -EINVAL;
682 		if (tnl_init->proxy_qp_type == IB_QPT_GSI)
683 			qp_type = MLX4_IB_QPT_TUN_GSI;
684 		else if (tnl_init->slave == mlx4_master_func_num(dev->dev) ||
685 			 mlx4_vf_smi_enabled(dev->dev, tnl_init->slave,
686 					     tnl_init->port))
687 			qp_type = MLX4_IB_QPT_TUN_SMI_OWNER;
688 		else
689 			qp_type = MLX4_IB_QPT_TUN_SMI;
690 		/* we are definitely in the PPF here, since we are creating
691 		 * tunnel QPs. base_tunnel_sqpn is therefore valid. */
692 		qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave
693 			+ tnl_init->proxy_qp_type * 2 + tnl_init->port - 1;
694 		sqpn = qpn;
695 	}
696 
697 	if (!*caller_qp) {
698 		if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
699 		    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
700 				MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
701 			sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp);
702 			if (!sqp)
703 				return -ENOMEM;
704 			qp = &sqp->qp;
705 			qp->pri.vid = 0xFFFF;
706 			qp->alt.vid = 0xFFFF;
707 		} else {
708 			qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp);
709 			if (!qp)
710 				return -ENOMEM;
711 			qp->pri.vid = 0xFFFF;
712 			qp->alt.vid = 0xFFFF;
713 		}
714 	} else
715 		qp = *caller_qp;
716 
717 	qp->mlx4_ib_qp_type = qp_type;
718 
719 	mutex_init(&qp->mutex);
720 	spin_lock_init(&qp->sq.lock);
721 	spin_lock_init(&qp->rq.lock);
722 	INIT_LIST_HEAD(&qp->gid_list);
723 	INIT_LIST_HEAD(&qp->steering_rules);
724 
725 	qp->state	 = IB_QPS_RESET;
726 	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
727 		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
728 
729 	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp);
730 	if (err)
731 		goto err;
732 
733 	if (pd->uobject) {
734 		struct mlx4_ib_create_qp ucmd;
735 
736 		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
737 			err = -EFAULT;
738 			goto err;
739 		}
740 
741 		qp->sq_no_prefetch = ucmd.sq_no_prefetch;
742 
743 		err = set_user_sq_size(dev, qp, &ucmd);
744 		if (err)
745 			goto err;
746 
747 		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
748 				       qp->buf_size, 0, 0);
749 		if (IS_ERR(qp->umem)) {
750 			err = PTR_ERR(qp->umem);
751 			goto err;
752 		}
753 
754 		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
755 				    ilog2(qp->umem->page_size), &qp->mtt);
756 		if (err)
757 			goto err_buf;
758 
759 		err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
760 		if (err)
761 			goto err_mtt;
762 
763 		if (qp_has_rq(init_attr)) {
764 			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
765 						  ucmd.db_addr, &qp->db);
766 			if (err)
767 				goto err_mtt;
768 		}
769 	} else {
770 		qp->sq_no_prefetch = 0;
771 
772 		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
773 			qp->flags |= MLX4_IB_QP_LSO;
774 
775 		if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
776 			if (dev->steering_support ==
777 			    MLX4_STEERING_MODE_DEVICE_MANAGED)
778 				qp->flags |= MLX4_IB_QP_NETIF;
779 			else
780 				goto err;
781 		}
782 
783 		memcpy(&backup_cap, &init_attr->cap, sizeof(backup_cap));
784 		err = set_kernel_sq_size(dev, &init_attr->cap,
785 					 qp_type, qp, true);
786 		if (err)
787 			goto err;
788 
789 		if (qp_has_rq(init_attr)) {
790 			err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp);
791 			if (err)
792 				goto err;
793 
794 			*qp->db.db = 0;
795 		}
796 
797 		if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
798 				   &qp->buf, gfp)) {
799 			memcpy(&init_attr->cap, &backup_cap,
800 			       sizeof(backup_cap));
801 			err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
802 						 qp, false);
803 			if (err)
804 				goto err_db;
805 
806 			if (mlx4_buf_alloc(dev->dev, qp->buf_size,
807 					   PAGE_SIZE * 2, &qp->buf, gfp)) {
808 				err = -ENOMEM;
809 				goto err_db;
810 			}
811 		}
812 
813 		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
814 				    &qp->mtt);
815 		if (err)
816 			goto err_buf;
817 
818 		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp);
819 		if (err)
820 			goto err_mtt;
821 
822 		qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(u64),
823 					gfp | __GFP_NOWARN);
824 		if (!qp->sq.wrid)
825 			qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
826 						gfp, 0 /*PAGE_KERNEL*/);
827 		qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(u64),
828 					gfp | __GFP_NOWARN);
829 		if (!qp->rq.wrid)
830 			qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
831 						gfp, 0 /*PAGE_KERNEL*/);
832 		if (!qp->sq.wrid || !qp->rq.wrid) {
833 			err = -ENOMEM;
834 			goto err_wrid;
835 		}
836 	}
837 
838 	if (sqpn) {
839 		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
840 		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
841 			if (alloc_proxy_bufs(pd->device, qp)) {
842 				err = -ENOMEM;
843 				goto err_wrid;
844 			}
845 		}
846 	} else {
847 		/* Raw packet QPNs may not have bits 6,7 set in their qp_num;
848 		 * otherwise, the WQE BlueFlame setup flow wrongly causes
849 		 * VLAN insertion. */
850 		if (init_attr->qp_type == IB_QPT_RAW_PACKET)
851 			err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn,
852 						    (init_attr->cap.max_send_wr ?
853 						     MLX4_RESERVE_ETH_BF_QP : 0) |
854 						    (init_attr->cap.max_recv_wr ?
855 						     MLX4_RESERVE_A0_QP : 0));
856 		else
857 			if (qp->flags & MLX4_IB_QP_NETIF)
858 				err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
859 			else
860 				err = mlx4_qp_reserve_range(dev->dev, 1, 1,
861 							    &qpn, 0);
862 		if (err)
863 			goto err_proxy;
864 	}
865 
866 	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
867 		qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
868 
869 	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp);
870 	if (err)
871 		goto err_qpn;
872 
873 	if (init_attr->qp_type == IB_QPT_XRC_TGT)
874 		qp->mqp.qpn |= (1 << 23);
875 
876 	/*
877 	 * Hardware wants QPN written in big-endian order (after
878 	 * shifting) for send doorbell.  Precompute this value to save
879 	 * a little bit when posting sends.
880 	 */
881 	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
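	/*
	 * For example (illustrative value): qpn 0x000abc shifted left by 8 is
	 * 0x000abc00, and swab32() of that is 0x00bc0a00, i.e. the shifted QPN
	 * laid out in big-endian byte order when stored by a little-endian CPU.
	 */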
882 
883 	qp->mqp.event = mlx4_ib_qp_event;
884 	if (!*caller_qp)
885 		*caller_qp = qp;
886 
887 	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
888 	mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
889 			 to_mcq(init_attr->recv_cq));
890 	/* Keep this QP on the device-wide list; the reset flow uses that
891 	 * list to reach all QPs when the device is reset.
892 	 */
893 	list_add_tail(&qp->qps_list, &dev->qp_list);
894 	/* Likewise keep the QP on its send and receive CQ lists so the
895 	 * reset flow can find it through either CQ.
896 	 */
897 	mcq = to_mcq(init_attr->send_cq);
898 	list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
899 	mcq = to_mcq(init_attr->recv_cq);
900 	list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
901 	mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
902 			   to_mcq(init_attr->recv_cq));
903 	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
904 	return 0;
905 
906 err_qpn:
907 	if (!sqpn) {
908 		if (qp->flags & MLX4_IB_QP_NETIF)
909 			mlx4_ib_steer_qp_free(dev, qpn, 1);
910 		else
911 			mlx4_qp_release_range(dev->dev, qpn, 1);
912 	}
913 err_proxy:
914 	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
915 		free_proxy_bufs(pd->device, qp);
916 err_wrid:
917 	if (pd->uobject) {
918 		if (qp_has_rq(init_attr))
919 			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
920 	} else {
921 		kvfree(qp->sq.wrid);
922 		kvfree(qp->rq.wrid);
923 	}
924 
925 err_mtt:
926 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
927 
928 err_buf:
929 	if (pd->uobject)
930 		ib_umem_release(qp->umem);
931 	else
932 		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
933 
934 err_db:
935 	if (!pd->uobject && qp_has_rq(init_attr))
936 		mlx4_db_free(dev->dev, &qp->db);
937 
938 err:
939 	if (!*caller_qp)
940 		kfree(qp);
941 	return err;
942 }
943 
944 static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
945 {
946 	switch (state) {
947 	case IB_QPS_RESET:	return MLX4_QP_STATE_RST;
948 	case IB_QPS_INIT:	return MLX4_QP_STATE_INIT;
949 	case IB_QPS_RTR:	return MLX4_QP_STATE_RTR;
950 	case IB_QPS_RTS:	return MLX4_QP_STATE_RTS;
951 	case IB_QPS_SQD:	return MLX4_QP_STATE_SQD;
952 	case IB_QPS_SQE:	return MLX4_QP_STATE_SQER;
953 	case IB_QPS_ERR:	return MLX4_QP_STATE_ERR;
954 	default:		return -1;
955 	}
956 }
957 
958 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
959 	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
960 {
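	/* Take the two CQ locks in a fixed order (lower CQN first) so that any
	 * two callers locking the same pair of CQs cannot deadlock. */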
961 	if (send_cq == recv_cq) {
962 		spin_lock(&send_cq->lock);
963 		__acquire(&recv_cq->lock);
964 	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
965 		spin_lock(&send_cq->lock);
966 		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
967 	} else {
968 		spin_lock(&recv_cq->lock);
969 		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
970 	}
971 }
972 
973 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
974 	__releases(&send_cq->lock) __releases(&recv_cq->lock)
975 {
976 	if (send_cq == recv_cq) {
977 		__release(&recv_cq->lock);
978 		spin_unlock(&send_cq->lock);
979 	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
980 		spin_unlock(&recv_cq->lock);
981 		spin_unlock(&send_cq->lock);
982 	} else {
983 		spin_unlock(&send_cq->lock);
984 		spin_unlock(&recv_cq->lock);
985 	}
986 }
987 
988 static void del_gid_entries(struct mlx4_ib_qp *qp)
989 {
990 	struct mlx4_ib_gid_entry *ge, *tmp;
991 
992 	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
993 		list_del(&ge->list);
994 		kfree(ge);
995 	}
996 }
997 
998 static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
999 {
1000 	if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
1001 		return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
1002 	else
1003 		return to_mpd(qp->ibqp.pd);
1004 }
1005 
1006 static void get_cqs(struct mlx4_ib_qp *qp,
1007 		    struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
1008 {
1009 	switch (qp->ibqp.qp_type) {
1010 	case IB_QPT_XRC_TGT:
1011 		*send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
1012 		*recv_cq = *send_cq;
1013 		break;
1014 	case IB_QPT_XRC_INI:
1015 		*send_cq = to_mcq(qp->ibqp.send_cq);
1016 		*recv_cq = *send_cq;
1017 		break;
1018 	default:
1019 		*send_cq = to_mcq(qp->ibqp.send_cq);
1020 		*recv_cq = to_mcq(qp->ibqp.recv_cq);
1021 		break;
1022 	}
1023 }
1024 
1025 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
1026 			      struct ib_udata *udata)
1027 {
1028 	struct mlx4_ib_cq *send_cq, *recv_cq;
1029 	unsigned long flags;
1030 
1031 	if (qp->state != IB_QPS_RESET) {
1032 		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
1033 				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
1034 			pr_warn("modify QP %06x to RESET failed.\n",
1035 			       qp->mqp.qpn);
1036 		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
1037 			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
1038 			qp->pri.smac = 0;
1039 			qp->pri.smac_port = 0;
1040 		}
1041 		if (qp->alt.smac) {
1042 			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
1043 			qp->alt.smac = 0;
1044 		}
1045 		if (qp->pri.vid < 0x1000) {
1046 			mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
1047 			qp->pri.vid = 0xFFFF;
1048 			qp->pri.candidate_vid = 0xFFFF;
1049 			qp->pri.update_vid = 0;
1050 		}
1051 		if (qp->alt.vid < 0x1000) {
1052 			mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
1053 			qp->alt.vid = 0xFFFF;
1054 			qp->alt.candidate_vid = 0xFFFF;
1055 			qp->alt.update_vid = 0;
1056 		}
1057 	}
1058 
1059 	get_cqs(qp, &send_cq, &recv_cq);
1060 
1061 	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
1062 	mlx4_ib_lock_cqs(send_cq, recv_cq);
1063 
1064 	/* del from lists under both locks above to protect reset flow paths */
1065 	list_del(&qp->qps_list);
1066 	list_del(&qp->cq_send_list);
1067 	list_del(&qp->cq_recv_list);
1068 	if (!udata) {
1069 		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
1070 				 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
1071 		if (send_cq != recv_cq)
1072 			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1073 	}
1074 
1075 	mlx4_qp_remove(dev->dev, &qp->mqp);
1076 
1077 	mlx4_ib_unlock_cqs(send_cq, recv_cq);
1078 	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
1079 
1080 	mlx4_qp_free(dev->dev, &qp->mqp);
1081 
1082 	if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
1083 		if (qp->flags & MLX4_IB_QP_NETIF)
1084 			mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
1085 		else
1086 			mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
1087 	}
1088 
1089 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
1090 
1091 	if (udata) {
1092 		if (qp->rq.wqe_cnt) {
1093 			struct mlx4_ib_ucontext *mcontext =
1094 				rdma_udata_to_drv_context(
1095 					udata,
1096 					struct mlx4_ib_ucontext,
1097 					ibucontext);
1098 
1099 			mlx4_ib_db_unmap_user(mcontext, &qp->db);
1100 		}
1101 	} else {
1102 		kvfree(qp->sq.wrid);
1103 		kvfree(qp->rq.wrid);
1104 		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
1105 		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
1106 			free_proxy_bufs(&dev->ib_dev, qp);
1107 		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
1108 		if (qp->rq.wqe_cnt)
1109 			mlx4_db_free(dev->dev, &qp->db);
1110 	}
1111 	ib_umem_release(qp->umem);
1112 
1113 	del_gid_entries(qp);
1114 }
1115 
1116 static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
1117 {
1118 	/* Native or PPF */
1119 	if (!mlx4_is_mfunc(dev->dev) ||
1120 	    (mlx4_is_master(dev->dev) &&
1121 	     attr->create_flags & MLX4_IB_SRIOV_SQP)) {
1122 		return  dev->dev->phys_caps.base_sqpn +
1123 			(attr->qp_type == IB_QPT_SMI ? 0 : 2) +
1124 			attr->port_num - 1;
1125 	}
1126 	/* PF or VF -- creating proxies */
1127 	if (attr->qp_type == IB_QPT_SMI)
1128 		return dev->dev->caps.qp0_proxy[attr->port_num - 1];
1129 	else
1130 		return dev->dev->caps.qp1_proxy[attr->port_num - 1];
1131 }
1132 
1133 static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
1134 					struct ib_qp_init_attr *init_attr,
1135 					struct ib_udata *udata)
1136 {
1137 	struct mlx4_ib_qp *qp = NULL;
1138 	int err;
1139 	int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
1140 	u16 xrcdn = 0;
1141 	gfp_t gfp;
1142 
1143 	gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ?
1144 		GFP_NOIO : GFP_KERNEL;
1145 	/*
1146 	 * We only support LSO, vendor flag1, and multicast loopback blocking,
1147 	 * and only for kernel UD QPs.
1148 	 */
1149 	if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
1150 					MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
1151 					MLX4_IB_SRIOV_TUNNEL_QP |
1152 					MLX4_IB_SRIOV_SQP |
1153 					MLX4_IB_QP_NETIF |
1154 					MLX4_IB_QP_CREATE_ROCE_V2_GSI |
1155 					MLX4_IB_QP_CREATE_USE_GFP_NOIO))
1156 		return ERR_PTR(-EINVAL);
1157 
1158 	if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
1159 		if (init_attr->qp_type != IB_QPT_UD)
1160 			return ERR_PTR(-EINVAL);
1161 	}
1162 
1163 	if (init_attr->create_flags) {
1164 		if (udata && init_attr->create_flags & ~(sup_u_create_flags))
1165 			return ERR_PTR(-EINVAL);
1166 
1167 		if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
1168 						 MLX4_IB_QP_CREATE_USE_GFP_NOIO |
1169 						 MLX4_IB_QP_CREATE_ROCE_V2_GSI  |
1170 						 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) &&
1171 		     init_attr->qp_type != IB_QPT_UD) ||
1172 		    (init_attr->create_flags & MLX4_IB_SRIOV_SQP &&
1173 		     init_attr->qp_type > IB_QPT_GSI) ||
1174 		    (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI &&
1175 		     init_attr->qp_type != IB_QPT_GSI))
1176 			return ERR_PTR(-EINVAL);
1177 	}
1178 
1179 	switch (init_attr->qp_type) {
1180 	case IB_QPT_XRC_TGT:
1181 		pd = to_mxrcd(init_attr->xrcd)->pd;
1182 		xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
1183 		init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
1184 		/* fall through */
1185 	case IB_QPT_XRC_INI:
1186 		if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
1187 			return ERR_PTR(-ENOSYS);
1188 		init_attr->recv_cq = init_attr->send_cq;
1189 		/* fall through */
1190 	case IB_QPT_RC:
1191 	case IB_QPT_UC:
1192 	case IB_QPT_RAW_PACKET:
1193 		qp = kzalloc(sizeof *qp, gfp);
1194 		if (!qp)
1195 			return ERR_PTR(-ENOMEM);
1196 		qp->pri.vid = 0xFFFF;
1197 		qp->alt.vid = 0xFFFF;
1198 		/* fall through */
1199 	case IB_QPT_UD:
1200 	{
1201 		err = create_qp_common(to_mdev(pd->device), pd, init_attr,
1202 				       udata, 0, &qp, gfp);
1203 		if (err) {
1204 			kfree(qp);
1205 			return ERR_PTR(err);
1206 		}
1207 
1208 		qp->ibqp.qp_num = qp->mqp.qpn;
1209 		qp->xrcdn = xrcdn;
1210 
1211 		break;
1212 	}
1213 	case IB_QPT_SMI:
1214 	case IB_QPT_GSI:
1215 	{
1216 		int sqpn;
1217 
1218 		/* Userspace is not allowed to create special QPs: */
1219 		if (udata)
1220 			return ERR_PTR(-EINVAL);
1221 		if (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI) {
1222 			int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev, 1, 1, &sqpn, 0);
1223 
1224 			if (res)
1225 				return ERR_PTR(res);
1226 		} else {
1227 			sqpn = get_sqp_num(to_mdev(pd->device), init_attr);
1228 		}
1229 
1230 		err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
1231 				       sqpn,
1232 				       &qp, gfp);
1233 		if (err)
1234 			return ERR_PTR(err);
1235 
1236 		qp->port	= init_attr->port_num;
1237 		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 :
1238 			init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI ? sqpn : 1;
1239 		break;
1240 	}
1241 	default:
1242 		/* All other QP types are unsupported */
1243 		return ERR_PTR(-EINVAL);
1244 	}
1245 
1246 	return &qp->ibqp;
1247 }
1248 
1249 struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
1250 				struct ib_qp_init_attr *init_attr,
1251 				struct ib_udata *udata)
{
1252 	struct ib_device *device = pd ? pd->device : init_attr->xrcd->device;
1253 	struct ib_qp *ibqp;
1254 	struct mlx4_ib_dev *dev = to_mdev(device);
1255 
1256 	ibqp = _mlx4_ib_create_qp(pd, init_attr, udata);
1257 
1258 	if (!IS_ERR(ibqp) &&
1259 	    (init_attr->qp_type == IB_QPT_GSI) &&
1260 	    !(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) {
1261 		struct mlx4_ib_sqp *sqp = to_msqp((to_mqp(ibqp)));
1262 		int is_eth = rdma_cap_eth_ah(&dev->ib_dev, init_attr->port_num);
1263 
1264 		if (is_eth &&
1265 		    dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
1266 			init_attr->create_flags |= MLX4_IB_QP_CREATE_ROCE_V2_GSI;
1267 			sqp->roce_v2_gsi = ib_create_qp(pd, init_attr);
1268 
1269 			if (IS_ERR(sqp->roce_v2_gsi)) {
1270 				pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi));
1271 				sqp->roce_v2_gsi = NULL;
1272 			} else {
1273 				sqp = to_msqp(to_mqp(sqp->roce_v2_gsi));
1274 				sqp->qp.flags |= MLX4_IB_ROCE_V2_GSI_QP;
1275 			}
1276 
1277 			init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI;
1278 		}
1279 	}
1280 	return ibqp;
1281 }
1282 
1283 static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
1284 {
1285 	struct mlx4_ib_dev *dev = to_mdev(qp->device);
1286 	struct mlx4_ib_qp *mqp = to_mqp(qp);
1287 	struct mlx4_ib_pd *pd;
1288 
1289 	if (is_qp0(dev, mqp))
1290 		mlx4_CLOSE_PORT(dev->dev, mqp->port);
1291 
1292 	if (dev->qp1_proxy[mqp->port - 1] == mqp) {
1293 		mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
1294 		dev->qp1_proxy[mqp->port - 1] = NULL;
1295 		mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
1296 	}
1297 
1298 	if (mqp->counter_index)
1299 		mlx4_ib_free_qp_counter(dev, mqp);
1300 
1301 	pd = get_pd(mqp);
1302 	destroy_qp_common(dev, mqp, udata);
1303 
1304 	if (is_sqp(dev, mqp))
1305 		kfree(to_msqp(mqp));
1306 	else
1307 		kfree(mqp);
1308 
1309 	return 0;
1310 }
1311 
1312 int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
1313 {
1314 	struct mlx4_ib_qp *mqp = to_mqp(qp);
1315 
1316 	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
1317 		struct mlx4_ib_sqp *sqp = to_msqp(mqp);
1318 
1319 		if (sqp->roce_v2_gsi)
1320 			ib_destroy_qp(sqp->roce_v2_gsi);
1321 	}
1322 
1323 	return _mlx4_ib_destroy_qp(qp, udata);
1324 }
1325 
1326 static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
1327 {
1328 	switch (type) {
1329 	case MLX4_IB_QPT_RC:		return MLX4_QP_ST_RC;
1330 	case MLX4_IB_QPT_UC:		return MLX4_QP_ST_UC;
1331 	case MLX4_IB_QPT_UD:		return MLX4_QP_ST_UD;
1332 	case MLX4_IB_QPT_XRC_INI:
1333 	case MLX4_IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
1334 	case MLX4_IB_QPT_SMI:
1335 	case MLX4_IB_QPT_GSI:
1336 	case MLX4_IB_QPT_RAW_PACKET:	return MLX4_QP_ST_MLX;
1337 
1338 	case MLX4_IB_QPT_PROXY_SMI_OWNER:
1339 	case MLX4_IB_QPT_TUN_SMI_OWNER:	return (mlx4_is_mfunc(dev->dev) ?
1340 						MLX4_QP_ST_MLX : -1);
1341 	case MLX4_IB_QPT_PROXY_SMI:
1342 	case MLX4_IB_QPT_TUN_SMI:
1343 	case MLX4_IB_QPT_PROXY_GSI:
1344 	case MLX4_IB_QPT_TUN_GSI:	return (mlx4_is_mfunc(dev->dev) ?
1345 						MLX4_QP_ST_UD : -1);
1346 	default:			return -1;
1347 	}
1348 }
1349 
1350 static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
1351 				   int attr_mask)
1352 {
1353 	u8 dest_rd_atomic;
1354 	u32 access_flags;
1355 	u32 hw_access_flags = 0;
1356 
1357 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1358 		dest_rd_atomic = attr->max_dest_rd_atomic;
1359 	else
1360 		dest_rd_atomic = qp->resp_depth;
1361 
1362 	if (attr_mask & IB_QP_ACCESS_FLAGS)
1363 		access_flags = attr->qp_access_flags;
1364 	else
1365 		access_flags = qp->atomic_rd_en;
1366 
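	/* With zero responder resources only remote writes may stay enabled;
	 * remote reads and atomics require dest_rd_atomic > 0. */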
1367 	if (!dest_rd_atomic)
1368 		access_flags &= IB_ACCESS_REMOTE_WRITE;
1369 
1370 	if (access_flags & IB_ACCESS_REMOTE_READ)
1371 		hw_access_flags |= MLX4_QP_BIT_RRE;
1372 	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
1373 		hw_access_flags |= MLX4_QP_BIT_RAE;
1374 	if (access_flags & IB_ACCESS_REMOTE_WRITE)
1375 		hw_access_flags |= MLX4_QP_BIT_RWE;
1376 
1377 	return cpu_to_be32(hw_access_flags);
1378 }
1379 
1380 static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
1381 			    int attr_mask)
1382 {
1383 	if (attr_mask & IB_QP_PKEY_INDEX)
1384 		sqp->pkey_index = attr->pkey_index;
1385 	if (attr_mask & IB_QP_QKEY)
1386 		sqp->qkey = attr->qkey;
1387 	if (attr_mask & IB_QP_SQ_PSN)
1388 		sqp->send_psn = attr->sq_psn;
1389 }
1390 
1391 static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
1392 {
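	/* Per the mask below, bit 6 of sched_queue carries (port - 1),
	 * i.e. 0 for port 1 and 1 for port 2. */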
1393 	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
1394 }
1395 
1396 static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
1397 			  u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
1398 			  struct mlx4_roce_smac_vlan_info *smac_info, u8 port)
1399 {
1400 	int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
1401 		IB_LINK_LAYER_ETHERNET;
1402 	int vidx;
1403 	int smac_index;
1404 	int err;
1405 
1406 
1407 	path->grh_mylmc     = ah->src_path_bits & 0x7f;
1408 	path->rlid	    = cpu_to_be16(ah->dlid);
1409 	if (ah->static_rate) {
1410 		path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
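		/* Step down to the nearest rate the device reports as
		 * supported, never going below 2.5 Gb/s. */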
1411 		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
1412 		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
1413 			--path->static_rate;
1414 	} else
1415 		path->static_rate = 0;
1416 
1417 	if (ah->ah_flags & IB_AH_GRH) {
1418 		int real_sgid_index = mlx4_ib_gid_index_to_real_index(dev,
1419 								      port,
1420 								      ah->grh.sgid_index);
1421 
1422 		if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) {
1423 			pr_err("sgid_index (%u) too large. max is %d\n",
1424 			       real_sgid_index, dev->dev->caps.gid_table_len[port] - 1);
1425 			return -1;
1426 		}
1427 
1428 		path->grh_mylmc |= 1 << 7;
1429 		path->mgid_index = real_sgid_index;
1430 		path->hop_limit  = ah->grh.hop_limit;
1431 		path->tclass_flowlabel =
1432 			cpu_to_be32((ah->grh.traffic_class << 20) |
1433 				    (ah->grh.flow_label));
1434 		memcpy(path->rgid, ah->grh.dgid.raw, 16);
1435 	}
1436 
1437 	if (is_eth) {
1438 		if (!(ah->ah_flags & IB_AH_GRH))
1439 			return -1;
1440 
1441 		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
1442 			((port - 1) << 6) | ((ah->sl & 7) << 3);
1443 
1444 		path->feup |= MLX4_FEUP_FORCE_ETH_UP;
1445 		if (vlan_tag < 0x1000) {
1446 			if (smac_info->vid < 0x1000) {
1447 				/* both valid vlan ids */
1448 				if (smac_info->vid != vlan_tag) {
1449 					/* different VIDs.  unreg old and reg new */
1450 					err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
1451 					if (err)
1452 						return err;
1453 					smac_info->candidate_vid = vlan_tag;
1454 					smac_info->candidate_vlan_index = vidx;
1455 					smac_info->candidate_vlan_port = port;
1456 					smac_info->update_vid = 1;
1457 					path->vlan_index = vidx;
1458 				} else {
1459 					path->vlan_index = smac_info->vlan_index;
1460 				}
1461 			} else {
1462 				/* no current vlan tag in qp */
1463 				err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
1464 				if (err)
1465 					return err;
1466 				smac_info->candidate_vid = vlan_tag;
1467 				smac_info->candidate_vlan_index = vidx;
1468 				smac_info->candidate_vlan_port = port;
1469 				smac_info->update_vid = 1;
1470 				path->vlan_index = vidx;
1471 			}
1472 			path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
1473 			path->fl = 1 << 6;
1474 		} else {
1475 			/* no new VLAN tag; unregister any current one once modify-qp succeeds */
1476 			if (smac_info->vid < 0x1000) {
1477 				smac_info->candidate_vid = 0xFFFF;
1478 				smac_info->update_vid = 1;
1479 			}
1480 		}
1481 
1482 		/* Get the smac_index for RoCE use.
1483 		 * If no smac has been assigned yet, register one.
1484 		 * If one was already assigned but the new MAC differs,
1485 		 * unregister the old one and register the new one.
1486 		 */
1487 		if ((!smac_info->smac && !smac_info->smac_port) ||
1488 		    smac_info->smac != smac) {
1489 			/* register candidate now, unreg if needed, after success */
1490 			smac_index = mlx4_register_mac(dev->dev, port, smac);
1491 			if (smac_index >= 0) {
1492 				smac_info->candidate_smac_index = smac_index;
1493 				smac_info->candidate_smac = smac;
1494 				smac_info->candidate_smac_port = port;
1495 			} else {
1496 				return -EINVAL;
1497 			}
1498 		} else {
1499 			smac_index = smac_info->smac_index;
1500 		}
1501 
1502 		memcpy(path->dmac, ah->dmac, 6);
1503 		path->ackto = MLX4_IB_LINK_TYPE_ETH;
1504 		/* put MAC table smac index for IBoE */
1505 		path->grh_mylmc = (u8) (smac_index) | 0x80;
1506 	} else {
1507 		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
1508 			((port - 1) << 6) | ((ah->sl & 0xf) << 2);
1509 	}
1510 
1511 	return 0;
1512 }
1513 
1514 static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
1515 			 enum ib_qp_attr_mask qp_attr_mask,
1516 			 struct mlx4_ib_qp *mqp,
1517 			 struct mlx4_qp_path *path, u8 port,
1518 			 u16 vlan_id, u8 *smac)
1519 {
1520 	return _mlx4_set_path(dev, &qp->ah_attr,
1521 			      mlx4_mac_to_u64(smac),
1522 			      vlan_id,
1523 			      path, &mqp->pri, port);
1524 }
1525 
1526 static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
1527 			     const struct ib_qp_attr *qp,
1528 			     enum ib_qp_attr_mask qp_attr_mask,
1529 			     struct mlx4_ib_qp *mqp,
1530 			     struct mlx4_qp_path *path, u8 port)
1531 {
1532 	return _mlx4_set_path(dev, &qp->alt_ah_attr,
1533 			      0,
1534 			      0xffff,
1535 			      path, &mqp->alt, port);
1536 }
1537 
1538 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
1539 {
1540 	struct mlx4_ib_gid_entry *ge, *tmp;
1541 
1542 	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1543 		if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
1544 			ge->added = 1;
1545 			ge->port = qp->port;
1546 		}
1547 	}
1548 }
1549 
1550 static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev,
1551 				    struct mlx4_ib_qp *qp,
1552 				    struct mlx4_qp_context *context)
1553 {
1554 	u64 u64_mac;
1555 	int smac_index;
1556 
1557 	u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);
1558 
1559 	context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
1560 	if (!qp->pri.smac && !qp->pri.smac_port) {
1561 		smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
1562 		if (smac_index >= 0) {
1563 			qp->pri.candidate_smac_index = smac_index;
1564 			qp->pri.candidate_smac = u64_mac;
1565 			qp->pri.candidate_smac_port = qp->port;
1566 			context->pri_path.grh_mylmc = 0x80 | (u8) smac_index;
1567 		} else {
1568 			return -ENOENT;
1569 		}
1570 	}
1571 	return 0;
1572 }
1573 
1574 static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
1575 {
1576 	struct counter_index *new_counter_index;
1577 	int err;
1578 	u32 tmp_idx;
1579 
1580 	if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) !=
1581 	    IB_LINK_LAYER_ETHERNET ||
1582 	    !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) ||
1583 	    !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK))
1584 		return 0;
1585 
1586 	err = mlx4_counter_alloc(dev->dev, &tmp_idx);
1587 	if (err)
1588 		return err;
1589 
1590 	new_counter_index = kmalloc(sizeof(*new_counter_index), GFP_KERNEL);
1591 	if (!new_counter_index) {
1592 		mlx4_counter_free(dev->dev, tmp_idx);
1593 		return -ENOMEM;
1594 	}
1595 
1596 	new_counter_index->index = tmp_idx;
1597 	new_counter_index->allocated = 1;
1598 	qp->counter_index = new_counter_index;
1599 
1600 	mutex_lock(&dev->counters_table[qp->port - 1].mutex);
1601 	list_add_tail(&new_counter_index->list,
1602 		      &dev->counters_table[qp->port - 1].counters_list);
1603 	mutex_unlock(&dev->counters_table[qp->port - 1].mutex);
1604 
1605 	return 0;
1606 }
1607 
1608 enum {
1609 	MLX4_QPC_ROCE_MODE_1 = 0,
1610 	MLX4_QPC_ROCE_MODE_2 = 2,
1611 	MLX4_QPC_ROCE_MODE_UNDEFINED = 0xff
1612 };
1613 
1614 static u8 gid_type_to_qpc(enum ib_gid_type gid_type)
1615 {
1616 	switch (gid_type) {
1617 	case IB_GID_TYPE_ROCE:
1618 		return MLX4_QPC_ROCE_MODE_1;
1619 	case IB_GID_TYPE_ROCE_UDP_ENCAP:
1620 		return MLX4_QPC_ROCE_MODE_2;
1621 	default:
1622 		return MLX4_QPC_ROCE_MODE_UNDEFINED;
1623 	}
1624 }
1625 
1626 static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1627 			       const struct ib_qp_attr *attr, int attr_mask,
1628 			       enum ib_qp_state cur_state,
1629 			       enum ib_qp_state new_state,
1630 			       struct ib_udata *udata)
1631 {
1632 	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
1633 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
1634 	struct mlx4_ib_pd *pd;
1635 	struct mlx4_ib_cq *send_cq, *recv_cq;
1636 	struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
1637 		udata, struct mlx4_ib_ucontext, ibucontext);
1638 	struct mlx4_qp_context *context;
1639 	enum mlx4_qp_optpar optpar = 0;
1640 	int sqd_event;
1641 	int steer_qp = 0;
1642 	int err = -EINVAL;
1643 	int counter_index;
1644 
1645 	/* APM is not supported under RoCE */
1646 	if (attr_mask & IB_QP_ALT_PATH &&
1647 	    rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
1648 	    IB_LINK_LAYER_ETHERNET)
1649 		return -ENOTSUPP;
1650 
1651 	context = kzalloc(sizeof *context, GFP_KERNEL);
1652 	if (!context)
1653 		return -ENOMEM;
1654 
1655 	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
1656 				     (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));
1657 
1658 	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
1659 		context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
1660 	else {
1661 		optpar |= MLX4_QP_OPTPAR_PM_STATE;
1662 		switch (attr->path_mig_state) {
1663 		case IB_MIG_MIGRATED:
1664 			context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
1665 			break;
1666 		case IB_MIG_REARM:
1667 			context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
1668 			break;
1669 		case IB_MIG_ARMED:
1670 			context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
1671 			break;
1672 		}
1673 	}
1674 
1675 	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
1676 		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
1677 	else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
1678 		context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
1679 	else if (ibqp->qp_type == IB_QPT_UD) {
1680 		if (qp->flags & MLX4_IB_QP_LSO)
1681 			context->mtu_msgmax = (IB_MTU_4096 << 5) |
1682 					      ilog2(dev->dev->caps.max_gso_sz);
1683 		else
1684 			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
1685 	} else if (attr_mask & IB_QP_PATH_MTU) {
1686 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
1687 			pr_err("path MTU (%u) is invalid\n",
1688 			       attr->path_mtu);
1689 			goto out;
1690 		}
1691 		context->mtu_msgmax = (attr->path_mtu << 5) |
1692 			ilog2(dev->dev->caps.max_msg_sz);
1693 	}
1694 
1695 	if (qp->rq.wqe_cnt)
1696 		context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
1697 	context->rq_size_stride |= qp->rq.wqe_shift - 4;
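	/* e.g. (illustrative) 256 RQ entries with a 64-byte stride encode as
	 * (ilog2(256) << 3) | (6 - 4) = 0x42 */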
1698 
1699 	if (qp->sq.wqe_cnt)
1700 		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
1701 	context->sq_size_stride |= qp->sq.wqe_shift - 4;
1702 
1703 	if (new_state == IB_QPS_RESET && qp->counter_index)
1704 		mlx4_ib_free_qp_counter(dev, qp);
1705 
1706 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
1707 		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
1708 		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
1709 		if (ibqp->qp_type == IB_QPT_RAW_PACKET)
1710 			context->param3 |= cpu_to_be32(1 << 30);
1711 	}
1712 
1713 	if (ucontext)
1714 		context->usr_page = cpu_to_be32(
1715 			mlx4_to_hw_uar_index(dev->dev, ucontext->uar.index));
1716 	else
1717 		context->usr_page = cpu_to_be32(
1718 			mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
1719 
1720 	if (attr_mask & IB_QP_DEST_QPN)
1721 		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
1722 
1723 	if (attr_mask & IB_QP_PORT) {
1724 		if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
1725 		    !(attr_mask & IB_QP_AV)) {
1726 			mlx4_set_sched(&context->pri_path, attr->port_num);
1727 			optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
1728 		}
1729 	}
1730 
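	/*
	 * On the INIT->RTR transition, bind a flow counter to the QP:
	 * prefer a per-QP loopback counter if one could be allocated,
	 * otherwise the port's default counter, and finally the device
	 * sink counter.  NETIF QPs are also registered for steering here,
	 * and GSI QPs get their RoCE mode programmed.
	 */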
1731 	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
1732 		err = create_qp_lb_counter(dev, qp);
1733 		if (err)
1734 			goto out;
1735 
1736 		counter_index =
1737 			dev->counters_table[qp->port - 1].default_counter;
1738 		if (qp->counter_index)
1739 			counter_index = qp->counter_index->index;
1740 
1741 		if (counter_index != -1) {
1742 			context->pri_path.counter_index = counter_index;
1743 			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
1744 			if (qp->counter_index) {
1745 				context->pri_path.fl |=
1746 					MLX4_FL_ETH_SRC_CHECK_MC_LB;
1747 				context->pri_path.vlan_control |=
1748 					MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
1749 			}
1750 		} else
1751 			context->pri_path.counter_index =
1752 				MLX4_SINK_COUNTER_INDEX(dev->dev);
1753 
1754 		if (qp->flags & MLX4_IB_QP_NETIF) {
1755 			mlx4_ib_steer_qp_reg(dev, qp, 1);
1756 			steer_qp = 1;
1757 		}
1758 
1759 		if (ibqp->qp_type == IB_QPT_GSI) {
1760 			enum ib_gid_type gid_type = qp->flags & MLX4_IB_ROCE_V2_GSI_QP ?
1761 				IB_GID_TYPE_ROCE_UDP_ENCAP : IB_GID_TYPE_ROCE;
1762 			u8 qpc_roce_mode = gid_type_to_qpc(gid_type);
1763 
1764 			context->rlkey_roce_mode |= (qpc_roce_mode << 6);
1765 		}
1766 	}
1767 
1768 	if (attr_mask & IB_QP_PKEY_INDEX) {
1769 		if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
1770 			context->pri_path.disable_pkey_check = 0x40;
1771 		context->pri_path.pkey_index = attr->pkey_index;
1772 		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
1773 	}
1774 
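	/*
	 * Address vector update: on RoCE ports resolve the source GID
	 * entry first (which also yields the VLAN id and source MAC),
	 * then program the primary path and, on INIT->RTR, the RoCE
	 * version for this QP.
	 */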
1775 	if (attr_mask & IB_QP_AV) {
1776 		u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 :
1777 			attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1778 		union ib_gid gid;
1779 		struct ib_gid_attr gid_attr;
1780 		u16 vlan = 0xffff;
1781 		u8 smac[ETH_ALEN];
1782 		int status = 0;
1783 		int is_eth = rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
1784 			attr->ah_attr.ah_flags & IB_AH_GRH;
1785 
1786 		if (is_eth) {
1787 			int index = attr->ah_attr.grh.sgid_index;
1788 
1789 			status = ib_get_cached_gid(ibqp->device, port_num,
1790 						   index, &gid, &gid_attr);
1791 			if (!status && !memcmp(&gid, &zgid, sizeof(gid)))
1792 				status = -ENOENT;
1793 			if (!status && gid_attr.ndev) {
1794 				vlan = rdma_vlan_dev_vlan_id(gid_attr.ndev);
1795 				memcpy(smac, IF_LLADDR(gid_attr.ndev), ETH_ALEN);
1796 				if_rele(gid_attr.ndev);
1797 			}
1798 		}
1799 		if (status)
1800 			goto out;
1801 
1802 		if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
1803 				  port_num, vlan, smac))
1804 			goto out;
1805 
1806 		optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
1807 			   MLX4_QP_OPTPAR_SCHED_QUEUE);
1808 
1809 		if (is_eth &&
1810 		    (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR)) {
1811 			u8 qpc_roce_mode = gid_type_to_qpc(gid_attr.gid_type);
1812 
1813 			if (qpc_roce_mode == MLX4_QPC_ROCE_MODE_UNDEFINED) {
1814 				err = -EINVAL;
1815 				goto out;
1816 			}
1817 			context->rlkey_roce_mode |= (qpc_roce_mode << 6);
1818 		}
1819 
1820 	}
1821 
1822 	if (attr_mask & IB_QP_TIMEOUT) {
1823 		context->pri_path.ackto |= attr->timeout << 3;
1824 		optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
1825 	}
1826 
1827 	if (attr_mask & IB_QP_ALT_PATH) {
1828 		if (attr->alt_port_num == 0 ||
1829 		    attr->alt_port_num > dev->dev->caps.num_ports)
1830 			goto out;
1831 
1832 		if (attr->alt_pkey_index >=
1833 		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
1834 			goto out;
1835 
1836 		if (mlx4_set_alt_path(dev, attr, attr_mask, qp,
1837 				      &context->alt_path,
1838 				      attr->alt_port_num))
1839 			goto out;
1840 
1841 		context->alt_path.pkey_index = attr->alt_pkey_index;
1842 		context->alt_path.ackto = attr->alt_timeout << 3;
1843 		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
1844 	}
1845 
1846 	pd = get_pd(qp);
1847 	get_cqs(qp, &send_cq, &recv_cq);
1848 	context->pd       = cpu_to_be32(pd->pdn);
1849 	context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
1850 	context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
1851 	context->params1  = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
1852 
1853 	/* Set "fast registration enabled" for all kernel QPs */
1854 	if (!qp->ibqp.uobject)
1855 		context->params1 |= cpu_to_be32(1 << 11);
1856 
1857 	if (attr_mask & IB_QP_RNR_RETRY) {
1858 		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
1859 		optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
1860 	}
1861 
1862 	if (attr_mask & IB_QP_RETRY_CNT) {
1863 		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
1864 		optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
1865 	}
1866 
1867 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1868 		if (attr->max_rd_atomic)
1869 			context->params1 |=
1870 				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
1871 		optpar |= MLX4_QP_OPTPAR_SRA_MAX;
1872 	}
1873 
1874 	if (attr_mask & IB_QP_SQ_PSN)
1875 		context->next_send_psn = cpu_to_be32(attr->sq_psn);
1876 
1877 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1878 		if (attr->max_dest_rd_atomic)
1879 			context->params2 |=
1880 				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
1881 		optpar |= MLX4_QP_OPTPAR_RRA_MAX;
1882 	}
1883 
1884 	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
1885 		context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
1886 		optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
1887 	}
1888 
1889 	if (ibqp->srq)
1890 		context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);
1891 
1892 	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
1893 		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
1894 		optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
1895 	}
1896 	if (attr_mask & IB_QP_RQ_PSN)
1897 		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
1898 
1899 	/* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */
1900 	if (attr_mask & IB_QP_QKEY) {
1901 		if (qp->mlx4_ib_qp_type &
1902 		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))
1903 			context->qkey = cpu_to_be32(IB_QP_SET_QKEY);
1904 		else {
1905 			if (mlx4_is_mfunc(dev->dev) &&
1906 			    !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) &&
1907 			    (attr->qkey & MLX4_RESERVED_QKEY_MASK) ==
1908 			    MLX4_RESERVED_QKEY_BASE) {
1909 				pr_err("Cannot use reserved QKEY"
1910 				       " 0x%x (range 0xffff0000..0xffffffff"
1911 				       " is reserved)\n", attr->qkey);
1912 				err = -EINVAL;
1913 				goto out;
1914 			}
1915 			context->qkey = cpu_to_be32(attr->qkey);
1916 		}
1917 		optpar |= MLX4_QP_OPTPAR_Q_KEY;
1918 	}
1919 
1920 	if (ibqp->srq)
1921 		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);
1922 
1923 	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1924 		context->db_rec_addr = cpu_to_be64(qp->db.dma);
1925 
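	/*
	 * MLX (QP0/QP1), UD and raw packet QPs get their scheduling queue
	 * fixed on the INIT->RTR transition: the port number is encoded
	 * in the top bits on top of the default (or default QP0) schedule
	 * queue value, and on Ethernet ports the source MAC index is
	 * resolved for UD and proxy/tunnel GSI QPs.
	 */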
1926 	if (cur_state == IB_QPS_INIT &&
1927 	    new_state == IB_QPS_RTR  &&
1928 	    (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
1929 	     ibqp->qp_type == IB_QPT_UD ||
1930 	     ibqp->qp_type == IB_QPT_RAW_PACKET)) {
1931 		context->pri_path.sched_queue = (qp->port - 1) << 6;
1932 		if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
1933 		    qp->mlx4_ib_qp_type &
1934 		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) {
1935 			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
1936 			if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI)
1937 				context->pri_path.fl = 0x80;
1938 		} else {
1939 			if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
1940 				context->pri_path.fl = 0x80;
1941 			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
1942 		}
1943 		if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
1944 		    IB_LINK_LAYER_ETHERNET) {
1945 			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI ||
1946 			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI)
1947 				context->pri_path.feup = 1 << 7; /* don't fsm */
1948 			/* handle smac_index */
1949 			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD ||
1950 			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
1951 			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
1952 				err = handle_eth_ud_smac_index(dev, qp, context);
1953 				if (err) {
1954 					err = -EINVAL;
1955 					goto out;
1956 				}
1957 				if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
1958 					dev->qp1_proxy[qp->port - 1] = qp;
1959 			}
1960 		}
1961 	}
1962 
1963 	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
1964 		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
1965 					MLX4_IB_LINK_TYPE_ETH;
1966 		if (dev->dev->caps.tunnel_offload_mode ==  MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
1967 			/* set QP to receive both tunneled & non-tunneled packets */
1968 			if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
1969 				context->srqn = cpu_to_be32(7 << 28);
1970 		}
1971 	}
1972 
1973 	if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
1974 		int is_eth = rdma_port_get_link_layer(
1975 				&dev->ib_dev, qp->port) ==
1976 				IB_LINK_LAYER_ETHERNET;
1977 		if (is_eth) {
1978 			context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
1979 			optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
1980 		}
1981 	}
1982 
1983 
1984 	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD	&&
1985 	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
1986 		sqd_event = 1;
1987 	else
1988 		sqd_event = 0;
1989 
1990 	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1991 		context->rlkey_roce_mode |= (1 << 4);
1992 
1993 	/*
1994 	 * Before passing a kernel QP to the HW, make sure that the
1995 	 * ownership bits of the send queue are set and the SQ
1996 	 * headroom is stamped so that the hardware doesn't start
1997 	 * processing stale work requests.
1998 	 */
1999 	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2000 		struct mlx4_wqe_ctrl_seg *ctrl;
2001 		int i;
2002 
2003 		for (i = 0; i < qp->sq.wqe_cnt; ++i) {
2004 			ctrl = get_send_wqe(qp, i);
2005 			ctrl->owner_opcode = cpu_to_be32(1U << 31);
2006 			if (qp->sq_max_wqes_per_wr == 1)
2007 				ctrl->fence_size =
2008 						1 << (qp->sq.wqe_shift - 4);
2009 
2010 			stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
2011 		}
2012 	}
2013 
2014 	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
2015 			     to_mlx4_state(new_state), context, optpar,
2016 			     sqd_event, &qp->mqp);
2017 	if (err)
2018 		goto out;
2019 
2020 	qp->state = new_state;
2021 
2022 	if (attr_mask & IB_QP_ACCESS_FLAGS)
2023 		qp->atomic_rd_en = attr->qp_access_flags;
2024 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
2025 		qp->resp_depth = attr->max_dest_rd_atomic;
2026 	if (attr_mask & IB_QP_PORT) {
2027 		qp->port = attr->port_num;
2028 		update_mcg_macs(dev, qp);
2029 	}
2030 	if (attr_mask & IB_QP_ALT_PATH)
2031 		qp->alt_port = attr->alt_port_num;
2032 
2033 	if (is_sqp(dev, qp))
2034 		store_sqp_attrs(to_msqp(qp), attr, attr_mask);
2035 
2036 	/*
2037 	 * If we moved QP0 to RTR, bring the IB link up; if we moved
2038 	 * QP0 to RESET or ERROR, bring the link back down.
2039 	 */
2040 	if (is_qp0(dev, qp)) {
2041 		if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
2042 			if (mlx4_INIT_PORT(dev->dev, qp->port))
2043 				pr_warn("INIT_PORT failed for port %d\n",
2044 				       qp->port);
2045 
2046 		if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
2047 		    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
2048 			mlx4_CLOSE_PORT(dev->dev, qp->port);
2049 	}
2050 
2051 	/*
2052 	 * If we moved a kernel QP to RESET, clean up all old CQ
2053 	 * entries and reinitialize the QP.
2054 	 */
2055 	if (new_state == IB_QPS_RESET) {
2056 		if (!ibqp->uobject) {
2057 			mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
2058 					 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
2059 			if (send_cq != recv_cq)
2060 				mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
2061 
2062 			qp->rq.head = 0;
2063 			qp->rq.tail = 0;
2064 			qp->sq.head = 0;
2065 			qp->sq.tail = 0;
2066 			qp->sq_next_wqe = 0;
2067 			if (qp->rq.wqe_cnt)
2068 				*qp->db.db  = 0;
2069 
2070 			if (qp->flags & MLX4_IB_QP_NETIF)
2071 				mlx4_ib_steer_qp_reg(dev, qp, 0);
2072 		}
2073 		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
2074 			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
2075 			qp->pri.smac = 0;
2076 			qp->pri.smac_port = 0;
2077 		}
2078 		if (qp->alt.smac) {
2079 			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
2080 			qp->alt.smac = 0;
2081 		}
2082 		if (qp->pri.vid < 0x1000) {
2083 			mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
2084 			qp->pri.vid = 0xFFFF;
2085 			qp->pri.candidate_vid = 0xFFFF;
2086 			qp->pri.update_vid = 0;
2087 		}
2088 
2089 		if (qp->alt.vid < 0x1000) {
2090 			mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
2091 			qp->alt.vid = 0xFFFF;
2092 			qp->alt.candidate_vid = 0xFFFF;
2093 			qp->alt.update_vid = 0;
2094 		}
2095 	}
2096 out:
2097 	if (err && qp->counter_index)
2098 		mlx4_ib_free_qp_counter(dev, qp);
2099 	if (err && steer_qp)
2100 		mlx4_ib_steer_qp_reg(dev, qp, 0);
2101 	kfree(context);
2102 	if (qp->pri.candidate_smac ||
2103 	    (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
2104 		if (err) {
2105 			mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
2106 		} else {
2107 			if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
2108 				mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
2109 			qp->pri.smac = qp->pri.candidate_smac;
2110 			qp->pri.smac_index = qp->pri.candidate_smac_index;
2111 			qp->pri.smac_port = qp->pri.candidate_smac_port;
2112 		}
2113 		qp->pri.candidate_smac = 0;
2114 		qp->pri.candidate_smac_index = 0;
2115 		qp->pri.candidate_smac_port = 0;
2116 	}
2117 	if (qp->alt.candidate_smac) {
2118 		if (err) {
2119 			mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac);
2120 		} else {
2121 			if (qp->alt.smac)
2122 				mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
2123 			qp->alt.smac = qp->alt.candidate_smac;
2124 			qp->alt.smac_index = qp->alt.candidate_smac_index;
2125 			qp->alt.smac_port = qp->alt.candidate_smac_port;
2126 		}
2127 		qp->alt.candidate_smac = 0;
2128 		qp->alt.candidate_smac_index = 0;
2129 		qp->alt.candidate_smac_port = 0;
2130 	}
2131 
2132 	if (qp->pri.update_vid) {
2133 		if (err) {
2134 			if (qp->pri.candidate_vid < 0x1000)
2135 				mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port,
2136 						     qp->pri.candidate_vid);
2137 		} else {
2138 			if (qp->pri.vid < 0x1000)
2139 				mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port,
2140 						     qp->pri.vid);
2141 			qp->pri.vid = qp->pri.candidate_vid;
2142 			qp->pri.vlan_port = qp->pri.candidate_vlan_port;
2143 			qp->pri.vlan_index =  qp->pri.candidate_vlan_index;
2144 		}
2145 		qp->pri.candidate_vid = 0xFFFF;
2146 		qp->pri.update_vid = 0;
2147 	}
2148 
2149 	if (qp->alt.update_vid) {
2150 		if (err) {
2151 			if (qp->alt.candidate_vid < 0x1000)
2152 				mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port,
2153 						     qp->alt.candidate_vid);
2154 		} else {
2155 			if (qp->alt.vid < 0x1000)
2156 				mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port,
2157 						     qp->alt.vid);
2158 			qp->alt.vid = qp->alt.candidate_vid;
2159 			qp->alt.vlan_port = qp->alt.candidate_vlan_port;
2160 			qp->alt.vlan_index =  qp->alt.candidate_vlan_index;
2161 		}
2162 		qp->alt.candidate_vid = 0xFFFF;
2163 		qp->alt.update_vid = 0;
2164 	}
2165 
2166 	return err;
2167 }
2168 
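/*
 * Validate a modify-QP request (state transition, port, pkey index and
 * RDMA atomic limits) under the QP mutex, then hand the actual context
 * update to __mlx4_ib_modify_qp().
 */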
2169 static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2170 			      int attr_mask, struct ib_udata *udata)
2171 {
2172 	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
2173 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
2174 	enum ib_qp_state cur_state, new_state;
2175 	int err = -EINVAL;
2176 	mutex_lock(&qp->mutex);
2177 
2178 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
2179 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
2180 
2181 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
2182 				attr_mask)) {
2183 		pr_debug("qpn 0x%x: invalid attribute mask specified "
2184 			 "for transition %d to %d. qp_type %d,"
2185 			 " attr_mask 0x%x\n",
2186 			 ibqp->qp_num, cur_state, new_state,
2187 			 ibqp->qp_type, attr_mask);
2188 		goto out;
2189 	}
2190 
2191 	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
2192 		if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
2193 			if ((ibqp->qp_type == IB_QPT_RC) ||
2194 			    (ibqp->qp_type == IB_QPT_UD) ||
2195 			    (ibqp->qp_type == IB_QPT_UC) ||
2196 			    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
2197 			    (ibqp->qp_type == IB_QPT_XRC_INI)) {
2198 				attr->port_num = mlx4_ib_bond_next_port(dev);
2199 			}
2200 		} else {
2201 			/* no sense in changing port_num
2202 			 * when ports are bonded */
2203 			attr_mask &= ~IB_QP_PORT;
2204 		}
2205 	}
2206 
2207 	if ((attr_mask & IB_QP_PORT) &&
2208 	    (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
2209 		pr_debug("qpn 0x%x: invalid port number (%d) specified "
2210 			 "for transition %d to %d. qp_type %d\n",
2211 			 ibqp->qp_num, attr->port_num, cur_state,
2212 			 new_state, ibqp->qp_type);
2213 		goto out;
2214 	}
2215 
2216 	if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
2217 	    (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
2218 	     IB_LINK_LAYER_ETHERNET))
2219 		goto out;
2220 
2221 	if (attr_mask & IB_QP_PKEY_INDEX) {
2222 		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
2223 		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
2224 			pr_debug("qpn 0x%x: invalid pkey index (%d) specified "
2225 				 "for transition %d to %d. qp_type %d\n",
2226 				 ibqp->qp_num, attr->pkey_index, cur_state,
2227 				 new_state, ibqp->qp_type);
2228 			goto out;
2229 		}
2230 	}
2231 
2232 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
2233 	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
2234 		pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. "
2235 			 "Transition %d to %d. qp_type %d\n",
2236 			 ibqp->qp_num, attr->max_rd_atomic, cur_state,
2237 			 new_state, ibqp->qp_type);
2238 		goto out;
2239 	}
2240 
2241 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
2242 	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
2243 		pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. "
2244 			 "Transition %d to %d. qp_type %d\n",
2245 			 ibqp->qp_num, attr->max_dest_rd_atomic, cur_state,
2246 			 new_state, ibqp->qp_type);
2247 		goto out;
2248 	}
2249 
2250 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
2251 		err = 0;
2252 		goto out;
2253 	}
2254 
2255 	err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state, udata);
2256 
2257 	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
2258 		attr->port_num = 1;
2259 
2260 out:
2261 	mutex_unlock(&qp->mutex);
2262 	return err;
2263 }
2264 
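/*
 * Wrapper around _mlx4_ib_modify_qp() that also propagates the modify
 * to the companion RoCE v2 GSI QP when one exists.
 */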
2265 int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2266 		      int attr_mask, struct ib_udata *udata)
2267 {
2268 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
2269 	int ret;
2270 
2271 	ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata);
2272 
2273 	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
2274 		struct mlx4_ib_sqp *sqp = to_msqp(mqp);
2275 		int err = 0;
2276 
2277 		if (sqp->roce_v2_gsi)
2278 			err = ib_modify_qp(sqp->roce_v2_gsi, attr, attr_mask);
2279 		if (err)
2280 			pr_err("Failed to modify GSI QP for RoCEv2 (%d)\n",
2281 			       err);
2282 	}
2283 	return ret;
2284 }
2285 
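/*
 * For a VF, look up the QKEY of the proxy/tunnel QP0 owning the given
 * QPN by scanning the per-port proxy and tunnel QP numbers.
 */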
2286 static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
2287 {
2288 	int i;
2289 	for (i = 0; i < dev->caps.num_ports; i++) {
2290 		if (qpn == dev->caps.qp0_proxy[i] ||
2291 		    qpn == dev->caps.qp0_tunnel[i]) {
2292 			*qkey = dev->caps.qp0_qkey[i];
2293 			return 0;
2294 		}
2295 	}
2296 	return -EINVAL;
2297 }
2298 
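/*
 * Build the UD header for SR-IOV special QP0 traffic (proxy/tunnel SMI
 * owner QPs), force loopback, and copy the header into one or two
 * inline segments of the send WQE; *mlx_seg_len returns the number of
 * bytes consumed in the WQE.
 */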
2299 static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
2300 				  const struct ib_ud_wr *wr,
2301 				  void *wqe, unsigned *mlx_seg_len)
2302 {
2303 	struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
2304 	struct ib_device *ib_dev = &mdev->ib_dev;
2305 	struct mlx4_wqe_mlx_seg *mlx = wqe;
2306 	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
2307 	struct mlx4_ib_ah *ah = to_mah(wr->ah);
2308 	u16 pkey;
2309 	u32 qkey;
2310 	int send_size;
2311 	int header_size;
2312 	int spc;
2313 	int i;
2314 
2315 	if (wr->wr.opcode != IB_WR_SEND)
2316 		return -EINVAL;
2317 
2318 	send_size = 0;
2319 
2320 	for (i = 0; i < wr->wr.num_sge; ++i)
2321 		send_size += wr->wr.sg_list[i].length;
2322 
2323 	/* for proxy-qp0 sends, need to add in size of tunnel header */
2324 	/* for tunnel-qp0 sends, tunnel header is already in s/g list */
2325 	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
2326 		send_size += sizeof (struct mlx4_ib_tunnel_header);
2327 
2328 	ib_ud_header_init(send_size, 1, 0, 0, 0, 0, 0, 0, &sqp->ud_header);
2329 
2330 	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
2331 		sqp->ud_header.lrh.service_level =
2332 			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
2333 		sqp->ud_header.lrh.destination_lid =
2334 			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
2335 		sqp->ud_header.lrh.source_lid =
2336 			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
2337 	}
2338 
2339 	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
2340 
2341 	/* force loopback */
2342 	mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_VL15 | 0x1 | MLX4_WQE_MLX_SLR);
2343 	mlx->rlid = sqp->ud_header.lrh.destination_lid;
2344 
2345 	sqp->ud_header.lrh.virtual_lane    = 0;
2346 	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
2347 	ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
2348 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
2349 	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
2350 		sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
2351 	else
2352 		sqp->ud_header.bth.destination_qpn =
2353 			cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]);
2354 
2355 	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
2356 	if (mlx4_is_master(mdev->dev)) {
2357 		if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
2358 			return -EINVAL;
2359 	} else {
2360 		if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
2361 			return -EINVAL;
2362 	}
2363 	sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
2364 	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn);
2365 
2366 	sqp->ud_header.bth.opcode        = IB_OPCODE_UD_SEND_ONLY;
2367 	sqp->ud_header.immediate_present = 0;
2368 
2369 	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
2370 
2371 	/*
2372 	 * Inline data segments may not cross a 64 byte boundary.  If
2373 	 * our UD header is bigger than the space available up to the
2374 	 * next 64 byte boundary in the WQE, use two inline data
2375 	 * segments to hold the UD header.
2376 	 */
2377 	spc = MLX4_INLINE_ALIGN -
2378 	      ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
2379 	if (header_size <= spc) {
2380 		inl->byte_count = cpu_to_be32((1U << 31) | header_size);
2381 		memcpy(inl + 1, sqp->header_buf, header_size);
2382 		i = 1;
2383 	} else {
2384 		inl->byte_count = cpu_to_be32((1U << 31) | spc);
2385 		memcpy(inl + 1, sqp->header_buf, spc);
2386 
2387 		inl = (void *) (inl + 1) + spc;
2388 		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
2389 		/*
2390 		 * Need a barrier here to make sure all the data is
2391 		 * visible before the byte_count field is set.
2392 		 * Otherwise the HCA prefetcher could grab the 64-byte
2393 		 * chunk with this inline segment and get a valid (!=
2394 		 * 0xffffffff) byte count but stale data, and end up
2395 		 * generating a packet with bad headers.
2396 		 *
2397 		 * The first inline segment's byte_count field doesn't
2398 		 * need a barrier, because it comes after a
2399 		 * control/MLX segment and therefore is at an offset
2400 		 * of 16 mod 64.
2401 		 */
2402 		wmb();
2403 		inl->byte_count = cpu_to_be32((1U << 31) | (header_size - spc));
2404 		i = 2;
2405 	}
2406 
2407 	*mlx_seg_len =
2408 	ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
2409 	return 0;
2410 }
2411 
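/*
 * Map a service level to a virtual lane using the cached per-port
 * SL-to-VL table (two 4-bit entries per byte); SLs above 15 map to the
 * invalid VL 0xf.
 */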
2412 static u8 sl_to_vl(struct mlx4_ib_dev *dev, u8 sl, int port_num)
2413 {
2414 	union sl2vl_tbl_to_u64 tmp_vltab;
2415 	u8 vl;
2416 
2417 	if (sl > 15)
2418 		return 0xf;
2419 	tmp_vltab.sl64 = atomic64_read(&dev->sl2vl[port_num - 1]);
2420 	vl = tmp_vltab.sl8[sl >> 1];
2421 	if (sl & 1)
2422 		vl &= 0x0f;
2423 	else
2424 		vl >>= 4;
2425 	return vl;
2426 }
2427 
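/*
 * Build the wire header for an MLX (QP0/QP1) send: LRH/GRH for IB
 * ports, or Ethernet plus optional VLAN/IPv4/IPv6/UDP for RoCE v1/v2,
 * and place it in inline segments following the control/MLX segments.
 */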
2428 #define MLX4_ROCEV2_QP1_SPORT 0xC000
2429 static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
2430 			    void *wqe, unsigned *mlx_seg_len)
2431 {
2432 	struct ib_device *ib_dev = sqp->qp.ibqp.device;
2433 	struct mlx4_wqe_mlx_seg *mlx = wqe;
2434 	struct mlx4_wqe_ctrl_seg *ctrl = wqe;
2435 	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
2436 	struct mlx4_ib_ah *ah = to_mah(wr->ah);
2437 	union ib_gid sgid;
2438 	u16 pkey;
2439 	int send_size;
2440 	int header_size;
2441 	int spc;
2442 	int i;
2443 	int err = 0;
2444 	u16 vlan = 0xffff;
2445 	bool is_eth;
2446 	bool is_vlan = false;
2447 	bool is_grh;
2448 	bool is_udp = false;
2449 	int ip_version = 0;
2450 
2451 	send_size = 0;
2452 	for (i = 0; i < wr->wr.num_sge; ++i)
2453 		send_size += wr->wr.sg_list[i].length;
2454 
2455 	is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
2456 	is_grh = mlx4_ib_ah_grh_present(ah);
2457 	if (is_eth) {
2458 		struct ib_gid_attr gid_attr;
2459 
2460 		if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
2461 			/* When multi-function is enabled, the ib_core gid
2462 			 * indexes don't necessarily match the hw ones, so
2463 			 * we must use our own cache */
2464 			err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev,
2465 							   be32_to_cpu(ah->av.ib.port_pd) >> 24,
2466 							   ah->av.ib.gid_index, &sgid.raw[0]);
2467 			if (err)
2468 				return err;
2469 		} else  {
2470 			err = ib_get_cached_gid(ib_dev,
2471 						be32_to_cpu(ah->av.ib.port_pd) >> 24,
2472 						ah->av.ib.gid_index, &sgid,
2473 						&gid_attr);
2474 			if (!err) {
2475 				if (gid_attr.ndev)
2476 					if_rele(gid_attr.ndev);
2477 				if (!memcmp(&sgid, &zgid, sizeof(sgid)))
2478 					err = -ENOENT;
2479 			}
2480 			if (!err) {
2481 				is_udp = gid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
2482 				if (is_udp) {
2483 					if (ipv6_addr_v4mapped((struct in6_addr *)&sgid))
2484 						ip_version = 4;
2485 					else
2486 						ip_version = 6;
2487 					is_grh = false;
2488 				}
2489 			} else {
2490 				return err;
2491 			}
2492 		}
2493 		if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
2494 			vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
2495 			is_vlan = 1;
2496 			is_vlan = true;
2497 	}
2498 	err = ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh,
2499 			  ip_version, is_udp, 0, &sqp->ud_header);
2500 	if (err)
2501 		return err;
2502 
2503 	if (!is_eth) {
2504 		sqp->ud_header.lrh.service_level =
2505 			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
2506 		sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
2507 		sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
2508 	}
2509 
2510 	if (is_grh || (ip_version == 6)) {
2511 		sqp->ud_header.grh.traffic_class =
2512 			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
2513 		sqp->ud_header.grh.flow_label    =
2514 			ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
2515 		sqp->ud_header.grh.hop_limit     = ah->av.ib.hop_limit;
2516 		if (is_eth) {
2517 			memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
2518 		} else {
2519 			if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
2520 				/* When multi-function is enabled, the ib_core gid
2521 				 * indexes don't necessarily match the hw ones, so
2522 				 * we must use our own cache
2523 				 */
2524 				sqp->ud_header.grh.source_gid.global.subnet_prefix =
2525 					cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
2526 								    demux[sqp->qp.port - 1].
2527 								    subnet_prefix)));
2528 				sqp->ud_header.grh.source_gid.global.interface_id =
2529 					to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
2530 						       guid_cache[ah->av.ib.gid_index];
2531 			} else {
2532 				ib_get_cached_gid(ib_dev,
2533 						  be32_to_cpu(ah->av.ib.port_pd) >> 24,
2534 						  ah->av.ib.gid_index,
2535 						  &sqp->ud_header.grh.source_gid, NULL);
2536 			}
2537 		}
2538 		memcpy(sqp->ud_header.grh.destination_gid.raw,
2539 		       ah->av.ib.dgid, 16);
2540 	}
2541 
2542 	if (ip_version == 4) {
2543 		sqp->ud_header.ip4.tos =
2544 			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
2545 		sqp->ud_header.ip4.id = 0;
2546 		sqp->ud_header.ip4.frag_off = htons(IP_DF);
2547 		sqp->ud_header.ip4.ttl = ah->av.eth.hop_limit;
2548 
2549 		memcpy(&sqp->ud_header.ip4.saddr,
2550 		       sgid.raw + 12, 4);
2551 		memcpy(&sqp->ud_header.ip4.daddr, ah->av.ib.dgid + 12, 4);
2552 		sqp->ud_header.ip4.check = ib_ud_ip4_csum(&sqp->ud_header);
2553 	}
2554 
2555 	if (is_udp) {
2556 		sqp->ud_header.udp.dport = htons(ROCE_V2_UDP_DPORT);
2557 		sqp->ud_header.udp.sport = htons(MLX4_ROCEV2_QP1_SPORT);
2558 		sqp->ud_header.udp.csum = 0;
2559 	}
2560 
2561 	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
2562 
2563 	if (!is_eth) {
2564 		mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
2565 					  (sqp->ud_header.lrh.destination_lid ==
2566 					   IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
2567 					  (sqp->ud_header.lrh.service_level << 8));
2568 		if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
2569 			mlx->flags |= cpu_to_be32(0x1); /* force loopback */
2570 		mlx->rlid = sqp->ud_header.lrh.destination_lid;
2571 	}
2572 
2573 	switch (wr->wr.opcode) {
2574 	case IB_WR_SEND:
2575 		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY;
2576 		sqp->ud_header.immediate_present = 0;
2577 		break;
2578 	case IB_WR_SEND_WITH_IMM:
2579 		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2580 		sqp->ud_header.immediate_present = 1;
2581 		sqp->ud_header.immediate_data    = wr->wr.ex.imm_data;
2582 		break;
2583 	default:
2584 		return -EINVAL;
2585 	}
2586 
2587 	if (is_eth) {
2588 		struct in6_addr in6;
2589 		u16 ether_type;
2590 		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
2591 
2592 		ether_type = (!is_udp) ? MLX4_IB_IBOE_ETHERTYPE :
2593 			(ip_version == 4 ? ETHERTYPE_IP : ETHERTYPE_IPV6);
2594 
2595 		mlx->sched_prio = cpu_to_be16(pcp);
2596 
2597 		ether_addr_copy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac);
2598 		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
2599 		memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
2600 		memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
2601 		memcpy(&in6, sgid.raw, sizeof(in6));
2602 
2603 
2604 		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
2605 			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
2606 		if (!is_vlan) {
2607 			sqp->ud_header.eth.type = cpu_to_be16(ether_type);
2608 		} else {
2609 			sqp->ud_header.vlan.type = cpu_to_be16(ether_type);
2610 			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
2611 		}
2612 	} else {
2613 		sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 :
2614 							sl_to_vl(to_mdev(ib_dev),
2615 								 sqp->ud_header.lrh.service_level,
2616 								 sqp->qp.port);
2617 		if (sqp->qp.ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
2618 			return -EINVAL;
2619 		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
2620 			sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
2621 	}
2622 	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
2623 	if (!sqp->qp.ibqp.qp_num)
2624 		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
2625 	else
2626 		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
2627 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
2628 	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
2629 	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
2630 	sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
2631 					       sqp->qkey : wr->remote_qkey);
2632 	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
2633 
2634 	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
2635 
2636 	if (0) {
2637 		pr_err("built UD header of size %d:\n", header_size);
2638 		for (i = 0; i < header_size / 4; ++i) {
2639 			if (i % 8 == 0)
2640 				pr_err("  [%02x] ", i * 4);
2641 			pr_cont(" %08x",
2642 				be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
2643 			if ((i + 1) % 8 == 0)
2644 				pr_cont("\n");
2645 		}
2646 		pr_err("\n");
2647 	}
2648 
2649 	/*
2650 	 * Inline data segments may not cross a 64 byte boundary.  If
2651 	 * our UD header is bigger than the space available up to the
2652 	 * next 64 byte boundary in the WQE, use two inline data
2653 	 * segments to hold the UD header.
2654 	 */
2655 	spc = MLX4_INLINE_ALIGN -
2656 		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
2657 	if (header_size <= spc) {
2658 		inl->byte_count = cpu_to_be32(1U << 31 | header_size);
2659 		memcpy(inl + 1, sqp->header_buf, header_size);
2660 		i = 1;
2661 	} else {
2662 		inl->byte_count = cpu_to_be32(1U << 31 | spc);
2663 		memcpy(inl + 1, sqp->header_buf, spc);
2664 
2665 		inl = (void *) (inl + 1) + spc;
2666 		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
2667 		/*
2668 		 * Need a barrier here to make sure all the data is
2669 		 * visible before the byte_count field is set.
2670 		 * Otherwise the HCA prefetcher could grab the 64-byte
2671 		 * chunk with this inline segment and get a valid (!=
2672 		 * 0xffffffff) byte count but stale data, and end up
2673 		 * generating a packet with bad headers.
2674 		 *
2675 		 * The first inline segment's byte_count field doesn't
2676 		 * need a barrier, because it comes after a
2677 		 * control/MLX segment and therefore is at an offset
2678 		 * of 16 mod 64.
2679 		 */
2680 		wmb();
2681 		inl->byte_count = cpu_to_be32(1U << 31 | (header_size - spc));
2682 		i = 2;
2683 	}
2684 
2685 	*mlx_seg_len =
2686 		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
2687 	return 0;
2688 }
2689 
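/*
 * Check whether posting nreq more WQEs would overflow the work queue;
 * when the lockless check fails, re-read head/tail under the CQ lock so
 * completions being polled concurrently are taken into account.
 */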
2690 static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
2691 {
2692 	unsigned cur;
2693 	struct mlx4_ib_cq *cq;
2694 
2695 	cur = wq->head - wq->tail;
2696 	if (likely(cur + nreq < wq->max_post))
2697 		return 0;
2698 
2699 	cq = to_mcq(ib_cq);
2700 	spin_lock(&cq->lock);
2701 	cur = wq->head - wq->tail;
2702 	spin_unlock(&cq->lock);
2703 
2704 	return cur + nreq >= wq->max_post;
2705 }
2706 
2707 static __be32 convert_access(int acc)
2708 {
2709 	return (acc & IB_ACCESS_REMOTE_ATOMIC ?
2710 		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC)       : 0) |
2711 	       (acc & IB_ACCESS_REMOTE_WRITE  ?
2712 		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
2713 	       (acc & IB_ACCESS_REMOTE_READ   ?
2714 		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ)  : 0) |
2715 	       (acc & IB_ACCESS_LOCAL_WRITE   ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE)  : 0) |
2716 		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
2717 }
2718 
2719 static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
2720 			const struct ib_reg_wr *wr)
2721 {
2722 	struct mlx4_ib_mr *mr = to_mmr(wr->mr);
2723 
2724 	fseg->flags		= convert_access(wr->access);
2725 	fseg->mem_key		= cpu_to_be32(wr->key);
2726 	fseg->buf_list		= cpu_to_be64(mr->page_map);
2727 	fseg->start_addr	= cpu_to_be64(mr->ibmr.iova);
2728 	fseg->reg_len		= cpu_to_be64(mr->ibmr.length);
2729 	fseg->offset		= 0; /* XXX -- is this just for ZBVA? */
2730 	fseg->page_size		= cpu_to_be32(ilog2(mr->ibmr.page_size));
2731 	fseg->reserved[0]	= 0;
2732 	fseg->reserved[1]	= 0;
2733 }
2734 
2735 static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
2736 {
2737 	memset(iseg, 0, sizeof(*iseg));
2738 	iseg->mem_key = cpu_to_be32(rkey);
2739 }
2740 
2741 static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
2742 					  u64 remote_addr, u32 rkey)
2743 {
2744 	rseg->raddr    = cpu_to_be64(remote_addr);
2745 	rseg->rkey     = cpu_to_be32(rkey);
2746 	rseg->reserved = 0;
2747 }
2748 
2749 static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
2750 		const struct ib_atomic_wr *wr)
2751 {
2752 	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
2753 		aseg->swap_add = cpu_to_be64(wr->swap);
2754 		aseg->compare  = cpu_to_be64(wr->compare_add);
2755 	} else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
2756 		aseg->swap_add = cpu_to_be64(wr->compare_add);
2757 		aseg->compare  = cpu_to_be64(wr->compare_add_mask);
2758 	} else {
2759 		aseg->swap_add = cpu_to_be64(wr->compare_add);
2760 		aseg->compare  = 0;
2761 	}
2762 
2763 }
2764 
2765 static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
2766 				  const struct ib_atomic_wr *wr)
2767 {
2768 	aseg->swap_add		= cpu_to_be64(wr->swap);
2769 	aseg->swap_add_mask	= cpu_to_be64(wr->swap_mask);
2770 	aseg->compare		= cpu_to_be64(wr->compare_add);
2771 	aseg->compare_mask	= cpu_to_be64(wr->compare_add_mask);
2772 }
2773 
2774 static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
2775 			     const struct ib_ud_wr *wr)
2776 {
2777 	memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
2778 	dseg->dqpn = cpu_to_be32(wr->remote_qpn);
2779 	dseg->qkey = cpu_to_be32(wr->remote_qkey);
2780 	dseg->vlan = to_mah(wr->ah)->av.eth.vlan;
2781 	memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6);
2782 }
2783 
2784 static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
2785 				    struct mlx4_wqe_datagram_seg *dseg,
2786 				    const struct ib_ud_wr *wr,
2787 				    enum mlx4_ib_qp_type qpt)
2788 {
2789 	union mlx4_ext_av *av = &to_mah(wr->ah)->av;
2790 	struct mlx4_av sqp_av = {0};
2791 	int port = *((u8 *) &av->ib.port_pd) & 0x3;
2792 
2793 	/* force loopback */
2794 	sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000);
2795 	sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */
2796 	sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel &
2797 			cpu_to_be32(0xf0000000);
2798 
2799 	memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
2800 	if (qpt == MLX4_IB_QPT_PROXY_GSI)
2801 		dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
2802 	else
2803 		dseg->dqpn = cpu_to_be32(dev->dev->caps.qp0_tunnel[port - 1]);
2804 	/* Use QKEY from the QP context, which is set by master */
2805 	dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
2806 }
2807 
2808 static void build_tunnel_header(const struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len)
2809 {
2810 	struct mlx4_wqe_inline_seg *inl = wqe;
2811 	struct mlx4_ib_tunnel_header hdr;
2812 	struct mlx4_ib_ah *ah = to_mah(wr->ah);
2813 	int spc;
2814 	int i;
2815 
2816 	memcpy(&hdr.av, &ah->av, sizeof hdr.av);
2817 	hdr.remote_qpn = cpu_to_be32(wr->remote_qpn);
2818 	hdr.pkey_index = cpu_to_be16(wr->pkey_index);
2819 	hdr.qkey = cpu_to_be32(wr->remote_qkey);
2820 	memcpy(hdr.mac, ah->av.eth.mac, 6);
2821 	hdr.vlan = ah->av.eth.vlan;
2822 
2823 	spc = MLX4_INLINE_ALIGN -
2824 		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
2825 	if (sizeof (hdr) <= spc) {
2826 		memcpy(inl + 1, &hdr, sizeof (hdr));
2827 		wmb();
2828 		inl->byte_count = cpu_to_be32((1U << 31) | (u32)sizeof(hdr));
2829 		i = 1;
2830 	} else {
2831 		memcpy(inl + 1, &hdr, spc);
2832 		wmb();
2833 		inl->byte_count = cpu_to_be32((1U << 31) | spc);
2834 
2835 		inl = (void *) (inl + 1) + spc;
2836 		memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc);
2837 		wmb();
2838 		inl->byte_count = cpu_to_be32((1U << 31) | (u32)(sizeof (hdr) - spc));
2839 		i = 2;
2840 	}
2841 
2842 	*mlx_seg_len =
2843 		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16);
2844 }
2845 
2846 static void set_mlx_icrc_seg(void *dseg)
2847 {
2848 	u32 *t = dseg;
2849 	struct mlx4_wqe_inline_seg *iseg = dseg;
2850 
2851 	t[1] = 0;
2852 
2853 	/*
2854 	 * Need a barrier here before writing the byte_count field to
2855 	 * make sure that all the data is visible before the
2856 	 * byte_count field is set.  Otherwise, if the segment begins
2857 	 * a new cacheline, the HCA prefetcher could grab the 64-byte
2858 	 * chunk and get a valid (!= * 0xffffffff) byte count but
2859 	 * stale data, and end up sending the wrong data.
2860 	 */
2861 	wmb();
2862 
2863 	iseg->byte_count = cpu_to_be32((1U << 31) | 4);
2864 }
2865 
2866 static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
2867 {
2868 	dseg->lkey       = cpu_to_be32(sg->lkey);
2869 	dseg->addr       = cpu_to_be64(sg->addr);
2870 
2871 	/*
2872 	 * Need a barrier here before writing the byte_count field to
2873 	 * make sure that all the data is visible before the
2874 	 * byte_count field is set.  Otherwise, if the segment begins
2875 	 * a new cacheline, the HCA prefetcher could grab the 64-byte
2876 	 * chunk and get a valid (!= * 0xffffffff) byte count but
2877 	 * stale data, and end up sending the wrong data.
2878 	 */
2879 	wmb();
2880 
2881 	dseg->byte_count = cpu_to_be32(sg->length);
2882 }
2883 
2884 static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
2885 {
2886 	dseg->byte_count = cpu_to_be32(sg->length);
2887 	dseg->lkey       = cpu_to_be32(sg->lkey);
2888 	dseg->addr       = cpu_to_be64(sg->addr);
2889 }
2890 
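/*
 * Copy the LSO header into the send WQE and encode the MSS and header
 * length; the blh bit is set when the 16-byte-aligned header is larger
 * than a 64-byte cache line.
 */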
2891 static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, const struct ib_ud_wr *wr,
2892 			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
2893 			 __be32 *lso_hdr_sz, __be32 *blh)
2894 {
2895 	unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);
2896 
2897 	if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
2898 		*blh = cpu_to_be32(1 << 6);
2899 
2900 	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
2901 		     wr->wr.num_sge > qp->sq.max_gs - (halign >> 4)))
2902 		return -EINVAL;
2903 
2904 	memcpy(wqe->header, wr->header, wr->hlen);
2905 
2906 	*lso_hdr_sz  = cpu_to_be32(wr->mss << 16 | wr->hlen);
2907 	*lso_seg_len = halign;
2908 	return 0;
2909 }
2910 
2911 static __be32 send_ieth(const struct ib_send_wr *wr)
2912 {
2913 	switch (wr->opcode) {
2914 	case IB_WR_SEND_WITH_IMM:
2915 	case IB_WR_RDMA_WRITE_WITH_IMM:
2916 		return wr->ex.imm_data;
2917 
2918 	case IB_WR_SEND_WITH_INV:
2919 		return cpu_to_be32(wr->ex.invalidate_rkey);
2920 
2921 	default:
2922 		return 0;
2923 	}
2924 }
2925 
2926 static void add_zero_len_inline(void *wqe)
2927 {
2928 	struct mlx4_wqe_inline_seg *inl = wqe;
2929 	memset(wqe, 0, 16);
2930 	inl->byte_count = cpu_to_be32(1U << 31);
2931 }
2932 
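/*
 * Post a chain of send work requests: build the control segment,
 * per-QP-type headers and data segments for each WR, then ring the send
 * doorbell once and stamp the last WQE only after the doorbell.
 */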
2933 int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2934 		      const struct ib_send_wr **bad_wr)
2935 {
2936 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
2937 	void *wqe;
2938 	struct mlx4_wqe_ctrl_seg *ctrl;
2939 	struct mlx4_wqe_data_seg *dseg;
2940 	unsigned long flags;
2941 	int nreq;
2942 	int err = 0;
2943 	unsigned ind;
2944 	int uninitialized_var(stamp);
2945 	int uninitialized_var(size);
2946 	unsigned uninitialized_var(seglen);
2947 	__be32 dummy;
2948 	__be32 *lso_wqe;
2949 	__be32 lso_hdr_sz = 0;
2950 	__be32 blh;
2951 	int i;
2952 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
2953 
2954 	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
2955 		struct mlx4_ib_sqp *sqp = to_msqp(qp);
2956 
2957 		if (sqp->roce_v2_gsi) {
2958 			struct mlx4_ib_ah *ah = to_mah(ud_wr(wr)->ah);
2959 			struct ib_gid_attr gid_attr;
2960 			union ib_gid gid;
2961 
2962 			if (!ib_get_cached_gid(ibqp->device,
2963 					       be32_to_cpu(ah->av.ib.port_pd) >> 24,
2964 					       ah->av.ib.gid_index, &gid,
2965 					       &gid_attr)) {
2966 				if (gid_attr.ndev)
2967 					if_rele(gid_attr.ndev);
2968 				qp = (gid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ?
2969 					to_mqp(sqp->roce_v2_gsi) : qp;
2970 			} else {
2971 				pr_err("Failed to get gid at index %d. RoCEv2 will not work properly\n",
2972 				       ah->av.ib.gid_index);
2973 			}
2974 		}
2975 	}
2976 
2977 	spin_lock_irqsave(&qp->sq.lock, flags);
2978 	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
2979 		err = -EIO;
2980 		*bad_wr = wr;
2981 		nreq = 0;
2982 		goto out;
2983 	}
2984 
2985 	ind = qp->sq_next_wqe;
2986 
2987 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
2988 		lso_wqe = &dummy;
2989 		blh = 0;
2990 
2991 		if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
2992 			err = -ENOMEM;
2993 			*bad_wr = wr;
2994 			goto out;
2995 		}
2996 
2997 		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
2998 			err = -EINVAL;
2999 			*bad_wr = wr;
3000 			goto out;
3001 		}
3002 
3003 		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
3004 		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
3005 
3006 		ctrl->srcrb_flags =
3007 			(wr->send_flags & IB_SEND_SIGNALED ?
3008 			 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
3009 			(wr->send_flags & IB_SEND_SOLICITED ?
3010 			 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
3011 			((wr->send_flags & IB_SEND_IP_CSUM) ?
3012 			 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
3013 				     MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
3014 			qp->sq_signal_bits;
3015 
3016 		ctrl->imm = send_ieth(wr);
3017 
3018 		wqe += sizeof *ctrl;
3019 		size = sizeof *ctrl / 16;
3020 
3021 		switch (qp->mlx4_ib_qp_type) {
3022 		case MLX4_IB_QPT_RC:
3023 		case MLX4_IB_QPT_UC:
3024 			switch (wr->opcode) {
3025 			case IB_WR_ATOMIC_CMP_AND_SWP:
3026 			case IB_WR_ATOMIC_FETCH_AND_ADD:
3027 			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
3028 				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
3029 					      atomic_wr(wr)->rkey);
3030 				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
3031 
3032 				set_atomic_seg(wqe, atomic_wr(wr));
3033 				wqe  += sizeof (struct mlx4_wqe_atomic_seg);
3034 
3035 				size += (sizeof (struct mlx4_wqe_raddr_seg) +
3036 					 sizeof (struct mlx4_wqe_atomic_seg)) / 16;
3037 
3038 				break;
3039 
3040 			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
3041 				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
3042 					      atomic_wr(wr)->rkey);
3043 				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
3044 
3045 				set_masked_atomic_seg(wqe, atomic_wr(wr));
3046 				wqe  += sizeof (struct mlx4_wqe_masked_atomic_seg);
3047 
3048 				size += (sizeof (struct mlx4_wqe_raddr_seg) +
3049 					 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;
3050 
3051 				break;
3052 
3053 			case IB_WR_RDMA_READ:
3054 			case IB_WR_RDMA_WRITE:
3055 			case IB_WR_RDMA_WRITE_WITH_IMM:
3056 				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
3057 					      rdma_wr(wr)->rkey);
3058 				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
3059 				size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
3060 				break;
3061 
3062 			case IB_WR_LOCAL_INV:
3063 				ctrl->srcrb_flags |=
3064 					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
3065 				set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
3066 				wqe  += sizeof (struct mlx4_wqe_local_inval_seg);
3067 				size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
3068 				break;
3069 
3070 			case IB_WR_REG_MR:
3071 				ctrl->srcrb_flags |=
3072 					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
3073 				set_reg_seg(wqe, reg_wr(wr));
3074 				wqe  += sizeof(struct mlx4_wqe_fmr_seg);
3075 				size += sizeof(struct mlx4_wqe_fmr_seg) / 16;
3076 				break;
3077 
3078 			default:
3079 				/* No extra segments required for sends */
3080 				break;
3081 			}
3082 			break;
3083 
3084 		case MLX4_IB_QPT_TUN_SMI_OWNER:
3085 			err =  build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
3086 					ctrl, &seglen);
3087 			if (unlikely(err)) {
3088 				*bad_wr = wr;
3089 				goto out;
3090 			}
3091 			wqe  += seglen;
3092 			size += seglen / 16;
3093 			break;
3094 		case MLX4_IB_QPT_TUN_SMI:
3095 		case MLX4_IB_QPT_TUN_GSI:
3096 			/* this is a UD qp used in MAD responses to slaves. */
3097 			set_datagram_seg(wqe, ud_wr(wr));
3098 			/* set the forced-loopback bit in the data seg av */
3099 			*(__be32 *) wqe |= cpu_to_be32(0x80000000);
3100 			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
3101 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
3102 			break;
3103 		case MLX4_IB_QPT_UD:
3104 			set_datagram_seg(wqe, ud_wr(wr));
3105 			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
3106 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
3107 
3108 			if (wr->opcode == IB_WR_LSO) {
3109 				err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen,
3110 						&lso_hdr_sz, &blh);
3111 				if (unlikely(err)) {
3112 					*bad_wr = wr;
3113 					goto out;
3114 				}
3115 				lso_wqe = (__be32 *) wqe;
3116 				wqe  += seglen;
3117 				size += seglen / 16;
3118 			}
3119 			break;
3120 
3121 		case MLX4_IB_QPT_PROXY_SMI_OWNER:
3122 			err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
3123 					ctrl, &seglen);
3124 			if (unlikely(err)) {
3125 				*bad_wr = wr;
3126 				goto out;
3127 			}
3128 			wqe  += seglen;
3129 			size += seglen / 16;
3130 			/* to start tunnel header on a cache-line boundary */
3131 			add_zero_len_inline(wqe);
3132 			wqe += 16;
3133 			size++;
3134 			build_tunnel_header(ud_wr(wr), wqe, &seglen);
3135 			wqe  += seglen;
3136 			size += seglen / 16;
3137 			break;
3138 		case MLX4_IB_QPT_PROXY_SMI:
3139 		case MLX4_IB_QPT_PROXY_GSI:
3140 			/* If we are tunneling special qps, this is a UD qp.
3141 			 * In this case we first add a UD segment targeting
3142 			 * the tunnel qp, and then add a header with address
3143 			 * information */
3144 			set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe,
3145 						ud_wr(wr),
3146 						qp->mlx4_ib_qp_type);
3147 			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
3148 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
3149 			build_tunnel_header(ud_wr(wr), wqe, &seglen);
3150 			wqe  += seglen;
3151 			size += seglen / 16;
3152 			break;
3153 
3154 		case MLX4_IB_QPT_SMI:
3155 		case MLX4_IB_QPT_GSI:
3156 			err = build_mlx_header(to_msqp(qp), ud_wr(wr), ctrl,
3157 					&seglen);
3158 			if (unlikely(err)) {
3159 				*bad_wr = wr;
3160 				goto out;
3161 			}
3162 			wqe  += seglen;
3163 			size += seglen / 16;
3164 			break;
3165 
3166 		default:
3167 			break;
3168 		}
3169 
3170 		/*
3171 		 * Write data segments in reverse order, so as to
3172 		 * overwrite cacheline stamp last within each
3173 		 * cacheline.  This avoids issues with WQE
3174 		 * prefetching.
3175 		 */
3176 
3177 		dseg = wqe;
3178 		dseg += wr->num_sge - 1;
3179 		size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);
3180 
3181 		/* Add one more inline data segment for ICRC for MLX sends */
3182 		if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
3183 			     qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI ||
3184 			     qp->mlx4_ib_qp_type &
3185 			     (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) {
3186 			set_mlx_icrc_seg(dseg + 1);
3187 			size += sizeof (struct mlx4_wqe_data_seg) / 16;
3188 		}
3189 
3190 		for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
3191 			set_data_seg(dseg, wr->sg_list + i);
3192 
3193 		/*
3194 		 * Possibly overwrite stamping in cacheline with LSO
3195 		 * segment only after making sure all data segments
3196 		 * are written.
3197 		 */
3198 		wmb();
3199 		*lso_wqe = lso_hdr_sz;
3200 
3201 		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
3202 					     MLX4_WQE_CTRL_FENCE : 0) | size;
3203 
3204 		/*
3205 		 * Make sure descriptor is fully written before
3206 		 * setting ownership bit (because HW can start
3207 		 * executing as soon as we do).
3208 		 */
3209 		wmb();
3210 
3211 		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
3212 			*bad_wr = wr;
3213 			err = -EINVAL;
3214 			goto out;
3215 		}
3216 
3217 		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
3218 			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1U << 31) : 0) | blh;
3219 
3220 		stamp = ind + qp->sq_spare_wqes;
3221 		ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
3222 
3223 		/*
3224 		 * We can improve latency by not stamping the last
3225 		 * send queue WQE until after ringing the doorbell, so
3226 		 * only stamp here if there are still more WQEs to post.
3227 		 *
3228 		 * Same optimization applies to padding with NOP wqe
3229 		 * in case of WQE shrinking (used to prevent wrap-around
3230 		 * in the middle of WR).
3231 		 */
3232 		if (wr->next) {
3233 			stamp_send_wqe(qp, stamp, size * 16);
3234 			ind = pad_wraparound(qp, ind);
3235 		}
3236 	}
3237 
3238 out:
3239 	if (likely(nreq)) {
3240 		qp->sq.head += nreq;
3241 
3242 		/*
3243 		 * Make sure that descriptors are written before
3244 		 * doorbell record.
3245 		 */
3246 		wmb();
3247 
3248 		writel(qp->doorbell_qpn,
3249 		       to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
3250 
3251 		/*
3252 		 * Make sure doorbells don't leak out of SQ spinlock
3253 		 * and reach the HCA out of order.
3254 		 */
3255 		mmiowb();
3256 
3257 		stamp_send_wqe(qp, stamp, size * 16);
3258 
3259 		ind = pad_wraparound(qp, ind);
3260 		qp->sq_next_wqe = ind;
3261 	}
3262 
3263 	spin_unlock_irqrestore(&qp->sq.lock, flags);
3264 
3265 	return err;
3266 }
3267 
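/*
 * Post receive work requests: for proxy special QPs the first scatter
 * entry is redirected to the driver's tunnel-header buffer, remaining
 * SGEs are copied as-is, and the receive doorbell record is updated at
 * the end.
 */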
3268 int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
3269 		      const struct ib_recv_wr **bad_wr)
3270 {
3271 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
3272 	struct mlx4_wqe_data_seg *scat;
3273 	unsigned long flags;
3274 	int err = 0;
3275 	int nreq;
3276 	int ind;
3277 	int max_gs;
3278 	int i;
3279 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
3280 
3281 	max_gs = qp->rq.max_gs;
3282 	spin_lock_irqsave(&qp->rq.lock, flags);
3283 
3284 	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
3285 		err = -EIO;
3286 		*bad_wr = wr;
3287 		nreq = 0;
3288 		goto out;
3289 	}
3290 
3291 	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
3292 
3293 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
3294 		if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
3295 			err = -ENOMEM;
3296 			*bad_wr = wr;
3297 			goto out;
3298 		}
3299 
3300 		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
3301 			err = -EINVAL;
3302 			*bad_wr = wr;
3303 			goto out;
3304 		}
3305 
3306 		scat = get_recv_wqe(qp, ind);
3307 
3308 		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
3309 		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
3310 			ib_dma_sync_single_for_device(ibqp->device,
3311 						      qp->sqp_proxy_rcv[ind].map,
3312 						      sizeof (struct mlx4_ib_proxy_sqp_hdr),
3313 						      DMA_FROM_DEVICE);
3314 			scat->byte_count =
3315 				cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr));
3316 			/* use dma lkey from upper layer entry */
3317 			scat->lkey = cpu_to_be32(wr->sg_list->lkey);
3318 			scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
3319 			scat++;
3320 			max_gs--;
3321 		}
3322 
3323 		for (i = 0; i < wr->num_sge; ++i)
3324 			__set_data_seg(scat + i, wr->sg_list + i);
3325 
3326 		if (i < max_gs) {
3327 			scat[i].byte_count = 0;
3328 			scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
3329 			scat[i].addr       = 0;
3330 		}
3331 
3332 		qp->rq.wrid[ind] = wr->wr_id;
3333 
3334 		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
3335 	}
3336 
3337 out:
3338 	if (likely(nreq)) {
3339 		qp->rq.head += nreq;
3340 
3341 		/*
3342 		 * Make sure that descriptors are written before
3343 		 * doorbell record.
3344 		 */
3345 		wmb();
3346 
3347 		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
3348 	}
3349 
3350 	spin_unlock_irqrestore(&qp->rq.lock, flags);
3351 
3352 	return err;
3353 }
3354 
3355 static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
3356 {
3357 	switch (mlx4_state) {
3358 	case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
3359 	case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
3360 	case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
3361 	case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
3362 	case MLX4_QP_STATE_SQ_DRAINING:
3363 	case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
3364 	case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
3365 	case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
3366 	default:		     return -1;
3367 	}
3368 }
3369 
3370 static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
3371 {
3372 	switch (mlx4_mig_state) {
3373 	case MLX4_QP_PM_ARMED:		return IB_MIG_ARMED;
3374 	case MLX4_QP_PM_REARM:		return IB_MIG_REARM;
3375 	case MLX4_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
3376 	default: return -1;
3377 	}
3378 }
3379 
3380 static int to_ib_qp_access_flags(int mlx4_flags)
3381 {
3382 	int ib_flags = 0;
3383 
3384 	if (mlx4_flags & MLX4_QP_BIT_RRE)
3385 		ib_flags |= IB_ACCESS_REMOTE_READ;
3386 	if (mlx4_flags & MLX4_QP_BIT_RWE)
3387 		ib_flags |= IB_ACCESS_REMOTE_WRITE;
3388 	if (mlx4_flags & MLX4_QP_BIT_RAE)
3389 		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
3390 
3391 	return ib_flags;
3392 }
3393 
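/*
 * Decode a hardware QP path into an ib_ah_attr: port, SL, DLID, source
 * path bits and static rate, plus the full GRH fields when the GRH bit
 * is set in the path.
 */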
3394 static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
3395 				struct mlx4_qp_path *path)
3396 {
3397 	struct mlx4_dev *dev = ibdev->dev;
3398 	int is_eth;
3399 
3400 	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
3401 	ib_ah_attr->port_num	  = path->sched_queue & 0x40 ? 2 : 1;
3402 
3403 	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
3404 		return;
3405 
3406 	is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
3407 		IB_LINK_LAYER_ETHERNET;
3408 	if (is_eth)
3409 		ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
3410 		((path->sched_queue & 4) << 1);
3411 	else
3412 		ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;
3413 
3414 	ib_ah_attr->dlid	  = be16_to_cpu(path->rlid);
3415 	ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
3416 	ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
3417 	ib_ah_attr->ah_flags      = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
3418 	if (ib_ah_attr->ah_flags) {
3419 		ib_ah_attr->grh.sgid_index = path->mgid_index;
3420 		ib_ah_attr->grh.hop_limit  = path->hop_limit;
3421 		ib_ah_attr->grh.traffic_class =
3422 			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
3423 		ib_ah_attr->grh.flow_label =
3424 			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
3425 		memcpy(ib_ah_attr->grh.dgid.raw,
3426 			path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
3427 	}
3428 }
3429 
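/*
 * Query QP attributes.  A QP in RESET needs no firmware query; otherwise
 * the QP context is read from the device and decoded into ib_qp_attr and
 * ib_qp_init_attr.  Send queue sizes are reported only for kernel QPs,
 * since userspace owns its own send queue layout.
 */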
3430 int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
3431 		     struct ib_qp_init_attr *qp_init_attr)
3432 {
3433 	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
3434 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
3435 	struct mlx4_qp_context context;
3436 	int mlx4_state;
3437 	int err = 0;
3438 
3439 	mutex_lock(&qp->mutex);
3440 
3441 	if (qp->state == IB_QPS_RESET) {
3442 		qp_attr->qp_state = IB_QPS_RESET;
3443 		goto done;
3444 	}
3445 
3446 	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
3447 	if (err) {
3448 		err = -EINVAL;
3449 		goto out;
3450 	}
3451 
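	/* The hardware QP state lives in the top four bits of the flags word. */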
3452 	mlx4_state = be32_to_cpu(context.flags) >> 28;
3453 
3454 	qp->state		     = to_ib_qp_state(mlx4_state);
3455 	qp_attr->qp_state	     = qp->state;
3456 	qp_attr->path_mtu	     = context.mtu_msgmax >> 5;
3457 	qp_attr->path_mig_state	     =
3458 		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
3459 	qp_attr->qkey		     = be32_to_cpu(context.qkey);
3460 	qp_attr->rq_psn		     = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
3461 	qp_attr->sq_psn		     = be32_to_cpu(context.next_send_psn) & 0xffffff;
3462 	qp_attr->dest_qp_num	     = be32_to_cpu(context.remote_qpn) & 0xffffff;
3463 	qp_attr->qp_access_flags     =
3464 		to_ib_qp_access_flags(be32_to_cpu(context.params2));
3465 
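	/* Address vectors are only meaningful for connected QP types. */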
3466 	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
3467 		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
3468 		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
3469 		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
3470 		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
3471 	}
3472 
3473 	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
3474 	if (qp_attr->qp_state == IB_QPS_INIT)
3475 		qp_attr->port_num = qp->port;
3476 	else
3477 		qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;
3478 
3479 	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
3480 	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;
3481 
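	/* The RDMA atomic depths are stored as log2 values in params1/params2. */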
3482 	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);
3483 
3484 	qp_attr->max_dest_rd_atomic =
3485 		1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
3486 	qp_attr->min_rnr_timer	    =
3487 		(be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
3488 	qp_attr->timeout	    = context.pri_path.ackto >> 3;
3489 	qp_attr->retry_cnt	    = (be32_to_cpu(context.params1) >> 16) & 0x7;
3490 	qp_attr->rnr_retry	    = (be32_to_cpu(context.params1) >> 13) & 0x7;
3491 	qp_attr->alt_timeout	    = context.alt_path.ackto >> 3;
3492 
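/* Attributes below are reported regardless of the current QP state. */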
3493 done:
3494 	qp_attr->cur_qp_state	     = qp_attr->qp_state;
3495 	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
3496 	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
3497 
3498 	if (!ibqp->uobject) {
3499 		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
3500 		qp_attr->cap.max_send_sge = qp->sq.max_gs;
3501 	} else {
3502 		qp_attr->cap.max_send_wr  = 0;
3503 		qp_attr->cap.max_send_sge = 0;
3504 	}
3505 
3506 	/*
3507 	 * We don't support inline sends for kernel QPs (yet), and we
3508 	 * don't know what userspace's value should be.
3509 	 */
3510 	qp_attr->cap.max_inline_data = 0;
3511 
3512 	qp_init_attr->cap	     = qp_attr->cap;
3513 
3514 	qp_init_attr->create_flags = 0;
3515 	if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
3516 		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
3517 
3518 	if (qp->flags & MLX4_IB_QP_LSO)
3519 		qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
3520 
3521 	if (qp->flags & MLX4_IB_QP_NETIF)
3522 		qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP;
3523 
3524 	qp_init_attr->sq_sig_type =
3525 		qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
3526 		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
3527 
3528 out:
3529 	mutex_unlock(&qp->mutex);
3530 	return err;
3531 }
3532 
3533