xref: /freebsd/sys/dev/mlx4/mlx4_ib/mlx4_ib_qp.c (revision 4f52dfbb)
1 /*
2  * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/log2.h>
35 #include <linux/slab.h>
36 #include <linux/netdevice.h>
37 #include <linux/bitops.h>
38 #include <linux/rcupdate.h>
39 #include <linux/etherdevice.h>
40 
41 #include <rdma/ib_cache.h>
42 #include <rdma/ib_pack.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_mad.h>
45 
46 #include <dev/mlx4/cmd.h>
47 #include <dev/mlx4/qp.h>
48 #include <dev/mlx4/driver.h>
49 #include <linux/io.h>
50 
51 #include "mlx4_ib.h"
52 #include <rdma/mlx4-abi.h>
53 
54 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
55 			     struct mlx4_ib_cq *recv_cq);
56 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
57 			       struct mlx4_ib_cq *recv_cq);
58 
59 enum {
60 	MLX4_IB_ACK_REQ_FREQ	= 8,
61 };
62 
63 enum {
64 	MLX4_IB_DEFAULT_SCHED_QUEUE	= 0x83,
65 	MLX4_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
66 	MLX4_IB_LINK_TYPE_IB		= 0,
67 	MLX4_IB_LINK_TYPE_ETH		= 1
68 };
69 
70 enum {
71 	/*
72 	 * Largest possible UD header: send with GRH and immediate
73 	 * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
74 	 * tag.  (LRH would only use 8 bytes, so Ethernet is the
75 	 * biggest case)
76 	 */
77 	MLX4_IB_UD_HEADER_SIZE		= 82,
78 	MLX4_IB_LSO_HEADER_SPARE	= 128,
79 };
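
/*
 * Illustrative breakdown of the 82-byte worst case above, assuming
 * the usual wire header sizes: 18 (Ethernet + VLAN) + 40 (GRH) +
 * 12 (BTH) + 8 (DETH) + 4 (immediate data) = 82 bytes.
 */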
80 
81 enum {
82 	MLX4_IB_IBOE_ETHERTYPE		= 0x8915
83 };
84 
85 struct mlx4_ib_sqp {
86 	struct mlx4_ib_qp	qp;
87 	int			pkey_index;
88 	u32			qkey;
89 	u32			send_psn;
90 	struct ib_ud_header	ud_header;
91 	u8			header_buf[MLX4_IB_UD_HEADER_SIZE];
92 	struct ib_qp		*roce_v2_gsi;
93 };
94 
95 enum {
96 	MLX4_IB_MIN_SQ_STRIDE	= 6,
97 	MLX4_IB_CACHE_LINE_SIZE	= 64,
98 };
99 
100 enum {
101 	MLX4_RAW_QP_MTU		= 7,
102 	MLX4_RAW_QP_MSGMAX	= 31,
103 };
104 
105 #ifndef ETH_ALEN
106 #define ETH_ALEN        6
107 #endif
108 
109 static const __be32 mlx4_ib_opcode[] = {
110 	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
111 	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
112 	[IB_WR_SEND_WITH_IMM]			= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
113 	[IB_WR_RDMA_WRITE]			= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
114 	[IB_WR_RDMA_WRITE_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
115 	[IB_WR_RDMA_READ]			= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
116 	[IB_WR_ATOMIC_CMP_AND_SWP]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
117 	[IB_WR_ATOMIC_FETCH_AND_ADD]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
118 	[IB_WR_SEND_WITH_INV]			= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
119 	[IB_WR_LOCAL_INV]			= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
120 	[IB_WR_REG_MR]				= cpu_to_be32(MLX4_OPCODE_FMR),
121 	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
122 	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
123 };
124 
125 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
126 {
127 	return container_of(mqp, struct mlx4_ib_sqp, qp);
128 }
129 
130 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
131 {
132 	if (!mlx4_is_master(dev->dev))
133 		return 0;
134 
135 	return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
136 	       qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
137 		8 * MLX4_MFUNC_MAX;
138 }
139 
140 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
141 {
142 	int proxy_sqp = 0;
143 	int real_sqp = 0;
144 	int i;
145 	/* PPF or Native -- real SQP */
146 	real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
147 		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
148 		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
149 	if (real_sqp)
150 		return 1;
151 	/* VF or PF -- proxy SQP */
152 	if (mlx4_is_mfunc(dev->dev)) {
153 		for (i = 0; i < dev->dev->caps.num_ports; i++) {
154 			if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] ||
155 			    qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) {
156 				proxy_sqp = 1;
157 				break;
158 			}
159 		}
160 	}
161 	if (proxy_sqp)
162 		return 1;
163 
164 	return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP);
165 }
166 
167 /* used for INIT/CLOSE port logic */
168 static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
169 {
170 	int proxy_qp0 = 0;
171 	int real_qp0 = 0;
172 	int i;
173 	/* PPF or Native -- real QP0 */
174 	real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
175 		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
176 		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
177 	if (real_qp0)
178 		return 1;
179 	/* VF or PF -- proxy QP0 */
180 	if (mlx4_is_mfunc(dev->dev)) {
181 		for (i = 0; i < dev->dev->caps.num_ports; i++) {
182 			if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) {
183 				proxy_qp0 = 1;
184 				break;
185 			}
186 		}
187 	}
188 	return proxy_qp0;
189 }
190 
191 static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
192 {
193 	return mlx4_buf_offset(&qp->buf, offset);
194 }
195 
196 static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
197 {
198 	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
199 }
200 
201 static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
202 {
203 	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
204 }
205 
206 /*
207  * Stamp an SQ WQE so that it is invalid if prefetched by marking the
208  * first four bytes of every 64-byte chunk with
209  *     0x7FFFFFFF | (invalid_ownership_value << 31).
210  *
211  * When the max work request size is less than or equal to the WQE
212  * basic block size, as an optimization, we can stamp all WQEs with
213  * 0xffffffff, and skip the very first chunk of each WQE.
214  */
215 static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
216 {
217 	__be32 *wqe;
218 	int i;
219 	int s;
220 	int ind;
221 	void *buf;
222 	__be32 stamp;
223 	struct mlx4_wqe_ctrl_seg *ctrl;
224 
225 	if (qp->sq_max_wqes_per_wr > 1) {
226 		s = roundup(size, 1U << qp->sq.wqe_shift);
227 		for (i = 0; i < s; i += 64) {
228 			ind = (i >> qp->sq.wqe_shift) + n;
229 			stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
230 						       cpu_to_be32(0xffffffff);
231 			buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
232 			wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
233 			*wqe = stamp;
234 		}
235 	} else {
236 		ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
237 		s = (ctrl->fence_size & 0x3f) << 4;
238 		for (i = 64; i < s; i += 64) {
239 			wqe = buf + i;
240 			*wqe = cpu_to_be32(0xffffffff);
241 		}
242 	}
243 }
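
/*
 * Illustrative walk-through of stamp_send_wqe() above (numbers are
 * assumed, not taken from a real run): with wqe_shift = 6 and a
 * 192-byte WR starting at index n, the multi-WQE branch stamps the
 * first dword of the 64-byte chunks at offsets 0, 64 and 128.  The
 * stamp is 0x7fffffff or 0xffffffff depending on (ind & wqe_cnt),
 * i.e. on the ownership generation of the slot, so a prefetched WQE
 * never carries a valid ownership bit for the current pass.
 */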
244 
245 static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
246 {
247 	struct mlx4_wqe_ctrl_seg *ctrl;
248 	struct mlx4_wqe_inline_seg *inl;
249 	void *wqe;
250 	int s;
251 
252 	ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
253 	s = sizeof(struct mlx4_wqe_ctrl_seg);
254 
255 	if (qp->ibqp.qp_type == IB_QPT_UD) {
256 		struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
257 		struct mlx4_av *av = (struct mlx4_av *)dgram->av;
258 		memset(dgram, 0, sizeof *dgram);
259 		av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
260 		s += sizeof(struct mlx4_wqe_datagram_seg);
261 	}
262 
263 	/* Pad the remainder of the WQE with an inline data segment. */
264 	if (size > s) {
265 		inl = wqe + s;
266 		inl->byte_count = cpu_to_be32(1U << 31 | (size - s - sizeof *inl));
267 	}
268 	ctrl->srcrb_flags = 0;
269 	ctrl->fence_size = size / 16;
270 	/*
271 	 * Make sure descriptor is fully written before setting ownership bit
272 	 * (because HW can start executing as soon as we do).
273 	 */
274 	wmb();
275 
276 	ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
277 		(n & qp->sq.wqe_cnt ? cpu_to_be32(1U << 31) : 0);
278 
279 	stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
280 }
281 
282 /* Post a NOP WQE to prevent wrap-around in the middle of a WR */
283 static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
284 {
285 	unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
286 	if (unlikely(s < qp->sq_max_wqes_per_wr)) {
287 		post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
288 		ind += s;
289 	}
290 	return ind;
291 }
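
/*
 * Example for pad_wraparound() above, with assumed numbers: if
 * wqe_cnt = 256, sq_max_wqes_per_wr = 4 and ind = 254, then s = 2
 * slots remain before the end of the ring.  Since 2 < 4, a 2-slot
 * NOP (2 << wqe_shift bytes) is posted and ind advances to 256, so
 * the next WR starts cleanly at slot 0.
 */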
292 
293 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
294 {
295 	struct ib_event event;
296 	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
297 
298 	if (type == MLX4_EVENT_TYPE_PATH_MIG)
299 		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
300 
301 	if (ibqp->event_handler) {
302 		event.device     = ibqp->device;
303 		event.element.qp = ibqp;
304 		switch (type) {
305 		case MLX4_EVENT_TYPE_PATH_MIG:
306 			event.event = IB_EVENT_PATH_MIG;
307 			break;
308 		case MLX4_EVENT_TYPE_COMM_EST:
309 			event.event = IB_EVENT_COMM_EST;
310 			break;
311 		case MLX4_EVENT_TYPE_SQ_DRAINED:
312 			event.event = IB_EVENT_SQ_DRAINED;
313 			break;
314 		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
315 			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
316 			break;
317 		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
318 			event.event = IB_EVENT_QP_FATAL;
319 			break;
320 		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
321 			event.event = IB_EVENT_PATH_MIG_ERR;
322 			break;
323 		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
324 			event.event = IB_EVENT_QP_REQ_ERR;
325 			break;
326 		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
327 			event.event = IB_EVENT_QP_ACCESS_ERR;
328 			break;
329 		default:
330 			pr_warn("Unexpected event type %d on QP %06x\n",
331 				type, qp->qpn);
332 			return;
333 		}
334 
335 		ibqp->event_handler(&event, ibqp->qp_context);
336 	}
337 }
338 
339 static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
340 {
341 	/*
342 	 * UD WQEs must have a datagram segment.
343 	 * RC and UC WQEs might have a remote address segment.
344 	 * MLX WQEs need two extra inline data segments (for the UD
345 	 * header and space for the ICRC).
346 	 */
347 	switch (type) {
348 	case MLX4_IB_QPT_UD:
349 		return sizeof (struct mlx4_wqe_ctrl_seg) +
350 			sizeof (struct mlx4_wqe_datagram_seg) +
351 			((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
352 	case MLX4_IB_QPT_PROXY_SMI_OWNER:
353 	case MLX4_IB_QPT_PROXY_SMI:
354 	case MLX4_IB_QPT_PROXY_GSI:
355 		return sizeof (struct mlx4_wqe_ctrl_seg) +
356 			sizeof (struct mlx4_wqe_datagram_seg) + 64;
357 	case MLX4_IB_QPT_TUN_SMI_OWNER:
358 	case MLX4_IB_QPT_TUN_GSI:
359 		return sizeof (struct mlx4_wqe_ctrl_seg) +
360 			sizeof (struct mlx4_wqe_datagram_seg);
361 
362 	case MLX4_IB_QPT_UC:
363 		return sizeof (struct mlx4_wqe_ctrl_seg) +
364 			sizeof (struct mlx4_wqe_raddr_seg);
365 	case MLX4_IB_QPT_RC:
366 		return sizeof (struct mlx4_wqe_ctrl_seg) +
367 			sizeof (struct mlx4_wqe_masked_atomic_seg) +
368 			sizeof (struct mlx4_wqe_raddr_seg);
369 	case MLX4_IB_QPT_SMI:
370 	case MLX4_IB_QPT_GSI:
371 		return sizeof (struct mlx4_wqe_ctrl_seg) +
372 			ALIGN(MLX4_IB_UD_HEADER_SIZE +
373 			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
374 					   MLX4_INLINE_ALIGN) *
375 			      sizeof (struct mlx4_wqe_inline_seg),
376 			      sizeof (struct mlx4_wqe_data_seg)) +
377 			ALIGN(4 +
378 			      sizeof (struct mlx4_wqe_inline_seg),
379 			      sizeof (struct mlx4_wqe_data_seg));
380 	default:
381 		return sizeof (struct mlx4_wqe_ctrl_seg);
382 	}
383 }
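
/*
 * Rough byte math for the UD case of send_wqe_overhead() above,
 * assuming a 16-byte control segment and a 48-byte datagram segment:
 * a plain UD WQE carries 64 bytes of overhead before any data
 * segments, plus MLX4_IB_LSO_HEADER_SPARE (128) when the QP has LSO
 * enabled.
 */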
384 
385 static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
386 		       int is_user, int has_rq, struct mlx4_ib_qp *qp)
387 {
388 	/* Sanity check RQ size before proceeding */
389 	if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
390 	    cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
391 		return -EINVAL;
392 
393 	if (!has_rq) {
394 		if (cap->max_recv_wr)
395 			return -EINVAL;
396 
397 		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
398 	} else {
399 		/* HW requires >= 1 RQ entry with >= 1 gather entry */
400 		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
401 			return -EINVAL;
402 
403 		qp->rq.wqe_cnt	 = roundup_pow_of_two(max(1U, cap->max_recv_wr));
404 		qp->rq.max_gs	 = roundup_pow_of_two(max(1U, cap->max_recv_sge));
405 		qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
406 	}
407 
408 	/* leave userspace return values as they were, so as not to break ABI */
409 	if (is_user) {
410 		cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
411 		cap->max_recv_sge = qp->rq.max_gs;
412 	} else {
413 		cap->max_recv_wr  = qp->rq.max_post =
414 			min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
415 		cap->max_recv_sge = min(qp->rq.max_gs,
416 					min(dev->dev->caps.max_sq_sg,
417 					    dev->dev->caps.max_rq_sg));
418 	}
419 
420 	return 0;
421 }
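
/*
 * Worked example for set_rq_size() above (input values assumed):
 * max_recv_wr = 100 and max_recv_sge = 3 become wqe_cnt = 128 and
 * max_gs = 4 after rounding up to powers of two; with a 16-byte
 * mlx4_wqe_data_seg this gives wqe_shift = ilog2(4 * 16) = 6,
 * i.e. 64-byte receive WQEs.
 */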
422 
423 static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
424 			      enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp,
425 			      bool shrink_wqe)
426 {
427 	int s;
428 
429 	/* Sanity check SQ size before proceeding */
430 	if (cap->max_send_wr  > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
431 	    cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
432 	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
433 	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
434 		return -EINVAL;
435 
436 	/*
437 	 * For MLX transport we need 2 extra S/G entries:
438 	 * one for the header and one for the checksum at the end
439 	 */
440 	if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI ||
441 	     type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) &&
442 	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
443 		return -EINVAL;
444 
445 	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
446 		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
447 		send_wqe_overhead(type, qp->flags);
448 
449 	if (s > dev->dev->caps.max_sq_desc_sz)
450 		return -EINVAL;
451 
452 	/*
453 	 * Hermon supports shrinking WQEs, such that a single work
454 	 * request can include multiple units of 1 << wqe_shift.  This
455 	 * way, work requests can differ in size, and do not have to
456 	 * be a power of 2 in size, saving memory and speeding up send
457 	 * WR posting.  Unfortunately, if we do this then the
458 	 * wqe_index field in CQEs can't be used to look up the WR ID
459 	 * anymore, so we do this only if selective signaling is off.
460 	 *
461 	 * Further, on 32-bit platforms, we can't use vmap() to make
462 	 * the QP buffer virtually contiguous.  Thus we have to use
463 	 * constant-sized WRs to make sure a WR is always fully within
464 	 * a single page-sized chunk.
465 	 *
466 	 * Finally, we use NOP work requests to pad the end of the
467 	 * work queue, to avoid wrap-around in the middle of a WR.  We
468 	 * set NEC bit to avoid getting completions with error for
469 	 * these NOP WRs, but since NEC is only supported starting
470 	 * with firmware 2.2.232, we use constant-sized WRs for older
471 	 * firmware.
472 	 *
473 	 * And, since MLX QPs only support SEND, we use constant-sized
474 	 * WRs in this case.
475 	 *
476 	 * We look for the smallest value of wqe_shift such that the
477 	 * resulting number of wqes does not exceed device
478 	 * capabilities.
479 	 *
480 	 * We set WQE size to at least 64 bytes, this way stamping
481 	 * invalidates each WQE.
482 	 */
483 	if (shrink_wqe && dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
484 	    qp->sq_signal_bits && BITS_PER_LONG == 64 &&
485 	    type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
486 	    !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
487 		      MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER)))
488 		qp->sq.wqe_shift = ilog2(64);
489 	else
490 		qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
491 
492 	for (;;) {
493 		qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);
494 
495 		/*
496 		 * We need to leave 2 KB + 1 WR of headroom in the SQ to
497 		 * allow HW to prefetch.
498 		 */
499 		qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
500 		qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
501 						    qp->sq_max_wqes_per_wr +
502 						    qp->sq_spare_wqes);
503 
504 		if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
505 			break;
506 
507 		if (qp->sq_max_wqes_per_wr <= 1)
508 			return -EINVAL;
509 
510 		++qp->sq.wqe_shift;
511 	}
512 
513 	qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
514 			     (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
515 			 send_wqe_overhead(type, qp->flags)) /
516 		sizeof (struct mlx4_wqe_data_seg);
517 
518 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
519 		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
520 	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
521 		qp->rq.offset = 0;
522 		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
523 	} else {
524 		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
525 		qp->sq.offset = 0;
526 	}
527 
528 	cap->max_send_wr  = qp->sq.max_post =
529 		(qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
530 	cap->max_send_sge = min(qp->sq.max_gs,
531 				min(dev->dev->caps.max_sq_sg,
532 				    dev->dev->caps.max_rq_sg));
533 	/* We don't support inline sends for kernel QPs (yet) */
534 	cap->max_inline_data = 0;
535 
536 	return 0;
537 }
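
/*
 * Worked example of the sizing loop in set_kernel_sq_size() above
 * (numbers assumed): for a WR size s = 200 on the shrinking path,
 * wqe_shift starts at ilog2(64) = 6, so sq_max_wqes_per_wr =
 * DIV_ROUND_UP(200, 64) = 4 and sq_spare_wqes = (2048 >> 6) + 4 = 36.
 * With max_send_wr = 500 this requests roundup_pow_of_two(500 * 4 +
 * 36) = 2048 WQEs; if that exceeded max_wqes, wqe_shift would be
 * bumped to 7 and the loop would retry with larger but fewer basic
 * blocks.
 */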
538 
539 static int set_user_sq_size(struct mlx4_ib_dev *dev,
540 			    struct mlx4_ib_qp *qp,
541 			    struct mlx4_ib_create_qp *ucmd)
542 {
543 	/* Sanity check SQ size before proceeding */
544 	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes	 ||
545 	    ucmd->log_sq_stride >
546 		ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
547 	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
548 		return -EINVAL;
549 
550 	qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
551 	qp->sq.wqe_shift = ucmd->log_sq_stride;
552 
553 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
554 		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
555 
556 	return 0;
557 }
558 
559 static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
560 {
561 	int i;
562 
563 	qp->sqp_proxy_rcv =
564 		kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt,
565 			GFP_KERNEL);
566 	if (!qp->sqp_proxy_rcv)
567 		return -ENOMEM;
568 	for (i = 0; i < qp->rq.wqe_cnt; i++) {
569 		qp->sqp_proxy_rcv[i].addr =
570 			kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr),
571 				GFP_KERNEL);
572 		if (!qp->sqp_proxy_rcv[i].addr)
573 			goto err;
574 		qp->sqp_proxy_rcv[i].map =
575 			ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
576 					  sizeof (struct mlx4_ib_proxy_sqp_hdr),
577 					  DMA_FROM_DEVICE);
578 		if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
579 			kfree(qp->sqp_proxy_rcv[i].addr);
580 			goto err;
581 		}
582 	}
583 	return 0;
584 
585 err:
586 	while (i > 0) {
587 		--i;
588 		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
589 				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
590 				    DMA_FROM_DEVICE);
591 		kfree(qp->sqp_proxy_rcv[i].addr);
592 	}
593 	kfree(qp->sqp_proxy_rcv);
594 	qp->sqp_proxy_rcv = NULL;
595 	return -ENOMEM;
596 }
597 
598 static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
599 {
600 	int i;
601 
602 	for (i = 0; i < qp->rq.wqe_cnt; i++) {
603 		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
604 				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
605 				    DMA_FROM_DEVICE);
606 		kfree(qp->sqp_proxy_rcv[i].addr);
607 	}
608 	kfree(qp->sqp_proxy_rcv);
609 }
610 
611 static int qp_has_rq(struct ib_qp_init_attr *attr)
612 {
613 	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
614 		return 0;
615 
616 	return !attr->srq;
617 }
618 
619 static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
620 {
621 	int i;
622 	for (i = 0; i < dev->caps.num_ports; i++) {
623 		if (qpn == dev->caps.qp0_proxy[i])
624 			return !!dev->caps.qp0_qkey[i];
625 	}
626 	return 0;
627 }
628 
629 static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
630 				    struct mlx4_ib_qp *qp)
631 {
632 	mutex_lock(&dev->counters_table[qp->port - 1].mutex);
633 	mlx4_counter_free(dev->dev, qp->counter_index->index);
634 	list_del(&qp->counter_index->list);
635 	mutex_unlock(&dev->counters_table[qp->port - 1].mutex);
636 
637 	kfree(qp->counter_index);
638 	qp->counter_index = NULL;
639 }
640 
641 static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
642 			    struct ib_qp_init_attr *init_attr,
643 			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp,
644 			    gfp_t gfp)
645 {
646 	int qpn;
647 	int err;
648 	struct ib_qp_cap backup_cap;
649 	struct mlx4_ib_sqp *sqp;
650 	struct mlx4_ib_qp *qp;
651 	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
652 	struct mlx4_ib_cq *mcq;
653 	unsigned long flags;
654 
655 	/* When tunneling special qps, we use a plain UD qp */
656 	if (sqpn) {
657 		if (mlx4_is_mfunc(dev->dev) &&
658 		    (!mlx4_is_master(dev->dev) ||
659 		     !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) {
660 			if (init_attr->qp_type == IB_QPT_GSI)
661 				qp_type = MLX4_IB_QPT_PROXY_GSI;
662 			else {
663 				if (mlx4_is_master(dev->dev) ||
664 				    qp0_enabled_vf(dev->dev, sqpn))
665 					qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
666 				else
667 					qp_type = MLX4_IB_QPT_PROXY_SMI;
668 			}
669 		}
670 		qpn = sqpn;
671 		/* add extra sg entry for tunneling */
672 		init_attr->cap.max_recv_sge++;
673 	} else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) {
674 		struct mlx4_ib_qp_tunnel_init_attr *tnl_init =
675 			container_of(init_attr,
676 				     struct mlx4_ib_qp_tunnel_init_attr, init_attr);
677 		if ((tnl_init->proxy_qp_type != IB_QPT_SMI &&
678 		     tnl_init->proxy_qp_type != IB_QPT_GSI)   ||
679 		    !mlx4_is_master(dev->dev))
680 			return -EINVAL;
681 		if (tnl_init->proxy_qp_type == IB_QPT_GSI)
682 			qp_type = MLX4_IB_QPT_TUN_GSI;
683 		else if (tnl_init->slave == mlx4_master_func_num(dev->dev) ||
684 			 mlx4_vf_smi_enabled(dev->dev, tnl_init->slave,
685 					     tnl_init->port))
686 			qp_type = MLX4_IB_QPT_TUN_SMI_OWNER;
687 		else
688 			qp_type = MLX4_IB_QPT_TUN_SMI;
689 		/* we are definitely in the PPF here, since we are creating
690 		 * tunnel QPs. base_tunnel_sqpn is therefore valid. */
691 		qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave
692 			+ tnl_init->proxy_qp_type * 2 + tnl_init->port - 1;
693 		sqpn = qpn;
694 	}
695 
696 	if (!*caller_qp) {
697 		if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
698 		    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
699 				MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
700 			sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp);
701 			if (!sqp)
702 				return -ENOMEM;
703 			qp = &sqp->qp;
704 			qp->pri.vid = 0xFFFF;
705 			qp->alt.vid = 0xFFFF;
706 		} else {
707 			qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp);
708 			if (!qp)
709 				return -ENOMEM;
710 			qp->pri.vid = 0xFFFF;
711 			qp->alt.vid = 0xFFFF;
712 		}
713 	} else
714 		qp = *caller_qp;
715 
716 	qp->mlx4_ib_qp_type = qp_type;
717 
718 	mutex_init(&qp->mutex);
719 	spin_lock_init(&qp->sq.lock);
720 	spin_lock_init(&qp->rq.lock);
721 	INIT_LIST_HEAD(&qp->gid_list);
722 	INIT_LIST_HEAD(&qp->steering_rules);
723 
724 	qp->state	 = IB_QPS_RESET;
725 	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
726 		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
727 
728 	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp);
729 	if (err)
730 		goto err;
731 
732 	if (pd->uobject) {
733 		struct mlx4_ib_create_qp ucmd;
734 
735 		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
736 			err = -EFAULT;
737 			goto err;
738 		}
739 
740 		qp->sq_no_prefetch = ucmd.sq_no_prefetch;
741 
742 		err = set_user_sq_size(dev, qp, &ucmd);
743 		if (err)
744 			goto err;
745 
746 		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
747 				       qp->buf_size, 0, 0);
748 		if (IS_ERR(qp->umem)) {
749 			err = PTR_ERR(qp->umem);
750 			goto err;
751 		}
752 
753 		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
754 				    ilog2(qp->umem->page_size), &qp->mtt);
755 		if (err)
756 			goto err_buf;
757 
758 		err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
759 		if (err)
760 			goto err_mtt;
761 
762 		if (qp_has_rq(init_attr)) {
763 			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
764 						  ucmd.db_addr, &qp->db);
765 			if (err)
766 				goto err_mtt;
767 		}
768 	} else {
769 		qp->sq_no_prefetch = 0;
770 
771 		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
772 			qp->flags |= MLX4_IB_QP_LSO;
773 
774 		if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
775 			if (dev->steering_support ==
776 			    MLX4_STEERING_MODE_DEVICE_MANAGED)
777 				qp->flags |= MLX4_IB_QP_NETIF;
778 			else
779 				goto err;
780 		}
781 
782 		memcpy(&backup_cap, &init_attr->cap, sizeof(backup_cap));
783 		err = set_kernel_sq_size(dev, &init_attr->cap,
784 					 qp_type, qp, true);
785 		if (err)
786 			goto err;
787 
788 		if (qp_has_rq(init_attr)) {
789 			err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp);
790 			if (err)
791 				goto err;
792 
793 			*qp->db.db = 0;
794 		}
795 
796 		if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
797 				   &qp->buf, gfp)) {
798 			memcpy(&init_attr->cap, &backup_cap,
799 			       sizeof(backup_cap));
800 			err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
801 						 qp, false);
802 			if (err)
803 				goto err_db;
804 
805 			if (mlx4_buf_alloc(dev->dev, qp->buf_size,
806 					   PAGE_SIZE * 2, &qp->buf, gfp)) {
807 				err = -ENOMEM;
808 				goto err_db;
809 			}
810 		}
811 
812 		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
813 				    &qp->mtt);
814 		if (err)
815 			goto err_buf;
816 
817 		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp);
818 		if (err)
819 			goto err_mtt;
820 
821 		qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(u64),
822 					gfp | __GFP_NOWARN);
823 		if (!qp->sq.wrid)
824 			qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
825 						gfp, 0 /*PAGE_KERNEL*/);
826 		qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(u64),
827 					gfp | __GFP_NOWARN);
828 		if (!qp->rq.wrid)
829 			qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
830 						gfp, 0 /*PAGE_KERNEL*/);
831 		if (!qp->sq.wrid || !qp->rq.wrid) {
832 			err = -ENOMEM;
833 			goto err_wrid;
834 		}
835 	}
836 
837 	if (sqpn) {
838 		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
839 		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
840 			if (alloc_proxy_bufs(pd->device, qp)) {
841 				err = -ENOMEM;
842 				goto err_wrid;
843 			}
844 		}
845 	} else {
846 		/* Raw packet QPNs may not have bits 6,7 set in their qp_num;
847 		 * otherwise, the WQE BlueFlame setup flow wrongly causes
848 		 * VLAN insertion. */
849 		if (init_attr->qp_type == IB_QPT_RAW_PACKET)
850 			err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn,
851 						    (init_attr->cap.max_send_wr ?
852 						     MLX4_RESERVE_ETH_BF_QP : 0) |
853 						    (init_attr->cap.max_recv_wr ?
854 						     MLX4_RESERVE_A0_QP : 0));
855 		else
856 			if (qp->flags & MLX4_IB_QP_NETIF)
857 				err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
858 			else
859 				err = mlx4_qp_reserve_range(dev->dev, 1, 1,
860 							    &qpn, 0);
861 		if (err)
862 			goto err_proxy;
863 	}
864 
865 	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
866 		qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
867 
868 	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp);
869 	if (err)
870 		goto err_qpn;
871 
872 	if (init_attr->qp_type == IB_QPT_XRC_TGT)
873 		qp->mqp.qpn |= (1 << 23);
874 
875 	/*
876 	 * Hardware wants QPN written in big-endian order (after
877 	 * shifting) for send doorbell.  Precompute this value to save
878 	 * a little work when posting sends.
879 	 */
880 	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
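	/*
	 * E.g. (qpn assumed): qpn = 0x000200 shifted left is 0x020000;
	 * swab32() of that is 0x00000200, which a little-endian CPU
	 * stores as the bytes 00 02 00 00 -- exactly the big-endian
	 * encoding of qpn << 8 that the doorbell register expects.
	 */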
881 
882 	qp->mqp.event = mlx4_ib_qp_event;
883 	if (!*caller_qp)
884 		*caller_qp = qp;
885 
886 	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
887 	mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
888 			 to_mcq(init_attr->recv_cq));
889 	/* Keep the QP on the device-wide list so the reset flow can
890 	 * reach every QP later.
891 	 */
892 	list_add_tail(&qp->qps_list, &dev->qp_list);
893 	/* Likewise keep the QP on its send and receive CQs' lists so
894 	 * the reset flow can reach it through either CQ.
895 	 */
896 	mcq = to_mcq(init_attr->send_cq);
897 	list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
898 	mcq = to_mcq(init_attr->recv_cq);
899 	list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
900 	mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
901 			   to_mcq(init_attr->recv_cq));
902 	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
903 	return 0;
904 
905 err_qpn:
906 	if (!sqpn) {
907 		if (qp->flags & MLX4_IB_QP_NETIF)
908 			mlx4_ib_steer_qp_free(dev, qpn, 1);
909 		else
910 			mlx4_qp_release_range(dev->dev, qpn, 1);
911 	}
912 err_proxy:
913 	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
914 		free_proxy_bufs(pd->device, qp);
915 err_wrid:
916 	if (pd->uobject) {
917 		if (qp_has_rq(init_attr))
918 			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
919 	} else {
920 		kvfree(qp->sq.wrid);
921 		kvfree(qp->rq.wrid);
922 	}
923 
924 err_mtt:
925 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
926 
927 err_buf:
928 	if (pd->uobject)
929 		ib_umem_release(qp->umem);
930 	else
931 		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
932 
933 err_db:
934 	if (!pd->uobject && qp_has_rq(init_attr))
935 		mlx4_db_free(dev->dev, &qp->db);
936 
937 err:
938 	if (!*caller_qp)
939 		kfree(qp);
940 	return err;
941 }
942 
943 static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
944 {
945 	switch (state) {
946 	case IB_QPS_RESET:	return MLX4_QP_STATE_RST;
947 	case IB_QPS_INIT:	return MLX4_QP_STATE_INIT;
948 	case IB_QPS_RTR:	return MLX4_QP_STATE_RTR;
949 	case IB_QPS_RTS:	return MLX4_QP_STATE_RTS;
950 	case IB_QPS_SQD:	return MLX4_QP_STATE_SQD;
951 	case IB_QPS_SQE:	return MLX4_QP_STATE_SQER;
952 	case IB_QPS_ERR:	return MLX4_QP_STATE_ERR;
953 	default:		return -1;
954 	}
955 }
956 
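/*
 * Lock a QP's send and receive CQs in a consistent order: the CQ with
 * the lower CQN is always taken first, so two threads locking the
 * same pair from different QPs cannot deadlock (e.g. with CQNs 5 and
 * 9, both threads take 5 and then 9, never 9 and then 5).
 */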
957 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
958 	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
959 {
960 	if (send_cq == recv_cq) {
961 		spin_lock(&send_cq->lock);
962 		__acquire(&recv_cq->lock);
963 	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
964 		spin_lock(&send_cq->lock);
965 		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
966 	} else {
967 		spin_lock(&recv_cq->lock);
968 		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
969 	}
970 }
971 
972 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
973 	__releases(&send_cq->lock) __releases(&recv_cq->lock)
974 {
975 	if (send_cq == recv_cq) {
976 		__release(&recv_cq->lock);
977 		spin_unlock(&send_cq->lock);
978 	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
979 		spin_unlock(&recv_cq->lock);
980 		spin_unlock(&send_cq->lock);
981 	} else {
982 		spin_unlock(&send_cq->lock);
983 		spin_unlock(&recv_cq->lock);
984 	}
985 }
986 
987 static void del_gid_entries(struct mlx4_ib_qp *qp)
988 {
989 	struct mlx4_ib_gid_entry *ge, *tmp;
990 
991 	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
992 		list_del(&ge->list);
993 		kfree(ge);
994 	}
995 }
996 
997 static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
998 {
999 	if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
1000 		return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
1001 	else
1002 		return to_mpd(qp->ibqp.pd);
1003 }
1004 
1005 static void get_cqs(struct mlx4_ib_qp *qp,
1006 		    struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
1007 {
1008 	switch (qp->ibqp.qp_type) {
1009 	case IB_QPT_XRC_TGT:
1010 		*send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
1011 		*recv_cq = *send_cq;
1012 		break;
1013 	case IB_QPT_XRC_INI:
1014 		*send_cq = to_mcq(qp->ibqp.send_cq);
1015 		*recv_cq = *send_cq;
1016 		break;
1017 	default:
1018 		*send_cq = to_mcq(qp->ibqp.send_cq);
1019 		*recv_cq = to_mcq(qp->ibqp.recv_cq);
1020 		break;
1021 	}
1022 }
1023 
1024 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
1025 			      int is_user)
1026 {
1027 	struct mlx4_ib_cq *send_cq, *recv_cq;
1028 	unsigned long flags;
1029 
1030 	if (qp->state != IB_QPS_RESET) {
1031 		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
1032 				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
1033 			pr_warn("modify QP %06x to RESET failed.\n",
1034 			       qp->mqp.qpn);
1035 		if (qp->pri.smac || qp->pri.smac_port) {
1036 			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
1037 			qp->pri.smac = 0;
1038 			qp->pri.smac_port = 0;
1039 		}
1040 		if (qp->alt.smac) {
1041 			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
1042 			qp->alt.smac = 0;
1043 		}
1044 		if (qp->pri.vid < 0x1000) {
1045 			mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
1046 			qp->pri.vid = 0xFFFF;
1047 			qp->pri.candidate_vid = 0xFFFF;
1048 			qp->pri.update_vid = 0;
1049 		}
1050 		if (qp->alt.vid < 0x1000) {
1051 			mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
1052 			qp->alt.vid = 0xFFFF;
1053 			qp->alt.candidate_vid = 0xFFFF;
1054 			qp->alt.update_vid = 0;
1055 		}
1056 	}
1057 
1058 	get_cqs(qp, &send_cq, &recv_cq);
1059 
1060 	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
1061 	mlx4_ib_lock_cqs(send_cq, recv_cq);
1062 
1063 	/* del from lists under both locks above to protect reset flow paths */
1064 	list_del(&qp->qps_list);
1065 	list_del(&qp->cq_send_list);
1066 	list_del(&qp->cq_recv_list);
1067 	if (!is_user) {
1068 		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
1069 				 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1070 		if (send_cq != recv_cq)
1071 			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1072 	}
1073 
1074 	mlx4_qp_remove(dev->dev, &qp->mqp);
1075 
1076 	mlx4_ib_unlock_cqs(send_cq, recv_cq);
1077 	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
1078 
1079 	mlx4_qp_free(dev->dev, &qp->mqp);
1080 
1081 	if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
1082 		if (qp->flags & MLX4_IB_QP_NETIF)
1083 			mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
1084 		else
1085 			mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
1086 	}
1087 
1088 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
1089 
1090 	if (is_user) {
1091 		if (qp->rq.wqe_cnt)
1092 			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
1093 					      &qp->db);
1094 		ib_umem_release(qp->umem);
1095 	} else {
1096 		kvfree(qp->sq.wrid);
1097 		kvfree(qp->rq.wrid);
1098 		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
1099 		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
1100 			free_proxy_bufs(&dev->ib_dev, qp);
1101 		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
1102 		if (qp->rq.wqe_cnt)
1103 			mlx4_db_free(dev->dev, &qp->db);
1104 	}
1105 
1106 	del_gid_entries(qp);
1107 }
1108 
1109 static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
1110 {
1111 	/* Native or PPF */
1112 	if (!mlx4_is_mfunc(dev->dev) ||
1113 	    (mlx4_is_master(dev->dev) &&
1114 	     attr->create_flags & MLX4_IB_SRIOV_SQP)) {
1115 		return  dev->dev->phys_caps.base_sqpn +
1116 			(attr->qp_type == IB_QPT_SMI ? 0 : 2) +
1117 			attr->port_num - 1;
1118 	}
1119 	/* PF or VF -- creating proxies */
1120 	if (attr->qp_type == IB_QPT_SMI)
1121 		return dev->dev->caps.qp0_proxy[attr->port_num - 1];
1122 	else
1123 		return dev->dev->caps.qp1_proxy[attr->port_num - 1];
1124 }
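
/*
 * Layout example for get_sqp_num() above, assuming a two-port HCA
 * with base_sqpn = N: the native QP0s live at N and N + 1 and the
 * QP1s at N + 2 and N + 3, which is what the
 * (qp_type == IB_QPT_SMI ? 0 : 2) + port_num - 1 arithmetic selects.
 */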
1125 
1126 static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
1127 					struct ib_qp_init_attr *init_attr,
1128 					struct ib_udata *udata)
1129 {
1130 	struct mlx4_ib_qp *qp = NULL;
1131 	int err;
1132 	int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
1133 	u16 xrcdn = 0;
1134 	gfp_t gfp;
1135 
1136 	gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ?
1137 		GFP_NOIO : GFP_KERNEL;
1138 	/*
1139 	 * We only support LSO, vendor flag1, and multicast loopback blocking,
1140 	 * and only for kernel UD QPs.
1141 	 */
1142 	if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
1143 					MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
1144 					MLX4_IB_SRIOV_TUNNEL_QP |
1145 					MLX4_IB_SRIOV_SQP |
1146 					MLX4_IB_QP_NETIF |
1147 					MLX4_IB_QP_CREATE_ROCE_V2_GSI |
1148 					MLX4_IB_QP_CREATE_USE_GFP_NOIO))
1149 		return ERR_PTR(-EINVAL);
1150 
1151 	if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
1152 		if (init_attr->qp_type != IB_QPT_UD)
1153 			return ERR_PTR(-EINVAL);
1154 	}
1155 
1156 	if (init_attr->create_flags) {
1157 		if (udata && init_attr->create_flags & ~(sup_u_create_flags))
1158 			return ERR_PTR(-EINVAL);
1159 
1160 		if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
1161 						 MLX4_IB_QP_CREATE_USE_GFP_NOIO |
1162 						 MLX4_IB_QP_CREATE_ROCE_V2_GSI  |
1163 						 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) &&
1164 		     init_attr->qp_type != IB_QPT_UD) ||
1165 		    (init_attr->create_flags & MLX4_IB_SRIOV_SQP &&
1166 		     init_attr->qp_type > IB_QPT_GSI) ||
1167 		    (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI &&
1168 		     init_attr->qp_type != IB_QPT_GSI))
1169 			return ERR_PTR(-EINVAL);
1170 	}
1171 
1172 	switch (init_attr->qp_type) {
1173 	case IB_QPT_XRC_TGT:
1174 		pd = to_mxrcd(init_attr->xrcd)->pd;
1175 		xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
1176 		init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
1177 		/* fall through */
1178 	case IB_QPT_XRC_INI:
1179 		if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
1180 			return ERR_PTR(-ENOSYS);
1181 		init_attr->recv_cq = init_attr->send_cq;
1182 		/* fall through */
1183 	case IB_QPT_RC:
1184 	case IB_QPT_UC:
1185 	case IB_QPT_RAW_PACKET:
1186 		qp = kzalloc(sizeof *qp, gfp);
1187 		if (!qp)
1188 			return ERR_PTR(-ENOMEM);
1189 		qp->pri.vid = 0xFFFF;
1190 		qp->alt.vid = 0xFFFF;
1191 		/* fall through */
1192 	case IB_QPT_UD:
1193 	{
1194 		err = create_qp_common(to_mdev(pd->device), pd, init_attr,
1195 				       udata, 0, &qp, gfp);
1196 		if (err) {
1197 			kfree(qp);
1198 			return ERR_PTR(err);
1199 		}
1200 
1201 		qp->ibqp.qp_num = qp->mqp.qpn;
1202 		qp->xrcdn = xrcdn;
1203 
1204 		break;
1205 	}
1206 	case IB_QPT_SMI:
1207 	case IB_QPT_GSI:
1208 	{
1209 		int sqpn;
1210 
1211 		/* Userspace is not allowed to create special QPs: */
1212 		if (udata)
1213 			return ERR_PTR(-EINVAL);
1214 		if (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI) {
1215 			int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev, 1, 1, &sqpn, 0);
1216 
1217 			if (res)
1218 				return ERR_PTR(res);
1219 		} else {
1220 			sqpn = get_sqp_num(to_mdev(pd->device), init_attr);
1221 		}
1222 
1223 		err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
1224 				       sqpn,
1225 				       &qp, gfp);
1226 		if (err)
1227 			return ERR_PTR(err);
1228 
1229 		qp->port	= init_attr->port_num;
1230 		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 :
1231 			init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI ? sqpn : 1;
1232 		break;
1233 	}
1234 	default:
1235 		/* Don't support raw QPs */
1236 		return ERR_PTR(-EINVAL);
1237 	}
1238 
1239 	return &qp->ibqp;
1240 }
1241 
1242 struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
1243 				struct ib_qp_init_attr *init_attr,
1244 				struct ib_udata *udata)
{
1245 	struct ib_device *device = pd ? pd->device : init_attr->xrcd->device;
1246 	struct ib_qp *ibqp;
1247 	struct mlx4_ib_dev *dev = to_mdev(device);
1248 
1249 	ibqp = _mlx4_ib_create_qp(pd, init_attr, udata);
1250 
1251 	if (!IS_ERR(ibqp) &&
1252 	    (init_attr->qp_type == IB_QPT_GSI) &&
1253 	    !(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) {
1254 		struct mlx4_ib_sqp *sqp = to_msqp((to_mqp(ibqp)));
1255 		int is_eth = rdma_cap_eth_ah(&dev->ib_dev, init_attr->port_num);
1256 
1257 		if (is_eth &&
1258 		    dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
1259 			init_attr->create_flags |= MLX4_IB_QP_CREATE_ROCE_V2_GSI;
1260 			sqp->roce_v2_gsi = ib_create_qp(pd, init_attr);
1261 
1262 			if (IS_ERR(sqp->roce_v2_gsi)) {
1263 				pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi));
1264 				sqp->roce_v2_gsi = NULL;
1265 			} else {
1266 				sqp = to_msqp(to_mqp(sqp->roce_v2_gsi));
1267 				sqp->qp.flags |= MLX4_IB_ROCE_V2_GSI_QP;
1268 			}
1269 
1270 			init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI;
1271 		}
1272 	}
1273 	return ibqp;
1274 }
1275 
1276 static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
1277 {
1278 	struct mlx4_ib_dev *dev = to_mdev(qp->device);
1279 	struct mlx4_ib_qp *mqp = to_mqp(qp);
1280 	struct mlx4_ib_pd *pd;
1281 
1282 	if (is_qp0(dev, mqp))
1283 		mlx4_CLOSE_PORT(dev->dev, mqp->port);
1284 
1285 	if (dev->qp1_proxy[mqp->port - 1] == mqp) {
1286 		mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
1287 		dev->qp1_proxy[mqp->port - 1] = NULL;
1288 		mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
1289 	}
1290 
1291 	if (mqp->counter_index)
1292 		mlx4_ib_free_qp_counter(dev, mqp);
1293 
1294 	pd = get_pd(mqp);
1295 	destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
1296 
1297 	if (is_sqp(dev, mqp))
1298 		kfree(to_msqp(mqp));
1299 	else
1300 		kfree(mqp);
1301 
1302 	return 0;
1303 }
1304 
1305 int mlx4_ib_destroy_qp(struct ib_qp *qp)
1306 {
1307 	struct mlx4_ib_qp *mqp = to_mqp(qp);
1308 
1309 	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
1310 		struct mlx4_ib_sqp *sqp = to_msqp(mqp);
1311 
1312 		if (sqp->roce_v2_gsi)
1313 			ib_destroy_qp(sqp->roce_v2_gsi);
1314 	}
1315 
1316 	return _mlx4_ib_destroy_qp(qp);
1317 }
1318 
1319 static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
1320 {
1321 	switch (type) {
1322 	case MLX4_IB_QPT_RC:		return MLX4_QP_ST_RC;
1323 	case MLX4_IB_QPT_UC:		return MLX4_QP_ST_UC;
1324 	case MLX4_IB_QPT_UD:		return MLX4_QP_ST_UD;
1325 	case MLX4_IB_QPT_XRC_INI:
1326 	case MLX4_IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
1327 	case MLX4_IB_QPT_SMI:
1328 	case MLX4_IB_QPT_GSI:
1329 	case MLX4_IB_QPT_RAW_PACKET:	return MLX4_QP_ST_MLX;
1330 
1331 	case MLX4_IB_QPT_PROXY_SMI_OWNER:
1332 	case MLX4_IB_QPT_TUN_SMI_OWNER:	return (mlx4_is_mfunc(dev->dev) ?
1333 						MLX4_QP_ST_MLX : -1);
1334 	case MLX4_IB_QPT_PROXY_SMI:
1335 	case MLX4_IB_QPT_TUN_SMI:
1336 	case MLX4_IB_QPT_PROXY_GSI:
1337 	case MLX4_IB_QPT_TUN_GSI:	return (mlx4_is_mfunc(dev->dev) ?
1338 						MLX4_QP_ST_UD : -1);
1339 	default:			return -1;
1340 	}
1341 }
1342 
1343 static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
1344 				   int attr_mask)
1345 {
1346 	u8 dest_rd_atomic;
1347 	u32 access_flags;
1348 	u32 hw_access_flags = 0;
1349 
1350 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1351 		dest_rd_atomic = attr->max_dest_rd_atomic;
1352 	else
1353 		dest_rd_atomic = qp->resp_depth;
1354 
1355 	if (attr_mask & IB_QP_ACCESS_FLAGS)
1356 		access_flags = attr->qp_access_flags;
1357 	else
1358 		access_flags = qp->atomic_rd_en;
1359 
1360 	if (!dest_rd_atomic)
1361 		access_flags &= IB_ACCESS_REMOTE_WRITE;
1362 
1363 	if (access_flags & IB_ACCESS_REMOTE_READ)
1364 		hw_access_flags |= MLX4_QP_BIT_RRE;
1365 	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
1366 		hw_access_flags |= MLX4_QP_BIT_RAE;
1367 	if (access_flags & IB_ACCESS_REMOTE_WRITE)
1368 		hw_access_flags |= MLX4_QP_BIT_RWE;
1369 
1370 	return cpu_to_be32(hw_access_flags);
1371 }
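
/*
 * Example for to_mlx4_access_flags() above: if the caller asks for
 * REMOTE_READ | REMOTE_WRITE but dest_rd_atomic resolves to 0, only
 * MLX4_QP_BIT_RWE ends up set -- with no responder resources, remote
 * reads and atomics are masked off and only RDMA writes stay enabled.
 */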
1372 
1373 static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
1374 			    int attr_mask)
1375 {
1376 	if (attr_mask & IB_QP_PKEY_INDEX)
1377 		sqp->pkey_index = attr->pkey_index;
1378 	if (attr_mask & IB_QP_QKEY)
1379 		sqp->qkey = attr->qkey;
1380 	if (attr_mask & IB_QP_SQ_PSN)
1381 		sqp->send_psn = attr->sq_psn;
1382 }
1383 
1384 static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
1385 {
1386 	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
1387 }
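
/*
 * mlx4_set_sched() above keeps the port in bit 6 of sched_queue: the
 * 0xbf mask clears that bit and (port - 1) << 6 writes the new value,
 * so port 2 sets bit 6 while port 1 leaves it clear.
 */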
1388 
1389 static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
1390 			  u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
1391 			  struct mlx4_roce_smac_vlan_info *smac_info, u8 port)
1392 {
1393 	int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
1394 		IB_LINK_LAYER_ETHERNET;
1395 	int vidx;
1396 	int smac_index;
1397 	int err;
1398 
1399 
1400 	path->grh_mylmc     = ah->src_path_bits & 0x7f;
1401 	path->rlid	    = cpu_to_be16(ah->dlid);
1402 	if (ah->static_rate) {
1403 		path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
1404 		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
1405 		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
1406 			--path->static_rate;
1407 	} else
1408 		path->static_rate = 0;
1409 
1410 	if (ah->ah_flags & IB_AH_GRH) {
1411 		int real_sgid_index = mlx4_ib_gid_index_to_real_index(dev,
1412 								      port,
1413 								      ah->grh.sgid_index);
1414 
1415 		if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) {
1416 			pr_err("sgid_index (%u) too large. max is %d\n",
1417 			       real_sgid_index, dev->dev->caps.gid_table_len[port] - 1);
1418 			return -1;
1419 		}
1420 
1421 		path->grh_mylmc |= 1 << 7;
1422 		path->mgid_index = real_sgid_index;
1423 		path->hop_limit  = ah->grh.hop_limit;
1424 		path->tclass_flowlabel =
1425 			cpu_to_be32((ah->grh.traffic_class << 20) |
1426 				    (ah->grh.flow_label));
1427 		memcpy(path->rgid, ah->grh.dgid.raw, 16);
1428 	}
1429 
1430 	if (is_eth) {
1431 		if (!(ah->ah_flags & IB_AH_GRH))
1432 			return -1;
1433 
1434 		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
1435 			((port - 1) << 6) | ((ah->sl & 7) << 3);
1436 
1437 		path->feup |= MLX4_FEUP_FORCE_ETH_UP;
1438 		if (vlan_tag < 0x1000) {
1439 			if (smac_info->vid < 0x1000) {
1440 				/* both valid vlan ids */
1441 				if (smac_info->vid != vlan_tag) {
1442 					/* different VIDs.  unreg old and reg new */
1443 					err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
1444 					if (err)
1445 						return err;
1446 					smac_info->candidate_vid = vlan_tag;
1447 					smac_info->candidate_vlan_index = vidx;
1448 					smac_info->candidate_vlan_port = port;
1449 					smac_info->update_vid = 1;
1450 					path->vlan_index = vidx;
1451 				} else {
1452 					path->vlan_index = smac_info->vlan_index;
1453 				}
1454 			} else {
1455 				/* no current vlan tag in qp */
1456 				err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
1457 				if (err)
1458 					return err;
1459 				smac_info->candidate_vid = vlan_tag;
1460 				smac_info->candidate_vlan_index = vidx;
1461 				smac_info->candidate_vlan_port = port;
1462 				smac_info->update_vid = 1;
1463 				path->vlan_index = vidx;
1464 			}
1465 			path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
1466 			path->fl = 1 << 6;
1467 		} else {
1468 			/* QP already has a VLAN tag; unregister it once modify-qp succeeds */
1469 			if (smac_info->vid < 0x1000) {
1470 				smac_info->candidate_vid = 0xFFFF;
1471 				smac_info->update_vid = 1;
1472 			}
1473 		}
1474 
1475 		/* Get the smac_index for RoCE use.
1476 		 * If no SMAC has been assigned yet, register one.
1477 		 * If one was already assigned but the new MAC differs,
1478 		 * unregister the old one and register the new one.
1479 		 */
1480 		if ((!smac_info->smac && !smac_info->smac_port) ||
1481 		    smac_info->smac != smac) {
1482 			/* register candidate now, unreg if needed, after success */
1483 			smac_index = mlx4_register_mac(dev->dev, port, smac);
1484 			if (smac_index >= 0) {
1485 				smac_info->candidate_smac_index = smac_index;
1486 				smac_info->candidate_smac = smac;
1487 				smac_info->candidate_smac_port = port;
1488 			} else {
1489 				return -EINVAL;
1490 			}
1491 		} else {
1492 			smac_index = smac_info->smac_index;
1493 		}
1494 
1495 		memcpy(path->dmac, ah->dmac, 6);
1496 		path->ackto = MLX4_IB_LINK_TYPE_ETH;
1497 		/* put MAC table smac index for IBoE */
1498 		path->grh_mylmc = (u8) (smac_index) | 0x80;
1499 	} else {
1500 		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
1501 			((port - 1) << 6) | ((ah->sl & 0xf) << 2);
1502 	}
1503 
1504 	return 0;
1505 }
1506 
1507 static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
1508 			 enum ib_qp_attr_mask qp_attr_mask,
1509 			 struct mlx4_ib_qp *mqp,
1510 			 struct mlx4_qp_path *path, u8 port,
1511 			 u16 vlan_id, u8 *smac)
1512 {
1513 	return _mlx4_set_path(dev, &qp->ah_attr,
1514 			      mlx4_mac_to_u64(smac),
1515 			      vlan_id,
1516 			      path, &mqp->pri, port);
1517 }
1518 
1519 static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
1520 			     const struct ib_qp_attr *qp,
1521 			     enum ib_qp_attr_mask qp_attr_mask,
1522 			     struct mlx4_ib_qp *mqp,
1523 			     struct mlx4_qp_path *path, u8 port)
1524 {
1525 	return _mlx4_set_path(dev, &qp->alt_ah_attr,
1526 			      0,
1527 			      0xffff,
1528 			      path, &mqp->alt, port);
1529 }
1530 
1531 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
1532 {
1533 	struct mlx4_ib_gid_entry *ge, *tmp;
1534 
1535 	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1536 		if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
1537 			ge->added = 1;
1538 			ge->port = qp->port;
1539 		}
1540 	}
1541 }
1542 
1543 static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev,
1544 				    struct mlx4_ib_qp *qp,
1545 				    struct mlx4_qp_context *context)
1546 {
1547 	u64 u64_mac;
1548 	int smac_index;
1549 
1550 	u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);
1551 
1552 	context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
1553 	if (!qp->pri.smac && !qp->pri.smac_port) {
1554 		smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
1555 		if (smac_index >= 0) {
1556 			qp->pri.candidate_smac_index = smac_index;
1557 			qp->pri.candidate_smac = u64_mac;
1558 			qp->pri.candidate_smac_port = qp->port;
1559 			context->pri_path.grh_mylmc = 0x80 | (u8) smac_index;
1560 		} else {
1561 			return -ENOENT;
1562 		}
1563 	}
1564 	return 0;
1565 }
1566 
1567 static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
1568 {
1569 	struct counter_index *new_counter_index;
1570 	int err;
1571 	u32 tmp_idx;
1572 
1573 	if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) !=
1574 	    IB_LINK_LAYER_ETHERNET ||
1575 	    !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) ||
1576 	    !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK))
1577 		return 0;
1578 
1579 	err = mlx4_counter_alloc(dev->dev, &tmp_idx);
1580 	if (err)
1581 		return err;
1582 
1583 	new_counter_index = kmalloc(sizeof(*new_counter_index), GFP_KERNEL);
1584 	if (!new_counter_index) {
1585 		mlx4_counter_free(dev->dev, tmp_idx);
1586 		return -ENOMEM;
1587 	}
1588 
1589 	new_counter_index->index = tmp_idx;
1590 	new_counter_index->allocated = 1;
1591 	qp->counter_index = new_counter_index;
1592 
1593 	mutex_lock(&dev->counters_table[qp->port - 1].mutex);
1594 	list_add_tail(&new_counter_index->list,
1595 		      &dev->counters_table[qp->port - 1].counters_list);
1596 	mutex_unlock(&dev->counters_table[qp->port - 1].mutex);
1597 
1598 	return 0;
1599 }
1600 
1601 enum {
1602 	MLX4_QPC_ROCE_MODE_1 = 0,
1603 	MLX4_QPC_ROCE_MODE_2 = 2,
1604 	MLX4_QPC_ROCE_MODE_UNDEFINED = 0xff
1605 };
1606 
1607 static u8 gid_type_to_qpc(enum ib_gid_type gid_type)
1608 {
1609 	switch (gid_type) {
1610 	case IB_GID_TYPE_ROCE:
1611 		return MLX4_QPC_ROCE_MODE_1;
1612 	case IB_GID_TYPE_ROCE_UDP_ENCAP:
1613 		return MLX4_QPC_ROCE_MODE_2;
1614 	default:
1615 		return MLX4_QPC_ROCE_MODE_UNDEFINED;
1616 	}
1617 }
1618 
1619 static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1620 			       const struct ib_qp_attr *attr, int attr_mask,
1621 			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
1622 {
1623 	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
1624 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
1625 	struct mlx4_ib_pd *pd;
1626 	struct mlx4_ib_cq *send_cq, *recv_cq;
1627 	struct mlx4_qp_context *context;
1628 	enum mlx4_qp_optpar optpar = 0;
1629 	int sqd_event;
1630 	int steer_qp = 0;
1631 	int err = -EINVAL;
1632 	int counter_index;
1633 
1634 	/* APM is not supported under RoCE */
1635 	if (attr_mask & IB_QP_ALT_PATH &&
1636 	    rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
1637 	    IB_LINK_LAYER_ETHERNET)
1638 		return -ENOTSUPP;
1639 
1640 	context = kzalloc(sizeof *context, GFP_KERNEL);
1641 	if (!context)
1642 		return -ENOMEM;
1643 
1644 	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
1645 				     (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));
1646 
1647 	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
1648 		context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
1649 	else {
1650 		optpar |= MLX4_QP_OPTPAR_PM_STATE;
1651 		switch (attr->path_mig_state) {
1652 		case IB_MIG_MIGRATED:
1653 			context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
1654 			break;
1655 		case IB_MIG_REARM:
1656 			context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
1657 			break;
1658 		case IB_MIG_ARMED:
1659 			context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
1660 			break;
1661 		}
1662 	}
1663 
1664 	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
1665 		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
1666 	else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
1667 		context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
1668 	else if (ibqp->qp_type == IB_QPT_UD) {
1669 		if (qp->flags & MLX4_IB_QP_LSO)
1670 			context->mtu_msgmax = (IB_MTU_4096 << 5) |
1671 					      ilog2(dev->dev->caps.max_gso_sz);
1672 		else
1673 			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
1674 	} else if (attr_mask & IB_QP_PATH_MTU) {
1675 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
1676 			pr_err("path MTU (%u) is invalid\n",
1677 			       attr->path_mtu);
1678 			goto out;
1679 		}
1680 		context->mtu_msgmax = (attr->path_mtu << 5) |
1681 			ilog2(dev->dev->caps.max_msg_sz);
1682 	}
1683 
1684 	if (qp->rq.wqe_cnt)
1685 		context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
1686 	context->rq_size_stride |= qp->rq.wqe_shift - 4;
1687 
1688 	if (qp->sq.wqe_cnt)
1689 		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
1690 	context->sq_size_stride |= qp->sq.wqe_shift - 4;
1691 
1692 	if (new_state == IB_QPS_RESET && qp->counter_index)
1693 		mlx4_ib_free_qp_counter(dev, qp);
1694 
1695 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
1696 		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
1697 		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
1698 		if (ibqp->qp_type == IB_QPT_RAW_PACKET)
1699 			context->param3 |= cpu_to_be32(1 << 30);
1700 	}
1701 
1702 	if (qp->ibqp.uobject)
1703 		context->usr_page = cpu_to_be32(
1704 			mlx4_to_hw_uar_index(dev->dev,
1705 					     to_mucontext(ibqp->uobject->context)->uar.index));
1706 	else
1707 		context->usr_page = cpu_to_be32(
1708 			mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
1709 
1710 	if (attr_mask & IB_QP_DEST_QPN)
1711 		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
1712 
1713 	if (attr_mask & IB_QP_PORT) {
1714 		if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
1715 		    !(attr_mask & IB_QP_AV)) {
1716 			mlx4_set_sched(&context->pri_path, attr->port_num);
1717 			optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
1718 		}
1719 	}
1720 
1721 	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
1722 		err = create_qp_lb_counter(dev, qp);
1723 		if (err)
1724 			goto out;
1725 
1726 		counter_index =
1727 			dev->counters_table[qp->port - 1].default_counter;
1728 		if (qp->counter_index)
1729 			counter_index = qp->counter_index->index;
1730 
1731 		if (counter_index != -1) {
1732 			context->pri_path.counter_index = counter_index;
1733 			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
1734 			if (qp->counter_index) {
1735 				context->pri_path.fl |=
1736 					MLX4_FL_ETH_SRC_CHECK_MC_LB;
1737 				context->pri_path.vlan_control |=
1738 					MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
1739 			}
1740 		} else
1741 			context->pri_path.counter_index =
1742 				MLX4_SINK_COUNTER_INDEX(dev->dev);
1743 
1744 		if (qp->flags & MLX4_IB_QP_NETIF) {
1745 			mlx4_ib_steer_qp_reg(dev, qp, 1);
1746 			steer_qp = 1;
1747 		}
1748 
1749 		if (ibqp->qp_type == IB_QPT_GSI) {
1750 			enum ib_gid_type gid_type = qp->flags & MLX4_IB_ROCE_V2_GSI_QP ?
1751 				IB_GID_TYPE_ROCE_UDP_ENCAP : IB_GID_TYPE_ROCE;
1752 			u8 qpc_roce_mode = gid_type_to_qpc(gid_type);
1753 
1754 			context->rlkey_roce_mode |= (qpc_roce_mode << 6);
1755 		}
1756 	}
1757 
1758 	if (attr_mask & IB_QP_PKEY_INDEX) {
1759 		if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
1760 			context->pri_path.disable_pkey_check = 0x40;
1761 		context->pri_path.pkey_index = attr->pkey_index;
1762 		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
1763 	}
1764 
1765 	if (attr_mask & IB_QP_AV) {
1766 		u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 :
1767 			attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1768 		union ib_gid gid;
1769 		struct ib_gid_attr gid_attr;
1770 		u16 vlan = 0xffff;
1771 		u8 smac[ETH_ALEN];
1772 		int status = 0;
1773 		int is_eth = rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
1774 			attr->ah_attr.ah_flags & IB_AH_GRH;
1775 
1776 		if (is_eth) {
1777 			int index = attr->ah_attr.grh.sgid_index;
1778 
1779 			status = ib_get_cached_gid(ibqp->device, port_num,
1780 						   index, &gid, &gid_attr);
1781 			if (!status && !memcmp(&gid, &zgid, sizeof(gid)))
1782 				status = -ENOENT;
1783 			if (!status && gid_attr.ndev) {
1784 				vlan = rdma_vlan_dev_vlan_id(gid_attr.ndev);
1785 				memcpy(smac, IF_LLADDR(gid_attr.ndev), ETH_ALEN);
1786 				dev_put(gid_attr.ndev);
1787 			}
1788 		}
1789 		if (status)
1790 			goto out;
1791 
1792 		if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
1793 				  port_num, vlan, smac))
1794 			goto out;
1795 
1796 		optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
1797 			   MLX4_QP_OPTPAR_SCHED_QUEUE);
1798 
1799 		if (is_eth &&
1800 		    (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR)) {
1801 			u8 qpc_roce_mode = gid_type_to_qpc(gid_attr.gid_type);
1802 
1803 			if (qpc_roce_mode == MLX4_QPC_ROCE_MODE_UNDEFINED) {
1804 				err = -EINVAL;
1805 				goto out;
1806 			}
1807 			context->rlkey_roce_mode |= (qpc_roce_mode << 6);
1808 		}
1809 
1810 	}
1811 
1812 	if (attr_mask & IB_QP_TIMEOUT) {
1813 		context->pri_path.ackto |= attr->timeout << 3;
1814 		optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
1815 	}
1816 
1817 	if (attr_mask & IB_QP_ALT_PATH) {
1818 		if (attr->alt_port_num == 0 ||
1819 		    attr->alt_port_num > dev->dev->caps.num_ports)
1820 			goto out;
1821 
1822 		if (attr->alt_pkey_index >=
1823 		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
1824 			goto out;
1825 
1826 		if (mlx4_set_alt_path(dev, attr, attr_mask, qp,
1827 				      &context->alt_path,
1828 				      attr->alt_port_num))
1829 			goto out;
1830 
1831 		context->alt_path.pkey_index = attr->alt_pkey_index;
1832 		context->alt_path.ackto = attr->alt_timeout << 3;
1833 		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
1834 	}
1835 
1836 	pd = get_pd(qp);
1837 	get_cqs(qp, &send_cq, &recv_cq);
1838 	context->pd       = cpu_to_be32(pd->pdn);
1839 	context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
1840 	context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
1841 	context->params1  = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
1842 
1843 	/* Set "fast registration enabled" for all kernel QPs */
1844 	if (!qp->ibqp.uobject)
1845 		context->params1 |= cpu_to_be32(1 << 11);
1846 
1847 	if (attr_mask & IB_QP_RNR_RETRY) {
1848 		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
1849 		optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
1850 	}
1851 
1852 	if (attr_mask & IB_QP_RETRY_CNT) {
1853 		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
1854 		optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
1855 	}
1856 
1857 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1858 		if (attr->max_rd_atomic)
1859 			context->params1 |=
1860 				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
1861 		optpar |= MLX4_QP_OPTPAR_SRA_MAX;
1862 	}
1863 
1864 	if (attr_mask & IB_QP_SQ_PSN)
1865 		context->next_send_psn = cpu_to_be32(attr->sq_psn);
1866 
1867 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1868 		if (attr->max_dest_rd_atomic)
1869 			context->params2 |=
1870 				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
1871 		optpar |= MLX4_QP_OPTPAR_RRA_MAX;
1872 	}
1873 
1874 	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
1875 		context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
1876 		optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
1877 	}
1878 
1879 	if (ibqp->srq)
1880 		context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);
1881 
1882 	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
1883 		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
1884 		optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
1885 	}
1886 	if (attr_mask & IB_QP_RQ_PSN)
1887 		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
1888 
1889 	/* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */
1890 	if (attr_mask & IB_QP_QKEY) {
1891 		if (qp->mlx4_ib_qp_type &
1892 		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))
1893 			context->qkey = cpu_to_be32(IB_QP_SET_QKEY);
1894 		else {
1895 			if (mlx4_is_mfunc(dev->dev) &&
1896 			    !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) &&
1897 			    (attr->qkey & MLX4_RESERVED_QKEY_MASK) ==
1898 			    MLX4_RESERVED_QKEY_BASE) {
1899 				pr_err("Cannot use reserved QKEY"
1900 				       " 0x%x (range 0xffff0000..0xffffffff"
1901 				       " is reserved)\n", attr->qkey);
1902 				err = -EINVAL;
1903 				goto out;
1904 			}
1905 			context->qkey = cpu_to_be32(attr->qkey);
1906 		}
1907 		optpar |= MLX4_QP_OPTPAR_Q_KEY;
1908 	}
1909 
1910 	if (ibqp->srq)
1911 		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);
1912 
1913 	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1914 		context->db_rec_addr = cpu_to_be64(qp->db.dma);
1915 
1916 	if (cur_state == IB_QPS_INIT &&
1917 	    new_state == IB_QPS_RTR  &&
1918 	    (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
1919 	     ibqp->qp_type == IB_QPT_UD ||
1920 	     ibqp->qp_type == IB_QPT_RAW_PACKET)) {
1921 		context->pri_path.sched_queue = (qp->port - 1) << 6;
1922 		if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
1923 		    qp->mlx4_ib_qp_type &
1924 		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) {
1925 			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
1926 			if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI)
1927 				context->pri_path.fl = 0x80;
1928 		} else {
1929 			if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
1930 				context->pri_path.fl = 0x80;
1931 			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
1932 		}
1933 		if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
1934 		    IB_LINK_LAYER_ETHERNET) {
1935 			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI ||
1936 			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI)
1937 				context->pri_path.feup = 1 << 7; /* don't fsm */
1938 			/* handle smac_index */
1939 			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD ||
1940 			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
1941 			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
1942 				err = handle_eth_ud_smac_index(dev, qp, context);
1943 				if (err) {
1944 					err = -EINVAL;
1945 					goto out;
1946 				}
1947 				if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
1948 					dev->qp1_proxy[qp->port - 1] = qp;
1949 			}
1950 		}
1951 	}
1952 
1953 	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
1954 		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
1955 					MLX4_IB_LINK_TYPE_ETH;
1956 		if (dev->dev->caps.tunnel_offload_mode ==  MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
1957 			/* set QP to receive both tunneled & non-tunneled packets */
1958 			if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
1959 				context->srqn = cpu_to_be32(7 << 28);
1960 		}
1961 	}
1962 
1963 	if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
1964 		int is_eth = rdma_port_get_link_layer(
1965 				&dev->ib_dev, qp->port) ==
1966 				IB_LINK_LAYER_ETHERNET;
1967 		if (is_eth) {
1968 			context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
1969 			optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
1970 		}
1971 	}
1972 
1973 
1974 	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD	&&
1975 	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
1976 		sqd_event = 1;
1977 	else
1978 		sqd_event = 0;
1979 
1980 	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1981 		context->rlkey_roce_mode |= (1 << 4);
1982 
1983 	/*
1984 	 * Before passing a kernel QP to the HW, make sure that the
1985 	 * ownership bits of the send queue are set and the SQ
1986 	 * headroom is stamped so that the hardware doesn't start
1987 	 * processing stale work requests.
1988 	 */
1989 	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
1990 		struct mlx4_wqe_ctrl_seg *ctrl;
1991 		int i;
1992 
1993 		for (i = 0; i < qp->sq.wqe_cnt; ++i) {
1994 			ctrl = get_send_wqe(qp, i);
1995 			ctrl->owner_opcode = cpu_to_be32(1U << 31);
1996 			if (qp->sq_max_wqes_per_wr == 1)
1997 				ctrl->fence_size =
1998 						1 << (qp->sq.wqe_shift - 4);
1999 
2000 			stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
2001 		}
2002 	}
2003 
2004 	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
2005 			     to_mlx4_state(new_state), context, optpar,
2006 			     sqd_event, &qp->mqp);
2007 	if (err)
2008 		goto out;
2009 
2010 	qp->state = new_state;
2011 
2012 	if (attr_mask & IB_QP_ACCESS_FLAGS)
2013 		qp->atomic_rd_en = attr->qp_access_flags;
2014 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
2015 		qp->resp_depth = attr->max_dest_rd_atomic;
2016 	if (attr_mask & IB_QP_PORT) {
2017 		qp->port = attr->port_num;
2018 		update_mcg_macs(dev, qp);
2019 	}
2020 	if (attr_mask & IB_QP_ALT_PATH)
2021 		qp->alt_port = attr->alt_port_num;
2022 
2023 	if (is_sqp(dev, qp))
2024 		store_sqp_attrs(to_msqp(qp), attr, attr_mask);
2025 
2026 	/*
2027 	 * If we moved QP0 to RTR, bring the IB link up; if we moved
2028 	 * QP0 to RESET or ERROR, bring the link back down.
2029 	 */
2030 	if (is_qp0(dev, qp)) {
2031 		if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
2032 			if (mlx4_INIT_PORT(dev->dev, qp->port))
2033 				pr_warn("INIT_PORT failed for port %d\n",
2034 				       qp->port);
2035 
2036 		if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
2037 		    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
2038 			mlx4_CLOSE_PORT(dev->dev, qp->port);
2039 	}
2040 
2041 	/*
2042 	 * If we moved a kernel QP to RESET, clean up all old CQ
2043 	 * entries and reinitialize the QP.
2044 	 */
2045 	if (new_state == IB_QPS_RESET) {
2046 		if (!ibqp->uobject) {
2047 			mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
2048 					 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
2049 			if (send_cq != recv_cq)
2050 				mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
2051 
2052 			qp->rq.head = 0;
2053 			qp->rq.tail = 0;
2054 			qp->sq.head = 0;
2055 			qp->sq.tail = 0;
2056 			qp->sq_next_wqe = 0;
2057 			if (qp->rq.wqe_cnt)
2058 				*qp->db.db  = 0;
2059 
2060 			if (qp->flags & MLX4_IB_QP_NETIF)
2061 				mlx4_ib_steer_qp_reg(dev, qp, 0);
2062 		}
2063 		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
2064 			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
2065 			qp->pri.smac = 0;
2066 			qp->pri.smac_port = 0;
2067 		}
2068 		if (qp->alt.smac) {
2069 			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
2070 			qp->alt.smac = 0;
2071 		}
2072 		if (qp->pri.vid < 0x1000) {
2073 			mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
2074 			qp->pri.vid = 0xFFFF;
2075 			qp->pri.candidate_vid = 0xFFFF;
2076 			qp->pri.update_vid = 0;
2077 		}
2078 
2079 		if (qp->alt.vid < 0x1000) {
2080 			mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
2081 			qp->alt.vid = 0xFFFF;
2082 			qp->alt.candidate_vid = 0xFFFF;
2083 			qp->alt.update_vid = 0;
2084 		}
2085 	}
2086 out:
2087 	if (err && qp->counter_index)
2088 		mlx4_ib_free_qp_counter(dev, qp);
2089 	if (err && steer_qp)
2090 		mlx4_ib_steer_qp_reg(dev, qp, 0);
2091 	kfree(context);
2092 	if (qp->pri.candidate_smac ||
2093 	    (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
2094 		if (err) {
2095 			mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
2096 		} else {
2097 			if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
2098 				mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
2099 			qp->pri.smac = qp->pri.candidate_smac;
2100 			qp->pri.smac_index = qp->pri.candidate_smac_index;
2101 			qp->pri.smac_port = qp->pri.candidate_smac_port;
2102 		}
2103 		qp->pri.candidate_smac = 0;
2104 		qp->pri.candidate_smac_index = 0;
2105 		qp->pri.candidate_smac_port = 0;
2106 	}
2107 	if (qp->alt.candidate_smac) {
2108 		if (err) {
2109 			mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac);
2110 		} else {
2111 			if (qp->alt.smac)
2112 				mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
2113 			qp->alt.smac = qp->alt.candidate_smac;
2114 			qp->alt.smac_index = qp->alt.candidate_smac_index;
2115 			qp->alt.smac_port = qp->alt.candidate_smac_port;
2116 		}
2117 		qp->alt.candidate_smac = 0;
2118 		qp->alt.candidate_smac_index = 0;
2119 		qp->alt.candidate_smac_port = 0;
2120 	}
2121 
2122 	if (qp->pri.update_vid) {
2123 		if (err) {
2124 			if (qp->pri.candidate_vid < 0x1000)
2125 				mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port,
2126 						     qp->pri.candidate_vid);
2127 		} else {
2128 			if (qp->pri.vid < 0x1000)
2129 				mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port,
2130 						     qp->pri.vid);
2131 			qp->pri.vid = qp->pri.candidate_vid;
2132 			qp->pri.vlan_port = qp->pri.candidate_vlan_port;
2133 			qp->pri.vlan_index =  qp->pri.candidate_vlan_index;
2134 		}
2135 		qp->pri.candidate_vid = 0xFFFF;
2136 		qp->pri.update_vid = 0;
2137 	}
2138 
2139 	if (qp->alt.update_vid) {
2140 		if (err) {
2141 			if (qp->alt.candidate_vid < 0x1000)
2142 				mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port,
2143 						     qp->alt.candidate_vid);
2144 		} else {
2145 			if (qp->alt.vid < 0x1000)
2146 				mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port,
2147 						     qp->alt.vid);
2148 			qp->alt.vid = qp->alt.candidate_vid;
2149 			qp->alt.vlan_port = qp->alt.candidate_vlan_port;
2150 			qp->alt.vlan_index =  qp->alt.candidate_vlan_index;
2151 		}
2152 		qp->alt.candidate_vid = 0xFFFF;
2153 		qp->alt.update_vid = 0;
2154 	}
2155 
2156 	return err;
2157 }
2158 
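/*
 * Validate a modify-QP request (state transition legality, port, pkey
 * index, RDMA-atomic limits) before handing it to __mlx4_ib_modify_qp.
 * When the two ports are bonded, the port number is remapped here as
 * well.
 */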
2159 static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2160 			      int attr_mask, struct ib_udata *udata)
2161 {
2162 	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
2163 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
2164 	enum ib_qp_state cur_state, new_state;
2165 	int err = -EINVAL;
2166 	int ll;
2167 	mutex_lock(&qp->mutex);
2168 
2169 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
2170 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
2171 
2172 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
2173 		ll = IB_LINK_LAYER_UNSPECIFIED;
2174 	} else {
2175 		int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
2176 		ll = rdma_port_get_link_layer(&dev->ib_dev, port);
2177 	}
2178 
2179 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
2180 				attr_mask, ll)) {
2181 		pr_debug("qpn 0x%x: invalid attribute mask specified "
2182 			 "for transition %d to %d. qp_type %d,"
2183 			 " attr_mask 0x%x\n",
2184 			 ibqp->qp_num, cur_state, new_state,
2185 			 ibqp->qp_type, attr_mask);
2186 		goto out;
2187 	}
2188 
2189 	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
2190 		if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
2191 			if ((ibqp->qp_type == IB_QPT_RC) ||
2192 			    (ibqp->qp_type == IB_QPT_UD) ||
2193 			    (ibqp->qp_type == IB_QPT_UC) ||
2194 			    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
2195 			    (ibqp->qp_type == IB_QPT_XRC_INI)) {
2196 				attr->port_num = mlx4_ib_bond_next_port(dev);
2197 			}
2198 		} else {
2199 			/* no sense in changing port_num
2200 			 * when ports are bonded */
2201 			attr_mask &= ~IB_QP_PORT;
2202 		}
2203 	}
2204 
2205 	if ((attr_mask & IB_QP_PORT) &&
2206 	    (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
2207 		pr_debug("qpn 0x%x: invalid port number (%d) specified "
2208 			 "for transition %d to %d. qp_type %d\n",
2209 			 ibqp->qp_num, attr->port_num, cur_state,
2210 			 new_state, ibqp->qp_type);
2211 		goto out;
2212 	}
2213 
2214 	if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
2215 	    (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
2216 	     IB_LINK_LAYER_ETHERNET))
2217 		goto out;
2218 
2219 	if (attr_mask & IB_QP_PKEY_INDEX) {
2220 		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
2221 		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
2222 			pr_debug("qpn 0x%x: invalid pkey index (%d) specified "
2223 				 "for transition %d to %d. qp_type %d\n",
2224 				 ibqp->qp_num, attr->pkey_index, cur_state,
2225 				 new_state, ibqp->qp_type);
2226 			goto out;
2227 		}
2228 	}
2229 
2230 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
2231 	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
2232 		pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. "
2233 			 "Transition %d to %d. qp_type %d\n",
2234 			 ibqp->qp_num, attr->max_rd_atomic, cur_state,
2235 			 new_state, ibqp->qp_type);
2236 		goto out;
2237 	}
2238 
2239 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
2240 	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
2241 		pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. "
2242 			 "Transition %d to %d. qp_type %d\n",
2243 			 ibqp->qp_num, attr->max_dest_rd_atomic, cur_state,
2244 			 new_state, ibqp->qp_type);
2245 		goto out;
2246 	}
2247 
2248 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
2249 		err = 0;
2250 		goto out;
2251 	}
2252 
2253 	err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
2254 
2255 	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
2256 		attr->port_num = 1;
2257 
2258 out:
2259 	mutex_unlock(&qp->mutex);
2260 	return err;
2261 }
2262 
2263 int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2264 		      int attr_mask, struct ib_udata *udata)
2265 {
2266 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
2267 	int ret;
2268 
2269 	ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata);
2270 
2271 	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
2272 		struct mlx4_ib_sqp *sqp = to_msqp(mqp);
2273 		int err = 0;
2274 
2275 		if (sqp->roce_v2_gsi)
2276 			err = ib_modify_qp(sqp->roce_v2_gsi, attr, attr_mask);
2277 		if (err)
2278 			pr_err("Failed to modify GSI QP for RoCEv2 (%d)\n",
2279 			       err);
2280 	}
2281 	return ret;
2282 }
2283 
2284 static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
2285 {
2286 	int i;
2287 	for (i = 0; i < dev->caps.num_ports; i++) {
2288 		if (qpn == dev->caps.qp0_proxy[i] ||
2289 		    qpn == dev->caps.qp0_tunnel[i]) {
2290 			*qkey = dev->caps.qp0_qkey[i];
2291 			return 0;
2292 		}
2293 	}
2294 	return -EINVAL;
2295 }
2296 
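/*
 * Build the MLX header for SR-IOV QP0 (SMI) traffic: the packet is
 * forced into loopback and addressed to the port's QP0 tunnel QP (or
 * to the WR's remote QPN when sent by the tunnel owner), using the
 * paravirtualized QKEY.
 */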
2297 static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
2298 				  struct ib_ud_wr *wr,
2299 				  void *wqe, unsigned *mlx_seg_len)
2300 {
2301 	struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
2302 	struct ib_device *ib_dev = &mdev->ib_dev;
2303 	struct mlx4_wqe_mlx_seg *mlx = wqe;
2304 	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
2305 	struct mlx4_ib_ah *ah = to_mah(wr->ah);
2306 	u16 pkey;
2307 	u32 qkey;
2308 	int send_size;
2309 	int header_size;
2310 	int spc;
2311 	int i;
2312 
2313 	if (wr->wr.opcode != IB_WR_SEND)
2314 		return -EINVAL;
2315 
2316 	send_size = 0;
2317 
2318 	for (i = 0; i < wr->wr.num_sge; ++i)
2319 		send_size += wr->wr.sg_list[i].length;
2320 
2321 	/* for proxy-qp0 sends, need to add in size of tunnel header */
2322 	/* for tunnel-qp0 sends, tunnel header is already in s/g list */
2323 	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
2324 		send_size += sizeof (struct mlx4_ib_tunnel_header);
2325 
2326 	ib_ud_header_init(send_size, 1, 0, 0, 0, 0, 0, 0, &sqp->ud_header);
2327 
2328 	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
2329 		sqp->ud_header.lrh.service_level =
2330 			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
2331 		sqp->ud_header.lrh.destination_lid =
2332 			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
2333 		sqp->ud_header.lrh.source_lid =
2334 			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
2335 	}
2336 
2337 	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
2338 
2339 	/* force loopback */
2340 	mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_VL15 | 0x1 | MLX4_WQE_MLX_SLR);
2341 	mlx->rlid = sqp->ud_header.lrh.destination_lid;
2342 
2343 	sqp->ud_header.lrh.virtual_lane    = 0;
2344 	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
2345 	ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
2346 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
2347 	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
2348 		sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
2349 	else
2350 		sqp->ud_header.bth.destination_qpn =
2351 			cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]);
2352 
2353 	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
2354 	if (mlx4_is_master(mdev->dev)) {
2355 		if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
2356 			return -EINVAL;
2357 	} else {
2358 		if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
2359 			return -EINVAL;
2360 	}
2361 	sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
2362 	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn);
2363 
2364 	sqp->ud_header.bth.opcode        = IB_OPCODE_UD_SEND_ONLY;
2365 	sqp->ud_header.immediate_present = 0;
2366 
2367 	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
2368 
2369 	/*
2370 	 * Inline data segments may not cross a 64 byte boundary.  If
2371 	 * our UD header is bigger than the space available up to the
2372 	 * next 64 byte boundary in the WQE, use two inline data
2373 	 * segments to hold the UD header.
2374 	 */
2375 	spc = MLX4_INLINE_ALIGN -
2376 	      ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
2377 	if (header_size <= spc) {
2378 		inl->byte_count = cpu_to_be32((1U << 31) | header_size);
2379 		memcpy(inl + 1, sqp->header_buf, header_size);
2380 		i = 1;
2381 	} else {
2382 		inl->byte_count = cpu_to_be32((1U << 31) | spc);
2383 		memcpy(inl + 1, sqp->header_buf, spc);
2384 
2385 		inl = (void *) (inl + 1) + spc;
2386 		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
2387 		/*
2388 		 * Need a barrier here to make sure all the data is
2389 		 * visible before the byte_count field is set.
2390 		 * Otherwise the HCA prefetcher could grab the 64-byte
2391 		 * chunk with this inline segment and get a valid (!=
2392 		 * 0xffffffff) byte count but stale data, and end up
2393 		 * generating a packet with bad headers.
2394 		 *
2395 		 * The first inline segment's byte_count field doesn't
2396 		 * need a barrier, because it comes after a
2397 		 * control/MLX segment and therefore is at an offset
2398 		 * of 16 mod 64.
2399 		 */
2400 		wmb();
2401 		inl->byte_count = cpu_to_be32((1U << 31) | (header_size - spc));
2402 		i = 2;
2403 	}
2404 
2405 	*mlx_seg_len =
2406 	ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
2407 	return 0;
2408 }
2409 
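/*
 * Look up the VL for a given SL in the cached SL-to-VL table: each u64
 * packs sixteen 4-bit entries, two per byte, with even SLs in the high
 * nibble and odd SLs in the low nibble.
 */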
2410 static u8 sl_to_vl(struct mlx4_ib_dev *dev, u8 sl, int port_num)
2411 {
2412 	union sl2vl_tbl_to_u64 tmp_vltab;
2413 	u8 vl;
2414 
2415 	if (sl > 15)
2416 		return 0xf;
2417 	tmp_vltab.sl64 = atomic64_read(&dev->sl2vl[port_num - 1]);
2418 	vl = tmp_vltab.sl8[sl >> 1];
2419 	if (sl & 1)
2420 		vl &= 0x0f;
2421 	else
2422 		vl >>= 4;
2423 	return vl;
2424 }
2425 
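/* UDP source port used for RoCEv2 packets sent from the GSI QP (QP1). */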
2426 #define MLX4_ROCEV2_QP1_SPORT 0xC000
2427 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
2428 			    void *wqe, unsigned *mlx_seg_len)
2429 {
2430 	struct ib_device *ib_dev = sqp->qp.ibqp.device;
2431 	struct mlx4_wqe_mlx_seg *mlx = wqe;
2432 	struct mlx4_wqe_ctrl_seg *ctrl = wqe;
2433 	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
2434 	struct mlx4_ib_ah *ah = to_mah(wr->ah);
2435 	union ib_gid sgid;
2436 	u16 pkey;
2437 	int send_size;
2438 	int header_size;
2439 	int spc;
2440 	int i;
2441 	int err = 0;
2442 	u16 vlan = 0xffff;
2443 	bool is_eth;
2444 	bool is_vlan = false;
2445 	bool is_grh;
2446 	bool is_udp = false;
2447 	int ip_version = 0;
2448 
2449 	send_size = 0;
2450 	for (i = 0; i < wr->wr.num_sge; ++i)
2451 		send_size += wr->wr.sg_list[i].length;
2452 
2453 	is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
2454 	is_grh = mlx4_ib_ah_grh_present(ah);
2455 	if (is_eth) {
2456 		struct ib_gid_attr gid_attr;
2457 
2458 		if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
2459 			/* When multi-function is enabled, the ib_core gid
2460 			 * indexes don't necessarily match the hw ones, so
2461 			 * we must use our own cache */
2462 			err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev,
2463 							   be32_to_cpu(ah->av.ib.port_pd) >> 24,
2464 							   ah->av.ib.gid_index, &sgid.raw[0]);
2465 			if (err)
2466 				return err;
2467 		} else  {
2468 			err = ib_get_cached_gid(ib_dev,
2469 						be32_to_cpu(ah->av.ib.port_pd) >> 24,
2470 						ah->av.ib.gid_index, &sgid,
2471 						&gid_attr);
2472 			if (!err) {
2473 				if (gid_attr.ndev)
2474 					dev_put(gid_attr.ndev);
2475 				if (!memcmp(&sgid, &zgid, sizeof(sgid)))
2476 					err = -ENOENT;
2477 			}
2478 			if (!err) {
2479 				is_udp = gid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
2480 				if (is_udp) {
2481 					if (ipv6_addr_v4mapped((struct in6_addr *)&sgid))
2482 						ip_version = 4;
2483 					else
2484 						ip_version = 6;
2485 					is_grh = false;
2486 				}
2487 			} else {
2488 				return err;
2489 			}
2490 		}
2491 		if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
2492 			vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
2493 			is_vlan = true;
2494 		}
2495 	}
2496 	err = ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh,
2497 			  ip_version, is_udp, 0, &sqp->ud_header);
2498 	if (err)
2499 		return err;
2500 
2501 	if (!is_eth) {
2502 		sqp->ud_header.lrh.service_level =
2503 			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
2504 		sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
2505 		sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
2506 	}
2507 
2508 	if (is_grh || (ip_version == 6)) {
2509 		sqp->ud_header.grh.traffic_class =
2510 			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
2511 		sqp->ud_header.grh.flow_label    =
2512 			ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
2513 		sqp->ud_header.grh.hop_limit     = ah->av.ib.hop_limit;
2514 		if (is_eth) {
2515 			memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
2516 		} else {
2517 			if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
2518 				/* When multi-function is enabled, the ib_core gid
2519 				 * indexes don't necessarily match the hw ones, so
2520 				 * we must use our own cache
2521 				 */
2522 				sqp->ud_header.grh.source_gid.global.subnet_prefix =
2523 					cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
2524 								    demux[sqp->qp.port - 1].
2525 								    subnet_prefix)));
2526 				sqp->ud_header.grh.source_gid.global.interface_id =
2527 					to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
2528 						       guid_cache[ah->av.ib.gid_index];
2529 			} else {
2530 				ib_get_cached_gid(ib_dev,
2531 						  be32_to_cpu(ah->av.ib.port_pd) >> 24,
2532 						  ah->av.ib.gid_index,
2533 						  &sqp->ud_header.grh.source_gid, NULL);
2534 			}
2535 		}
2536 		memcpy(sqp->ud_header.grh.destination_gid.raw,
2537 		       ah->av.ib.dgid, 16);
2538 	}
2539 
2540 	if (ip_version == 4) {
2541 		sqp->ud_header.ip4.tos =
2542 			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
2543 		sqp->ud_header.ip4.id = 0;
2544 		sqp->ud_header.ip4.frag_off = htons(IP_DF);
2545 		sqp->ud_header.ip4.ttl = ah->av.eth.hop_limit;
2546 
2547 		memcpy(&sqp->ud_header.ip4.saddr,
2548 		       sgid.raw + 12, 4);
2549 		memcpy(&sqp->ud_header.ip4.daddr, ah->av.ib.dgid + 12, 4);
2550 		sqp->ud_header.ip4.check = ib_ud_ip4_csum(&sqp->ud_header);
2551 	}
2552 
2553 	if (is_udp) {
2554 		sqp->ud_header.udp.dport = htons(ROCE_V2_UDP_DPORT);
2555 		sqp->ud_header.udp.sport = htons(MLX4_ROCEV2_QP1_SPORT);
2556 		sqp->ud_header.udp.csum = 0;
2557 	}
2558 
2559 	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
2560 
2561 	if (!is_eth) {
2562 		mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
2563 					  (sqp->ud_header.lrh.destination_lid ==
2564 					   IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
2565 					  (sqp->ud_header.lrh.service_level << 8));
2566 		if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
2567 			mlx->flags |= cpu_to_be32(0x1); /* force loopback */
2568 		mlx->rlid = sqp->ud_header.lrh.destination_lid;
2569 	}
2570 
2571 	switch (wr->wr.opcode) {
2572 	case IB_WR_SEND:
2573 		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY;
2574 		sqp->ud_header.immediate_present = 0;
2575 		break;
2576 	case IB_WR_SEND_WITH_IMM:
2577 		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2578 		sqp->ud_header.immediate_present = 1;
2579 		sqp->ud_header.immediate_data    = wr->wr.ex.imm_data;
2580 		break;
2581 	default:
2582 		return -EINVAL;
2583 	}
2584 
2585 	if (is_eth) {
2586 		struct in6_addr in6;
2587 		u16 ether_type;
2588 		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
2589 
2590 		ether_type = (!is_udp) ? MLX4_IB_IBOE_ETHERTYPE :
2591 			(ip_version == 4 ? ETHERTYPE_IP : ETHERTYPE_IPV6);
2592 
2593 		mlx->sched_prio = cpu_to_be16(pcp);
2594 
2595 		ether_addr_copy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac);
2596 		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
2597 		memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
2598 		memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
2599 		memcpy(&in6, sgid.raw, sizeof(in6));
2600 
2601 
2602 		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
2603 			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
2604 		if (!is_vlan) {
2605 			sqp->ud_header.eth.type = cpu_to_be16(ether_type);
2606 		} else {
2607 			sqp->ud_header.vlan.type = cpu_to_be16(ether_type);
2608 			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
2609 		}
2610 	} else {
2611 		sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 :
2612 							sl_to_vl(to_mdev(ib_dev),
2613 								 sqp->ud_header.lrh.service_level,
2614 								 sqp->qp.port);
2615 		if (sqp->qp.ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
2616 			return -EINVAL;
2617 		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
2618 			sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
2619 	}
2620 	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
2621 	if (!sqp->qp.ibqp.qp_num)
2622 		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
2623 	else
2624 		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
2625 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
2626 	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
2627 	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
2628 	sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
2629 					       sqp->qkey : wr->remote_qkey);
2630 	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
2631 
2632 	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
2633 
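	/* Compile-time disabled debug dump of the just-built UD header. */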
2634 	if (0) {
2635 		pr_err("built UD header of size %d:\n", header_size);
2636 		for (i = 0; i < header_size / 4; ++i) {
2637 			if (i % 8 == 0)
2638 				pr_err("  [%02x] ", i * 4);
2639 			pr_cont(" %08x",
2640 				be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
2641 			if ((i + 1) % 8 == 0)
2642 				pr_cont("\n");
2643 		}
2644 		pr_err("\n");
2645 	}
2646 
2647 	/*
2648 	 * Inline data segments may not cross a 64 byte boundary.  If
2649 	 * our UD header is bigger than the space available up to the
2650 	 * next 64 byte boundary in the WQE, use two inline data
2651 	 * segments to hold the UD header.
2652 	 */
2653 	spc = MLX4_INLINE_ALIGN -
2654 		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
2655 	if (header_size <= spc) {
2656 		inl->byte_count = cpu_to_be32(1U << 31 | header_size);
2657 		memcpy(inl + 1, sqp->header_buf, header_size);
2658 		i = 1;
2659 	} else {
2660 		inl->byte_count = cpu_to_be32(1U << 31 | spc);
2661 		memcpy(inl + 1, sqp->header_buf, spc);
2662 
2663 		inl = (void *) (inl + 1) + spc;
2664 		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
2665 		/*
2666 		 * Need a barrier here to make sure all the data is
2667 		 * visible before the byte_count field is set.
2668 		 * Otherwise the HCA prefetcher could grab the 64-byte
2669 		 * chunk with this inline segment and get a valid (!=
2670 		 * 0xffffffff) byte count but stale data, and end up
2671 		 * generating a packet with bad headers.
2672 		 *
2673 		 * The first inline segment's byte_count field doesn't
2674 		 * need a barrier, because it comes after a
2675 		 * control/MLX segment and therefore is at an offset
2676 		 * of 16 mod 64.
2677 		 */
2678 		wmb();
2679 		inl->byte_count = cpu_to_be32(1U << 31 | (header_size - spc));
2680 		i = 2;
2681 	}
2682 
2683 	*mlx_seg_len =
2684 		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
2685 	return 0;
2686 }
2687 
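/*
 * Check whether posting nreq more WRs would overflow the work queue.
 * The lockless head/tail check is redone under the CQ lock, since
 * completion processing is what advances wq->tail.
 */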
2688 static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
2689 {
2690 	unsigned cur;
2691 	struct mlx4_ib_cq *cq;
2692 
2693 	cur = wq->head - wq->tail;
2694 	if (likely(cur + nreq < wq->max_post))
2695 		return 0;
2696 
2697 	cq = to_mcq(ib_cq);
2698 	spin_lock(&cq->lock);
2699 	cur = wq->head - wq->tail;
2700 	spin_unlock(&cq->lock);
2701 
2702 	return cur + nreq >= wq->max_post;
2703 }
2704 
2705 static __be32 convert_access(int acc)
2706 {
2707 	return (acc & IB_ACCESS_REMOTE_ATOMIC ?
2708 		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC)       : 0) |
2709 	       (acc & IB_ACCESS_REMOTE_WRITE  ?
2710 		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
2711 	       (acc & IB_ACCESS_REMOTE_READ   ?
2712 		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ)  : 0) |
2713 	       (acc & IB_ACCESS_LOCAL_WRITE   ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE)  : 0) |
2714 		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
2715 }
2716 
2717 static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
2718 			struct ib_reg_wr *wr)
2719 {
2720 	struct mlx4_ib_mr *mr = to_mmr(wr->mr);
2721 
2722 	fseg->flags		= convert_access(wr->access);
2723 	fseg->mem_key		= cpu_to_be32(wr->key);
2724 	fseg->buf_list		= cpu_to_be64(mr->page_map);
2725 	fseg->start_addr	= cpu_to_be64(mr->ibmr.iova);
2726 	fseg->reg_len		= cpu_to_be64(mr->ibmr.length);
2727 	fseg->offset		= 0; /* XXX -- is this just for ZBVA? */
2728 	fseg->page_size		= cpu_to_be32(ilog2(mr->ibmr.page_size));
2729 	fseg->reserved[0]	= 0;
2730 	fseg->reserved[1]	= 0;
2731 }
2732 
2733 static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
2734 {
2735 	memset(iseg, 0, sizeof(*iseg));
2736 	iseg->mem_key = cpu_to_be32(rkey);
2737 }
2738 
2739 static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
2740 					  u64 remote_addr, u32 rkey)
2741 {
2742 	rseg->raddr    = cpu_to_be64(remote_addr);
2743 	rseg->rkey     = cpu_to_be32(rkey);
2744 	rseg->reserved = 0;
2745 }
2746 
2747 static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
2748 		struct ib_atomic_wr *wr)
2749 {
2750 	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
2751 		aseg->swap_add = cpu_to_be64(wr->swap);
2752 		aseg->compare  = cpu_to_be64(wr->compare_add);
2753 	} else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
2754 		aseg->swap_add = cpu_to_be64(wr->compare_add);
2755 		aseg->compare  = cpu_to_be64(wr->compare_add_mask);
2756 	} else {
2757 		aseg->swap_add = cpu_to_be64(wr->compare_add);
2758 		aseg->compare  = 0;
2759 	}
2760 
2761 }
2762 
2763 static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
2764 				  struct ib_atomic_wr *wr)
2765 {
2766 	aseg->swap_add		= cpu_to_be64(wr->swap);
2767 	aseg->swap_add_mask	= cpu_to_be64(wr->swap_mask);
2768 	aseg->compare		= cpu_to_be64(wr->compare_add);
2769 	aseg->compare_mask	= cpu_to_be64(wr->compare_add_mask);
2770 }
2771 
2772 static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
2773 			     struct ib_ud_wr *wr)
2774 {
2775 	memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
2776 	dseg->dqpn = cpu_to_be32(wr->remote_qpn);
2777 	dseg->qkey = cpu_to_be32(wr->remote_qkey);
2778 	dseg->vlan = to_mah(wr->ah)->av.eth.vlan;
2779 	memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6);
2780 }
2781 
2782 static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
2783 				    struct mlx4_wqe_datagram_seg *dseg,
2784 				    struct ib_ud_wr *wr,
2785 				    enum mlx4_ib_qp_type qpt)
2786 {
2787 	union mlx4_ext_av *av = &to_mah(wr->ah)->av;
2788 	struct mlx4_av sqp_av = {0};
2789 	int port = *((u8 *) &av->ib.port_pd) & 0x3;
2790 
2791 	/* force loopback */
2792 	sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000);
2793 	sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */
2794 	sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel &
2795 			cpu_to_be32(0xf0000000);
2796 
2797 	memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
2798 	if (qpt == MLX4_IB_QPT_PROXY_GSI)
2799 		dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
2800 	else
2801 		dseg->dqpn = cpu_to_be32(dev->dev->caps.qp0_tunnel[port - 1]);
2802 	/* Use QKEY from the QP context, which is set by master */
2803 	dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
2804 }
2805 
2806 static void build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len)
2807 {
2808 	struct mlx4_wqe_inline_seg *inl = wqe;
2809 	struct mlx4_ib_tunnel_header hdr;
2810 	struct mlx4_ib_ah *ah = to_mah(wr->ah);
2811 	int spc;
2812 	int i;
2813 
2814 	memcpy(&hdr.av, &ah->av, sizeof hdr.av);
2815 	hdr.remote_qpn = cpu_to_be32(wr->remote_qpn);
2816 	hdr.pkey_index = cpu_to_be16(wr->pkey_index);
2817 	hdr.qkey = cpu_to_be32(wr->remote_qkey);
2818 	memcpy(hdr.mac, ah->av.eth.mac, 6);
2819 	hdr.vlan = ah->av.eth.vlan;
2820 
2821 	spc = MLX4_INLINE_ALIGN -
2822 		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
2823 	if (sizeof (hdr) <= spc) {
2824 		memcpy(inl + 1, &hdr, sizeof (hdr));
2825 		wmb();
2826 		inl->byte_count = cpu_to_be32((1U << 31) | (u32)sizeof(hdr));
2827 		i = 1;
2828 	} else {
2829 		memcpy(inl + 1, &hdr, spc);
2830 		wmb();
2831 		inl->byte_count = cpu_to_be32((1U << 31) | spc);
2832 
2833 		inl = (void *) (inl + 1) + spc;
2834 		memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc);
2835 		wmb();
2836 		inl->byte_count = cpu_to_be32((1U << 31) | (u32)(sizeof (hdr) - spc));
2837 		i = 2;
2838 	}
2839 
2840 	*mlx_seg_len =
2841 		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16);
2842 }
2843 
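/*
 * Append a zeroed 4-byte inline segment to MLX (QP0/QP1) sends; this
 * reserves room for the ICRC (see its use in mlx4_ib_post_send()).
 */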
2844 static void set_mlx_icrc_seg(void *dseg)
2845 {
2846 	u32 *t = dseg;
2847 	struct mlx4_wqe_inline_seg *iseg = dseg;
2848 
2849 	t[1] = 0;
2850 
2851 	/*
2852 	 * Need a barrier here before writing the byte_count field to
2853 	 * make sure that all the data is visible before the
2854 	 * byte_count field is set.  Otherwise, if the segment begins
2855 	 * a new cacheline, the HCA prefetcher could grab the 64-byte
2856 	 * chunk and get a valid (!= 0xffffffff) byte count but
2857 	 * stale data, and end up sending the wrong data.
2858 	 */
2859 	wmb();
2860 
2861 	iseg->byte_count = cpu_to_be32((1U << 31) | 4);
2862 }
2863 
2864 static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
2865 {
2866 	dseg->lkey       = cpu_to_be32(sg->lkey);
2867 	dseg->addr       = cpu_to_be64(sg->addr);
2868 
2869 	/*
2870 	 * Need a barrier here before writing the byte_count field to
2871 	 * make sure that all the data is visible before the
2872 	 * byte_count field is set.  Otherwise, if the segment begins
2873 	 * a new cacheline, the HCA prefetcher could grab the 64-byte
2874 	 * chunk and get a valid (!= 0xffffffff) byte count but
2875 	 * stale data, and end up sending the wrong data.
2876 	 */
2877 	wmb();
2878 
2879 	dseg->byte_count = cpu_to_be32(sg->length);
2880 }
2881 
2882 static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
2883 {
2884 	dseg->byte_count = cpu_to_be32(sg->length);
2885 	dseg->lkey       = cpu_to_be32(sg->lkey);
2886 	dseg->addr       = cpu_to_be64(sg->addr);
2887 }
2888 
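/*
 * Copy the LSO header into the WQE and encode MSS/header length.  A
 * header spanning more than one cache line sets the bit returned via
 * *blh, which the caller ORs into owner_opcode.
 */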
2889 static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr,
2890 			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
2891 			 __be32 *lso_hdr_sz, __be32 *blh)
2892 {
2893 	unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);
2894 
2895 	if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
2896 		*blh = cpu_to_be32(1 << 6);
2897 
2898 	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
2899 		     wr->wr.num_sge > qp->sq.max_gs - (halign >> 4)))
2900 		return -EINVAL;
2901 
2902 	memcpy(wqe->header, wr->header, wr->hlen);
2903 
2904 	*lso_hdr_sz  = cpu_to_be32(wr->mss << 16 | wr->hlen);
2905 	*lso_seg_len = halign;
2906 	return 0;
2907 }
2908 
2909 static __be32 send_ieth(struct ib_send_wr *wr)
2910 {
2911 	switch (wr->opcode) {
2912 	case IB_WR_SEND_WITH_IMM:
2913 	case IB_WR_RDMA_WRITE_WITH_IMM:
2914 		return wr->ex.imm_data;
2915 
2916 	case IB_WR_SEND_WITH_INV:
2917 		return cpu_to_be32(wr->ex.invalidate_rkey);
2918 
2919 	default:
2920 		return 0;
2921 	}
2922 }
2923 
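/*
 * Write a zero-length inline segment (bit 31 of byte_count marks a
 * segment as inline); used purely as 16-byte padding within a WQE.
 */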
2924 static void add_zero_len_inline(void *wqe)
2925 {
2926 	struct mlx4_wqe_inline_seg *inl = wqe;
2927 	memset(wqe, 0, 16);
2928 	inl->byte_count = cpu_to_be32(1U << 31);
2929 }
2930 
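/*
 * Post a chain of send WRs: build each WQE (control segment, per-type
 * segments, then data segments written in reverse order), hand the
 * WQEs to the HCA via the ownership bit, and ring the doorbell once
 * for the whole chain.
 */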
2931 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2932 		      struct ib_send_wr **bad_wr)
2933 {
2934 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
2935 	void *wqe;
2936 	struct mlx4_wqe_ctrl_seg *ctrl;
2937 	struct mlx4_wqe_data_seg *dseg;
2938 	unsigned long flags;
2939 	int nreq;
2940 	int err = 0;
2941 	unsigned ind;
2942 	int uninitialized_var(stamp);
2943 	int uninitialized_var(size);
2944 	unsigned uninitialized_var(seglen);
2945 	__be32 dummy;
2946 	__be32 *lso_wqe;
2947 	__be32 lso_hdr_sz = 0;
2948 	__be32 blh;
2949 	int i;
2950 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
2951 
2952 	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
2953 		struct mlx4_ib_sqp *sqp = to_msqp(qp);
2954 
2955 		if (sqp->roce_v2_gsi) {
2956 			struct mlx4_ib_ah *ah = to_mah(ud_wr(wr)->ah);
2957 			struct ib_gid_attr gid_attr;
2958 			union ib_gid gid;
2959 
2960 			if (!ib_get_cached_gid(ibqp->device,
2961 					       be32_to_cpu(ah->av.ib.port_pd) >> 24,
2962 					       ah->av.ib.gid_index, &gid,
2963 					       &gid_attr)) {
2964 				if (gid_attr.ndev)
2965 					dev_put(gid_attr.ndev);
2966 				qp = (gid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ?
2967 					to_mqp(sqp->roce_v2_gsi) : qp;
2968 			} else {
2969 				pr_err("Failed to get gid at index %d. RoCEv2 will not work properly\n",
2970 				       ah->av.ib.gid_index);
2971 			}
2972 		}
2973 	}
2974 
2975 	spin_lock_irqsave(&qp->sq.lock, flags);
2976 	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
2977 		err = -EIO;
2978 		*bad_wr = wr;
2979 		nreq = 0;
2980 		goto out;
2981 	}
2982 
2983 	ind = qp->sq_next_wqe;
2984 
2985 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
2986 		lso_wqe = &dummy;
2987 		blh = 0;
2988 
2989 		if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
2990 			err = -ENOMEM;
2991 			*bad_wr = wr;
2992 			goto out;
2993 		}
2994 
2995 		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
2996 			err = -EINVAL;
2997 			*bad_wr = wr;
2998 			goto out;
2999 		}
3000 
3001 		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
3002 		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
3003 
3004 		ctrl->srcrb_flags =
3005 			(wr->send_flags & IB_SEND_SIGNALED ?
3006 			 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
3007 			(wr->send_flags & IB_SEND_SOLICITED ?
3008 			 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
3009 			((wr->send_flags & IB_SEND_IP_CSUM) ?
3010 			 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
3011 				     MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
3012 			qp->sq_signal_bits;
3013 
3014 		ctrl->imm = send_ieth(wr);
3015 
3016 		wqe += sizeof *ctrl;
3017 		size = sizeof *ctrl / 16;
3018 
3019 		switch (qp->mlx4_ib_qp_type) {
3020 		case MLX4_IB_QPT_RC:
3021 		case MLX4_IB_QPT_UC:
3022 			switch (wr->opcode) {
3023 			case IB_WR_ATOMIC_CMP_AND_SWP:
3024 			case IB_WR_ATOMIC_FETCH_AND_ADD:
3025 			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
3026 				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
3027 					      atomic_wr(wr)->rkey);
3028 				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
3029 
3030 				set_atomic_seg(wqe, atomic_wr(wr));
3031 				wqe  += sizeof (struct mlx4_wqe_atomic_seg);
3032 
3033 				size += (sizeof (struct mlx4_wqe_raddr_seg) +
3034 					 sizeof (struct mlx4_wqe_atomic_seg)) / 16;
3035 
3036 				break;
3037 
3038 			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
3039 				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
3040 					      atomic_wr(wr)->rkey);
3041 				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
3042 
3043 				set_masked_atomic_seg(wqe, atomic_wr(wr));
3044 				wqe  += sizeof (struct mlx4_wqe_masked_atomic_seg);
3045 
3046 				size += (sizeof (struct mlx4_wqe_raddr_seg) +
3047 					 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;
3048 
3049 				break;
3050 
3051 			case IB_WR_RDMA_READ:
3052 			case IB_WR_RDMA_WRITE:
3053 			case IB_WR_RDMA_WRITE_WITH_IMM:
3054 				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
3055 					      rdma_wr(wr)->rkey);
3056 				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
3057 				size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
3058 				break;
3059 
3060 			case IB_WR_LOCAL_INV:
3061 				ctrl->srcrb_flags |=
3062 					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
3063 				set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
3064 				wqe  += sizeof (struct mlx4_wqe_local_inval_seg);
3065 				size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
3066 				break;
3067 
3068 			case IB_WR_REG_MR:
3069 				ctrl->srcrb_flags |=
3070 					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
3071 				set_reg_seg(wqe, reg_wr(wr));
3072 				wqe  += sizeof(struct mlx4_wqe_fmr_seg);
3073 				size += sizeof(struct mlx4_wqe_fmr_seg) / 16;
3074 				break;
3075 
3076 			default:
3077 				/* No extra segments required for sends */
3078 				break;
3079 			}
3080 			break;
3081 
3082 		case MLX4_IB_QPT_TUN_SMI_OWNER:
3083 			err =  build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
3084 					ctrl, &seglen);
3085 			if (unlikely(err)) {
3086 				*bad_wr = wr;
3087 				goto out;
3088 			}
3089 			wqe  += seglen;
3090 			size += seglen / 16;
3091 			break;
3092 		case MLX4_IB_QPT_TUN_SMI:
3093 		case MLX4_IB_QPT_TUN_GSI:
3094 			/* This is a UD QP used in MAD responses to slaves. */
3095 			set_datagram_seg(wqe, ud_wr(wr));
3096 			/* set the forced-loopback bit in the data seg av */
3097 			*(__be32 *) wqe |= cpu_to_be32(0x80000000);
3098 			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
3099 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
3100 			break;
3101 		case MLX4_IB_QPT_UD:
3102 			set_datagram_seg(wqe, ud_wr(wr));
3103 			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
3104 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
3105 
3106 			if (wr->opcode == IB_WR_LSO) {
3107 				err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen,
3108 						&lso_hdr_sz, &blh);
3109 				if (unlikely(err)) {
3110 					*bad_wr = wr;
3111 					goto out;
3112 				}
3113 				lso_wqe = (__be32 *) wqe;
3114 				wqe  += seglen;
3115 				size += seglen / 16;
3116 			}
3117 			break;
3118 
3119 		case MLX4_IB_QPT_PROXY_SMI_OWNER:
3120 			err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
3121 					ctrl, &seglen);
3122 			if (unlikely(err)) {
3123 				*bad_wr = wr;
3124 				goto out;
3125 			}
3126 			wqe  += seglen;
3127 			size += seglen / 16;
3128 			/* to start tunnel header on a cache-line boundary */
3129 			add_zero_len_inline(wqe);
3130 			wqe += 16;
3131 			size++;
3132 			build_tunnel_header(ud_wr(wr), wqe, &seglen);
3133 			wqe  += seglen;
3134 			size += seglen / 16;
3135 			break;
3136 		case MLX4_IB_QPT_PROXY_SMI:
3137 		case MLX4_IB_QPT_PROXY_GSI:
3138 			/* If we are tunneling special QPs, this is a UD QP.
3139 			 * In this case we first add a UD segment targeting
3140 			 * the tunnel QP, and then add a header with address
3141 			 * information. */
3142 			set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe,
3143 						ud_wr(wr),
3144 						qp->mlx4_ib_qp_type);
3145 			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
3146 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
3147 			build_tunnel_header(ud_wr(wr), wqe, &seglen);
3148 			wqe  += seglen;
3149 			size += seglen / 16;
3150 			break;
3151 
3152 		case MLX4_IB_QPT_SMI:
3153 		case MLX4_IB_QPT_GSI:
3154 			err = build_mlx_header(to_msqp(qp), ud_wr(wr), ctrl,
3155 					&seglen);
3156 			if (unlikely(err)) {
3157 				*bad_wr = wr;
3158 				goto out;
3159 			}
3160 			wqe  += seglen;
3161 			size += seglen / 16;
3162 			break;
3163 
3164 		default:
3165 			break;
3166 		}
3167 
3168 		/*
3169 		 * Write data segments in reverse order, so as to
3170 		 * overwrite cacheline stamp last within each
3171 		 * cacheline.  This avoids issues with WQE
3172 		 * prefetching.
3173 		 */
3174 
3175 		dseg = wqe;
3176 		dseg += wr->num_sge - 1;
3177 		size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);
3178 
3179 		/* Add one more inline data segment for ICRC for MLX sends */
3180 		if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
3181 			     qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI ||
3182 			     qp->mlx4_ib_qp_type &
3183 			     (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) {
3184 			set_mlx_icrc_seg(dseg + 1);
3185 			size += sizeof (struct mlx4_wqe_data_seg) / 16;
3186 		}
3187 
3188 		for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
3189 			set_data_seg(dseg, wr->sg_list + i);
3190 
3191 		/*
3192 		 * Possibly overwrite stamping in cacheline with LSO
3193 		 * segment only after making sure all data segments
3194 		 * are written.
3195 		 */
3196 		wmb();
3197 		*lso_wqe = lso_hdr_sz;
3198 
3199 		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
3200 					     MLX4_WQE_CTRL_FENCE : 0) | size;
3201 
3202 		/*
3203 		 * Make sure descriptor is fully written before
3204 		 * setting ownership bit (because HW can start
3205 		 * executing as soon as we do).
3206 		 */
3207 		wmb();
3208 
3209 		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
3210 			*bad_wr = wr;
3211 			err = -EINVAL;
3212 			goto out;
3213 		}
3214 
3215 		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
3216 			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1U << 31) : 0) | blh;
3217 
3218 		stamp = ind + qp->sq_spare_wqes;
3219 		ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
3220 
3221 		/*
3222 		 * We can improve latency by not stamping the last
3223 		 * send queue WQE until after ringing the doorbell, so
3224 		 * only stamp here if there are still more WQEs to post.
3225 		 *
3226 		 * Same optimization applies to padding with NOP wqe
3227 		 * in case of WQE shrinking (used to prevent wrap-around
3228 		 * in the middle of WR).
3229 		 */
3230 		if (wr->next) {
3231 			stamp_send_wqe(qp, stamp, size * 16);
3232 			ind = pad_wraparound(qp, ind);
3233 		}
3234 	}
3235 
3236 out:
3237 	if (likely(nreq)) {
3238 		qp->sq.head += nreq;
3239 
3240 		/*
3241 		 * Make sure that descriptors are written before
3242 		 * doorbell record.
3243 		 */
3244 		wmb();
3245 
3246 		writel(qp->doorbell_qpn,
3247 		       to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
3248 
3249 		/*
3250 		 * Make sure doorbells don't leak out of SQ spinlock
3251 		 * and reach the HCA out of order.
3252 		 */
3253 		mmiowb();
3254 
3255 		stamp_send_wqe(qp, stamp, size * 16);
3256 
3257 		ind = pad_wraparound(qp, ind);
3258 		qp->sq_next_wqe = ind;
3259 	}
3260 
3261 	spin_unlock_irqrestore(&qp->sq.lock, flags);
3262 
3263 	return err;
3264 }
3265 
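/*
 * Post receive WRs: copy scatter entries into the RQ WQE, terminate a
 * short scatter list with an MLX4_INVALID_LKEY sentinel entry and
 * publish the new head through the doorbell record.
 */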
3266 int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3267 		      struct ib_recv_wr **bad_wr)
3268 {
3269 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
3270 	struct mlx4_wqe_data_seg *scat;
3271 	unsigned long flags;
3272 	int err = 0;
3273 	int nreq;
3274 	int ind;
3275 	int max_gs;
3276 	int i;
3277 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
3278 
3279 	max_gs = qp->rq.max_gs;
3280 	spin_lock_irqsave(&qp->rq.lock, flags);
3281 
3282 	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
3283 		err = -EIO;
3284 		*bad_wr = wr;
3285 		nreq = 0;
3286 		goto out;
3287 	}
3288 
3289 	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
3290 
3291 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
3292 		if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
3293 			err = -ENOMEM;
3294 			*bad_wr = wr;
3295 			goto out;
3296 		}
3297 
3298 		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
3299 			err = -EINVAL;
3300 			*bad_wr = wr;
3301 			goto out;
3302 		}
3303 
3304 		scat = get_recv_wqe(qp, ind);
3305 
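		/*
		 * Proxy QPs consume the first scatter entry for the tunnel
		 * header (struct mlx4_ib_proxy_sqp_hdr), received into a
		 * driver-owned DMA buffer; the lkey is borrowed from the
		 * caller's first SGE.
		 */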
3306 		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
3307 		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
3308 			ib_dma_sync_single_for_device(ibqp->device,
3309 						      qp->sqp_proxy_rcv[ind].map,
3310 						      sizeof (struct mlx4_ib_proxy_sqp_hdr),
3311 						      DMA_FROM_DEVICE);
3312 			scat->byte_count =
3313 				cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr));
3314 			/* use dma lkey from upper layer entry */
3315 			scat->lkey = cpu_to_be32(wr->sg_list->lkey);
3316 			scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
3317 			scat++;
3318 			max_gs--;
3319 		}
3320 
3321 		for (i = 0; i < wr->num_sge; ++i)
3322 			__set_data_seg(scat + i, wr->sg_list + i);
3323 
3324 		if (i < max_gs) {
3325 			scat[i].byte_count = 0;
3326 			scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
3327 			scat[i].addr       = 0;
3328 		}
3329 
3330 		qp->rq.wrid[ind] = wr->wr_id;
3331 
3332 		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
3333 	}
3334 
3335 out:
3336 	if (likely(nreq)) {
3337 		qp->rq.head += nreq;
3338 
3339 		/*
3340 		 * Make sure that descriptors are written before
3341 		 * doorbell record.
3342 		 */
3343 		wmb();
3344 
3345 		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
3346 	}
3347 
3348 	spin_unlock_irqrestore(&qp->rq.lock, flags);
3349 
3350 	return err;
3351 }
3352 
3353 static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
3354 {
3355 	switch (mlx4_state) {
3356 	case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
3357 	case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
3358 	case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
3359 	case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
3360 	case MLX4_QP_STATE_SQ_DRAINING:
3361 	case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
3362 	case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
3363 	case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
3364 	default:		     return -1;
3365 	}
3366 }
3367 
3368 static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
3369 {
3370 	switch (mlx4_mig_state) {
3371 	case MLX4_QP_PM_ARMED:		return IB_MIG_ARMED;
3372 	case MLX4_QP_PM_REARM:		return IB_MIG_REARM;
3373 	case MLX4_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
3374 	default: return -1;
3375 	}
3376 }
3377 
3378 static int to_ib_qp_access_flags(int mlx4_flags)
3379 {
3380 	int ib_flags = 0;
3381 
3382 	if (mlx4_flags & MLX4_QP_BIT_RRE)
3383 		ib_flags |= IB_ACCESS_REMOTE_READ;
3384 	if (mlx4_flags & MLX4_QP_BIT_RWE)
3385 		ib_flags |= IB_ACCESS_REMOTE_WRITE;
3386 	if (mlx4_flags & MLX4_QP_BIT_RAE)
3387 		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
3388 
3389 	return ib_flags;
3390 }
3391 
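/*
 * Decode a hardware address path into an ib_ah_attr.  The port is
 * carried in bit 6 of sched_queue, and the SL is encoded differently
 * for IB and Ethernet (RoCE) link layers.
 */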
3392 static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
3393 				struct mlx4_qp_path *path)
3394 {
3395 	struct mlx4_dev *dev = ibdev->dev;
3396 	int is_eth;
3397 
3398 	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
3399 	ib_ah_attr->port_num	  = path->sched_queue & 0x40 ? 2 : 1;
3400 
3401 	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
3402 		return;
3403 
3404 	is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
3405 		IB_LINK_LAYER_ETHERNET;
3406 	if (is_eth)
3407 		ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
3408 		((path->sched_queue & 4) << 1);
3409 	else
3410 		ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;
3411 
3412 	ib_ah_attr->dlid	  = be16_to_cpu(path->rlid);
3413 	ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
3414 	ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
3415 	ib_ah_attr->ah_flags      = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
3416 	if (ib_ah_attr->ah_flags) {
3417 		ib_ah_attr->grh.sgid_index = path->mgid_index;
3418 		ib_ah_attr->grh.hop_limit  = path->hop_limit;
3419 		ib_ah_attr->grh.traffic_class =
3420 			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
3421 		ib_ah_attr->grh.flow_label =
3422 			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
3423 		memcpy(ib_ah_attr->grh.dgid.raw,
3424 			path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
3425 	}
3426 }
3427 
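/*
 * Query a QP: fetch the QP context from the device (skipped when the
 * QP is in RESET, since there is nothing to query) and unpack it into
 * the verbs qp_attr/qp_init_attr structures.
 */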
3428 int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
3429 		     struct ib_qp_init_attr *qp_init_attr)
3430 {
3431 	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
3432 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
3433 	struct mlx4_qp_context context;
3434 	int mlx4_state;
3435 	int err = 0;
3436 
3437 	mutex_lock(&qp->mutex);
3438 
3439 	if (qp->state == IB_QPS_RESET) {
3440 		qp_attr->qp_state = IB_QPS_RESET;
3441 		goto done;
3442 	}
3443 
3444 	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
3445 	if (err) {
3446 		err = -EINVAL;
3447 		goto out;
3448 	}
3449 
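	/* The QP state is encoded in the top four bits of the QPC flags word. */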
3450 	mlx4_state = be32_to_cpu(context.flags) >> 28;
3451 
3452 	qp->state		     = to_ib_qp_state(mlx4_state);
3453 	qp_attr->qp_state	     = qp->state;
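	/* mtu_msgmax: path MTU in bits 7:5, log2 of max message size in bits 4:0 */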
3454 	qp_attr->path_mtu	     = context.mtu_msgmax >> 5;
3455 	qp_attr->path_mig_state	     =
3456 		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
3457 	qp_attr->qkey		     = be32_to_cpu(context.qkey);
3458 	qp_attr->rq_psn		     = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
3459 	qp_attr->sq_psn		     = be32_to_cpu(context.next_send_psn) & 0xffffff;
3460 	qp_attr->dest_qp_num	     = be32_to_cpu(context.remote_qpn) & 0xffffff;
3461 	qp_attr->qp_access_flags     =
3462 		to_ib_qp_access_flags(be32_to_cpu(context.params2));
3463 
3464 	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
3465 		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
3466 		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
3467 		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
3468 		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
3469 	}
3470 
3471 	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
3472 	if (qp_attr->qp_state == IB_QPS_INIT)
3473 		qp_attr->port_num = qp->port;
3474 	else
3475 		qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;
3476 
3477 	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
3478 	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;
3479 
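	/* The rd_atomic depths are stored log2-encoded in params1/params2. */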
3480 	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);
3481 
3482 	qp_attr->max_dest_rd_atomic =
3483 		1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
3484 	qp_attr->min_rnr_timer	    =
3485 		(be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
3486 	qp_attr->timeout	    = context.pri_path.ackto >> 3;
3487 	qp_attr->retry_cnt	    = (be32_to_cpu(context.params1) >> 16) & 0x7;
3488 	qp_attr->rnr_retry	    = (be32_to_cpu(context.params1) >> 13) & 0x7;
3489 	qp_attr->alt_timeout	    = context.alt_path.ackto >> 3;
3490 
3491 done:
3492 	qp_attr->cur_qp_state	     = qp_attr->qp_state;
3493 	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
3494 	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
3495 
3496 	if (!ibqp->uobject) {
3497 		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
3498 		qp_attr->cap.max_send_sge = qp->sq.max_gs;
3499 	} else {
3500 		qp_attr->cap.max_send_wr  = 0;
3501 		qp_attr->cap.max_send_sge = 0;
3502 	}
3503 
3504 	/*
3505 	 * We don't support inline sends for kernel QPs (yet), and we
3506 	 * don't know what userspace's value should be.
3507 	 */
3508 	qp_attr->cap.max_inline_data = 0;
3509 
3510 	qp_init_attr->cap	     = qp_attr->cap;
3511 
3512 	qp_init_attr->create_flags = 0;
3513 	if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
3514 		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
3515 
3516 	if (qp->flags & MLX4_IB_QP_LSO)
3517 		qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
3518 
3519 	if (qp->flags & MLX4_IB_QP_NETIF)
3520 		qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP;
3521 
3522 	qp_init_attr->sq_sig_type =
3523 		qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
3524 		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
3525 
3526 out:
3527 	mutex_unlock(&qp->mutex);
3528 	return err;
3529 }
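
/*
 * Illustrative sketch (not part of this file): callers reach the query
 * entry point above through the core ib_query_qp() wrapper, e.g. to
 * poll whether a send-queue drain has completed:
 *
 *	struct ib_qp_attr attr;
 *	struct ib_qp_init_attr init_attr;
 *	int ret = ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr);
 *
 *	if (!ret && attr.qp_state == IB_QPS_SQD && !attr.sq_draining)
 *		complete(&drain_done);	// 'drain_done' is a hypothetical completion
 */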
3530 
3531