xref: /freebsd/sys/dev/bnxt/bnxt_re/ib_verbs.c (revision acd884de)
1 /*
2  * Copyright (c) 2015-2024, Broadcom. All rights reserved.  The term
3  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in
13  *    the documentation and/or other materials provided with the
14  *    distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
18  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
23  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
25  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
26  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  * Description: IB Verbs interpreter
29  */
30 
31 #include <linux/if_ether.h>
32 #include <linux/etherdevice.h>
33 #include <rdma/uverbs_ioctl.h>
34 
35 #include "bnxt_re.h"
36 #include "ib_verbs.h"
37 
38 static inline
get_ib_umem_sgl(struct ib_umem * umem,u32 * nmap)39 struct scatterlist *get_ib_umem_sgl(struct ib_umem *umem, u32 *nmap)
40 {
41 
42 	*nmap = umem->nmap;
43 	return umem->sg_head.sgl;
44 }
45 
bnxt_re_peer_mem_release(struct ib_umem * umem)46 static inline void bnxt_re_peer_mem_release(struct ib_umem *umem)
47 {
48 	dev_dbg(NULL, "ib_umem_release getting invoked \n");
49 	ib_umem_release(umem);
50 }
51 
bnxt_re_resolve_dmac_task(struct work_struct * work)52 void bnxt_re_resolve_dmac_task(struct work_struct *work)
53 {
54 	int rc = -1;
55 	struct bnxt_re_dev *rdev;
56 	struct ib_ah_attr	*ah_attr;
57 	struct bnxt_re_resolve_dmac_work *dmac_work =
58 			container_of(work, struct bnxt_re_resolve_dmac_work, work);
59 
60 	rdev = dmac_work->rdev;
61 	ah_attr = dmac_work->ah_attr;
62 	rc = ib_resolve_eth_dmac(&rdev->ibdev, ah_attr);
63 	if (rc)
64 		dev_err(rdev_to_dev(dmac_work->rdev),
65 			"Failed to resolve dest mac rc = %d\n", rc);
66 	atomic_set(&dmac_work->status_wait, rc << 8);
67 }
68 
__from_ib_access_flags(int iflags)69 static int __from_ib_access_flags(int iflags)
70 {
71 	int qflags = 0;
72 
73 	if (iflags & IB_ACCESS_LOCAL_WRITE)
74 		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
75 	if (iflags & IB_ACCESS_REMOTE_READ)
76 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
77 	if (iflags & IB_ACCESS_REMOTE_WRITE)
78 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
79 	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
80 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
81 	if (iflags & IB_ACCESS_MW_BIND)
82 		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
83 	if (iflags & IB_ZERO_BASED)
84 		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
85 	if (iflags & IB_ACCESS_ON_DEMAND)
86 		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
87 	return qflags;
88 };
89 
__to_ib_access_flags(int qflags)90 static enum ib_access_flags __to_ib_access_flags(int qflags)
91 {
92 	enum ib_access_flags iflags = 0;
93 
94 	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
95 		iflags |= IB_ACCESS_LOCAL_WRITE;
96 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
97 		iflags |= IB_ACCESS_REMOTE_WRITE;
98 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
99 		iflags |= IB_ACCESS_REMOTE_READ;
100 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
101 		iflags |= IB_ACCESS_REMOTE_ATOMIC;
102 	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
103 		iflags |= IB_ACCESS_MW_BIND;
104 	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
105 		iflags |= IB_ZERO_BASED;
106 	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
107 		iflags |= IB_ACCESS_ON_DEMAND;
108 	return iflags;
109 };
110 
bnxt_re_copy_to_udata(struct bnxt_re_dev * rdev,void * data,int len,struct ib_udata * udata)111 static int bnxt_re_copy_to_udata(struct bnxt_re_dev *rdev, void *data,
112 				 int len, struct ib_udata *udata)
113 {
114 	int rc;
115 
116 	rc = ib_copy_to_udata(udata, data, len);
117 	if (rc)
118 		dev_err(rdev_to_dev(rdev),
119 			"ucontext copy failed from %ps rc %d\n",
120 			__builtin_return_address(0), rc);
121 
122 	return rc;
123 }
124 
bnxt_re_get_netdev(struct ib_device * ibdev,u8 port_num)125 struct ifnet *bnxt_re_get_netdev(struct ib_device *ibdev,
126 				 u8 port_num)
127 {
128 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
129 	struct ifnet *netdev = NULL;
130 
131 	rcu_read_lock();
132 
133 	if (!rdev || !rdev->netdev)
134 		goto end;
135 
136 	netdev = rdev->netdev;
137 
138 	/* In case of active-backup bond mode, return active slave */
139 	if (netdev)
140 		dev_hold(netdev);
141 
142 end:
143 	rcu_read_unlock();
144 	return netdev;
145 }
146 
bnxt_re_query_device(struct ib_device * ibdev,struct ib_device_attr * ib_attr,struct ib_udata * udata)147 int bnxt_re_query_device(struct ib_device *ibdev,
148 			 struct ib_device_attr *ib_attr,
149 			 struct ib_udata *udata)
150 {
151 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
152 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
153 
154 	memset(ib_attr, 0, sizeof(*ib_attr));
155 
156 	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver, 4);
157 	bnxt_qplib_get_guid(rdev->dev_addr, (u8 *)&ib_attr->sys_image_guid);
158 	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
159 	ib_attr->page_size_cap = dev_attr->page_size_cap;
160 	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
161 	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
162 	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
163 	ib_attr->max_qp = dev_attr->max_qp;
164 	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
165 	/*
166 	 * Read and set from the module param 'min_tx_depth'
167 	 * only once after the driver load
168 	 */
169 	if (rdev->min_tx_depth == 1 &&
170 	    min_tx_depth < dev_attr->max_qp_wqes)
171 		rdev->min_tx_depth = min_tx_depth;
172 	ib_attr->device_cap_flags =
173 				    IB_DEVICE_CURR_QP_STATE_MOD
174 				    | IB_DEVICE_RC_RNR_NAK_GEN
175 				    | IB_DEVICE_SHUTDOWN_PORT
176 				    | IB_DEVICE_SYS_IMAGE_GUID
177 				    | IB_DEVICE_LOCAL_DMA_LKEY
178 				    | IB_DEVICE_RESIZE_MAX_WR
179 				    | IB_DEVICE_PORT_ACTIVE_EVENT
180 				    | IB_DEVICE_N_NOTIFY_CQ
181 				    | IB_DEVICE_MEM_WINDOW
182 				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
183 				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
184 	ib_attr->max_send_sge = dev_attr->max_qp_sges;
185 	ib_attr->max_recv_sge = dev_attr->max_qp_sges;
186 	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
187 	ib_attr->max_cq = dev_attr->max_cq;
188 	ib_attr->max_cqe = dev_attr->max_cq_wqes;
189 	ib_attr->max_mr = dev_attr->max_mr;
190 	ib_attr->max_pd = dev_attr->max_pd;
191 	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
192 	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
193 	if (dev_attr->is_atomic) {
194 		ib_attr->atomic_cap = IB_ATOMIC_GLOB;
195 		ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
196 	}
197 	ib_attr->max_ee_rd_atom = 0;
198 	ib_attr->max_res_rd_atom = 0;
199 	ib_attr->max_ee_init_rd_atom = 0;
200 	ib_attr->max_ee = 0;
201 	ib_attr->max_rdd = 0;
202 	ib_attr->max_mw = dev_attr->max_mw;
203 	ib_attr->max_raw_ipv6_qp = 0;
204 	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
205 	ib_attr->max_mcast_grp = 0;
206 	ib_attr->max_mcast_qp_attach = 0;
207 	ib_attr->max_total_mcast_qp_attach = 0;
208 	ib_attr->max_ah = dev_attr->max_ah;
209 	ib_attr->max_srq = dev_attr->max_srq;
210 	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
211 	ib_attr->max_srq_sge = dev_attr->max_srq_sges;
212 
213 	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
214 	ib_attr->max_pkeys = 1;
215 	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
216 	ib_attr->sig_prot_cap = 0;
217 	ib_attr->sig_guard_cap = 0;
218 	ib_attr->odp_caps.general_caps = 0;
219 
220 	return 0;
221 }
222 
bnxt_re_modify_device(struct ib_device * ibdev,int device_modify_mask,struct ib_device_modify * device_modify)223 int bnxt_re_modify_device(struct ib_device *ibdev,
224 			  int device_modify_mask,
225 			  struct ib_device_modify *device_modify)
226 {
227 	dev_dbg(rdev_to_dev(rdev), "Modify device with mask 0x%x\n",
228 		device_modify_mask);
229 
230 	switch (device_modify_mask) {
231 	case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
232 		/* Modify the GUID requires the modification of the GID table */
233 		/* GUID should be made as READ-ONLY */
234 		break;
235 	case IB_DEVICE_MODIFY_NODE_DESC:
236 		/* Node Desc should be made as READ-ONLY */
237 		break;
238 	default:
239 		break;
240 	}
241 	return 0;
242 }
243 
__to_ib_speed_width(u32 espeed,u8 * speed,u8 * width)244 static void __to_ib_speed_width(u32 espeed, u8 *speed, u8 *width)
245 {
246 	switch (espeed) {
247 	case SPEED_1000:
248 		*speed = IB_SPEED_SDR;
249 		*width = IB_WIDTH_1X;
250 		break;
251 	case SPEED_10000:
252 		*speed = IB_SPEED_QDR;
253 		*width = IB_WIDTH_1X;
254 		break;
255 	case SPEED_20000:
256 		*speed = IB_SPEED_DDR;
257 		*width = IB_WIDTH_4X;
258 		break;
259 	case SPEED_25000:
260 		*speed = IB_SPEED_EDR;
261 		*width = IB_WIDTH_1X;
262 		break;
263 	case SPEED_40000:
264 		*speed = IB_SPEED_QDR;
265 		*width = IB_WIDTH_4X;
266 		break;
267 	case SPEED_50000:
268 		*speed = IB_SPEED_EDR;
269 		*width = IB_WIDTH_2X;
270 		break;
271 	case SPEED_100000:
272 		*speed = IB_SPEED_EDR;
273 		*width = IB_WIDTH_4X;
274 		break;
275 	case SPEED_200000:
276 		*speed = IB_SPEED_HDR;
277 		*width = IB_WIDTH_4X;
278 		break;
279 	default:
280 		*speed = IB_SPEED_SDR;
281 		*width = IB_WIDTH_1X;
282 		break;
283 	}
284 }
285 
286 /* Port */
bnxt_re_query_port(struct ib_device * ibdev,u8 port_num,struct ib_port_attr * port_attr)287 int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
288 		       struct ib_port_attr *port_attr)
289 {
290 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
291 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
292 	u8 active_speed = 0, active_width = 0;
293 
294 	dev_dbg(rdev_to_dev(rdev), "QUERY PORT with port_num 0x%x\n", port_num);
295 	memset(port_attr, 0, sizeof(*port_attr));
296 
297 	port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
298 	port_attr->state = bnxt_re_get_link_state(rdev);
299 	if (port_attr->state == IB_PORT_ACTIVE)
300 		port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
301 	port_attr->max_mtu = IB_MTU_4096;
302 	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->if_mtu);
303 	port_attr->gid_tbl_len = dev_attr->max_sgid;
304 	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
305 				    IB_PORT_DEVICE_MGMT_SUP |
306 				    IB_PORT_VENDOR_CLASS_SUP |
307 				    IB_PORT_IP_BASED_GIDS;
308 
309 	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
310 	port_attr->bad_pkey_cntr = 0;
311 	port_attr->qkey_viol_cntr = 0;
312 	port_attr->pkey_tbl_len = dev_attr->max_pkey;
313 	port_attr->lid = 0;
314 	port_attr->sm_lid = 0;
315 	port_attr->lmc = 0;
316 	port_attr->max_vl_num = 4;
317 	port_attr->sm_sl = 0;
318 	port_attr->subnet_timeout = 0;
319 	port_attr->init_type_reply = 0;
320 	rdev->espeed = rdev->en_dev->espeed;
321 
322 	if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
323 		__to_ib_speed_width(rdev->espeed, &active_speed,
324 				    &active_width);
325 
326 	port_attr->active_speed = active_speed;
327 	port_attr->active_width = active_width;
328 
329 	return 0;
330 }
331 
bnxt_re_modify_port(struct ib_device * ibdev,u8 port_num,int port_modify_mask,struct ib_port_modify * port_modify)332 int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
333 			int port_modify_mask,
334 			struct ib_port_modify *port_modify)
335 {
336 	dev_dbg(rdev_to_dev(rdev), "Modify port with mask 0x%x\n",
337 		port_modify_mask);
338 
339 	switch (port_modify_mask) {
340 	case IB_PORT_SHUTDOWN:
341 		break;
342 	case IB_PORT_INIT_TYPE:
343 		break;
344 	case IB_PORT_RESET_QKEY_CNTR:
345 		break;
346 	default:
347 		break;
348 	}
349 	return 0;
350 }
351 
bnxt_re_get_port_immutable(struct ib_device * ibdev,u8 port_num,struct ib_port_immutable * immutable)352 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
353 			       struct ib_port_immutable *immutable)
354 {
355 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
356 	struct ib_port_attr port_attr;
357 
358 	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
359 		return -EINVAL;
360 
361 	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
362 	immutable->gid_tbl_len = port_attr.gid_tbl_len;
363 	if (rdev->roce_mode == BNXT_RE_FLAG_ROCEV1_CAP)
364 		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
365 	else if (rdev->roce_mode == BNXT_RE_FLAG_ROCEV2_CAP)
366 		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
367 	else
368 		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
369 					    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
370 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
371 	return 0;
372 }
373 
bnxt_re_compat_qfwstr(void)374 void bnxt_re_compat_qfwstr(void)
375 {
376 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
377 
378 	sprintf(str, "%d.%d.%d.%d", rdev->dev_attr->fw_ver[0],
379 		rdev->dev_attr->fw_ver[1], rdev->dev_attr->fw_ver[2],
380 		rdev->dev_attr->fw_ver[3]);
381 }
382 
bnxt_re_query_pkey(struct ib_device * ibdev,u8 port_num,u16 index,u16 * pkey)383 int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
384 		       u16 index, u16 *pkey)
385 {
386 	if (index > 0)
387 		return -EINVAL;
388 
389 	*pkey = IB_DEFAULT_PKEY_FULL;
390 
391 	return 0;
392 }
393 
bnxt_re_query_gid(struct ib_device * ibdev,u8 port_num,int index,union ib_gid * gid)394 int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
395 		      int index, union ib_gid *gid)
396 {
397 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
398 	int rc = 0;
399 
400 	/* Ignore port_num */
401 	memset(gid, 0, sizeof(*gid));
402 	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
403 				 &rdev->qplib_res.sgid_tbl, index,
404 				 (struct bnxt_qplib_gid *)gid);
405 	return rc;
406 }
407 
bnxt_re_del_gid(struct ib_device * ibdev,u8 port_num,unsigned int index,void ** context)408 int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
409 		    unsigned int index, void **context)
410 {
411 	int rc = 0;
412 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
413 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
414 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
415 	struct bnxt_qplib_gid *gid_to_del;
416 	u16 vlan_id = 0xFFFF;
417 
418 	/* Delete the entry from the hardware */
419 	ctx = *context;
420 	if (!ctx) {
421 		dev_err(rdev_to_dev(rdev), "GID entry has no ctx?!\n");
422 		return -EINVAL;
423 	}
424 	if (sgid_tbl && sgid_tbl->active) {
425 		if (ctx->idx >= sgid_tbl->max) {
426 			dev_dbg(rdev_to_dev(rdev), "GID index out of range?!\n");
427 			return -EINVAL;
428 		}
429 		gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
430 		vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
431 		ctx->refcnt--;
432 		/* DEL_GID is called via WQ context(netdevice_event_work_handler)
433 		 * or via the ib_unregister_device path. In the former case QP1
434 		 * may not be destroyed yet, in which case just return as FW
435 		 * needs that entry to be present and will fail it's deletion.
436 		 * We could get invoked again after QP1 is destroyed OR get an
437 		 * ADD_GID call with a different GID value for the same index
438 		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
439 		 */
440 		if (ctx->idx == 0 &&
441 		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
442 		    (rdev->gsi_ctx.gsi_sqp ||
443 		     rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD)) {
444 			dev_dbg(rdev_to_dev(rdev),
445 				"Trying to delete GID0 while QP1 is alive\n");
446 			if (!ctx->refcnt) {
447 				rdev->gid_map[index] = -1;
448 				ctx_tbl = sgid_tbl->ctx;
449 				ctx_tbl[ctx->idx] = NULL;
450 				kfree(ctx);
451 			}
452 			return 0;
453 		}
454 		rdev->gid_map[index] = -1;
455 		if (!ctx->refcnt) {
456 			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
457 						 vlan_id, true);
458 			if (!rc) {
459 				dev_dbg(rdev_to_dev(rdev), "GID remove success\n");
460 				ctx_tbl = sgid_tbl->ctx;
461 				ctx_tbl[ctx->idx] = NULL;
462 				kfree(ctx);
463 			} else {
464 				dev_err(rdev_to_dev(rdev),
465 					"Remove GID failed rc = 0x%x\n", rc);
466 			}
467 		}
468 	} else {
469 		dev_dbg(rdev_to_dev(rdev), "GID sgid_tbl does not exist!\n");
470 		return -EINVAL;
471 	}
472 	return rc;
473 }
474 
bnxt_re_add_gid(struct ib_device * ibdev,u8 port_num,unsigned int index,const union ib_gid * gid,const struct ib_gid_attr * attr,void ** context)475 int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
476 		    unsigned int index, const union ib_gid *gid,
477 		    const struct ib_gid_attr *attr, void **context)
478 {
479 	int rc;
480 	u32 tbl_idx = 0;
481 	u16 vlan_id = 0xFFFF;
482 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
483 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
484 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
485 	if ((attr->ndev) && is_vlan_dev(attr->ndev))
486 		vlan_id = vlan_dev_vlan_id(attr->ndev);
487 
488 	rc = bnxt_qplib_add_sgid(sgid_tbl, gid,
489 				 rdev->dev_addr,
490 				 vlan_id, true, &tbl_idx);
491 	if (rc == -EALREADY) {
492 		dev_dbg(rdev_to_dev(rdev), "GID %pI6 is already present\n", gid);
493 		ctx_tbl = sgid_tbl->ctx;
494 		if (!ctx_tbl[tbl_idx]) {
495 			ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
496 			if (!ctx)
497 				return -ENOMEM;
498 			ctx->idx = tbl_idx;
499 			ctx->refcnt = 1;
500 			ctx_tbl[tbl_idx] = ctx;
501 		} else {
502 			ctx_tbl[tbl_idx]->refcnt++;
503 		}
504 		*context = ctx_tbl[tbl_idx];
505 		/* tbl_idx is the HW table index and index is the stack index */
506 		rdev->gid_map[index] = tbl_idx;
507 		return 0;
508 	} else if (rc < 0) {
509 		dev_err(rdev_to_dev(rdev), "Add GID failed rc = 0x%x\n", rc);
510 		return rc;
511 	} else {
512 		ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
513 		if (!ctx) {
514 			dev_err(rdev_to_dev(rdev), "Add GID ctx failed\n");
515 			return -ENOMEM;
516 		}
517 		ctx_tbl = sgid_tbl->ctx;
518 		ctx->idx = tbl_idx;
519 		ctx->refcnt = 1;
520 		ctx_tbl[tbl_idx] = ctx;
521 		/* tbl_idx is the HW table index and index is the stack index */
522 		rdev->gid_map[index] = tbl_idx;
523 		*context = ctx;
524 	}
525 	return rc;
526 }
527 
bnxt_re_get_link_layer(struct ib_device * ibdev,u8 port_num)528 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
529 					    u8 port_num)
530 {
531 	return IB_LINK_LAYER_ETHERNET;
532 }
533 
bnxt_re_legacy_create_fence_wqe(struct bnxt_re_pd * pd)534 static void bnxt_re_legacy_create_fence_wqe(struct bnxt_re_pd *pd)
535 {
536 	struct bnxt_re_legacy_fence_data *fence = &pd->fence;
537 	struct ib_mr *ib_mr = &fence->mr->ib_mr;
538 	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
539 	struct bnxt_re_dev *rdev = pd->rdev;
540 
541 	if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
542 		return;
543 
544 	memset(wqe, 0, sizeof(*wqe));
545 	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
546 	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
547 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
548 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
549 	wqe->bind.zero_based = false;
550 	wqe->bind.parent_l_key = ib_mr->lkey;
551 	wqe->bind.va = (u64)fence->va;
552 	wqe->bind.length = fence->size;
553 	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
554 	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
555 
556 	/* Save the initial rkey in fence structure for now;
557 	 * wqe->bind.r_key will be set at (re)bind time.
558 	 */
559 	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
560 }
561 
bnxt_re_legacy_bind_fence_mw(struct bnxt_qplib_qp * qplib_qp)562 static int bnxt_re_legacy_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
563 {
564 	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
565 					     qplib_qp);
566 	struct ib_pd *ib_pd = qp->ib_qp.pd;
567 	struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
568 	struct bnxt_re_legacy_fence_data *fence = &pd->fence;
569 	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
570 	struct bnxt_qplib_swqe wqe;
571 	int rc;
572 
573 	/* TODO: Need SQ locking here when Fence WQE
574 	 * posting moves up into bnxt_re from bnxt_qplib.
575 	 */
576 	memcpy(&wqe, fence_wqe, sizeof(wqe));
577 	wqe.bind.r_key = fence->bind_rkey;
578 	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
579 
580 	dev_dbg(rdev_to_dev(qp->rdev),
581 		"Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
582 		wqe.bind.r_key, qp->qplib_qp.id, pd);
583 	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
584 	if (rc) {
585 		dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
586 		return rc;
587 	}
588 	bnxt_qplib_post_send_db(&qp->qplib_qp);
589 
590 	return rc;
591 }
592 
bnxt_re_legacy_create_fence_mr(struct bnxt_re_pd * pd)593 static int bnxt_re_legacy_create_fence_mr(struct bnxt_re_pd *pd)
594 {
595 	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
596 	struct bnxt_re_legacy_fence_data *fence = &pd->fence;
597 	struct bnxt_re_dev *rdev = pd->rdev;
598 	struct bnxt_qplib_mrinfo mrinfo;
599 	struct bnxt_re_mr *mr = NULL;
600 	struct ib_mw *ib_mw = NULL;
601 	dma_addr_t dma_addr = 0;
602 	u32 max_mr_count;
603 	u64 pbl_tbl;
604 	int rc;
605 
606 	if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
607 		return 0;
608 
609 	memset(&mrinfo, 0, sizeof(mrinfo));
610 	/* Allocate a small chunk of memory and dma-map it */
611 	fence->va = kzalloc(BNXT_RE_LEGACY_FENCE_BYTES, GFP_KERNEL);
612 	if (!fence->va)
613 		return -ENOMEM;
614 	dma_addr = ib_dma_map_single(&rdev->ibdev, fence->va,
615 				     BNXT_RE_LEGACY_FENCE_BYTES,
616 				     DMA_BIDIRECTIONAL);
617 	rc = ib_dma_mapping_error(&rdev->ibdev, dma_addr);
618 	if (rc) {
619 		dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
620 		rc = -EIO;
621 		fence->dma_addr = 0;
622 		goto free_va;
623 	}
624 	fence->dma_addr = dma_addr;
625 
626 	/* Allocate a MR */
627 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
628 	if (!mr)
629 		goto free_dma_addr;
630 	fence->mr = mr;
631 	mr->rdev = rdev;
632 	mr->qplib_mr.pd = &pd->qplib_pd;
633 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
634 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
635 	if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) {
636 		rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
637 		if (rc) {
638 			dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
639 			goto free_mr;
640 		}
641 		/* Register MR */
642 		mr->ib_mr.lkey = mr->qplib_mr.lkey;
643 	}
644 	mr->qplib_mr.va         = (u64)fence->va;
645 	mr->qplib_mr.total_size = BNXT_RE_LEGACY_FENCE_BYTES;
646 	pbl_tbl = dma_addr;
647 
648 	mrinfo.mrw = &mr->qplib_mr;
649 	mrinfo.ptes = &pbl_tbl;
650 	mrinfo.sg.npages = BNXT_RE_LEGACY_FENCE_PBL_SIZE;
651 
652 	mrinfo.sg.nmap = 0;
653 	mrinfo.sg.sghead = 0;
654 	mrinfo.sg.pgshft = PAGE_SHIFT;
655 	mrinfo.sg.pgsize = PAGE_SIZE;
656 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
657 	if (rc) {
658 		dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
659 		goto free_mr;
660 	}
661 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
662 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
663 	atomic_inc(&rdev->stats.rsors.mr_count);
664 	max_mr_count =  atomic_read(&rdev->stats.rsors.mr_count);
665 	if (max_mr_count > (atomic_read(&rdev->stats.rsors.max_mr_count)))
666 		atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
667 
668 	ib_mw = bnxt_re_alloc_mw(&pd->ibpd, IB_MW_TYPE_1, NULL);
669 	/* Create a fence MW only for kernel consumers */
670 	if (!ib_mw) {
671 		dev_err(rdev_to_dev(rdev),
672 			"Failed to create fence-MW for PD: %p\n", pd);
673 		rc = -EINVAL;
674 		goto free_mr;
675 	}
676 	fence->mw = ib_mw;
677 
678 	bnxt_re_legacy_create_fence_wqe(pd);
679 	return 0;
680 
681 free_mr:
682 	if (mr->ib_mr.lkey) {
683 		bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
684 		atomic_dec(&rdev->stats.rsors.mr_count);
685 	}
686 	kfree(mr);
687 	fence->mr = NULL;
688 
689 free_dma_addr:
690 	ib_dma_unmap_single(&rdev->ibdev, fence->dma_addr,
691 			    BNXT_RE_LEGACY_FENCE_BYTES, DMA_BIDIRECTIONAL);
692 	fence->dma_addr = 0;
693 
694 free_va:
695 	kfree(fence->va);
696 	fence->va = NULL;
697 	return rc;
698 }
699 
bnxt_re_legacy_destroy_fence_mr(struct bnxt_re_pd * pd)700 static void bnxt_re_legacy_destroy_fence_mr(struct bnxt_re_pd *pd)
701 {
702 	struct bnxt_re_legacy_fence_data *fence = &pd->fence;
703 	struct bnxt_re_dev *rdev = pd->rdev;
704 	struct bnxt_re_mr *mr = fence->mr;
705 
706 	if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
707 		return;
708 
709 	if (fence->mw) {
710 		bnxt_re_dealloc_mw(fence->mw);
711 		fence->mw = NULL;
712 	}
713 	if (mr) {
714 		if (mr->ib_mr.rkey)
715 			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
716 					     false);
717 		if (mr->ib_mr.lkey)
718 			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
719 		kfree(mr);
720 		fence->mr = NULL;
721 		atomic_dec(&rdev->stats.rsors.mr_count);
722 	}
723 	if (fence->dma_addr) {
724 		ib_dma_unmap_single(&rdev->ibdev, fence->dma_addr,
725 				    BNXT_RE_LEGACY_FENCE_BYTES,
726 				    DMA_BIDIRECTIONAL);
727 		fence->dma_addr = 0;
728 	}
729 	kfree(fence->va);
730 	fence->va = NULL;
731 }
732 
733 
bnxt_re_get_user_dpi(struct bnxt_re_dev * rdev,struct bnxt_re_ucontext * cntx)734 static int bnxt_re_get_user_dpi(struct bnxt_re_dev *rdev,
735 				struct bnxt_re_ucontext *cntx)
736 {
737 	struct bnxt_qplib_chip_ctx *cctx = rdev->chip_ctx;
738 	int ret = 0;
739 	u8 type;
740 	/* Allocate DPI in alloc_pd or in create_cq to avoid failing of
741 	 * ibv_devinfo and family of application when DPIs are depleted.
742 	 */
743 	type = BNXT_QPLIB_DPI_TYPE_UC;
744 	ret = bnxt_qplib_alloc_dpi(&rdev->qplib_res, &cntx->dpi, cntx, type);
745 	if (ret) {
746 		dev_err(rdev_to_dev(rdev), "Alloc doorbell page failed!\n");
747 		goto out;
748 	}
749 
750 	if (cctx->modes.db_push) {
751 		type = BNXT_QPLIB_DPI_TYPE_WC;
752 		ret = bnxt_qplib_alloc_dpi(&rdev->qplib_res, &cntx->wcdpi,
753 					   cntx, type);
754 		if (ret)
755 			dev_err(rdev_to_dev(rdev), "push dp alloc failed\n");
756 	}
757 out:
758 	return ret;
759 }
760 
761 /* Protection Domains */
bnxt_re_dealloc_pd(struct ib_pd * ib_pd,struct ib_udata * udata)762 void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
763 {
764 	struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
765 	struct bnxt_re_dev *rdev = pd->rdev;
766 	int rc;
767 
768 	bnxt_re_legacy_destroy_fence_mr(pd);
769 
770 	rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
771 				   &rdev->qplib_res.pd_tbl,
772 				   &pd->qplib_pd);
773 	if (rc)
774 		dev_err_ratelimited(rdev_to_dev(rdev),
775 				    "%s failed rc = %d\n", __func__, rc);
776 	atomic_dec(&rdev->stats.rsors.pd_count);
777 
778 	return;
779 }
780 
bnxt_re_alloc_pd(struct ib_pd * pd_in,struct ib_udata * udata)781 int bnxt_re_alloc_pd(struct ib_pd *pd_in,
782 		     struct ib_udata *udata)
783 {
784 	struct ib_pd *ibpd = pd_in;
785 	struct ib_device *ibdev = ibpd->device;
786 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
787 	struct bnxt_re_ucontext *ucntx =
788 		rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
789 					  ibucontext);
790 	u32 max_pd_count;
791 	int rc;
792 	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ibpd);
793 
794 	pd->rdev = rdev;
795 	if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
796 		dev_err(rdev_to_dev(rdev),
797 			"Allocate HW Protection Domain failed!\n");
798 		rc = -ENOMEM;
799 		goto fail;
800 	}
801 
802 	if (udata) {
803 		struct bnxt_re_pd_resp resp = {};
804 
805 		if (!ucntx->dpi.dbr) {
806 			rc = bnxt_re_get_user_dpi(rdev, ucntx);
807 			if (rc)
808 				goto dbfail;
809 		}
810 
811 		resp.pdid = pd->qplib_pd.id;
812 		/* Still allow mapping this DBR to the new user PD. */
813 		resp.dpi = ucntx->dpi.dpi;
814 		resp.dbr = (u64)ucntx->dpi.umdbr;
815 		/* Copy only on a valid wcpdi */
816 		if (ucntx->wcdpi.dpi) {
817 			resp.wcdpi = ucntx->wcdpi.dpi;
818 			resp.comp_mask = BNXT_RE_COMP_MASK_PD_HAS_WC_DPI;
819 		}
820 		if (rdev->dbr_pacing) {
821 			WARN_ON(!rdev->dbr_bar_addr);
822 			resp.dbr_bar_addr = (u64)rdev->dbr_bar_addr;
823 			resp.comp_mask |= BNXT_RE_COMP_MASK_PD_HAS_DBR_BAR_ADDR;
824 		}
825 
826 		rc = bnxt_re_copy_to_udata(rdev, &resp,
827 					   min(udata->outlen, sizeof(resp)),
828 					   udata);
829 		if (rc)
830 			goto dbfail;
831 	}
832 
833 	if (!udata)
834 		if (bnxt_re_legacy_create_fence_mr(pd))
835 			dev_warn(rdev_to_dev(rdev),
836 				 "Failed to create Fence-MR\n");
837 
838 	atomic_inc(&rdev->stats.rsors.pd_count);
839 	max_pd_count = atomic_read(&rdev->stats.rsors.pd_count);
840 	if (max_pd_count > atomic_read(&rdev->stats.rsors.max_pd_count))
841 		atomic_set(&rdev->stats.rsors.max_pd_count, max_pd_count);
842 
843 	return 0;
844 dbfail:
845 	(void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
846 				    &pd->qplib_pd);
847 fail:
848 	return rc;
849 }
850 
851 /* Address Handles */
bnxt_re_destroy_ah(struct ib_ah * ib_ah,u32 flags)852 void bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
853 {
854 	struct bnxt_re_ah *ah = to_bnxt_re(ib_ah, struct bnxt_re_ah, ibah);
855 	struct bnxt_re_dev *rdev = ah->rdev;
856 	int rc = 0;
857 	bool block = true;
858 
859 	block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
860 
861 	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
862 	if (rc)
863 		dev_err_ratelimited(rdev_to_dev(rdev),
864 				   "%s id = %d blocking %d failed rc = %d\n",
865 				    __func__, ah->qplib_ah.id, block, rc);
866 	atomic_dec(&rdev->stats.rsors.ah_count);
867 
868 	return;
869 }
870 
_to_bnxt_re_nw_type(enum rdma_network_type ntype)871 static u8 _to_bnxt_re_nw_type(enum rdma_network_type ntype)
872 {
873 	u8 nw_type;
874 	switch (ntype) {
875 		case RDMA_NETWORK_IPV4:
876 			nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
877 			break;
878 		case RDMA_NETWORK_IPV6:
879 			nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
880 			break;
881 		default:
882 			nw_type = CMDQ_CREATE_AH_TYPE_V1;
883 			break;
884 	}
885 	return nw_type;
886 }
887 
888 static inline int
bnxt_re_get_cached_gid(struct ib_device * dev,u8 port_num,int index,union ib_gid * sgid,struct ib_gid_attr ** sgid_attr,struct ib_global_route * grh,struct ib_ah * ah)889 bnxt_re_get_cached_gid(struct ib_device *dev, u8 port_num, int index,
890 		       union ib_gid *sgid, struct ib_gid_attr **sgid_attr,
891 		       struct ib_global_route *grh, struct ib_ah *ah)
892 {
893 	int ret = 0;
894 
895 	ret = ib_get_cached_gid(dev, port_num, index, sgid, *sgid_attr);
896 	return ret;
897 }
898 
899 static inline enum rdma_network_type
bnxt_re_gid_to_network_type(struct ib_gid_attr * sgid_attr,union ib_gid * sgid)900 bnxt_re_gid_to_network_type(struct ib_gid_attr *sgid_attr,
901 			    union ib_gid *sgid)
902 {
903 	return ib_gid_to_network_type(sgid_attr->gid_type, sgid);
904 }
905 
bnxt_re_get_ah_info(struct bnxt_re_dev * rdev,struct ib_ah_attr * ah_attr,struct bnxt_re_ah_info * ah_info)906 static int bnxt_re_get_ah_info(struct bnxt_re_dev *rdev,
907 			       struct ib_ah_attr *ah_attr,
908 			       struct bnxt_re_ah_info *ah_info)
909 {
910 	struct ib_gid_attr *gattr;
911 	enum rdma_network_type ib_ntype;
912 	u8 ntype;
913 	union ib_gid *gid;
914 	int rc = 0;
915 
916 	gid = &ah_info->sgid;
917 	gattr = &ah_info->sgid_attr;
918 
919 	rc = bnxt_re_get_cached_gid(&rdev->ibdev, 1, ah_attr->grh.sgid_index,
920 				    gid, &gattr, &ah_attr->grh, NULL);
921 	if (rc)
922 		return rc;
923 
924 	/* Get vlan tag */
925 	if (gattr->ndev) {
926 		if (is_vlan_dev(gattr->ndev))
927 			ah_info->vlan_tag = vlan_dev_vlan_id(gattr->ndev);
928 		if_rele(gattr->ndev);
929 	}
930 
931 	/* Get network header type for this GID */
932 
933 	ib_ntype = bnxt_re_gid_to_network_type(gattr, gid);
934 	ntype = _to_bnxt_re_nw_type(ib_ntype);
935 	ah_info->nw_type = ntype;
936 
937 	return rc;
938 }
939 
_get_sgid_index(struct bnxt_re_dev * rdev,u8 gindx)940 static u8 _get_sgid_index(struct bnxt_re_dev *rdev, u8 gindx)
941 {
942 	gindx = rdev->gid_map[gindx];
943 	return gindx;
944 }
945 
bnxt_re_init_dmac(struct bnxt_re_dev * rdev,struct ib_ah_attr * ah_attr,struct bnxt_re_ah_info * ah_info,bool is_user,struct bnxt_re_ah * ah)946 static int bnxt_re_init_dmac(struct bnxt_re_dev *rdev, struct ib_ah_attr *ah_attr,
947 			     struct bnxt_re_ah_info *ah_info, bool is_user,
948 			     struct bnxt_re_ah *ah)
949 {
950 	int rc = 0;
951 	u8 *dmac;
952 
953 	if (is_user && !rdma_is_multicast_addr((struct in6_addr *)
954 						ah_attr->grh.dgid.raw) &&
955 	    !rdma_link_local_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
956 
957 		u32 retry_count = BNXT_RE_RESOLVE_RETRY_COUNT_US;
958 		struct bnxt_re_resolve_dmac_work *resolve_dmac_work;
959 
960 
961 		resolve_dmac_work = kzalloc(sizeof(*resolve_dmac_work), GFP_ATOMIC);
962 
963 		resolve_dmac_work->rdev = rdev;
964 		resolve_dmac_work->ah_attr = ah_attr;
965 		resolve_dmac_work->ah_info = ah_info;
966 
967 		atomic_set(&resolve_dmac_work->status_wait, 1);
968 		INIT_WORK(&resolve_dmac_work->work, bnxt_re_resolve_dmac_task);
969 		queue_work(rdev->resolve_wq, &resolve_dmac_work->work);
970 
971 		do {
972 			rc = atomic_read(&resolve_dmac_work->status_wait) & 0xFF;
973 			if (!rc)
974 				break;
975 			udelay(1);
976 		} while (--retry_count);
977 		if (atomic_read(&resolve_dmac_work->status_wait)) {
978 			INIT_LIST_HEAD(&resolve_dmac_work->list);
979 			list_add_tail(&resolve_dmac_work->list,
980 					&rdev->mac_wq_list);
981 			return -EFAULT;
982 		}
983 		kfree(resolve_dmac_work);
984 	}
985 	dmac = ROCE_DMAC(ah_attr);
986 	if (dmac)
987 		memcpy(ah->qplib_ah.dmac, dmac, ETH_ALEN);
988 	return rc;
989 }
990 
bnxt_re_create_ah(struct ib_ah * ah_in,struct ib_ah_attr * attr,u32 flags,struct ib_udata * udata)991 int bnxt_re_create_ah(struct ib_ah *ah_in, struct ib_ah_attr *attr,
992 		      u32 flags, struct ib_udata *udata)
993 {
994 
995 	struct ib_ah *ib_ah = ah_in;
996 	struct ib_pd *ib_pd = ib_ah->pd;
997 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ibah);
998 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ibpd);
999 	struct bnxt_re_dev *rdev = pd->rdev;
1000 	struct bnxt_re_ah_info ah_info;
1001 	u32 max_ah_count;
1002 	bool is_user;
1003 	int rc;
1004 	bool block = true;
1005 	struct ib_ah_attr *ah_attr = attr;
1006 	block = !(flags & RDMA_CREATE_AH_SLEEPABLE);
1007 
1008 	if (!(ah_attr->ah_flags & IB_AH_GRH))
1009 		dev_err(rdev_to_dev(rdev), "ah_attr->ah_flags GRH is not set\n");
1010 
1011 	ah->rdev = rdev;
1012 	ah->qplib_ah.pd = &pd->qplib_pd;
1013 	is_user = ib_pd->uobject ? true : false;
1014 
1015 	/* Supply the configuration for the HW */
1016 	memcpy(ah->qplib_ah.dgid.data, ah_attr->grh.dgid.raw,
1017 			sizeof(union ib_gid));
1018 	ah->qplib_ah.sgid_index = _get_sgid_index(rdev, ah_attr->grh.sgid_index);
1019 	if (ah->qplib_ah.sgid_index == 0xFF) {
1020 		dev_err(rdev_to_dev(rdev), "invalid sgid_index!\n");
1021 		rc = -EINVAL;
1022 		goto fail;
1023 	}
1024 	ah->qplib_ah.host_sgid_index = ah_attr->grh.sgid_index;
1025 	ah->qplib_ah.traffic_class = ah_attr->grh.traffic_class;
1026 	ah->qplib_ah.flow_label = ah_attr->grh.flow_label;
1027 	ah->qplib_ah.hop_limit = ah_attr->grh.hop_limit;
1028 	ah->qplib_ah.sl = ah_attr->sl;
1029 	rc = bnxt_re_get_ah_info(rdev, ah_attr, &ah_info);
1030 	if (rc)
1031 		goto fail;
1032 	ah->qplib_ah.nw_type = ah_info.nw_type;
1033 
1034 	rc = bnxt_re_init_dmac(rdev, ah_attr, &ah_info, is_user, ah);
1035 	if (rc)
1036 		goto fail;
1037 
1038 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, block);
1039 	if (rc) {
1040 		dev_err(rdev_to_dev(rdev),
1041 			"Allocate HW Address Handle failed!\n");
1042 		goto fail;
1043 	}
1044 
1045 	/* Write AVID to shared page. */
1046 	if (ib_pd->uobject) {
1047 		struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
1048 		struct bnxt_re_ucontext *uctx;
1049 		unsigned long flag;
1050 		u32 *wrptr;
1051 
1052 		uctx = to_bnxt_re(ib_uctx, struct bnxt_re_ucontext, ibucontext);
1053 		spin_lock_irqsave(&uctx->sh_lock, flag);
1054 		wrptr = (u32 *)((u8 *)uctx->shpg + BNXT_RE_AVID_OFFT);
1055 		*wrptr = ah->qplib_ah.id;
1056 		wmb(); /* make sure cache is updated. */
1057 		spin_unlock_irqrestore(&uctx->sh_lock, flag);
1058 	}
1059 	atomic_inc(&rdev->stats.rsors.ah_count);
1060 	max_ah_count = atomic_read(&rdev->stats.rsors.ah_count);
1061 	if (max_ah_count > atomic_read(&rdev->stats.rsors.max_ah_count))
1062 		atomic_set(&rdev->stats.rsors.max_ah_count, max_ah_count);
1063 
1064 	return 0;
1065 fail:
1066 	return rc;
1067 }
1068 
bnxt_re_modify_ah(struct ib_ah * ib_ah,struct ib_ah_attr * ah_attr)1069 int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct ib_ah_attr *ah_attr)
1070 {
1071 	return 0;
1072 }
1073 
bnxt_re_query_ah(struct ib_ah * ib_ah,struct ib_ah_attr * ah_attr)1074 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct ib_ah_attr *ah_attr)
1075 {
1076 	struct bnxt_re_ah *ah = to_bnxt_re(ib_ah, struct bnxt_re_ah, ibah);
1077 
1078 	memcpy(ah_attr->grh.dgid.raw, ah->qplib_ah.dgid.data,
1079 	       sizeof(union ib_gid));
1080 	ah_attr->grh.sgid_index = ah->qplib_ah.host_sgid_index;
1081 	ah_attr->grh.traffic_class = ah->qplib_ah.traffic_class;
1082 	ah_attr->sl = ah->qplib_ah.sl;
1083 	memcpy(ROCE_DMAC(ah_attr), ah->qplib_ah.dmac, ETH_ALEN);
1084 	ah_attr->ah_flags = IB_AH_GRH;
1085 	ah_attr->port_num = 1;
1086 	ah_attr->static_rate = 0;
1087 
1088 	return 0;
1089 }
1090 
1091 /* Shared Receive Queues */
bnxt_re_destroy_srq(struct ib_srq * ib_srq,struct ib_udata * udata)1092 void bnxt_re_destroy_srq(struct ib_srq *ib_srq,
1093 			 struct ib_udata *udata)
1094 {
1095 	struct bnxt_re_srq *srq = to_bnxt_re(ib_srq, struct bnxt_re_srq, ibsrq);
1096 	struct bnxt_re_dev *rdev = srq->rdev;
1097 	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1098 	int rc = 0;
1099 
1100 
1101 	rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1102 	if (rc)
1103 		dev_err_ratelimited(rdev_to_dev(rdev),
1104 				   "%s id = %d failed rc = %d\n",
1105 				    __func__, qplib_srq->id, rc);
1106 
1107 	if (srq->umem && !IS_ERR(srq->umem))
1108 		ib_umem_release(srq->umem);
1109 
1110 	atomic_dec(&rdev->stats.rsors.srq_count);
1111 
1112 	return;
1113 }
1114 
_max_rwqe_sz(int nsge)1115 static u16 _max_rwqe_sz(int nsge)
1116 {
1117 	return sizeof(struct rq_wqe_hdr) + (nsge * sizeof(struct sq_sge));
1118 }
1119 
bnxt_re_get_rwqe_size(struct bnxt_qplib_qp * qplqp,int rsge,int max)1120 static u16 bnxt_re_get_rwqe_size(struct bnxt_qplib_qp *qplqp,
1121 				 int rsge, int max)
1122 {
1123 	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
1124 		rsge = max;
1125 
1126 	return _max_rwqe_sz(rsge);
1127 }
1128 
1129 static inline
ib_umem_get_compat(struct bnxt_re_dev * rdev,struct ib_ucontext * ucontext,struct ib_udata * udata,unsigned long addr,size_t size,int access,int dmasync)1130 struct ib_umem *ib_umem_get_compat(struct bnxt_re_dev *rdev,
1131 				   struct ib_ucontext *ucontext,
1132 				   struct ib_udata *udata,
1133 				   unsigned long addr,
1134 				   size_t size, int access, int dmasync)
1135 {
1136 	return ib_umem_get(ucontext, addr, size, access, dmasync);
1137 }
1138 
1139 static inline
ib_umem_get_flags_compat(struct bnxt_re_dev * rdev,struct ib_ucontext * ucontext,struct ib_udata * udata,unsigned long addr,size_t size,int access,int dmasync)1140 struct ib_umem *ib_umem_get_flags_compat(struct bnxt_re_dev *rdev,
1141 					 struct ib_ucontext *ucontext,
1142 					 struct ib_udata *udata,
1143 					 unsigned long addr,
1144 					 size_t size, int access, int dmasync)
1145 {
1146 	return ib_umem_get_compat(rdev, ucontext, udata, addr, size,
1147 				  access, 0);
1148 }
1149 
ib_umem_num_pages_compat(struct ib_umem * umem)1150 static inline size_t ib_umem_num_pages_compat(struct ib_umem *umem)
1151 {
1152 	return ib_umem_num_pages(umem);
1153 }
1154 
bnxt_re_init_user_srq(struct bnxt_re_dev * rdev,struct bnxt_re_pd * pd,struct bnxt_re_srq * srq,struct ib_udata * udata)1155 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1156 				 struct bnxt_re_pd *pd,
1157 				 struct bnxt_re_srq *srq,
1158 				 struct ib_udata *udata)
1159 {
1160 	struct bnxt_qplib_sg_info *sginfo;
1161 	struct bnxt_qplib_srq *qplib_srq;
1162 	struct bnxt_re_ucontext *cntx;
1163 	struct ib_ucontext *context;
1164 	struct bnxt_re_srq_req ureq;
1165 	struct ib_umem *umem;
1166 	int rc, bytes = 0;
1167 
1168 	context = pd->ibpd.uobject->context;
1169 	cntx = to_bnxt_re(context, struct bnxt_re_ucontext, ibucontext);
1170 	qplib_srq = &srq->qplib_srq;
1171 	sginfo = &qplib_srq->sginfo;
1172 
1173 	if (udata->inlen < sizeof(ureq))
1174 		dev_warn(rdev_to_dev(rdev),
1175 			 "Update the library ulen %d klen %d\n",
1176 			 (unsigned int)udata->inlen,
1177 			 (unsigned int)sizeof(ureq));
1178 
1179 	rc = ib_copy_from_udata(&ureq, udata,
1180 				min(udata->inlen, sizeof(ureq)));
1181 	if (rc)
1182 		return rc;
1183 
1184 	bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
1185 	bytes = PAGE_ALIGN(bytes);
1186 	umem = ib_umem_get_compat(rdev, context, udata, ureq.srqva, bytes,
1187 				  IB_ACCESS_LOCAL_WRITE, 1);
1188 	if (IS_ERR(umem)) {
1189 		dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed with %ld\n",
1190 			__func__, PTR_ERR(umem));
1191 		return PTR_ERR(umem);
1192 	}
1193 
1194 	srq->umem = umem;
1195 	sginfo->sghead = get_ib_umem_sgl(umem, &sginfo->nmap);
1196 	sginfo->npages = ib_umem_num_pages_compat(umem);
1197 	qplib_srq->srq_handle = ureq.srq_handle;
1198 	qplib_srq->dpi = &cntx->dpi;
1199 	qplib_srq->is_user = true;
1200 
1201 	return 0;
1202 }
1203 
bnxt_re_create_srq(struct ib_srq * srq_in,struct ib_srq_init_attr * srq_init_attr,struct ib_udata * udata)1204 int bnxt_re_create_srq(struct ib_srq *srq_in, struct ib_srq_init_attr *srq_init_attr,
1205 		       struct ib_udata *udata)
1206 {
1207 	struct bnxt_qplib_dev_attr *dev_attr;
1208 	struct bnxt_re_ucontext *cntx = NULL;
1209 	struct ib_ucontext *context;
1210 	struct bnxt_re_dev *rdev;
1211 	struct bnxt_re_pd *pd;
1212 	int rc, entries;
1213 	struct ib_srq *ib_srq = srq_in;
1214 	struct ib_pd *ib_pd = ib_srq->pd;
1215 	struct bnxt_re_srq *srq =
1216 		container_of(ib_srq, struct bnxt_re_srq, ibsrq);
1217 	u32 max_srq_count;
1218 
1219 	pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
1220 	rdev = pd->rdev;
1221 	dev_attr = rdev->dev_attr;
1222 
1223 	if (rdev->mod_exit) {
1224 		dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__);
1225 		rc = -EIO;
1226 		goto exit;
1227 	}
1228 
1229 	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1230 		dev_err(rdev_to_dev(rdev), "SRQ type not supported\n");
1231 		rc = -ENOTSUPP;
1232 		goto exit;
1233 	}
1234 
1235 	if (udata) {
1236 		context = pd->ibpd.uobject->context;
1237 		cntx = to_bnxt_re(context, struct bnxt_re_ucontext, ibucontext);
1238 	}
1239 
1240 	if (atomic_read(&rdev->stats.rsors.srq_count) >= dev_attr->max_srq) {
1241 		dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded(SRQs)\n");
1242 		rc = -EINVAL;
1243 		goto exit;
1244 	}
1245 
1246 	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1247 		dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded(SRQ_WQs)\n");
1248 		rc = -EINVAL;
1249 		goto exit;
1250 	}
1251 
1252 	srq->rdev = rdev;
1253 	srq->qplib_srq.pd = &pd->qplib_pd;
1254 	srq->qplib_srq.dpi = &rdev->dpi_privileged;
1255 
1256 	/* Allocate 1 more than what's provided so posting max doesn't
1257 	   mean empty */
1258 	entries = srq_init_attr->attr.max_wr + 1;
1259 	entries = bnxt_re_init_depth(entries, cntx);
1260 	if (entries > dev_attr->max_srq_wqes + 1)
1261 		entries = dev_attr->max_srq_wqes + 1;
1262 
1263 	srq->qplib_srq.wqe_size = _max_rwqe_sz(6); /* 128 byte wqe size */
1264 	srq->qplib_srq.max_wqe = entries;
1265 	srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1266 	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1267 	srq->srq_limit = srq_init_attr->attr.srq_limit;
1268 	srq->qplib_srq.eventq_hw_ring_id = rdev->nqr.nq[0].ring_id;
1269 	srq->qplib_srq.sginfo.pgsize = PAGE_SIZE;
1270 	srq->qplib_srq.sginfo.pgshft = PAGE_SHIFT;
1271 
1272 	if (udata) {
1273 		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1274 		if (rc)
1275 			goto fail;
1276 	}
1277 
1278 	rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1279 	if (rc) {
1280 		dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!\n");
1281 		goto fail;
1282 	}
1283 
1284 	if (udata) {
1285 		struct bnxt_re_srq_resp resp;
1286 
1287 		resp.srqid = srq->qplib_srq.id;
1288 		rc = bnxt_re_copy_to_udata(rdev, &resp,
1289 					   min(udata->outlen, sizeof(resp)),
1290 					   udata);
1291 		if (rc) {
1292 			bnxt_qplib_destroy_srq(&rdev->qplib_res, &srq->qplib_srq);
1293 			goto fail;
1294 		}
1295 	}
1296 	atomic_inc(&rdev->stats.rsors.srq_count);
1297 	max_srq_count = atomic_read(&rdev->stats.rsors.srq_count);
1298 	if (max_srq_count > atomic_read(&rdev->stats.rsors.max_srq_count))
1299 		atomic_set(&rdev->stats.rsors.max_srq_count, max_srq_count);
1300 	spin_lock_init(&srq->lock);
1301 
1302 	return 0;
1303 fail:
1304 	if (udata && srq->umem && !IS_ERR(srq->umem)) {
1305 		ib_umem_release(srq->umem);
1306 		srq->umem = NULL;
1307 	}
1308 exit:
1309 	return rc;
1310 }
1311 
bnxt_re_modify_srq(struct ib_srq * ib_srq,struct ib_srq_attr * srq_attr,enum ib_srq_attr_mask srq_attr_mask,struct ib_udata * udata)1312 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1313 		       enum ib_srq_attr_mask srq_attr_mask,
1314 		       struct ib_udata *udata)
1315 {
1316 	struct bnxt_re_srq *srq = to_bnxt_re(ib_srq, struct bnxt_re_srq,
1317 					     ibsrq);
1318 	struct bnxt_re_dev *rdev = srq->rdev;
1319 	int rc;
1320 
1321 	switch (srq_attr_mask) {
1322 	case IB_SRQ_MAX_WR:
1323 		/* SRQ resize is not supported */
1324 		break;
1325 	case IB_SRQ_LIMIT:
1326 		/* Change the SRQ threshold */
1327 		if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1328 			return -EINVAL;
1329 
1330 		srq->qplib_srq.threshold = srq_attr->srq_limit;
1331 		rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1332 		if (rc) {
1333 			dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!\n");
1334 			return rc;
1335 		}
1336 		/* On success, update the shadow */
1337 		srq->srq_limit = srq_attr->srq_limit;
1338 
1339 		if (udata) {
1340 			/* Build and send response back to udata */
1341 			rc = bnxt_re_copy_to_udata(rdev, srq, 0, udata);
1342 			if (rc)
1343 				return rc;
1344 		}
1345 		break;
1346 	default:
1347 		dev_err(rdev_to_dev(rdev),
1348 			"Unsupported srq_attr_mask 0x%x\n", srq_attr_mask);
1349 		return -EINVAL;
1350 	}
1351 	return 0;
1352 }
1353 
bnxt_re_query_srq(struct ib_srq * ib_srq,struct ib_srq_attr * srq_attr)1354 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1355 {
1356 	struct bnxt_re_srq *srq = to_bnxt_re(ib_srq, struct bnxt_re_srq,
1357 					     ibsrq);
1358 	struct bnxt_re_dev *rdev = srq->rdev;
1359 	int rc;
1360 
1361 	rc = bnxt_qplib_query_srq(&rdev->qplib_res, &srq->qplib_srq);
1362 	if (rc) {
1363 		dev_err(rdev_to_dev(rdev), "Query HW SRQ (0x%x) failed! rc = %d\n",
1364 			srq->qplib_srq.id, rc);
1365 		return rc;
1366 	}
1367 	srq_attr->max_wr = srq->qplib_srq.max_wqe;
1368 	srq_attr->max_sge = srq->qplib_srq.max_sge;
1369 	srq_attr->srq_limit = srq->qplib_srq.threshold;
1370 
1371 	return 0;
1372 }
1373 
bnxt_re_post_srq_recv(struct ib_srq * ib_srq,const struct ib_recv_wr * wr,const struct ib_recv_wr ** bad_wr)1374 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
1375 			  const struct ib_recv_wr **bad_wr)
1376 {
1377 	struct bnxt_re_srq *srq = to_bnxt_re(ib_srq, struct bnxt_re_srq,
1378 					     ibsrq);
1379 	struct bnxt_qplib_swqe wqe = {};
1380 	unsigned long flags;
1381 	int rc = 0;
1382 
1383 	spin_lock_irqsave(&srq->lock, flags);
1384 	while (wr) {
1385 		/* Transcribe each ib_recv_wr to qplib_swqe */
1386 		wqe.num_sge = wr->num_sge;
1387 		wqe.sg_list = (struct bnxt_qplib_sge *)wr->sg_list;
1388 		wqe.wr_id = wr->wr_id;
1389 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1390 		rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1391 		if (rc) {
1392 			*bad_wr = wr;
1393 			break;
1394 		}
1395 		wr = wr->next;
1396 	}
1397 	spin_unlock_irqrestore(&srq->lock, flags);
1398 
1399 	return rc;
1400 }
1401 
bnxt_re_lock_cqs(struct bnxt_re_qp * qp)1402 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
1403 {
1404 	unsigned long flags;
1405 
1406 	spin_lock_irqsave(&qp->scq->cq_lock, flags);
1407 	if (qp->rcq && qp->rcq != qp->scq)
1408 		spin_lock(&qp->rcq->cq_lock);
1409 
1410 	return flags;
1411 }
1412 
bnxt_re_unlock_cqs(struct bnxt_re_qp * qp,unsigned long flags)1413 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
1414 				  unsigned long flags)
1415 {
1416 	if (qp->rcq && qp->rcq != qp->scq)
1417 		spin_unlock(&qp->rcq->cq_lock);
1418 	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
1419 }
1420 
1421 /* Queue Pairs */
bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp * qp)1422 static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
1423 {
1424 	struct bnxt_re_qp *gsi_sqp;
1425 	struct bnxt_re_ah *gsi_sah;
1426 	struct bnxt_re_dev *rdev;
1427 	unsigned long flags;
1428 	int rc = 0;
1429 
1430 	rdev = qp->rdev;
1431 	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
1432 	gsi_sah = rdev->gsi_ctx.gsi_sah;
1433 
1434 	/* remove from active qp list */
1435 	mutex_lock(&rdev->qp_lock);
1436 	list_del(&gsi_sqp->list);
1437 	mutex_unlock(&rdev->qp_lock);
1438 
1439 	if (gsi_sah) {
1440 		dev_dbg(rdev_to_dev(rdev), "Destroy the shadow AH\n");
1441 		rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &gsi_sah->qplib_ah,
1442 					   true);
1443 		if (rc)
1444 			dev_err(rdev_to_dev(rdev),
1445 				"Destroy HW AH for shadow QP failed!\n");
1446 		atomic_dec(&rdev->stats.rsors.ah_count);
1447 	}
1448 
1449 	dev_dbg(rdev_to_dev(rdev), "Destroy the shadow QP\n");
1450 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
1451 	if (rc)
1452 		dev_err(rdev_to_dev(rdev), "Destroy Shadow QP failed\n");
1453 
1454 	/* Clean the CQ for shadow QP completions */
1455 	flags = bnxt_re_lock_cqs(gsi_sqp);
1456 	bnxt_qplib_clean_qp(&gsi_sqp->qplib_qp);
1457 	bnxt_re_unlock_cqs(gsi_sqp, flags);
1458 
1459 	bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
1460 	bnxt_qplib_free_hdr_buf(&rdev->qplib_res, &gsi_sqp->qplib_qp);
1461 	kfree(rdev->gsi_ctx.sqp_tbl);
1462 	kfree(gsi_sah);
1463 	kfree(gsi_sqp);
1464 	rdev->gsi_ctx.gsi_sqp = NULL;
1465 	rdev->gsi_ctx.gsi_sah = NULL;
1466 	rdev->gsi_ctx.sqp_tbl = NULL;
1467 	atomic_dec(&rdev->stats.rsors.qp_count);
1468 
1469 	return 0;
1470 }
1471 
bnxt_re_dump_debug_stats(struct bnxt_re_dev * rdev,u32 active_qps)1472 static void bnxt_re_dump_debug_stats(struct bnxt_re_dev *rdev, u32 active_qps)
1473 {
1474 	u32	total_qp = 0;
1475 	u64	avg_time = 0;
1476 	int	i;
1477 
1478 	if (!rdev->rcfw.sp_perf_stats_enabled)
1479 		return;
1480 
1481 	switch (active_qps) {
1482 	case 1:
1483 		/* Potential hint for Test Stop */
1484 		for (i = 0; i < RCFW_MAX_STAT_INDEX; i++) {
1485 			if (rdev->rcfw.qp_destroy_stats[i]) {
1486 				total_qp++;
1487 				avg_time += rdev->rcfw.qp_destroy_stats[i];
1488 			}
1489 		}
1490 		if (total_qp >= 0 || avg_time >= 0)
1491 			dev_dbg(rdev_to_dev(rdev),
1492 				"Perf Debug: %ps Total (%d) QP destroyed in (%ld) msec\n",
1493 				__builtin_return_address(0), total_qp,
1494 				(long)jiffies_to_msecs(avg_time));
1495 		break;
1496 	case 2:
1497 		/* Potential hint for Test Start */
1498 		dev_dbg(rdev_to_dev(rdev),
1499 			"Perf Debug: %ps active_qps = %d\n",
1500 			__builtin_return_address(0), active_qps);
1501 		break;
1502 	default:
1503 		/* Potential hint to know latency of QP destroy.
1504 		 * Average time taken for 1K QP Destroy.
1505 		 */
1506 		if (active_qps > 1024 && !(active_qps % 1024))
1507 			dev_dbg(rdev_to_dev(rdev),
1508 				"Perf Debug: %ps Active QP (%d) Watermark (%d)\n",
1509 				__builtin_return_address(0), active_qps,
1510 				atomic_read(&rdev->stats.rsors.max_qp_count));
1511 		break;
1512 	}
1513 }
1514 
bnxt_re_destroy_qp(struct ib_qp * ib_qp,struct ib_udata * udata)1515 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
1516 {
1517 	struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp);
1518 	struct bnxt_re_dev *rdev = qp->rdev;
1519 	unsigned long flags;
1520 	u32 active_qps;
1521 	int rc;
1522 
1523 	mutex_lock(&rdev->qp_lock);
1524 	list_del(&qp->list);
1525 	active_qps = atomic_dec_return(&rdev->stats.rsors.qp_count);
1526 	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC)
1527 		atomic_dec(&rdev->stats.rsors.rc_qp_count);
1528 	else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
1529 		atomic_dec(&rdev->stats.rsors.ud_qp_count);
1530 	mutex_unlock(&rdev->qp_lock);
1531 
1532 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1533 	if (rc)
1534 		dev_err_ratelimited(rdev_to_dev(rdev),
1535 				   "%s id = %d failed rc = %d\n",
1536 				    __func__, qp->qplib_qp.id, rc);
1537 
1538 	if (!ib_qp->uobject) {
1539 		flags = bnxt_re_lock_cqs(qp);
1540 		bnxt_qplib_clean_qp(&qp->qplib_qp);
1541 		bnxt_re_unlock_cqs(qp, flags);
1542 	}
1543 
1544 	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
1545 	if (ib_qp->qp_type == IB_QPT_GSI &&
1546 	    rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
1547 		if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL &&
1548 		    rdev->gsi_ctx.gsi_sqp) {
1549 			bnxt_re_destroy_gsi_sqp(qp);
1550 		}
1551 		bnxt_qplib_free_hdr_buf(&rdev->qplib_res, &qp->qplib_qp);
1552 	}
1553 
1554 	if (qp->rumem && !IS_ERR(qp->rumem))
1555 		ib_umem_release(qp->rumem);
1556 	if (qp->sumem && !IS_ERR(qp->sumem))
1557 		ib_umem_release(qp->sumem);
1558 	kfree(qp);
1559 
1560 	bnxt_re_dump_debug_stats(rdev, active_qps);
1561 
1562 	return 0;
1563 }
1564 
__from_ib_qp_type(enum ib_qp_type type)1565 static u8 __from_ib_qp_type(enum ib_qp_type type)
1566 {
1567 	switch (type) {
1568 	case IB_QPT_GSI:
1569 		return CMDQ_CREATE_QP1_TYPE_GSI;
1570 	case IB_QPT_RC:
1571 		return CMDQ_CREATE_QP_TYPE_RC;
1572 	case IB_QPT_UD:
1573 		return CMDQ_CREATE_QP_TYPE_UD;
1574 	case IB_QPT_RAW_ETHERTYPE:
1575 		return CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE;
1576 	default:
1577 		return IB_QPT_MAX;
1578 	}
1579 }
1580 
_get_swqe_sz(int nsge)1581 static u16 _get_swqe_sz(int nsge)
1582 {
1583 	return sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
1584 }
1585 
bnxt_re_get_swqe_size(int ilsize,int nsge)1586 static int bnxt_re_get_swqe_size(int ilsize, int nsge)
1587 {
1588 	u16 wqe_size, calc_ils;
1589 
1590 	wqe_size = _get_swqe_sz(nsge);
1591 	if (ilsize) {
1592 		calc_ils = (sizeof(struct sq_send_hdr) + ilsize);
1593 		wqe_size = max_t(int, calc_ils, wqe_size);
1594 		wqe_size = ALIGN(wqe_size, 32);
1595 	}
1596 	return wqe_size;
1597 }
1598 
bnxt_re_setup_swqe_size(struct bnxt_re_qp * qp,struct ib_qp_init_attr * init_attr)1599 static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
1600 				   struct ib_qp_init_attr *init_attr)
1601 {
1602 	struct bnxt_qplib_dev_attr *dev_attr;
1603 	struct bnxt_qplib_qp *qplqp;
1604 	struct bnxt_re_dev *rdev;
1605 	struct bnxt_qplib_q *sq;
1606 	int align, ilsize;
1607 
1608 	rdev = qp->rdev;
1609 	qplqp = &qp->qplib_qp;
1610 	sq = &qplqp->sq;
1611 	dev_attr = rdev->dev_attr;
1612 
1613 	align = sizeof(struct sq_send_hdr);
1614 	ilsize = ALIGN(init_attr->cap.max_inline_data, align);
1615 
1616 	sq->wqe_size = bnxt_re_get_swqe_size(ilsize, sq->max_sge);
1617 	if (sq->wqe_size > _get_swqe_sz(dev_attr->max_qp_sges))
1618 		return -EINVAL;
1619 	/* For Cu/Wh and gen p5 backward compatibility mode
1620 	 * wqe size is fixed to 128 bytes
1621 	 */
1622 	if (sq->wqe_size < _get_swqe_sz(dev_attr->max_qp_sges) &&
1623 	    qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
1624 		sq->wqe_size = _get_swqe_sz(dev_attr->max_qp_sges);
1625 
1626 	if (init_attr->cap.max_inline_data) {
1627 		qplqp->max_inline_data = sq->wqe_size -
1628 					 sizeof(struct sq_send_hdr);
1629 		init_attr->cap.max_inline_data = qplqp->max_inline_data;
1630 		if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
1631 			sq->max_sge = qplqp->max_inline_data /
1632 				      sizeof(struct sq_sge);
1633 	}
1634 
1635 	return 0;
1636 }
1637 
bnxt_re_init_user_qp(struct bnxt_re_dev * rdev,struct bnxt_re_pd * pd,struct bnxt_re_qp * qp,struct ib_udata * udata)1638 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev,
1639 				struct bnxt_re_pd *pd, struct bnxt_re_qp *qp,
1640 				struct ib_udata *udata)
1641 {
1642 	struct bnxt_qplib_sg_info *sginfo;
1643 	struct bnxt_qplib_qp *qplib_qp;
1644 	struct bnxt_re_ucontext *cntx;
1645 	struct ib_ucontext *context;
1646 	struct bnxt_re_qp_req ureq;
1647 	struct ib_umem *umem;
1648 	int rc, bytes = 0;
1649 	int psn_nume;
1650 	int psn_sz;
1651 
1652 	qplib_qp = &qp->qplib_qp;
1653 	context = pd->ibpd.uobject->context;
1654 	cntx = to_bnxt_re(context, struct bnxt_re_ucontext, ibucontext);
1655 	sginfo = &qplib_qp->sq.sginfo;
1656 
1657 	if (udata->inlen < sizeof(ureq))
1658 		dev_warn(rdev_to_dev(rdev),
1659 			 "Update the library ulen %d klen %d\n",
1660 			 (unsigned int)udata->inlen,
1661 			 (unsigned int)sizeof(ureq));
1662 
1663 	rc = ib_copy_from_udata(&ureq, udata,
1664 				min(udata->inlen, sizeof(ureq)));
1665 	if (rc)
1666 		return rc;
1667 
1668 	bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
1669 	/* Consider mapping PSN search memory only for RC QPs. */
1670 	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
1671 		psn_sz = _is_chip_gen_p5_p7(rdev->chip_ctx) ?
1672 				sizeof(struct sq_psn_search_ext) :
1673 				sizeof(struct sq_psn_search);
1674 		if (rdev->dev_attr && BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
1675 			psn_sz = sizeof(struct sq_msn_search);
1676 		psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1677 			    qplib_qp->sq.max_wqe :
1678 			    ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
1679 			     sizeof(struct bnxt_qplib_sge));
1680 		if (BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
1681 			psn_nume = roundup_pow_of_two(psn_nume);
1682 
1683 		bytes += (psn_nume * psn_sz);
1684 	}
1685 	bytes = PAGE_ALIGN(bytes);
1686 	umem = ib_umem_get_compat(rdev, context, udata, ureq.qpsva, bytes,
1687 				  IB_ACCESS_LOCAL_WRITE, 1);
1688 	if (IS_ERR(umem)) {
1689 		dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed with %ld\n",
1690 			__func__, PTR_ERR(umem));
1691 		return PTR_ERR(umem);
1692 	}
1693 
1694 	qp->sumem = umem;
1695 	/* pgsize and pgshft were initialize already. */
1696 	sginfo->sghead = get_ib_umem_sgl(umem, &sginfo->nmap);
1697 	sginfo->npages = ib_umem_num_pages_compat(umem);
1698 	qplib_qp->qp_handle = ureq.qp_handle;
1699 
1700 	if (!qp->qplib_qp.srq) {
1701 		sginfo = &qplib_qp->rq.sginfo;
1702 		bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
1703 		bytes = PAGE_ALIGN(bytes);
1704 		umem = ib_umem_get_compat(rdev,
1705 					  context, udata, ureq.qprva, bytes,
1706 					  IB_ACCESS_LOCAL_WRITE, 1);
1707 		if (IS_ERR(umem)) {
1708 			dev_err(rdev_to_dev(rdev),
1709 				"%s: ib_umem_get failed ret =%ld\n",
1710 				__func__, PTR_ERR(umem));
1711 			goto rqfail;
1712 		}
1713 		qp->rumem = umem;
1714 		/* pgsize and pgshft were initialize already. */
1715 		sginfo->sghead = get_ib_umem_sgl(umem, &sginfo->nmap);
1716 		sginfo->npages = ib_umem_num_pages_compat(umem);
1717 	}
1718 
1719 	qplib_qp->dpi = &cntx->dpi;
1720 	qplib_qp->is_user = true;
1721 
1722 	return 0;
1723 rqfail:
1724 	ib_umem_release(qp->sumem);
1725 	qp->sumem = NULL;
1726 	qplib_qp->sq.sginfo.sghead = NULL;
1727 	qplib_qp->sq.sginfo.nmap = 0;
1728 
1729 	return PTR_ERR(umem);
1730 }
1731 
bnxt_re_create_shadow_qp_ah(struct bnxt_re_pd * pd,struct bnxt_qplib_res * qp1_res,struct bnxt_qplib_qp * qp1_qp)1732 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah(struct bnxt_re_pd *pd,
1733 					       struct bnxt_qplib_res *qp1_res,
1734 					       struct bnxt_qplib_qp *qp1_qp)
1735 {
1736 	struct bnxt_re_dev *rdev = pd->rdev;
1737 	struct bnxt_re_ah *ah;
1738 	union ib_gid sgid;
1739 	int rc;
1740 
1741 	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
1742 	if (!ah) {
1743 		dev_err(rdev_to_dev(rdev), "Allocate Address Handle failed!\n");
1744 		return NULL;
1745 	}
1746 	memset(ah, 0, sizeof(*ah));
1747 	ah->rdev = rdev;
1748 	ah->qplib_ah.pd = &pd->qplib_pd;
1749 
1750 	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
1751 	if (rc)
1752 		goto fail;
1753 
1754 	/* supply the dgid data same as sgid */
1755 	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
1756 	       sizeof(union ib_gid));
1757 	ah->qplib_ah.sgid_index = 0;
1758 
1759 	ah->qplib_ah.traffic_class = 0;
1760 	ah->qplib_ah.flow_label = 0;
1761 	ah->qplib_ah.hop_limit = 1;
1762 	ah->qplib_ah.sl = 0;
1763 	/* Have DMAC same as SMAC */
1764 	ether_addr_copy(ah->qplib_ah.dmac, rdev->dev_addr);
1765 	dev_dbg(rdev_to_dev(rdev), "ah->qplib_ah.dmac = %x:%x:%x:%x:%x:%x\n",
1766 		ah->qplib_ah.dmac[0], ah->qplib_ah.dmac[1], ah->qplib_ah.dmac[2],
1767 		ah->qplib_ah.dmac[3], ah->qplib_ah.dmac[4], ah->qplib_ah.dmac[5]);
1768 
1769 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, true);
1770 	if (rc) {
1771 		dev_err(rdev_to_dev(rdev),
1772 			"Allocate HW AH for Shadow QP failed!\n");
1773 		goto fail;
1774 	}
1775 	dev_dbg(rdev_to_dev(rdev), "AH ID = %d\n", ah->qplib_ah.id);
1776 	atomic_inc(&rdev->stats.rsors.ah_count);
1777 
1778 	return ah;
1779 fail:
1780 	kfree(ah);
1781 	return NULL;
1782 }
1783 
bnxt_re_update_shadow_ah(struct bnxt_re_dev * rdev)1784 void bnxt_re_update_shadow_ah(struct bnxt_re_dev *rdev)
1785 {
1786 	struct bnxt_re_qp *gsi_qp;
1787 	struct bnxt_re_ah *sah;
1788 	struct bnxt_re_pd *pd;
1789 	struct ib_pd *ib_pd;
1790 	int rc;
1791 
1792 	if (!rdev)
1793 		return;
1794 
1795 	sah = rdev->gsi_ctx.gsi_sah;
1796 
1797 	dev_dbg(rdev_to_dev(rdev), "Updating the AH\n");
1798 	if (sah) {
1799 		/* Check if the AH created with current mac address */
1800 		if (!compare_ether_header(sah->qplib_ah.dmac, rdev->dev_addr)) {
1801 			dev_dbg(rdev_to_dev(rdev),
1802 				"Not modifying shadow AH during AH update\n");
1803 			return;
1804 		}
1805 
1806 		gsi_qp = rdev->gsi_ctx.gsi_qp;
1807 		ib_pd = gsi_qp->ib_qp.pd;
1808 		pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
1809 		rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
1810 					   &sah->qplib_ah, false);
1811 		if (rc) {
1812 			dev_err(rdev_to_dev(rdev),
1813 				"Failed to destroy shadow AH during AH update\n");
1814 			return;
1815 		}
1816 		atomic_dec(&rdev->stats.rsors.ah_count);
1817 		kfree(sah);
1818 		rdev->gsi_ctx.gsi_sah = NULL;
1819 
1820 		sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1821 						  &gsi_qp->qplib_qp);
1822 		if (!sah) {
1823 			dev_err(rdev_to_dev(rdev),
1824 				"Failed to update AH for ShadowQP\n");
1825 			return;
1826 		}
1827 		rdev->gsi_ctx.gsi_sah = sah;
1828 		atomic_inc(&rdev->stats.rsors.ah_count);
1829 	}
1830 }
1831 
bnxt_re_create_shadow_qp(struct bnxt_re_pd * pd,struct bnxt_qplib_res * qp1_res,struct bnxt_qplib_qp * qp1_qp)1832 static struct bnxt_re_qp *bnxt_re_create_shadow_qp(struct bnxt_re_pd *pd,
1833 					    struct bnxt_qplib_res *qp1_res,
1834 					    struct bnxt_qplib_qp *qp1_qp)
1835 {
1836 	struct bnxt_re_dev *rdev = pd->rdev;
1837 	struct bnxt_re_qp *qp;
1838 	int rc;
1839 
1840 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1841 	if (!qp) {
1842 		dev_err(rdev_to_dev(rdev), "Allocate internal UD QP failed!\n");
1843 		return NULL;
1844 	}
1845 	memset(qp, 0, sizeof(*qp));
1846 	qp->rdev = rdev;
1847 
1848 	/* Initialize the shadow QP structure from the QP1 values */
1849 	ether_addr_copy(qp->qplib_qp.smac, rdev->dev_addr);
1850 	qp->qplib_qp.pd = &pd->qplib_pd;
1851 	qp->qplib_qp.qp_handle = (u64)&qp->qplib_qp;
1852 	qp->qplib_qp.type = IB_QPT_UD;
1853 
1854 	qp->qplib_qp.max_inline_data = 0;
1855 	qp->qplib_qp.sig_type = true;
1856 
1857 	/* Shadow QP SQ depth should be same as QP1 RQ depth */
1858 	qp->qplib_qp.sq.wqe_size = bnxt_re_get_swqe_size(0, 6);
1859 	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1860 	qp->qplib_qp.sq.max_sge = 2;
1861 	/* Q full delta can be 1 since it is internal QP */
1862 	qp->qplib_qp.sq.q_full_delta = 1;
1863 	qp->qplib_qp.sq.sginfo.pgsize = PAGE_SIZE;
1864 	qp->qplib_qp.sq.sginfo.pgshft = PAGE_SHIFT;
1865 
1866 	qp->qplib_qp.scq = qp1_qp->scq;
1867 	qp->qplib_qp.rcq = qp1_qp->rcq;
1868 
1869 	qp->qplib_qp.rq.wqe_size = _max_rwqe_sz(6); /* 128 Byte wqe size */
1870 	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1871 	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1872 	qp->qplib_qp.rq.sginfo.pgsize = PAGE_SIZE;
1873 	qp->qplib_qp.rq.sginfo.pgshft = PAGE_SHIFT;
1874 	/* Q full delta can be 1 since it is internal QP */
1875 	qp->qplib_qp.rq.q_full_delta = 1;
1876 	qp->qplib_qp.mtu = qp1_qp->mtu;
1877 	qp->qplib_qp.dpi = &rdev->dpi_privileged;
1878 
1879 	rc = bnxt_qplib_alloc_hdr_buf(qp1_res, &qp->qplib_qp, 0,
1880 				      BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6);
1881 	if (rc)
1882 		goto fail;
1883 
1884 	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1885 	if (rc) {
1886 		dev_err(rdev_to_dev(rdev), "create HW QP failed!\n");
1887 		goto qp_fail;
1888 	}
1889 
1890 	dev_dbg(rdev_to_dev(rdev), "Created shadow QP with ID = %d\n",
1891 		qp->qplib_qp.id);
1892 	spin_lock_init(&qp->sq_lock);
1893 	INIT_LIST_HEAD(&qp->list);
1894 	mutex_lock(&rdev->qp_lock);
1895 	list_add_tail(&qp->list, &rdev->qp_list);
1896 	atomic_inc(&rdev->stats.rsors.qp_count);
1897 	mutex_unlock(&rdev->qp_lock);
1898 	return qp;
1899 qp_fail:
1900 	bnxt_qplib_free_hdr_buf(qp1_res, &qp->qplib_qp);
1901 fail:
1902 	kfree(qp);
1903 	return NULL;
1904 }
1905 
bnxt_re_init_rq_attr(struct bnxt_re_qp * qp,struct ib_qp_init_attr * init_attr,void * cntx)1906 static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
1907 				struct ib_qp_init_attr *init_attr, void *cntx)
1908 {
1909 	struct bnxt_qplib_dev_attr *dev_attr;
1910 	struct bnxt_qplib_qp *qplqp;
1911 	struct bnxt_re_dev *rdev;
1912 	struct bnxt_qplib_q *rq;
1913 	int entries;
1914 
1915 	rdev = qp->rdev;
1916 	qplqp = &qp->qplib_qp;
1917 	rq = &qplqp->rq;
1918 	dev_attr = rdev->dev_attr;
1919 
1920 	if (init_attr->srq) {
1921 		struct bnxt_re_srq *srq;
1922 
1923 		srq = to_bnxt_re(init_attr->srq, struct bnxt_re_srq, ibsrq);
1924 		if (!srq) {
1925 			dev_err(rdev_to_dev(rdev), "SRQ not found\n");
1926 			return -EINVAL;
1927 		}
1928 		qplqp->srq = &srq->qplib_srq;
1929 		rq->max_wqe = 0;
1930 	} else {
1931 		rq->max_sge = init_attr->cap.max_recv_sge;
1932 		if (rq->max_sge > dev_attr->max_qp_sges)
1933 			rq->max_sge = dev_attr->max_qp_sges;
1934 		init_attr->cap.max_recv_sge = rq->max_sge;
1935 		rq->wqe_size = bnxt_re_get_rwqe_size(qplqp, rq->max_sge,
1936 						     dev_attr->max_qp_sges);
1937 
1938 		/* Allocate 1 more than what's provided so posting max doesn't
1939 		   mean empty */
1940 		entries = init_attr->cap.max_recv_wr + 1;
1941 		entries = bnxt_re_init_depth(entries, cntx);
1942 		rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1943 		rq->q_full_delta = 0;
1944 		rq->sginfo.pgsize = PAGE_SIZE;
1945 		rq->sginfo.pgshft = PAGE_SHIFT;
1946 	}
1947 
1948 	return 0;
1949 }
1950 
bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp * qp)1951 static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
1952 {
1953 	struct bnxt_qplib_dev_attr *dev_attr;
1954 	struct bnxt_qplib_qp *qplqp;
1955 	struct bnxt_re_dev *rdev;
1956 
1957 	rdev = qp->rdev;
1958 	qplqp = &qp->qplib_qp;
1959 	dev_attr = rdev->dev_attr;
1960 
1961 	if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD)
1962 		qplqp->rq.max_sge = dev_attr->max_qp_sges;
1963 }
1964 
bnxt_re_init_sq_attr(struct bnxt_re_qp * qp,struct ib_qp_init_attr * init_attr,void * cntx)1965 static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
1966 				struct ib_qp_init_attr *init_attr,
1967 				void *cntx)
1968 {
1969 	struct bnxt_qplib_dev_attr *dev_attr;
1970 	struct bnxt_qplib_qp *qplqp;
1971 	struct bnxt_re_dev *rdev;
1972 	struct bnxt_qplib_q *sq;
1973 	int diff = 0;
1974 	int entries;
1975 	int rc;
1976 
1977 	rdev = qp->rdev;
1978 	qplqp = &qp->qplib_qp;
1979 	sq = &qplqp->sq;
1980 	dev_attr = rdev->dev_attr;
1981 
1982 	sq->max_sge = init_attr->cap.max_send_sge;
1983 	if (sq->max_sge > dev_attr->max_qp_sges) {
1984 		sq->max_sge = dev_attr->max_qp_sges;
1985 		init_attr->cap.max_send_sge = sq->max_sge;
1986 	}
1987 	rc = bnxt_re_setup_swqe_size(qp, init_attr);
1988 	if (rc)
1989 		return rc;
1990 	/*
1991 	 * Change the SQ depth if user has requested minimum using
1992 	 * configfs. Only supported for kernel consumers. Setting
1993 	 * min_tx_depth to 4096 to handle iser SQ full condition
1994 	 * in most of the newer OS distros
1995 	 */
1996 	entries = init_attr->cap.max_send_wr;
1997 	if (!cntx && rdev->min_tx_depth && init_attr->qp_type != IB_QPT_GSI) {
1998 		/*
1999 		 * If users specify any value greater than 1 use min_tx_depth
2000 		 * provided by user for comparison. Else, compare it with the
2001 		 * BNXT_RE_MIN_KERNEL_QP_TX_DEPTH and adjust it accordingly.
2002 		 */
2003 		if (rdev->min_tx_depth > 1 && entries < rdev->min_tx_depth)
2004 			entries = rdev->min_tx_depth;
2005 		else if (entries < BNXT_RE_MIN_KERNEL_QP_TX_DEPTH)
2006 			entries = BNXT_RE_MIN_KERNEL_QP_TX_DEPTH;
2007 	}
2008 	diff = bnxt_re_get_diff(cntx, rdev->chip_ctx);
2009 	entries = bnxt_re_init_depth(entries + diff + 1, cntx);
2010 	sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
2011 	sq->q_full_delta = diff + 1;
2012 	/*
2013 	 * Reserving one slot for Phantom WQE. Application can
2014 	 * post one extra entry in this case. But allowing this to avoid
2015 	 * unexpected Queue full condition
2016 	 */
2017 	sq->q_full_delta -= 1; /* becomes 0 for gen-p5 */
2018 	sq->sginfo.pgsize = PAGE_SIZE;
2019 	sq->sginfo.pgshft = PAGE_SHIFT;
2020 	return 0;
2021 }
2022 
bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp * qp,struct ib_qp_init_attr * init_attr,void * cntx)2023 static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
2024 				       struct ib_qp_init_attr *init_attr,
2025 				       void *cntx)
2026 {
2027 	struct bnxt_qplib_dev_attr *dev_attr;
2028 	struct bnxt_qplib_qp *qplqp;
2029 	struct bnxt_re_dev *rdev;
2030 	int entries;
2031 
2032 	rdev = qp->rdev;
2033 	qplqp = &qp->qplib_qp;
2034 	dev_attr = rdev->dev_attr;
2035 
2036 	if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
2037 		entries = init_attr->cap.max_send_wr + 1;
2038 		entries = bnxt_re_init_depth(entries, cntx);
2039 		qplqp->sq.max_wqe = min_t(u32, entries,
2040 					  dev_attr->max_qp_wqes + 1);
2041 		qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
2042 					 init_attr->cap.max_send_wr;
2043 		qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
2044 		if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
2045 			qplqp->sq.max_sge = dev_attr->max_qp_sges;
2046 	}
2047 }
2048 
bnxt_re_init_qp_type(struct bnxt_re_dev * rdev,struct ib_qp_init_attr * init_attr)2049 static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
2050 				struct ib_qp_init_attr *init_attr)
2051 {
2052 	struct bnxt_qplib_chip_ctx *chip_ctx;
2053 	struct bnxt_re_gsi_context *gsi_ctx;
2054 	int qptype;
2055 
2056 	chip_ctx = rdev->chip_ctx;
2057 	gsi_ctx = &rdev->gsi_ctx;
2058 
2059 	qptype = __from_ib_qp_type(init_attr->qp_type);
2060 	if (qptype == IB_QPT_MAX) {
2061 		dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported\n",
2062 			qptype);
2063 		qptype = -EINVAL;
2064 		goto out;
2065 	}
2066 
2067 	if (_is_chip_gen_p5_p7(chip_ctx) && init_attr->qp_type == IB_QPT_GSI) {
2068 		/* For Thor always force UD mode. */
2069 		qptype = CMDQ_CREATE_QP_TYPE_GSI;
2070 		gsi_ctx->gsi_qp_mode = BNXT_RE_GSI_MODE_UD;
2071 	}
2072 out:
2073 	return qptype;
2074 }
2075 
bnxt_re_init_qp_wqe_mode(struct bnxt_re_dev * rdev)2076 static int bnxt_re_init_qp_wqe_mode(struct bnxt_re_dev *rdev)
2077 {
2078 	return rdev->chip_ctx->modes.wqe_mode;
2079 }
2080 
bnxt_re_init_qp_attr(struct bnxt_re_qp * qp,struct bnxt_re_pd * pd,struct ib_qp_init_attr * init_attr,struct ib_udata * udata)2081 static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
2082 				struct ib_qp_init_attr *init_attr,
2083 				struct ib_udata *udata)
2084 {
2085 	struct bnxt_qplib_dev_attr *dev_attr;
2086 	struct bnxt_re_ucontext *cntx = NULL;
2087 	struct ib_ucontext *context;
2088 	struct bnxt_qplib_qp *qplqp;
2089 	struct bnxt_re_dev *rdev;
2090 	struct bnxt_re_cq *cq;
2091 	int rc = 0, qptype;
2092 
2093 	rdev = qp->rdev;
2094 	qplqp = &qp->qplib_qp;
2095 	dev_attr = rdev->dev_attr;
2096 
2097 	if (udata) {
2098 		context = pd->ibpd.uobject->context;
2099 		cntx = to_bnxt_re(context, struct bnxt_re_ucontext, ibucontext);
2100 	}
2101 
2102 	/* Setup misc params */
2103 	qplqp->is_user = false;
2104 	qplqp->pd = &pd->qplib_pd;
2105 	qplqp->qp_handle = (u64)qplqp;
2106 	qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
2107 			    true : false);
2108 	qptype = bnxt_re_init_qp_type(rdev, init_attr);
2109 	if (qptype < 0) {
2110 		rc = qptype;
2111 		goto out;
2112 	}
2113 	qplqp->type = (u8)qptype;
2114 	qplqp->wqe_mode = bnxt_re_init_qp_wqe_mode(rdev);
2115 	ether_addr_copy(qplqp->smac, rdev->dev_addr);
2116 
2117 	if (init_attr->qp_type == IB_QPT_RC) {
2118 		qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
2119 		qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
2120 	}
2121 	qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->if_mtu));
2122 	qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
2123 	if (init_attr->create_flags) {
2124 		dev_dbg(rdev_to_dev(rdev),
2125 			"QP create flags 0x%x not supported\n",
2126 			init_attr->create_flags);
2127 		return -EOPNOTSUPP;
2128 	}
2129 
2130 	/* Setup CQs */
2131 	if (init_attr->send_cq) {
2132 		cq = to_bnxt_re(init_attr->send_cq, struct bnxt_re_cq, ibcq);
2133 		if (!cq) {
2134 			dev_err(rdev_to_dev(rdev), "Send CQ not found\n");
2135 			rc = -EINVAL;
2136 			goto out;
2137 		}
2138 		qplqp->scq = &cq->qplib_cq;
2139 		qp->scq = cq;
2140 	}
2141 
2142 	if (init_attr->recv_cq) {
2143 		cq = to_bnxt_re(init_attr->recv_cq, struct bnxt_re_cq, ibcq);
2144 		if (!cq) {
2145 			dev_err(rdev_to_dev(rdev), "Receive CQ not found\n");
2146 			rc = -EINVAL;
2147 			goto out;
2148 		}
2149 		qplqp->rcq = &cq->qplib_cq;
2150 		qp->rcq = cq;
2151 	}
2152 
2153 	/* Setup RQ/SRQ */
2154 	rc = bnxt_re_init_rq_attr(qp, init_attr, cntx);
2155 	if (rc)
2156 		goto out;
2157 	if (init_attr->qp_type == IB_QPT_GSI)
2158 		bnxt_re_adjust_gsi_rq_attr(qp);
2159 
2160 	/* Setup SQ */
2161 	rc = bnxt_re_init_sq_attr(qp, init_attr, cntx);
2162 	if (rc)
2163 		goto out;
2164 	if (init_attr->qp_type == IB_QPT_GSI)
2165 		bnxt_re_adjust_gsi_sq_attr(qp, init_attr, cntx);
2166 
2167 	if (udata) /* This will update DPI and qp_handle */
2168 		rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
2169 out:
2170 	return rc;
2171 }
2172 
bnxt_re_create_shadow_gsi(struct bnxt_re_qp * qp,struct bnxt_re_pd * pd)2173 static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
2174 				     struct bnxt_re_pd *pd)
2175 {
2176 	struct bnxt_re_sqp_entries *sqp_tbl = NULL;
2177 	struct bnxt_re_dev *rdev;
2178 	struct bnxt_re_qp *sqp;
2179 	struct bnxt_re_ah *sah;
2180 	int rc = 0;
2181 
2182 	rdev = qp->rdev;
2183 	/* Create a shadow QP to handle the QP1 traffic */
2184 	sqp_tbl = kzalloc(sizeof(*sqp_tbl) * BNXT_RE_MAX_GSI_SQP_ENTRIES,
2185 			  GFP_KERNEL);
2186 	if (!sqp_tbl)
2187 		return -ENOMEM;
2188 	rdev->gsi_ctx.sqp_tbl = sqp_tbl;
2189 
2190 	sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
2191 	if (!sqp) {
2192 		rc = -ENODEV;
2193 		dev_err(rdev_to_dev(rdev),
2194 			"Failed to create Shadow QP for QP1\n");
2195 		goto out;
2196 	}
2197 	rdev->gsi_ctx.gsi_sqp = sqp;
2198 
2199 	sqp->rcq = qp->rcq;
2200 	sqp->scq = qp->scq;
2201 	sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
2202 			&qp->qplib_qp);
2203 	if (!sah) {
2204 		bnxt_qplib_destroy_qp(&rdev->qplib_res,
2205 				&sqp->qplib_qp);
2206 		rc = -ENODEV;
2207 		dev_err(rdev_to_dev(rdev),
2208 				"Failed to create AH entry for ShadowQP\n");
2209 		goto out;
2210 	}
2211 	rdev->gsi_ctx.gsi_sah = sah;
2212 
2213 	return 0;
2214 out:
2215 	kfree(sqp_tbl);
2216 	return rc;
2217 }
2218 
__get_rq_hdr_buf_size(u8 gsi_mode)2219 static int __get_rq_hdr_buf_size(u8 gsi_mode)
2220 {
2221 	return (gsi_mode == BNXT_RE_GSI_MODE_ALL) ?
2222 		BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2 :
2223 		BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE;
2224 }
2225 
__get_sq_hdr_buf_size(u8 gsi_mode)2226 static int __get_sq_hdr_buf_size(u8 gsi_mode)
2227 {
2228 	return (gsi_mode != BNXT_RE_GSI_MODE_ROCE_V1) ?
2229 		BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2 :
2230 		BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE;
2231 }
2232 
bnxt_re_create_gsi_qp(struct bnxt_re_qp * qp,struct bnxt_re_pd * pd)2233 static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd)
2234 {
2235 	struct bnxt_qplib_qp *qplqp;
2236 	struct bnxt_qplib_res *res;
2237 	struct bnxt_re_dev *rdev;
2238 	u32 sstep, rstep;
2239 	u8 gsi_mode;
2240 	int rc = 0;
2241 
2242 	rdev = qp->rdev;
2243 	qplqp = &qp->qplib_qp;
2244 	res = &rdev->qplib_res;
2245 	gsi_mode = rdev->gsi_ctx.gsi_qp_mode;
2246 
2247 	rstep = __get_rq_hdr_buf_size(gsi_mode);
2248 	sstep = __get_sq_hdr_buf_size(gsi_mode);
2249 	rc = bnxt_qplib_alloc_hdr_buf(res, qplqp, sstep, rstep);
2250 	if (rc)
2251 		goto out;
2252 
2253 	rc = bnxt_qplib_create_qp1(res, qplqp);
2254 	if (rc) {
2255 		dev_err(rdev_to_dev(rdev), "create HW QP1 failed!\n");
2256 		goto out;
2257 	}
2258 
2259 	if (gsi_mode == BNXT_RE_GSI_MODE_ALL)
2260 		rc = bnxt_re_create_shadow_gsi(qp, pd);
2261 out:
2262 	return rc;
2263 }
2264 
bnxt_re_test_qp_limits(struct bnxt_re_dev * rdev,struct ib_qp_init_attr * init_attr,struct bnxt_qplib_dev_attr * dev_attr)2265 static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
2266 				   struct ib_qp_init_attr *init_attr,
2267 				   struct bnxt_qplib_dev_attr *dev_attr)
2268 {
2269 	bool rc = true;
2270 	int ilsize;
2271 
2272 	ilsize = ALIGN(init_attr->cap.max_inline_data, sizeof(struct sq_sge));
2273 	if ((init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
2274 	    (init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
2275 	    (init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
2276 	    (init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
2277 	    (ilsize > dev_attr->max_inline_data)) {
2278 		dev_err(rdev_to_dev(rdev), "Create QP failed - max exceeded! "
2279 			"0x%x/0x%x 0x%x/0x%x 0x%x/0x%x "
2280 			"0x%x/0x%x 0x%x/0x%x\n",
2281 			init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
2282 			init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
2283 			init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
2284 			init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
2285 			init_attr->cap.max_inline_data,
2286 			dev_attr->max_inline_data);
2287 		rc = false;
2288 	}
2289 	return rc;
2290 }
2291 
2292 static inline struct
__get_qp_from_qp_in(struct ib_pd * qp_in,struct bnxt_re_dev * rdev)2293 bnxt_re_qp *__get_qp_from_qp_in(struct ib_pd *qp_in,
2294 				struct bnxt_re_dev *rdev)
2295 {
2296 	struct bnxt_re_qp *qp;
2297 
2298 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2299 	if (!qp)
2300 		dev_err(rdev_to_dev(rdev), "Allocate QP failed!\n");
2301 	return qp;
2302 }
2303 
bnxt_re_create_qp(struct ib_pd * qp_in,struct ib_qp_init_attr * qp_init_attr,struct ib_udata * udata)2304 struct ib_qp *bnxt_re_create_qp(struct ib_pd *qp_in,
2305 			       struct ib_qp_init_attr *qp_init_attr,
2306 			       struct ib_udata *udata)
2307 {
2308 	struct bnxt_re_pd *pd;
2309 	struct ib_pd *ib_pd = qp_in;
2310 	struct bnxt_qplib_dev_attr *dev_attr;
2311 	struct bnxt_re_dev *rdev;
2312 	u32 active_qps, tmp_qps;
2313 	struct bnxt_re_qp *qp;
2314 	int rc;
2315 
2316 	pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
2317 	rdev = pd->rdev;
2318 	dev_attr = rdev->dev_attr;
2319 	if (rdev->mod_exit) {
2320 		rc = -EIO;
2321 		dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__);
2322 		goto exit;
2323 	}
2324 
2325 	if (atomic_read(&rdev->stats.rsors.qp_count) >= dev_attr->max_qp) {
2326 		dev_err(rdev_to_dev(rdev), "Create QP failed - max exceeded(QPs Alloc'd %u of max %u)\n",
2327 			atomic_read(&rdev->stats.rsors.qp_count), dev_attr->max_qp);
2328 		rc = -EINVAL;
2329 		goto exit;
2330 	}
2331 
2332 	rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
2333 	if (!rc) {
2334 		rc = -EINVAL;
2335 		goto exit;
2336 	}
2337 	qp = __get_qp_from_qp_in(qp_in, rdev);
2338 	if (!qp) {
2339 		rc = -ENOMEM;
2340 		goto exit;
2341 	}
2342 	qp->rdev = rdev;
2343 
2344 	rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
2345 	if (rc)
2346 		goto fail;
2347 
2348 	if (qp_init_attr->qp_type == IB_QPT_GSI &&
2349 	    !_is_chip_gen_p5_p7(rdev->chip_ctx)) {
2350 		rc = bnxt_re_create_gsi_qp(qp, pd);
2351 		if (rc == -ENODEV)
2352 			goto qp_destroy;
2353 		if (rc)
2354 			goto fail;
2355 	} else {
2356 		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
2357 		if (rc) {
2358 			dev_err(rdev_to_dev(rdev), "create HW QP failed!\n");
2359 			goto free_umem;
2360 		}
2361 
2362 		if (udata) {
2363 			struct bnxt_re_qp_resp resp;
2364 
2365 			resp.qpid = qp->qplib_qp.id;
2366 			rc = bnxt_re_copy_to_udata(rdev, &resp,
2367 						   min(udata->outlen, sizeof(resp)),
2368 						   udata);
2369 			if (rc)
2370 				goto qp_destroy;
2371 		}
2372 	}
2373 
2374 	qp->ib_qp.qp_num = qp->qplib_qp.id;
2375 	if (qp_init_attr->qp_type == IB_QPT_GSI)
2376 		rdev->gsi_ctx.gsi_qp = qp;
2377 	spin_lock_init(&qp->sq_lock);
2378 	spin_lock_init(&qp->rq_lock);
2379 	INIT_LIST_HEAD(&qp->list);
2380 	mutex_lock(&rdev->qp_lock);
2381 	list_add_tail(&qp->list, &rdev->qp_list);
2382 	mutex_unlock(&rdev->qp_lock);
2383 	atomic_inc(&rdev->stats.rsors.qp_count);
2384 	active_qps = atomic_read(&rdev->stats.rsors.qp_count);
2385 	if (active_qps > atomic_read(&rdev->stats.rsors.max_qp_count))
2386 		atomic_set(&rdev->stats.rsors.max_qp_count, active_qps);
2387 
2388 	bnxt_re_dump_debug_stats(rdev, active_qps);
2389 
2390 	/* Get the counters for RC QPs and UD QPs */
2391 	if (qp_init_attr->qp_type == IB_QPT_RC) {
2392 		tmp_qps = atomic_inc_return(&rdev->stats.rsors.rc_qp_count);
2393 		if (tmp_qps > atomic_read(&rdev->stats.rsors.max_rc_qp_count))
2394 			atomic_set(&rdev->stats.rsors.max_rc_qp_count, tmp_qps);
2395 	} else if (qp_init_attr->qp_type == IB_QPT_UD) {
2396 		tmp_qps = atomic_inc_return(&rdev->stats.rsors.ud_qp_count);
2397 		if (tmp_qps > atomic_read(&rdev->stats.rsors.max_ud_qp_count))
2398 			atomic_set(&rdev->stats.rsors.max_ud_qp_count, tmp_qps);
2399 	}
2400 
2401 	return &qp->ib_qp;
2402 
2403 qp_destroy:
2404 	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
2405 free_umem:
2406 	if (udata) {
2407 		if (qp->rumem && !IS_ERR(qp->rumem))
2408 			ib_umem_release(qp->rumem);
2409 		if (qp->sumem && !IS_ERR(qp->sumem))
2410 			ib_umem_release(qp->sumem);
2411 	}
2412 fail:
2413 	kfree(qp);
2414 exit:
2415 	return ERR_PTR(rc);
2416 }
2417 
bnxt_re_modify_shadow_qp(struct bnxt_re_dev * rdev,struct bnxt_re_qp * qp1_qp,int qp_attr_mask)2418 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
2419 			     struct bnxt_re_qp *qp1_qp,
2420 			     int qp_attr_mask)
2421 {
2422 	struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
2423 	int rc = 0;
2424 
2425 	if (qp_attr_mask & IB_QP_STATE) {
2426 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
2427 		qp->qplib_qp.state = qp1_qp->qplib_qp.state;
2428 	}
2429 	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
2430 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
2431 		qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
2432 	}
2433 
2434 	if (qp_attr_mask & IB_QP_QKEY) {
2435 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
2436 		/* Using a Random  QKEY */
2437 		qp->qplib_qp.qkey = BNXT_RE_QP_RANDOM_QKEY;
2438 	}
2439 	if (qp_attr_mask & IB_QP_SQ_PSN) {
2440 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
2441 		qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
2442 	}
2443 
2444 	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2445 	if (rc)
2446 		dev_err(rdev_to_dev(rdev), "Modify Shadow QP for QP1 failed\n");
2447 	return rc;
2448 }
2449 
ipv4_from_gid(u8 * gid)2450 static u32 ipv4_from_gid(u8 *gid)
2451 {
2452 	return (gid[15] << 24 | gid[14] << 16 | gid[13] << 8 | gid[12]);
2453 }
2454 
get_source_port(struct bnxt_re_dev * rdev,struct bnxt_re_qp * qp)2455 static u16 get_source_port(struct bnxt_re_dev *rdev,
2456 			   struct bnxt_re_qp *qp)
2457 {
2458 	u8 ip_off, data[48], smac[ETH_ALEN];
2459 	u16 crc = 0, buf_len = 0, i;
2460 	u8 addr_len;
2461 	u32 qpn;
2462 
2463 	if (qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6) {
2464 		addr_len = 6;
2465 		ip_off = 10;
2466 	} else {
2467 		addr_len = 4;
2468 		ip_off = 12;
2469 	}
2470 
2471 	memcpy(smac, qp->qplib_qp.smac, ETH_ALEN);
2472 
2473 	memset(data, 0, 48);
2474 	memcpy(data, qp->qplib_qp.ah.dmac, ETH_ALEN);
2475 	buf_len += ETH_ALEN;
2476 
2477 	memcpy(data + buf_len, smac, ETH_ALEN);
2478 	buf_len += ETH_ALEN;
2479 
2480 	memcpy(data + buf_len, qp->qplib_qp.ah.dgid.data + ip_off, addr_len);
2481 	buf_len += addr_len;
2482 
2483 	memcpy(data + buf_len, qp->qp_info_entry.sgid.raw + ip_off, addr_len);
2484 	buf_len += addr_len;
2485 
2486 	qpn = htonl(qp->qplib_qp.dest_qpn);
2487 	memcpy(data + buf_len, (u8 *)&qpn + 1, 3);
2488 	buf_len += 3;
2489 
2490 	for (i = 0; i < buf_len; i++)
2491 		crc = crc16(crc, (data + i), 1);
2492 
2493 	return crc;
2494 }
2495 
bnxt_re_update_qp_info(struct bnxt_re_dev * rdev,struct bnxt_re_qp * qp)2496 static void bnxt_re_update_qp_info(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
2497 {
2498 	u16 type;
2499 
2500 	type = __from_hw_to_ib_qp_type(qp->qplib_qp.type);
2501 
2502 	/* User-space can extract ip address with sgid_index. */
2503 	if (ipv6_addr_v4mapped((struct in6_addr *)&qp->qplib_qp.ah.dgid)) {
2504 		qp->qp_info_entry.s_ip.ipv4_addr = ipv4_from_gid(qp->qp_info_entry.sgid.raw);
2505 		qp->qp_info_entry.d_ip.ipv4_addr = ipv4_from_gid(qp->qplib_qp.ah.dgid.data);
2506 	} else {
2507 		memcpy(&qp->qp_info_entry.s_ip.ipv6_addr, qp->qp_info_entry.sgid.raw,
2508 		       sizeof(qp->qp_info_entry.s_ip.ipv6_addr));
2509 		memcpy(&qp->qp_info_entry.d_ip.ipv6_addr, qp->qplib_qp.ah.dgid.data,
2510 		       sizeof(qp->qp_info_entry.d_ip.ipv6_addr));
2511 	}
2512 
2513 	if (type == IB_QPT_RC &&
2514 	    (qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4 ||
2515 	     qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6)) {
2516 		qp->qp_info_entry.s_port = get_source_port(rdev, qp);
2517 	}
2518 	qp->qp_info_entry.d_port = BNXT_RE_QP_DEST_PORT;
2519 }
2520 
bnxt_qplib_manage_flush_qp(struct bnxt_re_qp * qp)2521 static void bnxt_qplib_manage_flush_qp(struct bnxt_re_qp *qp)
2522 {
2523 	struct bnxt_qplib_q *rq, *sq;
2524 	unsigned long flags;
2525 
2526 	if (qp->sumem)
2527 		return;
2528 
2529 	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2530 		rq = &qp->qplib_qp.rq;
2531 		sq = &qp->qplib_qp.sq;
2532 
2533 		dev_dbg(rdev_to_dev(qp->rdev),
2534 			"Move QP = %p to flush list\n", qp);
2535 		flags = bnxt_re_lock_cqs(qp);
2536 		bnxt_qplib_add_flush_qp(&qp->qplib_qp);
2537 		bnxt_re_unlock_cqs(qp, flags);
2538 
2539 		if (sq->hwq.prod != sq->hwq.cons)
2540 			bnxt_re_handle_cqn(&qp->scq->qplib_cq);
2541 
2542 		if (qp->rcq && (qp->rcq != qp->scq) &&
2543 		    (rq->hwq.prod != rq->hwq.cons))
2544 			bnxt_re_handle_cqn(&qp->rcq->qplib_cq);
2545 	}
2546 
2547 	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2548 		dev_dbg(rdev_to_dev(qp->rdev),
2549 			"Move QP = %p out of flush list\n", qp);
2550 		flags = bnxt_re_lock_cqs(qp);
2551 		bnxt_qplib_clean_qp(&qp->qplib_qp);
2552 		bnxt_re_unlock_cqs(qp, flags);
2553 	}
2554 }
2555 
ib_modify_qp_is_ok_compat(enum ib_qp_state cur_state,enum ib_qp_state next_state,enum ib_qp_type type,enum ib_qp_attr_mask mask)2556 bool ib_modify_qp_is_ok_compat(enum ib_qp_state cur_state,
2557 			       enum ib_qp_state next_state,
2558 			       enum ib_qp_type type,
2559 			       enum ib_qp_attr_mask mask)
2560 {
2561 		return (ib_modify_qp_is_ok(cur_state, next_state,
2562 					   type, mask));
2563 }
2564 
bnxt_re_modify_qp(struct ib_qp * ib_qp,struct ib_qp_attr * qp_attr,int qp_attr_mask,struct ib_udata * udata)2565 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2566 		      int qp_attr_mask, struct ib_udata *udata)
2567 {
2568 	enum ib_qp_state curr_qp_state, new_qp_state;
2569 	struct bnxt_re_modify_qp_ex_resp resp = {};
2570 	struct bnxt_re_modify_qp_ex_req ureq = {};
2571 	struct bnxt_qplib_dev_attr *dev_attr;
2572 	struct bnxt_qplib_ppp *ppp = NULL;
2573 	struct bnxt_re_dev *rdev;
2574 	struct bnxt_re_qp *qp;
2575 	struct ib_gid_attr *sgid_attr;
2576 	struct ib_gid_attr gid_attr;
2577 	union ib_gid sgid, *gid_ptr = NULL;
2578 	u8 nw_type;
2579 	int rc, entries, status;
2580 	bool is_copy_to_udata = false;
2581 	bool is_qpmtu_high = false;
2582 
2583 	qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp);
2584 	rdev = qp->rdev;
2585 	dev_attr = rdev->dev_attr;
2586 
2587 	qp->qplib_qp.modify_flags = 0;
2588 	ppp = &qp->qplib_qp.ppp;
2589 	if (qp_attr_mask & IB_QP_STATE) {
2590 		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
2591 		new_qp_state = qp_attr->qp_state;
2592 		if (!ib_modify_qp_is_ok_compat(curr_qp_state, new_qp_state,
2593 					       ib_qp->qp_type, qp_attr_mask)) {
2594 			dev_err(rdev_to_dev(rdev),"invalid attribute mask=0x%x"
2595 				" specified for qpn=0x%x of type=0x%x"
2596 				" current_qp_state=0x%x, new_qp_state=0x%x\n",
2597 				qp_attr_mask, ib_qp->qp_num, ib_qp->qp_type,
2598 				curr_qp_state, new_qp_state);
2599 			return -EINVAL;
2600 		}
2601 		dev_dbg(rdev_to_dev(rdev), "%s:%d INFO attribute mask=0x%x qpn=0x%x "
2602 			"of type=0x%x current_qp_state=0x%x, new_qp_state=0x%x\n",
2603 			__func__, __LINE__, qp_attr_mask, ib_qp->qp_num,
2604 			ib_qp->qp_type, curr_qp_state, new_qp_state);
2605 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
2606 		qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
2607 
2608 		if (udata && curr_qp_state == IB_QPS_RESET &&
2609 		    new_qp_state == IB_QPS_INIT) {
2610 			if (!ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
2611 				if (ureq.comp_mask &
2612 				    BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN_MASK) {
2613 					ppp->req = BNXT_QPLIB_PPP_REQ;
2614 					ppp->dpi = ureq.dpi;
2615 				}
2616 			}
2617 		}
2618 	}
2619 	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
2620 		qp->qplib_qp.modify_flags |=
2621 				CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
2622 		qp->qplib_qp.en_sqd_async_notify = true;
2623 	}
2624 	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
2625 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
2626 		qp->qplib_qp.access =
2627 			__from_ib_access_flags(qp_attr->qp_access_flags);
2628 		/* LOCAL_WRITE access must be set to allow RC receive */
2629 		qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
2630 		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
2631 		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
2632 	}
2633 	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
2634 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
2635 		qp->qplib_qp.pkey_index = qp_attr->pkey_index;
2636 	}
2637 	if (qp_attr_mask & IB_QP_QKEY) {
2638 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
2639 		qp->qplib_qp.qkey = qp_attr->qkey;
2640 	}
2641 	if (qp_attr_mask & IB_QP_AV) {
2642 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
2643 				     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
2644 				     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
2645 				     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
2646 				     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
2647 				     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
2648 				     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
2649 		memcpy(qp->qplib_qp.ah.dgid.data, qp_attr->ah_attr.grh.dgid.raw,
2650 		       sizeof(qp->qplib_qp.ah.dgid.data));
2651 		qp->qplib_qp.ah.flow_label = qp_attr->ah_attr.grh.flow_label;
2652 		qp->qplib_qp.ah.sgid_index = _get_sgid_index(rdev,
2653 						qp_attr->ah_attr.grh.sgid_index);
2654 		qp->qplib_qp.ah.host_sgid_index = qp_attr->ah_attr.grh.sgid_index;
2655 		qp->qplib_qp.ah.hop_limit = qp_attr->ah_attr.grh.hop_limit;
2656 		qp->qplib_qp.ah.traffic_class =
2657 					qp_attr->ah_attr.grh.traffic_class;
2658 		qp->qplib_qp.ah.sl = qp_attr->ah_attr.sl;
2659 		ether_addr_copy(qp->qplib_qp.ah.dmac, ROCE_DMAC(&qp_attr->ah_attr));
2660 		sgid_attr = &gid_attr;
2661 		status = bnxt_re_get_cached_gid(&rdev->ibdev, 1,
2662 						qp_attr->ah_attr.grh.sgid_index,
2663 						&sgid, &sgid_attr,
2664 						&qp_attr->ah_attr.grh, NULL);
2665 		if (!status)
2666 			if_rele(sgid_attr->ndev);
2667 		gid_ptr = &sgid;
2668 		if (sgid_attr->ndev) {
2669 			memcpy(qp->qplib_qp.smac, rdev->dev_addr,
2670 			       ETH_ALEN);
2671 			nw_type = bnxt_re_gid_to_network_type(sgid_attr, &sgid);
2672 			dev_dbg(rdev_to_dev(rdev),
2673 				 "Connection using the nw_type %d\n", nw_type);
2674 			switch (nw_type) {
2675 			case RDMA_NETWORK_IPV4:
2676 				qp->qplib_qp.nw_type =
2677 					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
2678 				break;
2679 			case RDMA_NETWORK_IPV6:
2680 				qp->qplib_qp.nw_type =
2681 					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
2682 				break;
2683 			default:
2684 				qp->qplib_qp.nw_type =
2685 					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
2686 				break;
2687 			}
2688 		}
2689 		memcpy(&qp->qp_info_entry.sgid, gid_ptr, sizeof(qp->qp_info_entry.sgid));
2690 	}
2691 
2692 	/* MTU settings allowed only during INIT -> RTR */
2693 	if (qp_attr->qp_state == IB_QPS_RTR) {
2694 		bnxt_re_init_qpmtu(qp, rdev->netdev->if_mtu, qp_attr_mask, qp_attr,
2695 				   &is_qpmtu_high);
2696 		if (udata && !ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
2697 			if (ureq.comp_mask & BNXT_RE_COMP_MASK_MQP_EX_PATH_MTU_MASK) {
2698 				resp.comp_mask |= BNXT_RE_COMP_MASK_MQP_EX_PATH_MTU_MASK;
2699 				resp.path_mtu = qp->qplib_qp.mtu;
2700 				is_copy_to_udata = true;
2701 			} else if (is_qpmtu_high) {
2702 				dev_err(rdev_to_dev(rdev), "qp %#x invalid mtu\n",
2703 					qp->qplib_qp.id);
2704 				return -EINVAL;
2705 			}
2706 		}
2707 	}
2708 
2709 	if (qp_attr_mask & IB_QP_TIMEOUT) {
2710 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
2711 		qp->qplib_qp.timeout = qp_attr->timeout;
2712 	}
2713 	if (qp_attr_mask & IB_QP_RETRY_CNT) {
2714 		qp->qplib_qp.modify_flags |=
2715 				CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
2716 		qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
2717 	}
2718 	if (qp_attr_mask & IB_QP_RNR_RETRY) {
2719 		qp->qplib_qp.modify_flags |=
2720 				CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
2721 		qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
2722 	}
2723 	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
2724 		qp->qplib_qp.modify_flags |=
2725 				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
2726 		qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
2727 	}
2728 	if (qp_attr_mask & IB_QP_RQ_PSN) {
2729 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
2730 		qp->qplib_qp.rq.psn = qp_attr->rq_psn;
2731 	}
2732 	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2733 		qp->qplib_qp.modify_flags |=
2734 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
2735 		/* Cap the max_rd_atomic to device max */
2736 		if (qp_attr->max_rd_atomic > dev_attr->max_qp_rd_atom)
2737 			dev_dbg(rdev_to_dev(rdev),
2738 				"max_rd_atomic requested %d is > device max %d\n",
2739 				qp_attr->max_rd_atomic,
2740 				dev_attr->max_qp_rd_atom);
2741 		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
2742 						   dev_attr->max_qp_rd_atom);
2743 	}
2744 	if (qp_attr_mask & IB_QP_SQ_PSN) {
2745 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
2746 		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
2747 	}
2748 	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2749 		if (qp_attr->max_dest_rd_atomic >
2750 		    dev_attr->max_qp_init_rd_atom) {
2751 			dev_err(rdev_to_dev(rdev),
2752 				"max_dest_rd_atomic requested %d is > device max %d\n",
2753 				qp_attr->max_dest_rd_atomic,
2754 				dev_attr->max_qp_init_rd_atom);
2755 			return -EINVAL;
2756 		}
2757 		qp->qplib_qp.modify_flags |=
2758 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
2759 		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
2760 	}
2761 	if (qp_attr_mask & IB_QP_CAP) {
2762 		qp->qplib_qp.modify_flags |=
2763 				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
2764 				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
2765 				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
2766 				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
2767 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
2768 		if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
2769 		    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
2770 		    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
2771 		    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
2772 		    (qp_attr->cap.max_inline_data >=
2773 						dev_attr->max_inline_data)) {
2774 			dev_err(rdev_to_dev(rdev),
2775 				"Create QP failed - max exceeded\n");
2776 			return -EINVAL;
2777 		}
2778 		entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
2779 		if (entries > dev_attr->max_qp_wqes)
2780 			entries = dev_attr->max_qp_wqes;
2781 		entries = min_t(u32, entries, dev_attr->max_qp_wqes);
2782 		qp->qplib_qp.sq.max_wqe = entries;
2783 		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
2784 						qp_attr->cap.max_send_wr;
2785 		/*
2786 		 * Reserving one slot for Phantom WQE. Some application can
2787 		 * post one extra entry in this case. Allowing this to avoid
2788 		 * unexpected Queue full condition
2789 		 */
2790 		qp->qplib_qp.sq.q_full_delta -= 1;
2791 		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
2792 		if (qp->qplib_qp.rq.max_wqe) {
2793 			entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
2794 			if (entries > dev_attr->max_qp_wqes)
2795 				entries = dev_attr->max_qp_wqes;
2796 			qp->qplib_qp.rq.max_wqe = entries;
2797 			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
2798 						       qp_attr->cap.max_recv_wr;
2799 			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
2800 		} else {
2801 			/* SRQ was used prior, just ignore the RQ caps */
2802 		}
2803 	}
2804 	if (qp_attr_mask & IB_QP_DEST_QPN) {
2805 		qp->qplib_qp.modify_flags |=
2806 				CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
2807 		qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
2808 	}
2809 
2810 	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2811 	if (rc) {
2812 		dev_err(rdev_to_dev(rdev), "Modify HW QP failed!\n");
2813 		return rc;
2814 	}
2815 	if (qp_attr_mask & IB_QP_STATE)
2816 		bnxt_qplib_manage_flush_qp(qp);
2817 	if (ureq.comp_mask & BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN_MASK &&
2818 	    ppp->st_idx_en & CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_ENABLED) {
2819 		resp.comp_mask |= BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN;
2820 		resp.ppp_st_idx = ppp->st_idx_en >>
2821 				  BNXT_QPLIB_PPP_ST_IDX_SHIFT;
2822 		is_copy_to_udata = true;
2823 	}
2824 
2825 	if (is_copy_to_udata) {
2826 		rc = bnxt_re_copy_to_udata(rdev, &resp,
2827 					   min(udata->outlen, sizeof(resp)),
2828 					   udata);
2829 		if (rc)
2830 			return rc;
2831 	}
2832 
2833 	if (ib_qp->qp_type == IB_QPT_GSI &&
2834 	    rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL &&
2835 	    rdev->gsi_ctx.gsi_sqp)
2836 		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2837 	/*
2838 	 * Update info when qp_info_info
2839 	 */
2840 	bnxt_re_update_qp_info(rdev, qp);
2841 	return rc;
2842 }
2843 
bnxt_re_query_qp(struct ib_qp * ib_qp,struct ib_qp_attr * qp_attr,int qp_attr_mask,struct ib_qp_init_attr * qp_init_attr)2844 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2845 		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
2846 {
2847 	struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp);
2848 	struct bnxt_re_dev *rdev = qp->rdev;
2849 	struct bnxt_qplib_qp *qplib_qp;
2850 	int rc;
2851 
2852 	qplib_qp = kcalloc(1, sizeof(*qplib_qp), GFP_KERNEL);
2853 	if (!qplib_qp)
2854 		return -ENOMEM;
2855 
2856 	qplib_qp->id = qp->qplib_qp.id;
2857 	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
2858 
2859 	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2860 	if (rc) {
2861 		dev_err(rdev_to_dev(rdev), "Query HW QP (0x%x) failed! rc = %d\n",
2862 			qplib_qp->id, rc);
2863 		goto free_mem;
2864 	}
2865 	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2866 	qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2867 	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2868 	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
2869 	qp_attr->pkey_index = qplib_qp->pkey_index;
2870 	qp_attr->qkey = qplib_qp->qkey;
2871 	memcpy(qp_attr->ah_attr.grh.dgid.raw, qplib_qp->ah.dgid.data,
2872 	       sizeof(qplib_qp->ah.dgid.data));
2873 	qp_attr->ah_attr.grh.flow_label = qplib_qp->ah.flow_label;
2874 	qp_attr->ah_attr.grh.sgid_index = qplib_qp->ah.host_sgid_index;
2875 	qp_attr->ah_attr.grh.hop_limit = qplib_qp->ah.hop_limit;
2876 	qp_attr->ah_attr.grh.traffic_class = qplib_qp->ah.traffic_class;
2877 	qp_attr->ah_attr.sl = qplib_qp->ah.sl;
2878 	ether_addr_copy(ROCE_DMAC(&qp_attr->ah_attr), qplib_qp->ah.dmac);
2879 	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2880 	qp_attr->timeout = qplib_qp->timeout;
2881 	qp_attr->retry_cnt = qplib_qp->retry_cnt;
2882 	qp_attr->rnr_retry = qplib_qp->rnr_retry;
2883 	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2884 	qp_attr->rq_psn = qplib_qp->rq.psn;
2885 	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
2886 	qp_attr->sq_psn = qplib_qp->sq.psn;
2887 	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
2888 	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
2889 							IB_SIGNAL_REQ_WR;
2890 	qp_attr->dest_qp_num = qplib_qp->dest_qpn;
2891 
2892 	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
2893 	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
2894 	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
2895 	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
2896 	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
2897 	qp_init_attr->cap = qp_attr->cap;
2898 
2899 free_mem:
2900 	kfree(qplib_qp);
2901 	return rc;
2902 }
2903 
2904 /* Builders */
2905 
2906 /* For Raw, the application is responsible to build the entire packet */
bnxt_re_build_raw_send(const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)2907 static void bnxt_re_build_raw_send(const struct ib_send_wr *wr,
2908 				   struct bnxt_qplib_swqe *wqe)
2909 {
2910 	switch (wr->send_flags) {
2911 	case IB_SEND_IP_CSUM:
2912 		wqe->rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2913 		break;
2914 	default:
2915 		/* Pad HW RoCE iCRC */
2916 		wqe->rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2917 		break;
2918 	}
2919 }
2920 
2921 /* For QP1, the driver must build the entire RoCE (v1/v2) packet hdr
2922  * as according to the sgid and AV
2923  */
bnxt_re_build_qp1_send(struct bnxt_re_qp * qp,const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe,int payload_size)2924 static int bnxt_re_build_qp1_send(struct bnxt_re_qp *qp, const struct ib_send_wr *wr,
2925 				  struct bnxt_qplib_swqe *wqe, int payload_size)
2926 {
2927 	struct bnxt_re_ah *ah = to_bnxt_re(ud_wr(wr)->ah, struct bnxt_re_ah,
2928 					   ibah);
2929 	struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
2930 	struct bnxt_qplib_sge sge;
2931 	int i, rc = 0;
2932 	union ib_gid sgid;
2933 	u16 vlan_id;
2934 	u8 *ptmac;
2935 	void *buf;
2936 
2937 	memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
2938 
2939 	/* Get sgid */
2940 	rc = bnxt_re_query_gid(&qp->rdev->ibdev, 1, qplib_ah->sgid_index, &sgid);
2941 	if (rc)
2942 		return rc;
2943 
2944 	/* ETH */
2945 	qp->qp1_hdr.eth_present = 1;
2946 	ptmac = ah->qplib_ah.dmac;
2947 	memcpy(qp->qp1_hdr.eth.dmac_h, ptmac, 4);
2948 	ptmac += 4;
2949 	memcpy(qp->qp1_hdr.eth.dmac_l, ptmac, 2);
2950 
2951 	ptmac = qp->qplib_qp.smac;
2952 	memcpy(qp->qp1_hdr.eth.smac_h, ptmac, 2);
2953 	ptmac += 2;
2954 	memcpy(qp->qp1_hdr.eth.smac_l, ptmac, 4);
2955 
2956 	qp->qp1_hdr.eth.type = cpu_to_be16(BNXT_QPLIB_ETHTYPE_ROCEV1);
2957 
2958 	/* For vlan, check the sgid for vlan existence */
2959 	vlan_id = rdma_get_vlan_id(&sgid);
2960 	if (vlan_id && vlan_id < 0x1000) {
2961 		qp->qp1_hdr.vlan_present = 1;
2962 		qp->qp1_hdr.eth.type = cpu_to_be16(ETH_P_8021Q);
2963 	}
2964 	/* GRH */
2965 	qp->qp1_hdr.grh_present = 1;
2966 	qp->qp1_hdr.grh.ip_version = 6;
2967 	qp->qp1_hdr.grh.payload_length =
2968 		cpu_to_be16((IB_BTH_BYTES + IB_DETH_BYTES + payload_size + 7)
2969 			    & ~3);
2970 	qp->qp1_hdr.grh.next_header = 0x1b;
2971 	memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
2972 	memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
2973 	       sizeof(sgid));
2974 
2975 	/* BTH */
2976 	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2977 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2978 		qp->qp1_hdr.immediate_present = 1;
2979 	} else {
2980 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2981 	}
2982 	if (wr->send_flags & IB_SEND_SOLICITED)
2983 		qp->qp1_hdr.bth.solicited_event = 1;
2984 	qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2985 	/* P_key for QP1 is for all members */
2986 	qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2987 	qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2988 	qp->qp1_hdr.bth.ack_req = 0;
2989 	qp->send_psn++;
2990 	qp->send_psn &= BTH_PSN_MASK;
2991 	qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2992 	/* DETH */
2993 	/* Use the priviledged Q_Key for QP1 */
2994 	qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2995 	qp->qp1_hdr.deth.source_qpn = IB_QP1;
2996 
2997 	/* Pack the QP1 to the transmit buffer */
2998 	buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2999 	if (!buf) {
3000 		dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!\n");
3001 		rc = -ENOMEM;
3002 	}
3003 	for (i = wqe->num_sge; i; i--) {
3004 		wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
3005 		wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
3006 		wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
3007 	}
3008 	wqe->sg_list[0].addr = sge.addr;
3009 	wqe->sg_list[0].lkey = sge.lkey;
3010 	wqe->sg_list[0].size = sge.size;
3011 	wqe->num_sge++;
3012 
3013 	return rc;
3014 }
3015 
bnxt_re_build_gsi_send(struct bnxt_re_qp * qp,const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)3016 static int bnxt_re_build_gsi_send(struct bnxt_re_qp *qp,
3017 				  const struct ib_send_wr *wr,
3018 				  struct bnxt_qplib_swqe *wqe)
3019 {
3020 	struct bnxt_re_dev *rdev;
3021 	int rc, indx, len = 0;
3022 
3023 	rdev = qp->rdev;
3024 
3025 	/* Mode UD is applicable to Gen P5 only */
3026 	if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD)
3027 		return 0;
3028 
3029 	for (indx = 0; indx < wr->num_sge; indx++) {
3030 		wqe->sg_list[indx].addr = wr->sg_list[indx].addr;
3031 		wqe->sg_list[indx].lkey = wr->sg_list[indx].lkey;
3032 		wqe->sg_list[indx].size = wr->sg_list[indx].length;
3033 		len += wr->sg_list[indx].length;
3034 	}
3035 	rc = bnxt_re_build_qp1_send(qp, wr, wqe, len);
3036 	wqe->rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
3037 
3038 	return rc;
3039 }
3040 
3041 /* For the MAD layer, it only provides the recv SGE the size of
3042    ib_grh + MAD datagram.  No Ethernet headers, Ethertype, BTH, DETH,
3043    nor RoCE iCRC.  The Cu+ solution must provide buffer for the entire
3044    receive packet (334 bytes) with no VLAN and then copy the GRH
3045    and the MAD datagram out to the provided SGE.
3046 */
3047 
bnxt_re_build_qp1_recv(struct bnxt_re_qp * qp,const struct ib_recv_wr * wr,struct bnxt_qplib_swqe * wqe)3048 static int bnxt_re_build_qp1_recv(struct bnxt_re_qp *qp,
3049 				  const struct ib_recv_wr *wr,
3050 				  struct bnxt_qplib_swqe *wqe)
3051 {
3052 	struct bnxt_re_dev *rdev = qp->rdev;
3053 	struct bnxt_qplib_sge ref, sge;
3054 	u8 udp_hdr_size = 0;
3055 	u8 ip_hdr_size = 0;
3056 	int rc = 0;
3057 	int size;
3058 
3059 	if (bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge)) {
3060 		/* Create 5 SGEs as according to the following:
3061 		 * Ethernet header (14)
3062 		 * ib_grh (40) - as provided from the wr
3063 		 * ib_bth + ib_deth + UDP(RoCE v2 only)  (28)
3064 		 * MAD (256) - as provided from the wr
3065 		 * iCRC (4)
3066 		 */
3067 
3068 		/* Set RoCE v2 header size and offsets */
3069 		if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ROCE_V2_IPV4)
3070 			ip_hdr_size = 20;
3071 		if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_ROCE_V1)
3072 			udp_hdr_size = 8;
3073 
3074 		/* Save the reference from ULP */
3075 		ref.addr = wr->sg_list[0].addr;
3076 		ref.lkey = wr->sg_list[0].lkey;
3077 		ref.size = wr->sg_list[0].length;
3078 
3079 		/* SGE 1 */
3080 		size = sge.size;
3081 		wqe->sg_list[0].addr = sge.addr;
3082 		wqe->sg_list[0].lkey = sge.lkey;
3083 		wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE;
3084 		size -= wqe->sg_list[0].size;
3085 		if (size <= 0) {
3086 			dev_err(rdev_to_dev(qp->rdev),"QP1 rq buffer is empty!\n");
3087 			rc = -ENOMEM;
3088 			goto done;
3089 		}
3090 		sge.size = (u32)size;
3091 		sge.addr += wqe->sg_list[0].size;
3092 
3093 		/* SGE 2 */
3094 		/* In case of RoCE v2 ipv4 lower 20 bytes should have IP hdr */
3095 		wqe->sg_list[1].addr = ref.addr + ip_hdr_size;
3096 		wqe->sg_list[1].lkey = ref.lkey;
3097 		wqe->sg_list[1].size = sizeof(struct ib_grh) - ip_hdr_size;
3098 		ref.size -= wqe->sg_list[1].size;
3099 		if (ref.size <= 0) {
3100 			dev_err(rdev_to_dev(qp->rdev),
3101 				"QP1 ref buffer is empty!\n");
3102 			rc = -ENOMEM;
3103 			goto done;
3104 		}
3105 		ref.addr += wqe->sg_list[1].size + ip_hdr_size;
3106 
3107 		/* SGE 3 */
3108 		wqe->sg_list[2].addr = sge.addr;
3109 		wqe->sg_list[2].lkey = sge.lkey;
3110 		wqe->sg_list[2].size = BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE +
3111 				       udp_hdr_size;
3112 		size -= wqe->sg_list[2].size;
3113 		if (size <= 0) {
3114 			dev_err(rdev_to_dev(qp->rdev),
3115 				"QP1 rq buffer is empty!\n");
3116 			rc = -ENOMEM;
3117 			goto done;
3118 		}
3119 		sge.size = (u32)size;
3120 		sge.addr += wqe->sg_list[2].size;
3121 
3122 		/* SGE 4 */
3123 		wqe->sg_list[3].addr = ref.addr;
3124 		wqe->sg_list[3].lkey = ref.lkey;
3125 		wqe->sg_list[3].size = ref.size;
3126 		ref.size -= wqe->sg_list[3].size;
3127 		if (ref.size) {
3128 			dev_err(rdev_to_dev(qp->rdev),
3129 				"QP1 ref buffer is incorrect!\n");
3130 			rc = -ENOMEM;
3131 			goto done;
3132 		}
3133 		/* SGE 5 */
3134 		wqe->sg_list[4].addr = sge.addr;
3135 		wqe->sg_list[4].lkey = sge.lkey;
3136 		wqe->sg_list[4].size = sge.size;
3137 		size -= wqe->sg_list[4].size;
3138 		if (size) {
3139 			dev_err(rdev_to_dev(qp->rdev),
3140 				"QP1 rq buffer is incorrect!\n");
3141 			rc = -ENOMEM;
3142 			goto done;
3143 		}
3144 		sge.size = (u32)size;
3145 		wqe->num_sge = 5;
3146 	} else {
3147 		dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!\n");
3148 		rc = -ENOMEM;
3149 	}
3150 done:
3151 	return rc;
3152 }
3153 
bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp * qp,const struct ib_recv_wr * wr,struct bnxt_qplib_swqe * wqe)3154 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
3155 					    const struct ib_recv_wr *wr,
3156 					    struct bnxt_qplib_swqe *wqe)
3157 {
3158 	struct bnxt_re_sqp_entries *sqp_entry;
3159 	struct bnxt_qplib_sge sge;
3160 	struct bnxt_re_dev *rdev;
3161 	u32 rq_prod_index;
3162 	int rc = 0;
3163 
3164 	rdev = qp->rdev;
3165 
3166 	rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
3167 
3168 	if (bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge)) {
3169 		/* Create 1 SGE to receive the entire
3170 		 * ethernet packet
3171 		 */
3172 		/* SGE 1 */
3173 		wqe->sg_list[0].addr = sge.addr;
3174 		/* TODO check the lkey to be used */
3175 		wqe->sg_list[0].lkey = sge.lkey;
3176 		wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
3177 		if (sge.size < wqe->sg_list[0].size) {
3178 			dev_err(rdev_to_dev(qp->rdev),
3179 				"QP1 rq buffer is empty!\n");
3180 			rc = -ENOMEM;
3181 			goto done;
3182 		}
3183 
3184 		sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
3185 		sqp_entry->sge.addr = wr->sg_list[0].addr;
3186 		sqp_entry->sge.lkey = wr->sg_list[0].lkey;
3187 		sqp_entry->sge.size = wr->sg_list[0].length;
3188 		/* Store the wrid for reporting completion */
3189 		sqp_entry->wrid = wqe->wr_id;
3190 		/* change the wqe->wrid to table index */
3191 		wqe->wr_id = rq_prod_index;
3192 	}
3193 done:
3194 	return rc;
3195 }
3196 
is_ud_qp(struct bnxt_re_qp * qp)3197 static bool is_ud_qp(struct bnxt_re_qp *qp)
3198 {
3199 	return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
3200 		qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
3201 }
3202 
bnxt_re_build_send_wqe(struct bnxt_re_qp * qp,const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)3203 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
3204 				  const struct ib_send_wr *wr,
3205 				  struct bnxt_qplib_swqe *wqe)
3206 {
3207 	struct bnxt_re_ah *ah = NULL;
3208 
3209 	if(is_ud_qp(qp)) {
3210 		ah = to_bnxt_re(ud_wr(wr)->ah, struct bnxt_re_ah, ibah);
3211 		wqe->send.q_key = ud_wr(wr)->remote_qkey;
3212 		wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
3213 		wqe->send.avid = ah->qplib_ah.id;
3214 	}
3215 	switch (wr->opcode) {
3216 	case IB_WR_SEND:
3217 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
3218 		break;
3219 	case IB_WR_SEND_WITH_IMM:
3220 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
3221 		wqe->send.imm_data = wr->ex.imm_data;
3222 		break;
3223 	case IB_WR_SEND_WITH_INV:
3224 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
3225 		wqe->send.inv_key = wr->ex.invalidate_rkey;
3226 		break;
3227 	default:
3228 		dev_err(rdev_to_dev(qp->rdev), "%s Invalid opcode %d!\n",
3229 			__func__, wr->opcode);
3230 		return -EINVAL;
3231 	}
3232 	if (wr->send_flags & IB_SEND_SIGNALED)
3233 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
3234 	if (wr->send_flags & IB_SEND_FENCE)
3235 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
3236 	if (wr->send_flags & IB_SEND_SOLICITED)
3237 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
3238 	if (wr->send_flags & IB_SEND_INLINE)
3239 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
3240 
3241 	return 0;
3242 }
3243 
bnxt_re_build_rdma_wqe(const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)3244 static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
3245 				  struct bnxt_qplib_swqe *wqe)
3246 {
3247 	switch (wr->opcode) {
3248 	case IB_WR_RDMA_WRITE:
3249 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
3250 		break;
3251 	case IB_WR_RDMA_WRITE_WITH_IMM:
3252 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
3253 		wqe->rdma.imm_data = wr->ex.imm_data;
3254 		break;
3255 	case IB_WR_RDMA_READ:
3256 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
3257 		wqe->rdma.inv_key = wr->ex.invalidate_rkey;
3258 		break;
3259 	default:
3260 		return -EINVAL;
3261 	}
3262 	wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
3263 	wqe->rdma.r_key = rdma_wr(wr)->rkey;
3264 	if (wr->send_flags & IB_SEND_SIGNALED)
3265 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
3266 	if (wr->send_flags & IB_SEND_FENCE)
3267 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
3268 	if (wr->send_flags & IB_SEND_SOLICITED)
3269 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
3270 	if (wr->send_flags & IB_SEND_INLINE)
3271 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
3272 
3273 	return 0;
3274 }
3275 
bnxt_re_build_atomic_wqe(const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)3276 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
3277 				    struct bnxt_qplib_swqe *wqe)
3278 {
3279 	switch (wr->opcode) {
3280 	case IB_WR_ATOMIC_CMP_AND_SWP:
3281 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
3282 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
3283 		wqe->atomic.swap_data = atomic_wr(wr)->swap;
3284 		break;
3285 	case IB_WR_ATOMIC_FETCH_AND_ADD:
3286 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
3287 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
3288 		break;
3289 	default:
3290 		return -EINVAL;
3291 	}
3292 	wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
3293 	wqe->atomic.r_key = atomic_wr(wr)->rkey;
3294 	if (wr->send_flags & IB_SEND_SIGNALED)
3295 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
3296 	if (wr->send_flags & IB_SEND_FENCE)
3297 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
3298 	if (wr->send_flags & IB_SEND_SOLICITED)
3299 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
3300 	return 0;
3301 }
3302 
bnxt_re_build_inv_wqe(const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)3303 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
3304 				 struct bnxt_qplib_swqe *wqe)
3305 {
3306 	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
3307 	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
3308 	if (wr->send_flags & IB_SEND_SIGNALED)
3309 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
3310 	if (wr->send_flags & IB_SEND_FENCE)
3311 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
3312 	if (wr->send_flags & IB_SEND_SOLICITED)
3313 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
3314 
3315 	return 0;
3316 }
3317 
bnxt_re_build_reg_wqe(const struct ib_reg_wr * wr,struct bnxt_qplib_swqe * wqe)3318 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
3319 				 struct bnxt_qplib_swqe *wqe)
3320 {
3321 	struct bnxt_re_mr *mr = to_bnxt_re(wr->mr, struct bnxt_re_mr, ib_mr);
3322 	struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
3323 	int reg_len, i, access = wr->access;
3324 
3325 	if (mr->npages > qplib_frpl->max_pg_ptrs) {
3326 		dev_err_ratelimited(rdev_to_dev(mr->rdev),
3327 			" %s: failed npages %d > %d\n", __func__,
3328 			mr->npages, qplib_frpl->max_pg_ptrs);
3329 		return -EINVAL;
3330 	}
3331 
3332 	wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
3333 	wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
3334 	wqe->frmr.levels = qplib_frpl->hwq.level;
3335 	wqe->frmr.page_list = mr->pages;
3336 	wqe->frmr.page_list_len = mr->npages;
3337 	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
3338 
3339 	if (wr->wr.send_flags & IB_SEND_SIGNALED)
3340 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
3341 	if (access & IB_ACCESS_LOCAL_WRITE)
3342 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
3343 	if (access & IB_ACCESS_REMOTE_READ)
3344 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
3345 	if (access & IB_ACCESS_REMOTE_WRITE)
3346 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
3347 	if (access & IB_ACCESS_REMOTE_ATOMIC)
3348 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
3349 	if (access & IB_ACCESS_MW_BIND)
3350 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
3351 
3352 	/* TODO: OFED provides the rkey of the MR instead of the lkey */
3353 	wqe->frmr.l_key = wr->key;
3354 	wqe->frmr.length = wr->mr->length;
3355 	wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
3356 	wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
3357 	wqe->frmr.va = wr->mr->iova;
3358 	reg_len = wqe->frmr.page_list_len * wr->mr->page_size;
3359 
3360 	if (wqe->frmr.length > reg_len) {
3361 		dev_err_ratelimited(rdev_to_dev(mr->rdev),
3362 				    "%s: bnxt_re_mr 0x%px  len (%d > %d)\n",
3363 				    __func__, (void *)mr, wqe->frmr.length,
3364 				    reg_len);
3365 
3366 		for (i = 0; i < mr->npages; i++)
3367 			dev_dbg(rdev_to_dev(mr->rdev),
3368 				"%s: build_reg_wqe page[%d] = 0x%llx\n",
3369 				__func__, i, mr->pages[i]);
3370 
3371 		return -EINVAL;
3372 	}
3373 
3374 	return 0;
3375 }
3376 
bnxt_re_set_sg_list(const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)3377 static void bnxt_re_set_sg_list(const struct ib_send_wr *wr,
3378 				struct bnxt_qplib_swqe *wqe)
3379 {
3380 	wqe->sg_list = (struct bnxt_qplib_sge *)wr->sg_list;
3381 	wqe->num_sge = wr->num_sge;
3382 }
3383 
bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp * qp)3384 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
3385 {
3386 	if ((qp->ib_qp.qp_type == IB_QPT_UD || qp->ib_qp.qp_type == IB_QPT_GSI ||
3387 	    qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
3388 	    qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
3389 		int qp_attr_mask;
3390 		struct ib_qp_attr qp_attr;
3391 
3392 		qp_attr_mask = IB_QP_STATE;
3393 		qp_attr.qp_state = IB_QPS_RTS;
3394 		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
3395 		qp->qplib_qp.wqe_cnt = 0;
3396 	}
3397 }
3398 
bnxt_re_post_send_shadow_qp(struct bnxt_re_dev * rdev,struct bnxt_re_qp * qp,const struct ib_send_wr * wr)3399 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
3400 				       struct bnxt_re_qp *qp,
3401 				       const struct ib_send_wr *wr)
3402 {
3403 	struct bnxt_qplib_swqe wqe;
3404 	unsigned long flags;
3405 	int rc = 0;
3406 
3407 	spin_lock_irqsave(&qp->sq_lock, flags);
3408 	while (wr) {
3409 		/* House keeping */
3410 		memset(&wqe, 0, sizeof(wqe));
3411 		/* Common */
3412 		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
3413 			dev_err(rdev_to_dev(rdev),
3414 				"Limit exceeded for Send SGEs\n");
3415 			rc = -EINVAL;
3416 			break;
3417 		}
3418 
3419 		bnxt_re_set_sg_list(wr, &wqe);
3420 		wqe.wr_id = wr->wr_id;
3421 		wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
3422 		rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
3423 		if (rc)
3424 			break;
3425 
3426 		rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
3427 		if (rc) {
3428 			dev_err(rdev_to_dev(rdev),
3429 				"bad_wr seen with opcode = 0x%x rc = %d\n",
3430 				wr->opcode, rc);
3431 			break;
3432 		}
3433 		wr = wr->next;
3434 	}
3435 	bnxt_qplib_post_send_db(&qp->qplib_qp);
3436 	bnxt_ud_qp_hw_stall_workaround(qp);
3437 	spin_unlock_irqrestore(&qp->sq_lock, flags);
3438 	return rc;
3439 }
3440 
bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe * wqe)3441 static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
3442 {
3443 	/* Need unconditional fence for non-wire memory opcode
3444 	 * to work as expected.
3445 	 */
3446 	if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
3447 	    wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
3448 	    wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
3449 	    wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
3450 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
3451 }
3452 
bnxt_re_post_send(struct ib_qp * ib_qp,const struct ib_send_wr * wr,const struct ib_send_wr ** bad_wr)3453 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
3454 		      const struct ib_send_wr **bad_wr)
3455 {
3456 	struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp);
3457 	struct bnxt_qplib_sge sge[6];
3458 	struct bnxt_qplib_swqe wqe;
3459 	struct bnxt_re_dev *rdev;
3460 	unsigned long flags;
3461 	int rc = 0;
3462 
3463 	rdev = qp->rdev;
3464 	spin_lock_irqsave(&qp->sq_lock, flags);
3465 	while (wr) {
3466 		/* House keeping */
3467 		memset(&wqe, 0, sizeof(wqe));
3468 		/* Common */
3469 		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
3470 			dev_err(rdev_to_dev(rdev),
3471 				"Limit exceeded for Send SGEs\n");
3472 			rc = -EINVAL;
3473 			goto bad;
3474 		}
3475 
3476 		bnxt_re_set_sg_list(wr, &wqe);
3477 		wqe.wr_id = wr->wr_id;
3478 
3479 		switch (wr->opcode) {
3480 		case IB_WR_SEND:
3481 		case IB_WR_SEND_WITH_IMM:
3482 			if (ib_qp->qp_type == IB_QPT_GSI &&
3483 			    rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
3484 				memset(sge, 0, sizeof(sge));
3485 				wqe.sg_list = sge;
3486 				rc = bnxt_re_build_gsi_send(qp, wr, &wqe);
3487 				if (rc)
3488 					goto bad;
3489 			} else if (ib_qp->qp_type == IB_QPT_RAW_ETHERTYPE) {
3490 				bnxt_re_build_raw_send(wr, &wqe);
3491 			}
3492 			switch (wr->send_flags) {
3493 			case IB_SEND_IP_CSUM:
3494 				wqe.rawqp1.lflags |=
3495 					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
3496 				break;
3497 			default:
3498 				break;
3499 			}
3500 			fallthrough;
3501 		case IB_WR_SEND_WITH_INV:
3502 			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
3503 			break;
3504 		case IB_WR_RDMA_WRITE:
3505 		case IB_WR_RDMA_WRITE_WITH_IMM:
3506 		case IB_WR_RDMA_READ:
3507 			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
3508 			break;
3509 		case IB_WR_ATOMIC_CMP_AND_SWP:
3510 		case IB_WR_ATOMIC_FETCH_AND_ADD:
3511 			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
3512 			break;
3513 		case IB_WR_RDMA_READ_WITH_INV:
3514 			dev_err(rdev_to_dev(rdev),
3515 				"RDMA Read with Invalidate is not supported\n");
3516 			rc = -EINVAL;
3517 			goto bad;
3518 		case IB_WR_LOCAL_INV:
3519 			rc = bnxt_re_build_inv_wqe(wr, &wqe);
3520 			break;
3521 		case IB_WR_REG_MR:
3522 			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
3523 			break;
3524 		default:
3525 			/* Unsupported WRs */
3526 			dev_err(rdev_to_dev(rdev),
3527 				"WR (0x%x) is not supported\n", wr->opcode);
3528 			rc = -EINVAL;
3529 			goto bad;
3530 		}
3531 
3532 		if (likely(!rc)) {
3533 			if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
3534 				bnxt_re_legacy_set_uc_fence(&wqe);
3535 			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
3536 		}
3537 bad:
3538 		if (unlikely(rc)) {
3539 			dev_err(rdev_to_dev(rdev),
3540 				"bad_wr seen with opcode = 0x%x\n", wr->opcode);
3541 			*bad_wr = wr;
3542 			break;
3543 		}
3544 		wr = wr->next;
3545 	}
3546 	bnxt_qplib_post_send_db(&qp->qplib_qp);
3547 	if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
3548 		bnxt_ud_qp_hw_stall_workaround(qp);
3549 	spin_unlock_irqrestore(&qp->sq_lock, flags);
3550 
3551 	return rc;
3552 }
3553 
bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev * rdev,struct bnxt_re_qp * qp,struct ib_recv_wr * wr)3554 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
3555 				struct bnxt_re_qp *qp,
3556 				struct ib_recv_wr *wr)
3557 {
3558 	struct bnxt_qplib_swqe wqe;
3559 	int rc = 0;
3560 
3561 	/* rq lock can be pardoned here. */
3562 	while (wr) {
3563 		/* House keeping */
3564 		memset(&wqe, 0, sizeof(wqe));
3565 		/* Common */
3566 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
3567 			dev_err(rdev_to_dev(rdev),
3568 				"Limit exceeded for Receive SGEs\n");
3569 			rc = -EINVAL;
3570 			goto bad;
3571 		}
3572 
3573 		wqe.sg_list = (struct bnxt_qplib_sge *)wr->sg_list;
3574 		wqe.num_sge = wr->num_sge;
3575 		wqe.wr_id = wr->wr_id;
3576 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
3577 		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
3578 bad:
3579 		if (rc) {
3580 			dev_err(rdev_to_dev(rdev),
3581 				"bad_wr seen with RQ post\n");
3582 			break;
3583 		}
3584 		wr = wr->next;
3585 	}
3586 	bnxt_qplib_post_recv_db(&qp->qplib_qp);
3587 	return rc;
3588 }
3589 
bnxt_re_build_gsi_recv(struct bnxt_re_qp * qp,const struct ib_recv_wr * wr,struct bnxt_qplib_swqe * wqe)3590 static int bnxt_re_build_gsi_recv(struct bnxt_re_qp *qp,
3591 				  const struct ib_recv_wr *wr,
3592 				  struct bnxt_qplib_swqe *wqe)
3593 {
3594 	struct bnxt_re_dev *rdev = qp->rdev;
3595 	int rc = 0;
3596 
3597 	if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL)
3598 		rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, wqe);
3599 	else
3600 		rc = bnxt_re_build_qp1_recv(qp, wr, wqe);
3601 
3602 	return rc;
3603 }
3604 
bnxt_re_post_recv(struct ib_qp * ib_qp,const struct ib_recv_wr * wr,const struct ib_recv_wr ** bad_wr)3605 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
3606 		      const struct ib_recv_wr **bad_wr)
3607 {
3608 	struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp);
3609 	struct bnxt_qplib_sge sge[6];
3610 	struct bnxt_qplib_swqe wqe;
3611 	unsigned long flags;
3612 	u32 count = 0;
3613 	int rc = 0;
3614 
3615 	spin_lock_irqsave(&qp->rq_lock, flags);
3616 	while (wr) {
3617 		memset(&wqe, 0, sizeof(wqe));
3618 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
3619 			dev_err(rdev_to_dev(qp->rdev),
3620 				"Limit exceeded for Receive SGEs\n");
3621 			rc = -EINVAL;
3622 			goto bad;
3623 		}
3624 		wqe.num_sge = wr->num_sge;
3625 		wqe.sg_list = (struct bnxt_qplib_sge *)wr->sg_list;
3626 		wqe.wr_id = wr->wr_id;
3627 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
3628 
3629 		if (ib_qp->qp_type == IB_QPT_GSI &&
3630 		    qp->rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
3631 			memset(sge, 0, sizeof(sge));
3632 			wqe.sg_list = sge;
3633 			rc = bnxt_re_build_gsi_recv(qp, wr, &wqe);
3634 			if (rc)
3635 				goto bad;
3636 		}
3637 		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
3638 bad:
3639 		if (rc) {
3640 			dev_err(rdev_to_dev(qp->rdev),
3641 				"bad_wr seen with RQ post\n");
3642 			*bad_wr = wr;
3643 			break;
3644 		}
3645 		/* Ring DB if the RQEs posted reaches a threshold value */
3646 		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
3647 			bnxt_qplib_post_recv_db(&qp->qplib_qp);
3648 			count = 0;
3649 		}
3650 		wr = wr->next;
3651 	}
3652 
3653 	if (count)
3654 		bnxt_qplib_post_recv_db(&qp->qplib_qp);
3655 	spin_unlock_irqrestore(&qp->rq_lock, flags);
3656 
3657 	return rc;
3658 }
3659 
3660 /* Completion Queues */
bnxt_re_destroy_cq(struct ib_cq * ib_cq,struct ib_udata * udata)3661 void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
3662 {
3663 	struct bnxt_re_cq *cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ibcq);
3664 	struct bnxt_re_dev *rdev = cq->rdev;
3665 	int rc =  0;
3666 
3667 	if (cq->uctx_cq_page) {
3668 		BNXT_RE_CQ_PAGE_LIST_DEL(cq->uctx, cq);
3669 		free_page((u64)cq->uctx_cq_page);
3670 		cq->uctx_cq_page = NULL;
3671 	}
3672 	if (cq->is_dbr_soft_cq && cq->uctx) {
3673 		void *dbr_page;
3674 
3675 		if (cq->uctx->dbr_recov_cq) {
3676 			dbr_page = cq->uctx->dbr_recov_cq_page;
3677 			cq->uctx->dbr_recov_cq_page = NULL;
3678 			cq->uctx->dbr_recov_cq = NULL;
3679 			free_page((unsigned long)dbr_page);
3680 		}
3681 		goto end;
3682 	}
3683 	/* CQ getting destroyed. Set this state for cqn handler */
3684 	spin_lock_bh(&cq->qplib_cq.compl_lock);
3685 	cq->qplib_cq.destroyed = true;
3686 	spin_unlock_bh(&cq->qplib_cq.compl_lock);
3687 	if (ib_cq->poll_ctx == IB_POLL_WORKQUEUE ||
3688 	    ib_cq->poll_ctx == IB_POLL_UNBOUND_WORKQUEUE)
3689 		cancel_work_sync(&ib_cq->work);
3690 
3691 	rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
3692 	if (rc)
3693 		dev_err_ratelimited(rdev_to_dev(rdev),
3694 				   "%s id = %d failed rc = %d\n",
3695 				   __func__, cq->qplib_cq.id, rc);
3696 
3697 	bnxt_re_put_nq(rdev, cq->qplib_cq.nq);
3698 	if (cq->umem && !IS_ERR(cq->umem))
3699 		ib_umem_release(cq->umem);
3700 
3701 	kfree(cq->cql);
3702 	atomic_dec(&rdev->stats.rsors.cq_count);
3703 end:
3704 	return;
3705 }
3706 
3707 static inline struct
__get_cq_from_cq_in(struct ib_cq * cq_in,struct bnxt_re_dev * rdev)3708 bnxt_re_cq *__get_cq_from_cq_in(struct ib_cq *cq_in,
3709 				struct bnxt_re_dev *rdev)
3710 {
3711 	struct bnxt_re_cq *cq;
3712 	cq = container_of(cq_in, struct bnxt_re_cq, ibcq);
3713 	return cq;
3714 }
3715 
bnxt_re_create_cq(struct ib_cq * cq_in,const struct ib_cq_init_attr * attr,struct ib_udata * udata)3716 int bnxt_re_create_cq(struct ib_cq *cq_in,
3717 		      const struct ib_cq_init_attr *attr,
3718 		      struct ib_udata *udata)
3719 {
3720 	struct bnxt_qplib_dev_attr *dev_attr;
3721 	struct bnxt_re_ucontext *uctx = NULL;
3722 	struct ib_ucontext *context = NULL;
3723 	struct bnxt_qplib_cq *qplcq;
3724 	struct bnxt_re_cq_req ureq;
3725 	struct bnxt_re_dev *rdev;
3726 	int rc, entries;
3727 	struct bnxt_re_cq *cq;
3728 	u32 max_active_cqs;
3729 	int cqe = attr->cqe;
3730 
3731 	if (attr->flags)
3732 		return -EOPNOTSUPP;
3733 
3734 	rdev = rdev_from_cq_in(cq_in);
3735 	if (rdev->mod_exit) {
3736 		rc = -EIO;
3737 		dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__);
3738 		goto exit;
3739 	}
3740 	if (udata) {
3741 		uctx = rdma_udata_to_drv_context(udata,
3742 						 struct bnxt_re_ucontext,
3743 						 ibucontext);
3744 		context = &uctx->ibucontext;
3745 	}
3746 	dev_attr = rdev->dev_attr;
3747 
3748 	if (atomic_read(&rdev->stats.rsors.cq_count) >= dev_attr->max_cq) {
3749 		dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded(CQs)\n");
3750 		rc = -EINVAL;
3751 		goto exit;
3752 	}
3753 	/* Validate CQ fields */
3754 	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
3755 		dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded(CQ_WQs)\n");
3756 		rc = -EINVAL;
3757 		goto exit;
3758 	}
3759 
3760 	cq = __get_cq_from_cq_in(cq_in, rdev);
3761 	if (!cq) {
3762 		rc = -ENOMEM;
3763 		goto exit;
3764 	}
3765 	cq->rdev = rdev;
3766 	cq->uctx = uctx;
3767 	qplcq = &cq->qplib_cq;
3768 	qplcq->cq_handle = (u64)qplcq;
3769 	/*
3770 	 * Since CQ is for QP1 is shared with Shadow CQ, the size
3771 	 * should be double the size. There is no way to identify
3772 	 * whether this CQ is for GSI QP. So assuming that the first
3773 	 * CQ created is for QP1
3774 	 */
3775 	if (!udata && !rdev->gsi_ctx.first_cq_created &&
3776 	    rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL) {
3777 		rdev->gsi_ctx.first_cq_created = true;
3778 		/*
3779 		 * Total CQE required for the CQ = CQE for QP1 RQ +
3780 		 * CQE for Shadow QP SQEs + CQE for Shadow QP RQEs.
3781 		 * Max entries of shadow QP SQ and RQ = QP1 RQEs = cqe
3782 		 */
3783 		cqe *= 3;
3784 	}
3785 
3786 	entries = bnxt_re_init_depth(cqe + 1, uctx);
3787 	if (entries > dev_attr->max_cq_wqes + 1)
3788 		entries = dev_attr->max_cq_wqes + 1;
3789 
3790 	qplcq->sginfo.pgshft = PAGE_SHIFT;
3791 	qplcq->sginfo.pgsize = PAGE_SIZE;
3792 	if (udata) {
3793 		if (udata->inlen < sizeof(ureq))
3794 			dev_warn(rdev_to_dev(rdev),
3795 				 "Update the library ulen %d klen %d\n",
3796 				 (unsigned int)udata->inlen,
3797 				 (unsigned int)sizeof(ureq));
3798 
3799 		rc = ib_copy_from_udata(&ureq, udata,
3800 					min(udata->inlen, sizeof(ureq)));
3801 		if (rc)
3802 			goto fail;
3803 
3804 		if (BNXT_RE_IS_DBR_PACING_NOTIFY_CQ(ureq)) {
3805 			cq->is_dbr_soft_cq = true;
3806 			goto success;
3807 		}
3808 
3809 		if (BNXT_RE_IS_DBR_RECOV_CQ(ureq)) {
3810 			void *dbr_page;
3811 			u32 *epoch;
3812 
3813 			dbr_page = (void *)__get_free_page(GFP_KERNEL);
3814 			if (!dbr_page) {
3815 				dev_err(rdev_to_dev(rdev),
3816 					"DBR recov CQ page allocation failed!");
3817 				rc = -ENOMEM;
3818 				goto fail;
3819 			}
3820 
3821 			/* memset the epoch and epoch_ack to 0 */
3822 			epoch = dbr_page;
3823 			epoch[0] = 0x0;
3824 			epoch[1] = 0x0;
3825 
3826 			uctx->dbr_recov_cq = cq;
3827 			uctx->dbr_recov_cq_page = dbr_page;
3828 
3829 			cq->is_dbr_soft_cq = true;
3830 			goto success;
3831 		}
3832 
3833 		cq->umem = ib_umem_get_compat
3834 				      (rdev, context, udata, ureq.cq_va,
3835 				       entries * sizeof(struct cq_base),
3836 				       IB_ACCESS_LOCAL_WRITE, 1);
3837 		if (IS_ERR(cq->umem)) {
3838 			rc = PTR_ERR(cq->umem);
3839 			dev_err(rdev_to_dev(rdev),
3840 				"%s: ib_umem_get failed! rc = %d\n",
3841 				__func__, rc);
3842 			goto fail;
3843 		}
3844 		qplcq->sginfo.sghead = get_ib_umem_sgl(cq->umem,
3845 						       &qplcq->sginfo.nmap);
3846 		qplcq->sginfo.npages = ib_umem_num_pages_compat(cq->umem);
3847 		if (!uctx->dpi.dbr) {
3848 			rc = bnxt_re_get_user_dpi(rdev, uctx);
3849 			if (rc)
3850 				goto c2fail;
3851 		}
3852 		qplcq->dpi = &uctx->dpi;
3853 	} else {
3854 		cq->max_cql = entries > MAX_CQL_PER_POLL ? MAX_CQL_PER_POLL : entries;
3855 		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
3856 				  GFP_KERNEL);
3857 		if (!cq->cql) {
3858 			dev_err(rdev_to_dev(rdev),
3859 				"Allocate CQL for %d failed!\n", cq->max_cql);
3860 			rc = -ENOMEM;
3861 			goto fail;
3862 		}
3863 		qplcq->dpi = &rdev->dpi_privileged;
3864 	}
3865 	/*
3866 	 * Allocating the NQ in a round robin fashion. nq_alloc_cnt is a
3867 	 * used for getting the NQ index.
3868 	 */
3869 	qplcq->max_wqe = entries;
3870 	qplcq->nq = bnxt_re_get_nq(rdev);
3871 	qplcq->cnq_hw_ring_id = qplcq->nq->ring_id;
3872 
3873 	rc = bnxt_qplib_create_cq(&rdev->qplib_res, qplcq);
3874 	if (rc) {
3875 		dev_err(rdev_to_dev(rdev), "Create HW CQ failed!\n");
3876 		goto fail;
3877 	}
3878 
3879 	INIT_LIST_HEAD(&cq->cq_list);
3880 	cq->ibcq.cqe = entries;
3881 	cq->cq_period = qplcq->period;
3882 
3883 	atomic_inc(&rdev->stats.rsors.cq_count);
3884 	max_active_cqs = atomic_read(&rdev->stats.rsors.cq_count);
3885 	if (max_active_cqs > atomic_read(&rdev->stats.rsors.max_cq_count))
3886 		atomic_set(&rdev->stats.rsors.max_cq_count, max_active_cqs);
3887 	spin_lock_init(&cq->cq_lock);
3888 
3889 	if (udata) {
3890 		struct bnxt_re_cq_resp resp;
3891 
3892 		resp.cqid = qplcq->id;
3893 		resp.tail = qplcq->hwq.cons;
3894 		resp.phase = qplcq->period;
3895 		resp.comp_mask = 0;
3896 		resp.dbr = (u64)uctx->dpi.umdbr;
3897 		resp.dpi = uctx->dpi.dpi;
3898 		resp.comp_mask |= BNXT_RE_COMP_MASK_CQ_HAS_DB_INFO;
3899 		/* Copy only on a valid wcpdi */
3900 		if (uctx->wcdpi.dpi) {
3901 			resp.wcdpi = uctx->wcdpi.dpi;
3902 			resp.comp_mask |= BNXT_RE_COMP_MASK_CQ_HAS_WC_DPI;
3903 		}
3904 
3905 		if (_is_chip_p7(rdev->chip_ctx)) {
3906 			cq->uctx_cq_page = (void *)__get_free_page(GFP_KERNEL);
3907 
3908 			if (!cq->uctx_cq_page) {
3909 				dev_err(rdev_to_dev(rdev),
3910 					"CQ page allocation failed!\n");
3911 				bnxt_qplib_destroy_cq(&rdev->qplib_res, qplcq);
3912 				rc = -ENOMEM;
3913 				goto c2fail;
3914 			}
3915 
3916 			resp.uctx_cq_page = (u64)cq->uctx_cq_page;
3917 			resp.comp_mask |= BNXT_RE_COMP_MASK_CQ_HAS_CQ_PAGE;
3918 		}
3919 
3920 		rc = bnxt_re_copy_to_udata(rdev, &resp,
3921 					   min(udata->outlen, sizeof(resp)),
3922 					   udata);
3923 		if (rc) {
3924 			free_page((u64)cq->uctx_cq_page);
3925 			cq->uctx_cq_page = NULL;
3926 			bnxt_qplib_destroy_cq(&rdev->qplib_res, qplcq);
3927 			goto c2fail;
3928 		}
3929 
3930 		if (cq->uctx_cq_page)
3931 			BNXT_RE_CQ_PAGE_LIST_ADD(uctx, cq);
3932 	}
3933 
3934 success:
3935 	return 0;
3936 c2fail:
3937 	if (udata && cq->umem && !IS_ERR(cq->umem))
3938 		ib_umem_release(cq->umem);
3939 fail:
3940 	if (cq) {
3941 		if (cq->cql)
3942 			kfree(cq->cql);
3943 	}
3944 exit:
3945 	return rc;
3946 }
3947 
bnxt_re_modify_cq(struct ib_cq * ib_cq,u16 cq_count,u16 cq_period)3948 int bnxt_re_modify_cq(struct ib_cq *ib_cq, u16 cq_count, u16 cq_period)
3949 {
3950 	struct bnxt_re_cq *cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ibcq);
3951 	struct bnxt_re_dev *rdev = cq->rdev;
3952 	int rc;
3953 
3954 	if ((cq->cq_count != cq_count) || (cq->cq_period != cq_period)) {
3955 		cq->qplib_cq.count = cq_count;
3956 		cq->qplib_cq.period = cq_period;
3957 		rc = bnxt_qplib_modify_cq(&rdev->qplib_res, &cq->qplib_cq);
3958 		if (rc) {
3959 			dev_err(rdev_to_dev(rdev), "Modify HW CQ %#x failed!\n",
3960 				cq->qplib_cq.id);
3961 			return rc;
3962 		}
3963 		/* On success, update the shadow */
3964 		cq->cq_count = cq_count;
3965 		cq->cq_period = cq_period;
3966 	}
3967 	return 0;
3968 }
3969 
bnxt_re_resize_cq_complete(struct bnxt_re_cq * cq)3970 static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
3971 {
3972 	struct bnxt_re_dev *rdev = cq->rdev;
3973 
3974 	bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
3975 
3976 	cq->qplib_cq.max_wqe = cq->resize_cqe;
3977 	if (cq->resize_umem) {
3978 		ib_umem_release(cq->umem);
3979 		cq->umem = cq->resize_umem;
3980 		cq->resize_umem = NULL;
3981 		cq->resize_cqe = 0;
3982 	}
3983 }
3984 
bnxt_re_resize_cq(struct ib_cq * ib_cq,int cqe,struct ib_udata * udata)3985 int bnxt_re_resize_cq(struct ib_cq *ib_cq, int cqe, struct ib_udata *udata)
3986 {
3987 	struct bnxt_qplib_sg_info sginfo = {};
3988 	struct bnxt_qplib_dpi *orig_dpi = NULL;
3989 	struct bnxt_qplib_dev_attr *dev_attr;
3990 	struct bnxt_re_ucontext *uctx = NULL;
3991 	struct bnxt_re_resize_cq_req ureq;
3992 	struct ib_ucontext *context = NULL;
3993 	struct bnxt_re_dev *rdev;
3994 	struct bnxt_re_cq *cq;
3995 	int rc, entries;
3996 
3997 	/* Don't allow more than one resize request at the same time.
3998 	 * TODO: need a mutex here when we support kernel consumers of resize.
3999 	 */
4000 	cq =  to_bnxt_re(ib_cq, struct bnxt_re_cq, ibcq);
4001 	rdev = cq->rdev;
4002 	dev_attr = rdev->dev_attr;
4003 	if (ib_cq->uobject) {
4004 		uctx = rdma_udata_to_drv_context(udata,
4005 						 struct bnxt_re_ucontext,
4006 						 ibucontext);
4007 		context = &uctx->ibucontext;
4008 	}
4009 
4010 	if (cq->resize_umem) {
4011 		dev_err(rdev_to_dev(rdev), "Resize CQ %#x failed - Busy\n",
4012 			cq->qplib_cq.id);
4013 		return -EBUSY;
4014 	}
4015 
4016 	/* Check the requested cq depth out of supported depth */
4017 	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
4018 		dev_err(rdev_to_dev(rdev), "Resize CQ %#x failed - max exceeded\n",
4019 			cq->qplib_cq.id);
4020 		return -EINVAL;
4021 	}
4022 
4023 	entries = bnxt_re_init_depth(cqe + 1, uctx);
4024 	entries = min_t(u32, (u32)entries, dev_attr->max_cq_wqes + 1);
4025 
4026 	/* Check to see if the new requested size can be handled by already
4027 	 * existing CQ
4028 	 */
4029 	if (entries == cq->ibcq.cqe) {
4030 		dev_info(rdev_to_dev(rdev), "CQ is already at size %d\n", cqe);
4031 		return 0;
4032 	}
4033 
4034 	if (ib_cq->uobject && udata) {
4035 		if (udata->inlen < sizeof(ureq))
4036 			dev_warn(rdev_to_dev(rdev),
4037 				 "Update the library ulen %d klen %d\n",
4038 				 (unsigned int)udata->inlen,
4039 				 (unsigned int)sizeof(ureq));
4040 
4041 		rc = ib_copy_from_udata(&ureq, udata,
4042 					min(udata->inlen, sizeof(ureq)));
4043 		if (rc)
4044 			goto fail;
4045 
4046 		dev_dbg(rdev_to_dev(rdev), "%s: va %p\n", __func__,
4047 			(void *)ureq.cq_va);
4048 		cq->resize_umem = ib_umem_get_compat
4049 				       (rdev,
4050 					context, udata, ureq.cq_va,
4051 					entries * sizeof(struct cq_base),
4052 					IB_ACCESS_LOCAL_WRITE, 1);
4053 		if (IS_ERR(cq->resize_umem)) {
4054 			rc = PTR_ERR(cq->resize_umem);
4055 			cq->resize_umem = NULL;
4056 			dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed! rc = %d\n",
4057 				__func__, rc);
4058 			goto fail;
4059 		}
4060 		cq->resize_cqe = entries;
4061 		dev_dbg(rdev_to_dev(rdev), "%s: ib_umem_get() success\n",
4062 			__func__);
4063 		memcpy(&sginfo, &cq->qplib_cq.sginfo, sizeof(sginfo));
4064 		orig_dpi = cq->qplib_cq.dpi;
4065 
4066 		cq->qplib_cq.sginfo.sghead = get_ib_umem_sgl(cq->resize_umem,
4067 						&cq->qplib_cq.sginfo.nmap);
4068 		cq->qplib_cq.sginfo.npages =
4069 				ib_umem_num_pages_compat(cq->resize_umem);
4070 		cq->qplib_cq.sginfo.pgsize = PAGE_SIZE;
4071 		cq->qplib_cq.sginfo.pgshft = PAGE_SHIFT;
4072 		cq->qplib_cq.dpi = &uctx->dpi;
4073 	} else {
4074 		/* TODO: kernel consumer */
4075 	}
4076 
4077 	rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
4078 	if (rc) {
4079 		dev_err(rdev_to_dev(rdev), "Resize HW CQ %#x failed!\n",
4080 			cq->qplib_cq.id);
4081 		goto fail;
4082 	}
4083 
4084 	cq->ibcq.cqe = cq->resize_cqe;
4085 	/* For kernel consumers complete resize here. For uverbs consumers,
4086 	 * we complete it in the context of ibv_poll_cq().
4087 	 */
4088 	if (!cq->resize_umem)
4089 		bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
4090 
4091 	atomic_inc(&rdev->stats.rsors.resize_count);
4092 	return 0;
4093 
4094 fail:
4095 	if (cq->resize_umem) {
4096 		ib_umem_release(cq->resize_umem);
4097 		cq->resize_umem = NULL;
4098 		cq->resize_cqe = 0;
4099 		memcpy(&cq->qplib_cq.sginfo, &sginfo, sizeof(sginfo));
4100 		cq->qplib_cq.dpi = orig_dpi;
4101 	}
4102 	return rc;
4103 }
4104 
__req_to_ib_wc_status(u8 qstatus)4105 static enum ib_wc_status __req_to_ib_wc_status(u8 qstatus)
4106 {
4107 	switch(qstatus) {
4108 	case CQ_REQ_STATUS_OK:
4109 		return IB_WC_SUCCESS;
4110 	case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
4111 		return IB_WC_BAD_RESP_ERR;
4112 	case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
4113 		return IB_WC_LOC_LEN_ERR;
4114 	case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
4115 		return IB_WC_LOC_QP_OP_ERR;
4116 	case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
4117 		return IB_WC_LOC_PROT_ERR;
4118 	case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
4119 		return IB_WC_GENERAL_ERR;
4120 	case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
4121 		return IB_WC_REM_INV_REQ_ERR;
4122 	case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
4123 		return IB_WC_REM_ACCESS_ERR;
4124 	case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
4125 		return IB_WC_REM_OP_ERR;
4126 	case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
4127 		return IB_WC_RNR_RETRY_EXC_ERR;
4128 	case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
4129 		return IB_WC_RETRY_EXC_ERR;
4130 	case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
4131 		return IB_WC_WR_FLUSH_ERR;
4132 	default:
4133 		return IB_WC_GENERAL_ERR;
4134 	}
4135 	return 0;
4136 }
4137 
__rawqp1_to_ib_wc_status(u8 qstatus)4138 static enum ib_wc_status __rawqp1_to_ib_wc_status(u8 qstatus)
4139 {
4140 	switch(qstatus) {
4141 	case CQ_RES_RAWETH_QP1_STATUS_OK:
4142 		return IB_WC_SUCCESS;
4143 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
4144 		return IB_WC_LOC_ACCESS_ERR;
4145 	case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
4146 		return IB_WC_LOC_LEN_ERR;
4147 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
4148 		return IB_WC_LOC_PROT_ERR;
4149 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
4150 		return IB_WC_LOC_QP_OP_ERR;
4151 	case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
4152 		return IB_WC_GENERAL_ERR;
4153 	case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
4154 		return IB_WC_WR_FLUSH_ERR;
4155 	case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
4156 		return IB_WC_WR_FLUSH_ERR;
4157 	default:
4158 		return IB_WC_GENERAL_ERR;
4159 	}
4160 }
4161 
__rc_to_ib_wc_status(u8 qstatus)4162 static enum ib_wc_status __rc_to_ib_wc_status(u8 qstatus)
4163 {
4164 	switch(qstatus) {
4165 	case CQ_RES_RC_STATUS_OK:
4166 		return IB_WC_SUCCESS;
4167 	case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
4168 		return IB_WC_LOC_ACCESS_ERR;
4169 	case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
4170 		return IB_WC_LOC_LEN_ERR;
4171 	case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
4172 		return IB_WC_LOC_PROT_ERR;
4173 	case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
4174 		return IB_WC_LOC_QP_OP_ERR;
4175 	case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
4176 		return IB_WC_GENERAL_ERR;
4177 	case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
4178 		return IB_WC_REM_INV_REQ_ERR;
4179 	case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
4180 		return IB_WC_WR_FLUSH_ERR;
4181 	case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
4182 		return IB_WC_WR_FLUSH_ERR;
4183 	default:
4184 		return IB_WC_GENERAL_ERR;
4185 	}
4186 }
4187 
bnxt_re_process_req_wc(struct ib_wc * wc,struct bnxt_qplib_cqe * cqe)4188 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
4189 {
4190 	switch (cqe->type) {
4191 	case BNXT_QPLIB_SWQE_TYPE_SEND:
4192 		wc->opcode = IB_WC_SEND;
4193 		break;
4194 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
4195 		wc->opcode = IB_WC_SEND;
4196 		wc->wc_flags |= IB_WC_WITH_IMM;
4197 		break;
4198 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
4199 		wc->opcode = IB_WC_SEND;
4200 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4201 		break;
4202 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
4203 		wc->opcode = IB_WC_RDMA_WRITE;
4204 		break;
4205 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
4206 		wc->opcode = IB_WC_RDMA_WRITE;
4207 		wc->wc_flags |= IB_WC_WITH_IMM;
4208 		break;
4209 	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
4210 		wc->opcode = IB_WC_RDMA_READ;
4211 		break;
4212 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
4213 		wc->opcode = IB_WC_COMP_SWAP;
4214 		break;
4215 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
4216 		wc->opcode = IB_WC_FETCH_ADD;
4217 		break;
4218 	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
4219 		wc->opcode = IB_WC_LOCAL_INV;
4220 		break;
4221 	case BNXT_QPLIB_SWQE_TYPE_REG_MR:
4222 		wc->opcode = IB_WC_REG_MR;
4223 		break;
4224 	default:
4225 		wc->opcode = IB_WC_SEND;
4226 		break;
4227 	}
4228 
4229 	wc->status = __req_to_ib_wc_status(cqe->status);
4230 }
4231 
bnxt_re_check_packet_type(u16 raweth_qp1_flags,u16 raweth_qp1_flags2)4232 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags, u16 raweth_qp1_flags2)
4233 {
4234 	bool is_ipv6 = false, is_ipv4 = false;
4235 
4236 	/* raweth_qp1_flags Bit 9-6 indicates itype */
4237 
4238 	if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
4239 	    != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
4240 		return -1;
4241 
4242 	if (raweth_qp1_flags2 &
4243 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
4244 	    raweth_qp1_flags2 &
4245 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
4246 		/* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
4247 		(raweth_qp1_flags2 &
4248 		 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
4249 			(is_ipv6 = true) : (is_ipv4 = true);
4250 		return ((is_ipv6) ?
4251 			 BNXT_RE_ROCEV2_IPV6_PACKET :
4252 			 BNXT_RE_ROCEV2_IPV4_PACKET);
4253 	} else {
4254 		return BNXT_RE_ROCE_V1_PACKET;
4255 	}
4256 }
4257 
bnxt_re_is_loopback_packet(struct bnxt_re_dev * rdev,void * rq_hdr_buf)4258 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
4259 					    void *rq_hdr_buf)
4260 {
4261 	u8 *tmp_buf = NULL;
4262 	struct ethhdr *eth_hdr;
4263 	u16 eth_type;
4264 	bool rc = false;
4265 
4266 	tmp_buf = (u8 *)rq_hdr_buf;
4267 	/*
4268 	 * If dest mac is not same as I/F mac, this could be a
4269 	 * loopback address or multicast address, check whether
4270 	 * it is a loopback packet
4271 	 */
4272 	if (!ether_addr_equal(tmp_buf, rdev->dev_addr)) {
4273 		tmp_buf += 4;
4274 		/* Check the  ether type */
4275 		eth_hdr = (struct ethhdr *)tmp_buf;
4276 		eth_type = ntohs(eth_hdr->h_proto);
4277 		switch (eth_type) {
4278 		case BNXT_QPLIB_ETHTYPE_ROCEV1:
4279 			rc = true;
4280 			break;
4281 		default:
4282 			break;
4283 		}
4284 	}
4285 
4286 	return rc;
4287 }
4288 
bnxt_re_is_vlan_in_packet(struct bnxt_re_dev * rdev,void * rq_hdr_buf,struct bnxt_qplib_cqe * cqe)4289 static bool bnxt_re_is_vlan_in_packet(struct bnxt_re_dev *rdev,
4290 				      void *rq_hdr_buf,
4291 				      struct bnxt_qplib_cqe *cqe)
4292 {
4293 	struct vlan_hdr *vlan_hdr;
4294 	struct ethhdr *eth_hdr;
4295 	u8 *tmp_buf = NULL;
4296 	u16 eth_type;
4297 
4298 	tmp_buf = (u8 *)rq_hdr_buf;
4299 	/* Check the  ether type */
4300 	eth_hdr = (struct ethhdr *)tmp_buf;
4301 	eth_type = ntohs(eth_hdr->h_proto);
4302 	if (eth_type == ETH_P_8021Q) {
4303 		tmp_buf += sizeof(struct ethhdr);
4304 		vlan_hdr = (struct vlan_hdr *)tmp_buf;
4305 		cqe->raweth_qp1_metadata =
4306 			ntohs(vlan_hdr->h_vlan_TCI) |
4307 			(eth_type <<
4308 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
4309 		cqe->raweth_qp1_flags2 |=
4310 			CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN;
4311 		return true;
4312 	}
4313 
4314 	return false;
4315 }
4316 
bnxt_re_process_raw_qp_packet_receive(struct bnxt_re_qp * gsi_qp,struct bnxt_qplib_cqe * cqe)4317 static int bnxt_re_process_raw_qp_packet_receive(struct bnxt_re_qp *gsi_qp,
4318 						 struct bnxt_qplib_cqe *cqe)
4319 {
4320 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
4321 	struct bnxt_qplib_hdrbuf *hdr_buf;
4322 	dma_addr_t shrq_hdr_buf_map;
4323 	struct ib_sge s_sge[2] = {};
4324 	struct ib_sge r_sge[2] = {};
4325 	struct ib_recv_wr rwr = {};
4326 	struct bnxt_re_ah *gsi_sah;
4327 	struct bnxt_re_qp *gsi_sqp;
4328 	dma_addr_t rq_hdr_buf_map;
4329 	struct bnxt_re_dev *rdev;
4330 	struct ib_send_wr *swr;
4331 	u32 skip_bytes = 0;
4332 	void *rq_hdr_buf;
4333 	int pkt_type = 0;
4334 	u32 offset = 0;
4335 	u32 tbl_idx;
4336 	int rc;
4337 	struct ib_ud_wr udwr = {};
4338 
4339 	swr = &udwr.wr;
4340 	rdev = gsi_qp->rdev;
4341 	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
4342 	tbl_idx = cqe->wr_id;
4343 
4344 	hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf;
4345 	rq_hdr_buf = (u8 *) hdr_buf->va + tbl_idx * hdr_buf->step;
4346 	rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
4347 							  tbl_idx);
4348 	/* Shadow QP header buffer */
4349 	shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_sqp->qplib_qp,
4350 							    tbl_idx);
4351 	sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
4352 
4353 	/* Find packet type from the cqe */
4354 	pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
4355 					     cqe->raweth_qp1_flags2);
4356 	if (pkt_type < 0) {
4357 		dev_err(rdev_to_dev(rdev), "Not handling this packet\n");
4358 		return -EINVAL;
4359 	}
4360 
4361 	/* Adjust the offset for the user buffer and post in the rq */
4362 
4363 	if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
4364 		offset = 20;
4365 
4366 	/*
4367 	 * QP1 loopback packet has 4 bytes of internal header before
4368 	 * ether header. Skip these four bytes.
4369 	 */
4370 	if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
4371 		skip_bytes = 4;
4372 
4373 	if (bnxt_re_is_vlan_in_packet(rdev, rq_hdr_buf, cqe))
4374 		skip_bytes += VLAN_HLEN;
4375 
4376 	/* Store this cqe */
4377 	memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
4378 	sqp_entry->qp1_qp = gsi_qp;
4379 
4380 	/* First send SGE . Skip the ether header*/
4381 	s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
4382 			+ skip_bytes;
4383 	s_sge[0].lkey = 0xFFFFFFFF;
4384 	s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
4385 				BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
4386 
4387 	/* Second Send SGE */
4388 	s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
4389 			BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
4390 	if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
4391 		s_sge[1].addr += 8;
4392 	s_sge[1].lkey = 0xFFFFFFFF;
4393 	s_sge[1].length = 256;
4394 
4395 	/* First recv SGE */
4396 	r_sge[0].addr = shrq_hdr_buf_map;
4397 	r_sge[0].lkey = 0xFFFFFFFF;
4398 	r_sge[0].length = 40;
4399 
4400 	r_sge[1].addr = sqp_entry->sge.addr + offset;
4401 	r_sge[1].lkey = sqp_entry->sge.lkey;
4402 	r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
4403 
4404 	/* Create receive work request */
4405 	rwr.num_sge = 2;
4406 	rwr.sg_list = r_sge;
4407 	rwr.wr_id = tbl_idx;
4408 	rwr.next = NULL;
4409 
4410 	rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
4411 	if (rc) {
4412 		dev_err(rdev_to_dev(rdev),
4413 			"Failed to post Rx buffers to shadow QP\n");
4414 		return -ENOMEM;
4415 	}
4416 
4417 	swr->num_sge = 2;
4418 	swr->sg_list = s_sge;
4419 	swr->wr_id = tbl_idx;
4420 	swr->opcode = IB_WR_SEND;
4421 	swr->next = NULL;
4422 
4423 	gsi_sah = rdev->gsi_ctx.gsi_sah;
4424 	udwr.ah = &gsi_sah->ibah;
4425 	udwr.remote_qpn = gsi_sqp->qplib_qp.id;
4426 	udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
4427 	/* post data received in the send queue */
4428 	rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
4429 
4430 	return rc;
4431 }
4432 
bnxt_re_process_res_rawqp1_wc(struct ib_wc * wc,struct bnxt_qplib_cqe * cqe)4433 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
4434 					  struct bnxt_qplib_cqe *cqe)
4435 {
4436 	wc->opcode = IB_WC_RECV;
4437 	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
4438 	wc->wc_flags |= IB_WC_GRH;
4439 }
4440 
bnxt_re_process_res_rc_wc(struct ib_wc * wc,struct bnxt_qplib_cqe * cqe)4441 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
4442 				      struct bnxt_qplib_cqe *cqe)
4443 {
4444 	wc->opcode = IB_WC_RECV;
4445 	wc->status = __rc_to_ib_wc_status(cqe->status);
4446 
4447 	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
4448 		wc->wc_flags |= IB_WC_WITH_IMM;
4449 	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
4450 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4451 	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
4452 	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
4453 		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
4454 }
4455 
4456 /* Returns TRUE if pkt has valid VLAN and if VLAN id is non-zero */
bnxt_re_is_nonzero_vlanid_pkt(struct bnxt_qplib_cqe * orig_cqe,u16 * vid,u8 * sl)4457 static bool bnxt_re_is_nonzero_vlanid_pkt(struct bnxt_qplib_cqe *orig_cqe,
4458 					  u16 *vid, u8 *sl)
4459 {
4460 	u32 metadata;
4461 	u16 tpid;
4462 	bool ret = false;
4463 	metadata = orig_cqe->raweth_qp1_metadata;
4464 	if (orig_cqe->raweth_qp1_flags2 &
4465 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
4466 		tpid = ((metadata &
4467 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
4468 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
4469 		if (tpid == ETH_P_8021Q) {
4470 			*vid = metadata &
4471 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
4472 			*sl = (metadata &
4473 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
4474 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
4475 			ret = !!(*vid);
4476 		}
4477 	}
4478 
4479 	return ret;
4480 }
4481 
bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp * gsi_sqp,struct ib_wc * wc,struct bnxt_qplib_cqe * cqe)4482 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
4483 					     struct ib_wc *wc,
4484 					     struct bnxt_qplib_cqe *cqe)
4485 {
4486 	u32 tbl_idx;
4487 	struct bnxt_re_dev *rdev = gsi_sqp->rdev;
4488 	struct bnxt_re_qp *gsi_qp = NULL;
4489 	struct bnxt_qplib_cqe *orig_cqe = NULL;
4490 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
4491 	int nw_type;
4492 	u16 vlan_id;
4493 	u8 sl;
4494 
4495 	tbl_idx = cqe->wr_id;
4496 
4497 	sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
4498 	gsi_qp = sqp_entry->qp1_qp;
4499 	orig_cqe = &sqp_entry->cqe;
4500 
4501 	wc->wr_id = sqp_entry->wrid;
4502 	wc->byte_len = orig_cqe->length;
4503 	wc->qp = &gsi_qp->ib_qp;
4504 
4505 	wc->ex.imm_data = orig_cqe->immdata;
4506 	wc->src_qp = orig_cqe->src_qp;
4507 	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
4508 	if (bnxt_re_is_nonzero_vlanid_pkt(orig_cqe, &vlan_id, &sl)) {
4509 		if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
4510 			wc->sl = sl;
4511 			wc->vlan_id = vlan_id;
4512 			wc->wc_flags |= IB_WC_WITH_VLAN;
4513 		}
4514 	}
4515 	wc->port_num = 1;
4516 	wc->vendor_err = orig_cqe->status;
4517 
4518 	wc->opcode = IB_WC_RECV;
4519 	wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
4520 	wc->wc_flags |= IB_WC_GRH;
4521 
4522 	nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
4523 					    orig_cqe->raweth_qp1_flags2);
4524 	if(nw_type >= 0)
4525 		dev_dbg(rdev_to_dev(rdev), "%s nw_type = %d\n", __func__, nw_type);
4526 }
4527 
bnxt_re_process_res_ud_wc(struct bnxt_re_dev * rdev,struct bnxt_re_qp * qp,struct ib_wc * wc,struct bnxt_qplib_cqe * cqe)4528 static void bnxt_re_process_res_ud_wc(struct bnxt_re_dev *rdev,
4529 				      struct bnxt_re_qp *qp, struct ib_wc *wc,
4530 				      struct bnxt_qplib_cqe *cqe)
4531 {
4532 	u16 vlan_id = 0;
4533 
4534 	wc->opcode = IB_WC_RECV;
4535 	wc->status = __rc_to_ib_wc_status(cqe->status);
4536 	if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
4537 		wc->wc_flags |= IB_WC_WITH_IMM;
4538 	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
4539 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4540 	/* report only on GSI QP for Thor */
4541 	if (rdev->gsi_ctx.gsi_qp->qplib_qp.id == qp->qplib_qp.id &&
4542 	    rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD) {
4543 		wc->wc_flags |= IB_WC_GRH;
4544 		memcpy(wc->smac, cqe->smac, ETH_ALEN);
4545 		wc->wc_flags |= IB_WC_WITH_SMAC;
4546 		if (_is_cqe_v2_supported(rdev->dev_attr->dev_cap_flags)) {
4547 			if (cqe->flags & CQ_RES_UD_V2_FLAGS_META_FORMAT_MASK) {
4548 				if (cqe->cfa_meta &
4549 				    BNXT_QPLIB_CQE_CFA_META1_VALID)
4550 					vlan_id = (cqe->cfa_meta & 0xFFF);
4551 			}
4552 		} else if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
4553 			vlan_id = (cqe->cfa_meta & 0xFFF);
4554 		}
4555 		/* Mark only if vlan_id is non zero */
4556 		if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
4557 			wc->vlan_id = vlan_id;
4558 			wc->wc_flags |= IB_WC_WITH_VLAN;
4559 		}
4560 	}
4561 }
4562 
bnxt_re_legacy_send_phantom_wqe(struct bnxt_re_qp * qp)4563 static int bnxt_re_legacy_send_phantom_wqe(struct bnxt_re_qp *qp)
4564 {
4565 	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
4566 	unsigned long flags;
4567 	int rc = 0;
4568 
4569 	spin_lock_irqsave(&qp->sq_lock, flags);
4570 
4571 	rc = bnxt_re_legacy_bind_fence_mw(lib_qp);
4572 	if (!rc) {
4573 		lib_qp->sq.phantom_wqe_cnt++;
4574 		dev_dbg(&lib_qp->sq.hwq.pdev->dev,
4575 			"qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
4576 			lib_qp->id, lib_qp->sq.hwq.prod,
4577 			HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
4578 			lib_qp->sq.phantom_wqe_cnt);
4579 	}
4580 
4581 	spin_unlock_irqrestore(&qp->sq_lock, flags);
4582 	return rc;
4583 }
4584 
bnxt_re_poll_cq(struct ib_cq * ib_cq,int num_entries,struct ib_wc * wc)4585 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
4586 {
4587 	struct bnxt_re_cq *cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ibcq);
4588 	struct bnxt_re_dev *rdev = cq->rdev;
4589 	struct bnxt_re_qp *qp;
4590 	struct bnxt_qplib_cqe *cqe;
4591 	int i, ncqe, budget, init_budget;
4592 	struct bnxt_qplib_q *sq;
4593 	struct bnxt_qplib_qp *lib_qp;
4594 	u32 tbl_idx;
4595 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
4596 	unsigned long flags;
4597 	u8 gsi_mode;
4598 
4599 	/*
4600 	 * DB recovery CQ; only process the door bell pacing alert from
4601 	 * the user lib
4602 	 */
4603 	if (cq->is_dbr_soft_cq) {
4604 		bnxt_re_pacing_alert(rdev);
4605 		return 0;
4606 	}
4607 
4608 	/* User CQ; the only processing we do is to
4609 	 * complete any pending CQ resize operation.
4610 	 */
4611 	if (cq->umem) {
4612 		if (cq->resize_umem)
4613 			bnxt_re_resize_cq_complete(cq);
4614 		return 0;
4615 	}
4616 
4617 	spin_lock_irqsave(&cq->cq_lock, flags);
4618 
4619 	budget = min_t(u32, num_entries, cq->max_cql);
4620 	init_budget = budget;
4621 	if (!cq->cql) {
4622 		dev_err(rdev_to_dev(rdev), "POLL CQ no CQL to use\n");
4623 		goto exit;
4624 	}
4625 	cqe = &cq->cql[0];
4626 	gsi_mode = rdev->gsi_ctx.gsi_qp_mode;
4627 	while (budget) {
4628 		lib_qp = NULL;
4629 		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
4630 		if (lib_qp) {
4631 			sq = &lib_qp->sq;
4632 			if (sq->legacy_send_phantom == true) {
4633 				qp = container_of(lib_qp, struct bnxt_re_qp, qplib_qp);
4634 				if (bnxt_re_legacy_send_phantom_wqe(qp) == -ENOMEM)
4635 					dev_err(rdev_to_dev(rdev),
4636 						"Phantom failed! Scheduled to send again\n");
4637 				else
4638 					sq->legacy_send_phantom = false;
4639 			}
4640 		}
4641 		if (ncqe < budget)
4642 			ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
4643 							      cqe + ncqe,
4644 							      budget - ncqe);
4645 
4646 		if (!ncqe)
4647 			break;
4648 
4649 		for (i = 0; i < ncqe; i++, cqe++) {
4650 			/* Transcribe each qplib_wqe back to ib_wc */
4651 			memset(wc, 0, sizeof(*wc));
4652 
4653 			wc->wr_id = cqe->wr_id;
4654 			wc->byte_len = cqe->length;
4655 			qp = to_bnxt_re((struct bnxt_qplib_qp *)cqe->qp_handle,
4656 					struct bnxt_re_qp, qplib_qp);
4657 			if (!qp) {
4658 				dev_err(rdev_to_dev(rdev),
4659 					"POLL CQ bad QP handle\n");
4660 				continue;
4661 			}
4662 			wc->qp = &qp->ib_qp;
4663 			wc->ex.imm_data = cqe->immdata;
4664 			wc->src_qp = cqe->src_qp;
4665 			memcpy(wc->smac, cqe->smac, ETH_ALEN);
4666 			wc->port_num = 1;
4667 			wc->vendor_err = cqe->status;
4668 
4669 			switch(cqe->opcode) {
4670 			case CQ_BASE_CQE_TYPE_REQ:
4671 				if (gsi_mode == BNXT_RE_GSI_MODE_ALL &&
4672 				    qp->qplib_qp.id ==
4673 				    rdev->gsi_ctx.gsi_sqp->qplib_qp.id) {
4674 					/* Handle this completion with
4675 					 * the stored completion */
4676 					 dev_dbg(rdev_to_dev(rdev),
4677 						 "Skipping this UD Send CQ\n");
4678 					memset(wc, 0, sizeof(*wc));
4679 					continue;
4680 				}
4681 				bnxt_re_process_req_wc(wc, cqe);
4682 				break;
4683 			case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
4684 				if (gsi_mode == BNXT_RE_GSI_MODE_ALL) {
4685 					if (!cqe->status) {
4686 						int rc = 0;
4687 						rc = bnxt_re_process_raw_qp_packet_receive(qp, cqe);
4688 						if (!rc) {
4689 							memset(wc, 0,
4690 							       sizeof(*wc));
4691 							continue;
4692 						}
4693 						cqe->status = -1;
4694 					}
4695 					/* Errors need not be looped back.
4696 					 * But change the wr_id to the one
4697 					 * stored in the table
4698 					 */
4699 					tbl_idx = cqe->wr_id;
4700 					sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
4701 					wc->wr_id = sqp_entry->wrid;
4702 				}
4703 
4704 				bnxt_re_process_res_rawqp1_wc(wc, cqe);
4705 				break;
4706 			case CQ_BASE_CQE_TYPE_RES_RC:
4707 				bnxt_re_process_res_rc_wc(wc, cqe);
4708 				break;
4709 			case CQ_BASE_CQE_TYPE_RES_UD:
4710 				if (gsi_mode == BNXT_RE_GSI_MODE_ALL &&
4711 				    qp->qplib_qp.id ==
4712 				    rdev->gsi_ctx.gsi_sqp->qplib_qp.id) {
4713 					/* Handle this completion with
4714 					 * the stored completion
4715 					 */
4716 					dev_dbg(rdev_to_dev(rdev),
4717 						"Handling the UD receive CQ\n");
4718 					if (cqe->status) {
4719 						/* TODO handle this completion  as a failure in
4720 						 * loopback porocedure
4721 						 */
4722 						continue;
4723 					} else {
4724 						bnxt_re_process_res_shadow_qp_wc(qp, wc, cqe);
4725 						break;
4726 					}
4727 				}
4728 				bnxt_re_process_res_ud_wc(rdev, qp, wc, cqe);
4729 				break;
4730 			default:
4731 				dev_err(rdev_to_dev(cq->rdev),
4732 					"POLL CQ type 0x%x not handled, skip!\n",
4733 					cqe->opcode);
4734 				continue;
4735 			}
4736 			wc++;
4737 			budget--;
4738 		}
4739 	}
4740 exit:
4741 	spin_unlock_irqrestore(&cq->cq_lock, flags);
4742 	return init_budget - budget;
4743 }
4744 
bnxt_re_req_notify_cq(struct ib_cq * ib_cq,enum ib_cq_notify_flags ib_cqn_flags)4745 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
4746 			  enum ib_cq_notify_flags ib_cqn_flags)
4747 {
4748 	struct bnxt_re_cq *cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ibcq);
4749 	int type = 0, rc = 0;
4750 	unsigned long flags;
4751 
4752 	spin_lock_irqsave(&cq->cq_lock, flags);
4753 	/* Trigger on the very next completion */
4754 	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
4755 		type = DBC_DBC_TYPE_CQ_ARMALL;
4756 	/* Trigger on the next solicited completion */
4757 	else if (ib_cqn_flags & IB_CQ_SOLICITED)
4758 		type = DBC_DBC_TYPE_CQ_ARMSE;
4759 
4760 	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
4761 
4762 	/* Poll to see if there are missed events */
4763 	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
4764 	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
4765 		rc = 1;
4766 
4767 	spin_unlock_irqrestore(&cq->cq_lock, flags);
4768 
4769 	return rc;
4770 }
4771 
4772 /* Memory Regions */
bnxt_re_get_dma_mr(struct ib_pd * ib_pd,int mr_access_flags)4773 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
4774 {
4775 	struct bnxt_qplib_mrinfo mrinfo;
4776 	struct bnxt_re_dev *rdev;
4777 	struct bnxt_re_mr *mr;
4778 	struct bnxt_re_pd *pd;
4779 	u32 max_mr_count;
4780 	u64 pbl = 0;
4781 	int rc;
4782 
4783 	memset(&mrinfo, 0, sizeof(mrinfo));
4784 	pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
4785 	rdev = pd->rdev;
4786 
4787 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
4788 	if (!mr) {
4789 		dev_err(rdev_to_dev(rdev),
4790 			"Allocate memory for DMA MR failed!\n");
4791 		return ERR_PTR(-ENOMEM);
4792 	}
4793 	mr->rdev = rdev;
4794 	mr->qplib_mr.pd = &pd->qplib_pd;
4795 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
4796 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
4797 
4798 	/* Allocate and register 0 as the address */
4799 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4800 	if (rc) {
4801 		dev_err(rdev_to_dev(rdev), "Allocate DMA MR failed!\n");
4802 		goto fail;
4803 	}
4804 	mr->qplib_mr.total_size = -1; /* Infinite length */
4805 	mrinfo.ptes = &pbl;
4806 	mrinfo.sg.npages = 0;
4807 	mrinfo.sg.pgsize = PAGE_SIZE;
4808 	mrinfo.sg.pgshft = PAGE_SHIFT;
4809 	mrinfo.sg.pgsize = PAGE_SIZE;
4810 	mrinfo.mrw = &mr->qplib_mr;
4811 	mrinfo.is_dma = true;
4812 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
4813 	if (rc) {
4814 		dev_err(rdev_to_dev(rdev), "Register DMA MR failed!\n");
4815 		goto fail_mr;
4816 	}
4817 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
4818 	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
4819 			       IB_ACCESS_REMOTE_ATOMIC))
4820 		mr->ib_mr.rkey = mr->ib_mr.lkey;
4821 	atomic_inc(&rdev->stats.rsors.mr_count);
4822 	max_mr_count =  atomic_read(&rdev->stats.rsors.mr_count);
4823 	if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count))
4824 		atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
4825 
4826 	return &mr->ib_mr;
4827 
4828 fail_mr:
4829 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4830 fail:
4831 	kfree(mr);
4832 	return ERR_PTR(rc);
4833 }
4834 
bnxt_re_dereg_mr(struct ib_mr * ib_mr,struct ib_udata * udata)4835 int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
4836 {
4837 	struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr);
4838 	struct bnxt_re_dev *rdev = mr->rdev;
4839 	int rc = 0;
4840 
4841 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4842 	if (rc)
4843 		dev_err(rdev_to_dev(rdev), "Dereg MR failed (%d): rc - %#x\n",
4844 			mr->qplib_mr.lkey, rc);
4845 
4846 	if (mr->pages) {
4847 		bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
4848 						   &mr->qplib_frpl);
4849 		kfree(mr->pages);
4850 		mr->npages = 0;
4851 		mr->pages = NULL;
4852 	}
4853 	if (!IS_ERR(mr->ib_umem) && mr->ib_umem) {
4854 		mr->is_invalcb_active = false;
4855 		bnxt_re_peer_mem_release(mr->ib_umem);
4856 	}
4857 	kfree(mr);
4858 	atomic_dec(&rdev->stats.rsors.mr_count);
4859 	return 0;
4860 }
4861 
bnxt_re_set_page(struct ib_mr * ib_mr,u64 addr)4862 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
4863 {
4864 	struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr);
4865 
4866 	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
4867 		return -ENOMEM;
4868 
4869 	mr->pages[mr->npages++] = addr;
4870 	dev_dbg(NULL, "%s: ibdev %p Set MR pages[%d] = 0x%lx\n",
4871 		ROCE_DRV_MODULE_NAME, ib_mr->device, mr->npages - 1,
4872 		mr->pages[mr->npages - 1]);
4873 	return 0;
4874 }
4875 
bnxt_re_map_mr_sg(struct ib_mr * ib_mr,struct scatterlist * sg,int sg_nents,unsigned int * sg_offset)4876 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg,
4877 		      int sg_nents, unsigned int *sg_offset)
4878 {
4879 	struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr);
4880 
4881 	mr->npages = 0;
4882 	return ib_sg_to_pages(ib_mr, sg, sg_nents,
4883 			      sg_offset, bnxt_re_set_page);
4884 }
4885 
bnxt_re_alloc_mr(struct ib_pd * ib_pd,enum ib_mr_type type,u32 max_num_sg,struct ib_udata * udata)4886 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
4887 			       u32 max_num_sg, struct ib_udata *udata)
4888 {
4889 	struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
4890 	struct bnxt_re_dev *rdev = pd->rdev;
4891 	struct bnxt_re_mr *mr;
4892 	u32 max_mr_count;
4893 	int rc;
4894 
4895 	dev_dbg(rdev_to_dev(rdev), "Alloc MR\n");
4896 	if (type != IB_MR_TYPE_MEM_REG) {
4897 		dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported\n", type);
4898 		return ERR_PTR(-EINVAL);
4899 	}
4900 	if (max_num_sg > MAX_PBL_LVL_1_PGS) {
4901 		dev_dbg(rdev_to_dev(rdev), "Max SG exceeded\n");
4902 		return ERR_PTR(-EINVAL);
4903 	}
4904 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
4905 	if (!mr) {
4906 		dev_err(rdev_to_dev(rdev), "Allocate MR mem failed!\n");
4907 		return ERR_PTR(-ENOMEM);
4908 	}
4909 	mr->rdev = rdev;
4910 	mr->qplib_mr.pd = &pd->qplib_pd;
4911 	mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
4912 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
4913 
4914 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4915 	if (rc) {
4916 		dev_err(rdev_to_dev(rdev), "Allocate MR failed!\n");
4917 		goto fail;
4918 	}
4919 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
4920 	mr->ib_mr.rkey = mr->ib_mr.lkey;
4921 	mr->pages = kzalloc(sizeof(u64) * max_num_sg, GFP_KERNEL);
4922 	if (!mr->pages) {
4923 		dev_err(rdev_to_dev(rdev),
4924 			"Allocate MR page list mem failed!\n");
4925 		rc = -ENOMEM;
4926 		goto fail_mr;
4927 	}
4928 	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
4929 						 &mr->qplib_frpl, max_num_sg);
4930 	if (rc) {
4931 		dev_err(rdev_to_dev(rdev),
4932 			"Allocate HW Fast reg page list failed!\n");
4933 		goto free_page;
4934 	}
4935 	dev_dbg(rdev_to_dev(rdev), "Alloc MR pages = 0x%p\n", mr->pages);
4936 
4937 	atomic_inc(&rdev->stats.rsors.mr_count);
4938 	max_mr_count =  atomic_read(&rdev->stats.rsors.mr_count);
4939 	if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count))
4940 		atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
4941 	return &mr->ib_mr;
4942 
4943 free_page:
4944 	kfree(mr->pages);
4945 fail_mr:
4946 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4947 fail:
4948 	kfree(mr);
4949 	return ERR_PTR(rc);
4950 }
4951 
4952 /* Memory Windows */
bnxt_re_alloc_mw(struct ib_pd * ib_pd,enum ib_mw_type type,struct ib_udata * udata)4953 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
4954 			       struct ib_udata *udata)
4955 {
4956 	struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
4957 	struct bnxt_re_dev *rdev = pd->rdev;
4958 	struct bnxt_re_mw *mw;
4959 	u32 max_mw_count;
4960 	int rc;
4961 
4962 	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
4963 	if (!mw) {
4964 		dev_err(rdev_to_dev(rdev), "Allocate MW failed!\n");
4965 		rc = -ENOMEM;
4966 		goto exit;
4967 	}
4968 	mw->rdev = rdev;
4969 	mw->qplib_mw.pd = &pd->qplib_pd;
4970 
4971 	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
4972 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
4973 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
4974 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
4975 	if (rc) {
4976 		dev_err(rdev_to_dev(rdev), "Allocate MW failed!\n");
4977 		goto fail;
4978 	}
4979 	mw->ib_mw.rkey = mw->qplib_mw.rkey;
4980 	atomic_inc(&rdev->stats.rsors.mw_count);
4981 	max_mw_count = atomic_read(&rdev->stats.rsors.mw_count);
4982 	if (max_mw_count > atomic_read(&rdev->stats.rsors.max_mw_count))
4983 		atomic_set(&rdev->stats.rsors.max_mw_count, max_mw_count);
4984 
4985 	return &mw->ib_mw;
4986 fail:
4987 	kfree(mw);
4988 exit:
4989 	return ERR_PTR(rc);
4990 }
4991 
bnxt_re_dealloc_mw(struct ib_mw * ib_mw)4992 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
4993 {
4994 	struct bnxt_re_mw *mw = to_bnxt_re(ib_mw, struct bnxt_re_mw, ib_mw);
4995 	struct bnxt_re_dev *rdev = mw->rdev;
4996 	int rc;
4997 
4998 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
4999 	if (rc) {
5000 		dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
5001 		return rc;
5002 	}
5003 
5004 	kfree(mw);
5005 	atomic_dec(&rdev->stats.rsors.mw_count);
5006 	return rc;
5007 }
5008 
bnxt_re_page_size_ok(int page_shift)5009 static int bnxt_re_page_size_ok(int page_shift)
5010 {
5011 	switch (page_shift) {
5012 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
5013 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
5014 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
5015 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
5016 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
5017 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
5018 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
5019 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256MB:
5020 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
5021 		return 1;
5022 	default:
5023 		return 0;
5024 	}
5025 }
5026 
bnxt_re_get_page_shift(struct ib_umem * umem,u64 va,u64 st,u64 cmask)5027 static int bnxt_re_get_page_shift(struct ib_umem *umem,
5028 				  u64 va, u64 st, u64 cmask)
5029 {
5030 	int pgshft;
5031 
5032 	pgshft = ilog2(umem->page_size);
5033 
5034 	return pgshft;
5035 }
5036 
bnxt_re_get_num_pages(struct ib_umem * umem,u64 start,u64 length,int page_shift)5037 static int bnxt_re_get_num_pages(struct ib_umem *umem, u64 start, u64 length, int page_shift)
5038 {
5039 	int npages = 0;
5040 
5041 	if (page_shift == PAGE_SHIFT) {
5042 		npages = ib_umem_num_pages_compat(umem);
5043 	} else {
5044 		npages = ALIGN(length, BIT(page_shift)) / BIT(page_shift);
5045 		if (start %  BIT(page_shift))
5046 			npages++;
5047 	}
5048 	return npages;
5049 }
5050 
5051 /* uverbs */
bnxt_re_reg_user_mr(struct ib_pd * ib_pd,u64 start,u64 length,u64 virt_addr,int mr_access_flags,struct ib_udata * udata)5052 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
5053 				  u64 virt_addr, int mr_access_flags,
5054 				  struct ib_udata *udata)
5055 {
5056 	struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
5057 	struct bnxt_re_dev *rdev = pd->rdev;
5058 	struct bnxt_qplib_mrinfo mrinfo;
5059 	int umem_pgs, page_shift, rc;
5060 	struct bnxt_re_mr *mr;
5061 	struct ib_umem *umem;
5062 	u32 max_mr_count;
5063 	int npages;
5064 
5065 	dev_dbg(rdev_to_dev(rdev), "Reg user MR\n");
5066 
5067 	if (bnxt_re_get_total_mr_mw_count(rdev) >= rdev->dev_attr->max_mr)
5068 		return ERR_PTR(-ENOMEM);
5069 
5070 	if (rdev->mod_exit) {
5071 		dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__);
5072 		return ERR_PTR(-EIO);
5073 	}
5074 	memset(&mrinfo, 0, sizeof(mrinfo));
5075 	if (length > BNXT_RE_MAX_MR_SIZE) {
5076 		dev_err(rdev_to_dev(rdev), "Requested MR Size: %lu "
5077 			"> Max supported: %ld\n", length, BNXT_RE_MAX_MR_SIZE);
5078 		return ERR_PTR(-ENOMEM);
5079 	}
5080 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
5081 	if (!mr) {
5082 		dev_err(rdev_to_dev(rdev), "Allocate MR failed!\n");
5083 		return ERR_PTR (-ENOMEM);
5084 	}
5085 	mr->rdev = rdev;
5086 	mr->qplib_mr.pd = &pd->qplib_pd;
5087 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
5088 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
5089 
5090 	if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) {
5091 		rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
5092 		if (rc) {
5093 			dev_err(rdev_to_dev(rdev), "Alloc MR failed!\n");
5094 			goto fail;
5095 		}
5096 		/* The fixed portion of the rkey is the same as the lkey */
5097 		mr->ib_mr.rkey = mr->qplib_mr.rkey;
5098 	}
5099 
5100 	umem = ib_umem_get_flags_compat(rdev, ib_pd->uobject->context,
5101 					udata, start, length,
5102 					mr_access_flags, 0);
5103 	if (IS_ERR(umem)) {
5104 		rc = PTR_ERR(umem);
5105 		dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed! rc = %d\n",
5106 			__func__, rc);
5107 		goto free_mr;
5108 	}
5109 	mr->ib_umem = umem;
5110 
5111 	mr->qplib_mr.va = virt_addr;
5112 	umem_pgs = ib_umem_num_pages_compat(umem);
5113 	if (!umem_pgs) {
5114 		dev_err(rdev_to_dev(rdev), "umem is invalid!\n");
5115 		rc = -EINVAL;
5116 		goto free_umem;
5117 	}
5118 	mr->qplib_mr.total_size = length;
5119 	page_shift = bnxt_re_get_page_shift(umem, virt_addr, start,
5120 					    rdev->dev_attr->page_size_cap);
5121 	if (!bnxt_re_page_size_ok(page_shift)) {
5122 		dev_err(rdev_to_dev(rdev), "umem page size unsupported!\n");
5123 		rc = -EFAULT;
5124 		goto free_umem;
5125 	}
5126 	npages = bnxt_re_get_num_pages(umem, start, length, page_shift);
5127 
5128 	/* Map umem buf ptrs to the PBL */
5129 	mrinfo.sg.npages = npages;
5130 	mrinfo.sg.sghead = get_ib_umem_sgl(umem, &mrinfo.sg.nmap);
5131 	mrinfo.sg.pgshft = page_shift;
5132 	mrinfo.sg.pgsize = BIT(page_shift);
5133 
5134 	mrinfo.mrw = &mr->qplib_mr;
5135 
5136 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
5137 	if (rc) {
5138 		dev_err(rdev_to_dev(rdev), "Reg user MR failed!\n");
5139 		goto free_umem;
5140 	}
5141 
5142 	mr->ib_mr.lkey = mr->ib_mr.rkey = mr->qplib_mr.lkey;
5143 	atomic_inc(&rdev->stats.rsors.mr_count);
5144 	max_mr_count =  atomic_read(&rdev->stats.rsors.mr_count);
5145 	if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count))
5146 		atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
5147 
5148 	return &mr->ib_mr;
5149 
5150 free_umem:
5151 	bnxt_re_peer_mem_release(mr->ib_umem);
5152 free_mr:
5153 	if (!_is_alloc_mr_unified(rdev->qplib_res.dattr))
5154 		bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
5155 fail:
5156 	kfree(mr);
5157 	return ERR_PTR(rc);
5158 }
5159 
5160 int
bnxt_re_rereg_user_mr(struct ib_mr * ib_mr,int flags,u64 start,u64 length,u64 virt_addr,int mr_access_flags,struct ib_pd * ib_pd,struct ib_udata * udata)5161 bnxt_re_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 length,
5162 		      u64 virt_addr, int mr_access_flags,
5163 		      struct ib_pd *ib_pd, struct ib_udata *udata)
5164 {
5165 	struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr);
5166 	struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
5167 	int umem_pgs = 0, page_shift = PAGE_SHIFT, rc;
5168 	struct bnxt_re_dev *rdev = mr->rdev;
5169 	struct bnxt_qplib_mrinfo mrinfo;
5170 	struct ib_umem *umem;
5171 	u32 npages;
5172 
5173 	/* TODO: Must decipher what to modify based on the flags */
5174 	memset(&mrinfo, 0, sizeof(mrinfo));
5175 	if (flags & IB_MR_REREG_TRANS) {
5176 		umem = ib_umem_get_flags_compat(rdev, ib_pd->uobject->context,
5177 						udata, start, length,
5178 						mr_access_flags, 0);
5179 		if (IS_ERR(umem)) {
5180 			rc = PTR_ERR(umem);
5181 			dev_err(rdev_to_dev(rdev),
5182 				"%s: ib_umem_get failed! ret =  %d\n",
5183 				__func__, rc);
5184 			goto fail;
5185 		}
5186 		mr->ib_umem = umem;
5187 
5188 		mr->qplib_mr.va = virt_addr;
5189 		umem_pgs = ib_umem_num_pages_compat(umem);
5190 		if (!umem_pgs) {
5191 			dev_err(rdev_to_dev(rdev), "umem is invalid!\n");
5192 			rc = -EINVAL;
5193 			goto fail_free_umem;
5194 		}
5195 		mr->qplib_mr.total_size = length;
5196 		page_shift = bnxt_re_get_page_shift(umem, virt_addr, start,
5197 					    rdev->dev_attr->page_size_cap);
5198 		if (!bnxt_re_page_size_ok(page_shift)) {
5199 			dev_err(rdev_to_dev(rdev),
5200 				"umem page size unsupported!\n");
5201 			rc = -EFAULT;
5202 			goto fail_free_umem;
5203 		}
5204 		npages = bnxt_re_get_num_pages(umem, start, length, page_shift);
5205 		/* Map umem buf ptrs to the PBL */
5206 		mrinfo.sg.npages = npages;
5207 		mrinfo.sg.sghead = get_ib_umem_sgl(umem, &mrinfo.sg.nmap);
5208 		mrinfo.sg.pgshft = page_shift;
5209 		mrinfo.sg.pgsize = BIT(page_shift);
5210 	}
5211 
5212 	mrinfo.mrw = &mr->qplib_mr;
5213 	if (flags & IB_MR_REREG_PD)
5214 		mr->qplib_mr.pd = &pd->qplib_pd;
5215 
5216 	if (flags & IB_MR_REREG_ACCESS)
5217 		mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
5218 
5219 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
5220 	if (rc) {
5221 		dev_err(rdev_to_dev(rdev), "Rereg user MR failed!\n");
5222 		goto fail_free_umem;
5223 	}
5224 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
5225 
5226 	return 0;
5227 
5228 fail_free_umem:
5229 	bnxt_re_peer_mem_release(mr->ib_umem);
5230 fail:
5231 	return rc;
5232 }
5233 
bnxt_re_check_abi_version(struct bnxt_re_dev * rdev)5234 static int bnxt_re_check_abi_version(struct bnxt_re_dev *rdev)
5235 {
5236 	struct ib_device *ibdev = &rdev->ibdev;
5237 	u32 uverbs_abi_ver;
5238 
5239 	uverbs_abi_ver = GET_UVERBS_ABI_VERSION(ibdev);
5240 	dev_dbg(rdev_to_dev(rdev), "ABI version requested %d\n",
5241 		uverbs_abi_ver);
5242 	if (uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
5243 		dev_dbg(rdev_to_dev(rdev), " is different from the device %d \n",
5244 			BNXT_RE_ABI_VERSION);
5245 		return -EPERM;
5246 	}
5247 	return 0;
5248 }
5249 
bnxt_re_alloc_ucontext(struct ib_ucontext * uctx_in,struct ib_udata * udata)5250 int bnxt_re_alloc_ucontext(struct ib_ucontext *uctx_in,
5251 			   struct ib_udata *udata)
5252 {
5253 	struct ib_ucontext *ctx = uctx_in;
5254 	struct ib_device *ibdev = ctx->device;
5255 	struct bnxt_re_ucontext *uctx =
5256 		container_of(ctx, struct bnxt_re_ucontext, ibucontext);
5257 
5258 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
5259 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
5260 	struct bnxt_re_uctx_resp resp = {};
5261 	struct bnxt_re_uctx_req ureq = {};
5262 	struct bnxt_qplib_chip_ctx *cctx;
5263 	u32 chip_met_rev_num;
5264 	bool genp5 = false;
5265 	int rc;
5266 
5267 	cctx = rdev->chip_ctx;
5268 	rc = bnxt_re_check_abi_version(rdev);
5269 	if (rc)
5270 		goto fail;
5271 
5272 	uctx->rdev = rdev;
5273 	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
5274 	if (!uctx->shpg) {
5275 		dev_err(rdev_to_dev(rdev), "shared memory allocation failed!\n");
5276 		rc = -ENOMEM;
5277 		goto fail;
5278 	}
5279 	spin_lock_init(&uctx->sh_lock);
5280 	if (BNXT_RE_ABI_VERSION >= 4) {
5281 		chip_met_rev_num = cctx->chip_num;
5282 		chip_met_rev_num |= ((u32)cctx->chip_rev & 0xFF) <<
5283 				     BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
5284 		chip_met_rev_num |= ((u32)cctx->chip_metal & 0xFF) <<
5285 				     BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
5286 		resp.chip_id0 = chip_met_rev_num;
5287 		resp.chip_id1 = 0; /* future extension of chip info */
5288 	}
5289 
5290 	if (BNXT_RE_ABI_VERSION != 4) {
5291 		/*Temp, Use idr_alloc instead*/
5292 		resp.dev_id = rdev->en_dev->pdev->devfn;
5293 		resp.max_qp = rdev->qplib_res.hctx->qp_ctx.max;
5294 	}
5295 
5296 	genp5 = _is_chip_gen_p5_p7(cctx);
5297 	if (BNXT_RE_ABI_VERSION > 5) {
5298 		resp.modes = genp5 ? cctx->modes.wqe_mode : 0;
5299 		if (rdev->dev_attr && BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
5300 			resp.comp_mask = BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED;
5301 	}
5302 
5303 	resp.pg_size = PAGE_SIZE;
5304 	resp.cqe_sz = sizeof(struct cq_base);
5305 	resp.max_cqd = dev_attr->max_cq_wqes;
5306 	if (genp5 && cctx->modes.db_push) {
5307 		resp.comp_mask |= BNXT_RE_COMP_MASK_UCNTX_WC_DPI_ENABLED;
5308 		if (_is_chip_p7(cctx) &&
5309 		    !(dev_attr->dev_cap_flags &
5310 		      CREQ_QUERY_FUNC_RESP_SB_PINGPONG_PUSH_MODE))
5311 			resp.comp_mask &=
5312 				~BNXT_RE_COMP_MASK_UCNTX_WC_DPI_ENABLED;
5313 	}
5314 
5315 	resp.comp_mask |= BNXT_RE_COMP_MASK_UCNTX_MQP_EX_SUPPORTED;
5316 
5317 	if (rdev->dbr_pacing)
5318 		resp.comp_mask |= BNXT_RE_COMP_MASK_UCNTX_DBR_PACING_ENABLED;
5319 
5320 	if (rdev->dbr_drop_recov && rdev->user_dbr_drop_recov)
5321 		resp.comp_mask |= BNXT_RE_COMP_MASK_UCNTX_DBR_RECOVERY_ENABLED;
5322 
5323 	if (udata->inlen >= sizeof(ureq)) {
5324 		rc = ib_copy_from_udata(&ureq, udata,
5325 					min(udata->inlen, sizeof(ureq)));
5326 		if (rc)
5327 			goto cfail;
5328 		if (bnxt_re_init_pow2_flag(&ureq, &resp))
5329 			dev_warn(rdev_to_dev(rdev),
5330 				 "Enabled roundup logic. Library bug?\n");
5331 		if (bnxt_re_init_rsvd_wqe_flag(&ureq, &resp, genp5))
5332 			dev_warn(rdev_to_dev(rdev),
5333 				 "Rsvd wqe in use! Try the updated library.\n");
5334 	} else {
5335 		dev_warn(rdev_to_dev(rdev),
5336 			 "Enabled roundup logic. Update the library!\n");
5337 		resp.comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED;
5338 
5339 		dev_warn(rdev_to_dev(rdev),
5340 			 "Rsvd wqe in use. Update the library!\n");
5341 		resp.comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED;
5342 	}
5343 
5344 	uctx->cmask = (uint64_t)resp.comp_mask;
5345 	rc = bnxt_re_copy_to_udata(rdev, &resp,
5346 				   min(udata->outlen, sizeof(resp)),
5347 				   udata);
5348 	if (rc)
5349 		goto cfail;
5350 
5351 	INIT_LIST_HEAD(&uctx->cq_list);
5352 	mutex_init(&uctx->cq_lock);
5353 
5354 	return 0;
5355 cfail:
5356 	free_page((u64)uctx->shpg);
5357 	uctx->shpg = NULL;
5358 fail:
5359 	return rc;
5360 }
5361 
bnxt_re_dealloc_ucontext(struct ib_ucontext * ib_uctx)5362 void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
5363 {
5364 	struct bnxt_re_ucontext *uctx = to_bnxt_re(ib_uctx,
5365 						   struct bnxt_re_ucontext,
5366 						   ibucontext);
5367 	struct bnxt_re_dev *rdev = uctx->rdev;
5368 	int rc = 0;
5369 
5370 	if (uctx->shpg)
5371 		free_page((u64)uctx->shpg);
5372 
5373 	if (uctx->dpi.dbr) {
5374 		/* Free DPI only if this is the first PD allocated by the
5375 		 * application and mark the context dpi as NULL
5376 		 */
5377 		if (_is_chip_gen_p5_p7(rdev->chip_ctx) && uctx->wcdpi.dbr) {
5378 			rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
5379 						    &uctx->wcdpi);
5380 			if (rc)
5381 				dev_err(rdev_to_dev(rdev),
5382 						"dealloc push dp failed\n");
5383 			uctx->wcdpi.dbr = NULL;
5384 		}
5385 
5386 		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
5387 					    &uctx->dpi);
5388 		if (rc)
5389 			dev_err(rdev_to_dev(rdev), "Deallocte HW DPI failed!\n");
5390 			/* Don't fail, continue*/
5391 		uctx->dpi.dbr = NULL;
5392 	}
5393 	return;
5394 }
5395 
is_bnxt_re_cq_page(struct bnxt_re_ucontext * uctx,u64 pg_off)5396 static struct bnxt_re_cq *is_bnxt_re_cq_page(struct bnxt_re_ucontext *uctx,
5397 				      u64 pg_off)
5398 {
5399 	struct bnxt_re_cq *cq = NULL, *tmp_cq;
5400 
5401 	if (!_is_chip_p7(uctx->rdev->chip_ctx))
5402 		return NULL;
5403 
5404 	mutex_lock(&uctx->cq_lock);
5405 	list_for_each_entry(tmp_cq, &uctx->cq_list, cq_list) {
5406 		if (((u64)tmp_cq->uctx_cq_page >> PAGE_SHIFT) == pg_off) {
5407 			cq = tmp_cq;
5408 			break;
5409 		}
5410 	}
5411 	mutex_unlock(&uctx->cq_lock);
5412 	return cq;
5413 }
5414 
5415 /* Helper function to mmap the virtual memory from user app */
bnxt_re_mmap(struct ib_ucontext * ib_uctx,struct vm_area_struct * vma)5416 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
5417 {
5418 	struct bnxt_re_ucontext *uctx = to_bnxt_re(ib_uctx,
5419 						   struct bnxt_re_ucontext,
5420 						   ibucontext);
5421 	struct bnxt_re_dev *rdev = uctx->rdev;
5422 	struct bnxt_re_cq *cq = NULL;
5423 	int rc = 0;
5424 	u64 pfn;
5425 
5426 	switch (vma->vm_pgoff) {
5427 	case BNXT_RE_MAP_SH_PAGE:
5428 		pfn = vtophys(uctx->shpg) >> PAGE_SHIFT;
5429 		return rdma_user_mmap_io(&uctx->ibucontext, vma, pfn, PAGE_SIZE, vma->vm_page_prot, NULL);
5430 		dev_dbg(rdev_to_dev(rdev), "%s:%d uctx->shpg 0x%lx, vtophys(uctx->shpg) 0x%lx, pfn = 0x%lx \n",
5431 				__func__, __LINE__, (u64) uctx->shpg, vtophys(uctx->shpg), pfn);
5432 		if (rc) {
5433 			dev_err(rdev_to_dev(rdev), "Shared page mapping failed!\n");
5434 			rc = -EAGAIN;
5435 		}
5436 		return rc;
5437 	case BNXT_RE_MAP_WC:
5438 		vma->vm_page_prot =
5439 			pgprot_writecombine(vma->vm_page_prot);
5440 		pfn = (uctx->wcdpi.umdbr >> PAGE_SHIFT);
5441 		if (!pfn)
5442 			return -EFAULT;
5443 		break;
5444 	case BNXT_RE_DBR_PAGE:
5445 		/* Driver doesn't expect write access request */
5446 		if (vma->vm_flags & VM_WRITE)
5447 			return -EFAULT;
5448 
5449 		pfn = vtophys(rdev->dbr_page) >> PAGE_SHIFT;
5450 		if (!pfn)
5451 			return -EFAULT;
5452 		break;
5453 	case BNXT_RE_MAP_DB_RECOVERY_PAGE:
5454 		pfn = vtophys(uctx->dbr_recov_cq_page) >> PAGE_SHIFT;
5455 		if (!pfn)
5456 			return -EFAULT;
5457 		break;
5458 	default:
5459 		cq = is_bnxt_re_cq_page(uctx, vma->vm_pgoff);
5460 		if (cq) {
5461 			pfn = vtophys((void *)cq->uctx_cq_page) >> PAGE_SHIFT;
5462 			rc = rdma_user_mmap_io(&uctx->ibucontext, vma, pfn, PAGE_SIZE, vma->vm_page_prot, NULL);
5463 			if (rc) {
5464 				dev_err(rdev_to_dev(rdev),
5465 					"CQ page mapping failed!\n");
5466 				rc = -EAGAIN;
5467 			}
5468 			goto out;
5469 		} else {
5470 			vma->vm_page_prot =
5471 				pgprot_noncached(vma->vm_page_prot);
5472 			pfn = vma->vm_pgoff;
5473 		}
5474 		break;
5475 	}
5476 
5477 	rc = rdma_user_mmap_io(&uctx->ibucontext, vma, pfn, PAGE_SIZE, vma->vm_page_prot, NULL);
5478 	if (rc) {
5479 		dev_err(rdev_to_dev(rdev), "DPI mapping failed!\n");
5480 		return -EAGAIN;
5481 	}
5482 	rc = __bnxt_re_set_vma_data(uctx, vma);
5483 out:
5484 	return rc;
5485 }
5486 
bnxt_re_process_mad(struct ib_device * ibdev,int mad_flags,u8 port_num,const struct ib_wc * wc,const struct ib_grh * grh,const struct ib_mad_hdr * in_mad,size_t in_mad_size,struct ib_mad_hdr * out_mad,size_t * out_mad_size,u16 * out_mad_pkey_index)5487 int bnxt_re_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
5488 			const struct ib_wc *wc, const struct ib_grh *grh,
5489 			const struct ib_mad_hdr *in_mad, size_t in_mad_size,
5490 			struct ib_mad_hdr *out_mad, size_t *out_mad_size,
5491 			u16 *out_mad_pkey_index)
5492 {
5493 	return IB_MAD_RESULT_SUCCESS;
5494 }
5495 
bnxt_re_disassociate_ucntx(struct ib_ucontext * ib_uctx)5496 void bnxt_re_disassociate_ucntx(struct ib_ucontext *ib_uctx)
5497 {
5498 }
5499