xref: /freebsd/sys/dev/irdma/irdma_kcompat.c (revision 81ad6265)
1 /*-
2  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
3  *
4  * Copyright (c) 2018 - 2022 Intel Corporation
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenFabrics.org BSD license below:
11  *
12  *   Redistribution and use in source and binary forms, with or
13  *   without modification, are permitted provided that the following
14  *   conditions are met:
15  *
16  *    - Redistributions of source code must retain the above
17  *	copyright notice, this list of conditions and the following
18  *	disclaimer.
19  *
20  *    - Redistributions in binary form must reproduce the above
21  *	copyright notice, this list of conditions and the following
22  *	disclaimer in the documentation and/or other materials
23  *	provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 /*$FreeBSD$*/
35 
36 #include "irdma_main.h"
37 
38 #define IRDMA_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
39 
40 static u16 kc_rdma_flow_label_to_udp_sport(u32 fl) {
41 	u32 fl_low = fl & 0x03FFF;
42 	u32 fl_high = fl & 0xFC000;
43 
44 	fl_low ^= fl_high >> 14;
45 
46 	return (u16)(fl_low | IRDMA_ROCE_UDP_ENCAP_VALID_PORT_MIN);
47 }
48 
49 #define IRDMA_GRH_FLOWLABEL_MASK (0x000FFFFF)
50 
51 static u32 kc_rdma_calc_flow_label(u32 lqpn, u32 rqpn) {
52 	u64 fl = (u64)lqpn * rqpn;
53 
54 	fl ^= fl >> 20;
55 	fl ^= fl >> 40;
56 
57 	return (u32)(fl & IRDMA_GRH_FLOWLABEL_MASK);
58 }
59 
60 u16
61 kc_rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
62 {
63 	if (!fl)
64 		fl = kc_rdma_calc_flow_label(lqpn, rqpn);
65 	return kc_rdma_flow_label_to_udp_sport(fl);
66 }
67 
68 void
69 irdma_get_dev_fw_str(struct ib_device *dev,
70 		     char *str,
71 		     size_t str_len)
72 {
73 	struct irdma_device *iwdev = to_iwdev(dev);
74 
75 	snprintf(str, str_len, "%u.%u",
76 		 irdma_fw_major_ver(&iwdev->rf->sc_dev),
77 		 irdma_fw_minor_ver(&iwdev->rf->sc_dev));
78 }
79 
/**
 * irdma_add_gid - GID table add callback (no-op)
 * @device: ib device
 * @port_num: port number
 * @index: GID table index
 * @gid: GID being added
 * @attr: GID attributes
 * @context: per-entry driver context (unused)
 *
 * Nothing is stored per GID entry; the callback exists only to
 * satisfy the verbs interface and always succeeds.
 */
int
irdma_add_gid(struct ib_device *device,
	      u8 port_num,
	      unsigned int index,
	      const union ib_gid *gid,
	      const struct ib_gid_attr *attr,
	      void **context)
{
	return 0;
}
90 
/**
 * irdma_del_gid - GID table delete callback (no-op)
 * @device: ib device
 * @port_num: port number
 * @index: GID table index
 * @context: per-entry driver context (unused)
 *
 * Counterpart of the no-op add callback; nothing to release, so it
 * always succeeds.
 */
int
irdma_del_gid(struct ib_device *device,
	      u8 port_num,
	      unsigned int index,
	      void **context)
{
	return 0;
}
99 
#if __FreeBSD_version >= 1400026
/**
 * irdma_alloc_mr - register a stag for fast memory registration
 * @pd: ibpd pointer
 * @mr_type: memory registration type (not consulted here)
 * @max_num_sg: max number of pages the MR may cover
 * @udata: user data
 *
 * Returns the new ib_mr on success or an ERR_PTR on failure.
 */
struct ib_mr *
irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
	       u32 max_num_sg, struct ib_udata *udata)
{
#else
/**
 * irdma_alloc_mr - register a stag for fast memory registration
 * @pd: ibpd pointer
 * @mr_type: memory registration type (not consulted here)
 * @max_num_sg: max number of pages the MR may cover
 *
 * Returns the new ib_mr on success or an ERR_PTR on failure.
 */
struct ib_mr *
irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
	       u32 max_num_sg)
{
#endif
	struct irdma_device *iwdev = to_iwdev(pd->device);
	struct irdma_pble_alloc *palloc;
	struct irdma_pbl *iwpbl;
	struct irdma_mr *iwmr;
	int status;
	u32 stag;
	int err_code = -ENOMEM;

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr)
		return ERR_PTR(-ENOMEM);

	/* Reserve a device stag index; 0 signals the pool is exhausted. */
	stag = irdma_create_stag(iwdev);
	if (!stag) {
		err_code = -ENOMEM;
		goto err;
	}

	/* The stag doubles as both local and remote key. */
	iwmr->stag = stag;
	iwmr->ibmr.rkey = stag;
	iwmr->ibmr.lkey = stag;
	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;
	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
	palloc = &iwpbl->pble_alloc;
	iwmr->page_cnt = max_num_sg;
	/* Assume system PAGE_SIZE as the sg page sizes are unknown. */
	iwmr->len = max_num_sg * PAGE_SIZE;
	/* Pre-allocate the physical buffer list entries backing the MR.
	 * NOTE(review): a failure here reports -ENOMEM rather than the
	 * status value — confirm intentional. */
	status = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
				false);
	if (status)
		goto err_get_pble;

	err_code = irdma_hw_alloc_stag(iwdev, iwmr);
	if (err_code)
		goto err_alloc_stag;

	iwpbl->pbl_allocated = true;

	return &iwmr->ibmr;
	/* Unwind in reverse order of acquisition; labels fall through. */
err_alloc_stag:
	irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
err_get_pble:
	irdma_free_stag(iwdev, stag);
err:
	kfree(iwmr);

	return ERR_PTR(err_code);
}
175 
#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
#if __FreeBSD_version >= 1400026
/**
 * irdma_alloc_ucontext - Allocate the user context data structure
 * @uctx: context
 * @udata: user data
 *
 * This keeps track of all objects associated with a particular
 * user-mode client.  Returns 0 on success or a negative errno.
 */
int
irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	struct irdma_device *iwdev = to_iwdev(ibdev);
	struct irdma_alloc_ucontext_req req = {0};
	struct irdma_alloc_ucontext_resp uresp = {0};
	struct irdma_ucontext *ucontext = to_ucontext(uctx);
	struct irdma_uk_attrs *uk_attrs;

	/* Reject userspace buffers smaller than the ABI minimum. */
	if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
	    udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
		return -EINVAL;

	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
		return -EINVAL;

	/* Only ABI versions 4 through IRDMA_ABI_VER are supported. */
	if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
		goto ver_error;

	ucontext->iwdev = iwdev;
	ucontext->abi_ver = req.userspace_ver;

	uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
	/* GEN_1 support for libi40iw */
	if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
		/* Minimum-size response implies the legacy library. */
		if (uk_attrs->hw_rev != IRDMA_GEN_1)
			return -EOPNOTSUPP;

		ucontext->legacy_mode = true;
		uresp.max_qps = iwdev->rf->max_qp;
		uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
		uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
		uresp.kernel_ver = req.userspace_ver;
		if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen)))
			return -EFAULT;
	} else {
		u64 bar_off;

		/* Current ABI: report the full set of device attributes. */
		uresp.kernel_ver = IRDMA_ABI_VER;
		uresp.feature_flags = uk_attrs->feature_flags;
		uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
		uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
		uresp.max_hw_inline = uk_attrs->max_hw_inline;
		uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
		uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
		uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
		uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
		uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
		uresp.hw_rev = uk_attrs->hw_rev;

		/* Expose the doorbell register region to userspace via mmap. */
		bar_off =
		    (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
		ucontext->db_mmap_entry =
		    irdma_user_mmap_entry_insert(ucontext, bar_off,
						 IRDMA_MMAP_IO_NC,
						 &uresp.db_mmap_key);
		if (!ucontext->db_mmap_entry) {
			return -ENOMEM;
		}

		if (ib_copy_to_udata(udata, &uresp,
				     min(sizeof(uresp), udata->outlen))) {
			rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
			return -EFAULT;
		}
	}

	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
	spin_lock_init(&ucontext->qp_reg_mem_list_lock);
	INIT_LIST_HEAD(&ucontext->vma_list);
	mutex_init(&ucontext->vma_list_mutex);

	return 0;

ver_error:
	irdma_dev_err(&iwdev->rf->sc_dev,
		      "Invalid userspace driver version detected. Detected version %d, should be %d\n",
		      req.userspace_ver, IRDMA_ABI_VER);
	return -EINVAL;
}
#endif
271 
#if __FreeBSD_version < 1400026
/**
 * irdma_alloc_ucontext - Allocate the user context data structure
 * @ibdev: ib device pointer
 * @udata: user data
 *
 * This keeps track of all objects associated with a particular
 * user-mode client.  On this older verbs ABI the context is allocated
 * here and returned; returns an ERR_PTR on failure.
 */
struct ib_ucontext *
irdma_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
{
	struct irdma_device *iwdev = to_iwdev(ibdev);
	struct irdma_alloc_ucontext_req req = {0};
	struct irdma_alloc_ucontext_resp uresp = {0};
	struct irdma_ucontext *ucontext;
	struct irdma_uk_attrs *uk_attrs;

	/* Reject userspace buffers smaller than the ABI minimum. */
	if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
	    udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
		return ERR_PTR(-EINVAL);

	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
		return ERR_PTR(-EINVAL);

	/* Only ABI versions 4 through IRDMA_ABI_VER are supported. */
	if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
		goto ver_error;

	ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
	if (!ucontext)
		return ERR_PTR(-ENOMEM);

	ucontext->iwdev = iwdev;
	ucontext->abi_ver = req.userspace_ver;

	uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
	/* GEN_1 legacy support with libi40iw */
	if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
		/* Minimum-size response implies the legacy library. */
		if (uk_attrs->hw_rev != IRDMA_GEN_1) {
			kfree(ucontext);
			return ERR_PTR(-EOPNOTSUPP);
		}

		ucontext->legacy_mode = true;
		uresp.max_qps = iwdev->rf->max_qp;
		uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
		uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
		uresp.kernel_ver = req.userspace_ver;
		if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen))) {
			kfree(ucontext);
			return ERR_PTR(-EFAULT);
		}
	} else {
		u64 bar_off;

		/* Current ABI: report the full set of device attributes. */
		uresp.kernel_ver = IRDMA_ABI_VER;
		uresp.feature_flags = uk_attrs->feature_flags;
		uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
		uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
		uresp.max_hw_inline = uk_attrs->max_hw_inline;
		uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
		uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
		uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
		uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
		uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
		uresp.hw_rev = uk_attrs->hw_rev;

		/* Expose the doorbell register region to userspace via mmap. */
		bar_off =
		    (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];

		spin_lock_init(&ucontext->mmap_tbl_lock);
		ucontext->db_mmap_entry =
		    irdma_user_mmap_entry_add_hash(ucontext, bar_off,
						   IRDMA_MMAP_IO_NC,
						   &uresp.db_mmap_key);
		if (!ucontext->db_mmap_entry) {
			spin_lock_destroy(&ucontext->mmap_tbl_lock);
			kfree(ucontext);
			return ERR_PTR(-ENOMEM);
		}

		if (ib_copy_to_udata(udata, &uresp,
				     min(sizeof(uresp), udata->outlen))) {
			irdma_user_mmap_entry_del_hash(ucontext->db_mmap_entry);
			spin_lock_destroy(&ucontext->mmap_tbl_lock);
			kfree(ucontext);
			return ERR_PTR(-EFAULT);
		}
	}

	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
	spin_lock_init(&ucontext->qp_reg_mem_list_lock);
	INIT_LIST_HEAD(&ucontext->vma_list);
	mutex_init(&ucontext->vma_list_mutex);

	return &ucontext->ibucontext;

ver_error:
	ibdev_err(&iwdev->ibdev,
		  "Invalid userspace driver version detected. Detected version %d, should be %d\n",
		  req.userspace_ver, IRDMA_ABI_VER);
	return ERR_PTR(-EINVAL);
}
#endif
378 
#if __FreeBSD_version >= 1400026
/**
 * irdma_dealloc_ucontext - deallocate the user context data structure
 * @context: user context created during alloc
 *
 * The context memory itself is owned by the core on this ABI; only the
 * doorbell mmap entry needs to be torn down.
 */
void
irdma_dealloc_ucontext(struct ib_ucontext *context)
{
	rdma_user_mmap_entry_remove(to_ucontext(context)->db_mmap_entry);
}
#endif
394 
395 #if __FreeBSD_version < 1400026
396 /**
397  * irdma_dealloc_ucontext - deallocate the user context data structure
398  * @context: user context created during alloc
399  */
400 int
401 irdma_dealloc_ucontext(struct ib_ucontext *context)
402 {
403 	struct irdma_ucontext *ucontext = to_ucontext(context);
404 
405 	irdma_user_mmap_entry_del_hash(ucontext->db_mmap_entry);
406 	spin_lock_destroy(&ucontext->mmap_tbl_lock);
407 	kfree(ucontext);
408 
409 	return 0;
410 }
411 #endif
412 
#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
#if __FreeBSD_version >= 1400026
/**
 * irdma_alloc_pd - allocate protection domain
 * @pd: protection domain
 * @udata: user data
 *
 * Returns 0 on success or a negative errno.
 */
int
irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct irdma_pd *iwpd = to_iwpd(pd);
	struct irdma_device *iwdev = to_iwdev(pd->device);
	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
	struct irdma_pci_f *rf = iwdev->rf;
	struct irdma_alloc_pd_resp uresp = {0};
	struct irdma_sc_pd *sc_pd;
	u32 pd_id = 0;
	int err;

	/* User callers must provide at least the minimum response buffer. */
	if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
		return -EINVAL;

	/* Reserve a PD id from the device-wide resource bitmap. */
	err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
			       &rf->next_pd);
	if (err)
		return err;

	sc_pd = &iwpd->sc_pd;
	if (udata) {
		struct irdma_ucontext *ucontext =
		rdma_udata_to_drv_context(udata, struct irdma_ucontext,
					  ibucontext);

		/* User PDs follow the caller's negotiated ABI version. */
		irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
		uresp.pd_id = pd_id;
		if (ib_copy_to_udata(udata, &uresp,
				     min(sizeof(uresp), udata->outlen))) {
			err = -EFAULT;
			goto error;
		}
	} else {
		/* Kernel PDs always use the current ABI. */
		irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
	}

	return 0;

error:

	irdma_free_rsrc(rf, rf->allocated_pds, pd_id);

	return err;
}
#endif
466 
#if __FreeBSD_version < 1400026
/**
 * irdma_alloc_pd - allocate protection domain
 * @ibdev: IB device
 * @context: user context
 * @udata: user data
 *
 * On this older verbs ABI the PD is allocated here and returned;
 * returns an ERR_PTR on failure.
 */
struct ib_pd *
irdma_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata)
{
	struct irdma_pd *iwpd;
	struct irdma_device *iwdev = to_iwdev(ibdev);
	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
	struct irdma_pci_f *rf = iwdev->rf;
	struct irdma_alloc_pd_resp uresp = {0};
	struct irdma_sc_pd *sc_pd;
	u32 pd_id = 0;
	int err;

	/* Reserve a PD id from the device-wide resource bitmap. */
	err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
			       &rf->next_pd);
	if (err)
		return ERR_PTR(err);

	iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
	if (!iwpd) {
		err = -ENOMEM;
		goto free_res;
	}

	sc_pd = &iwpd->sc_pd;
	if (udata) {
		struct irdma_ucontext *ucontext = to_ucontext(context);

		/* User PDs follow the caller's negotiated ABI version. */
		irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
		uresp.pd_id = pd_id;
		if (ib_copy_to_udata(udata, &uresp,
				     min(sizeof(uresp), udata->outlen))) {
			err = -EFAULT;
			goto error;
		}
	} else {
		/* Kernel PDs always use the current ABI. */
		irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
	}

	return &iwpd->ibpd;

	/* Unwind in reverse order of acquisition; labels fall through. */
error:
	kfree(iwpd);
free_res:

	irdma_free_rsrc(rf, rf->allocated_pds, pd_id);

	return ERR_PTR(err);
}

#endif
524 
#if __FreeBSD_version >= 1400026
/**
 * irdma_dealloc_pd - release a protection domain's device resources
 * @ibpd: PD being destroyed
 * @udata: user data (unused)
 *
 * The PD structure itself is owned by the core on this ABI; only the
 * PD id is returned to the device-wide resource bitmap.
 */
void
irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct irdma_pci_f *rf = to_iwdev(ibpd->device)->rf;

	irdma_free_rsrc(rf, rf->allocated_pds, to_iwpd(ibpd)->sc_pd.pd_id);
}

#endif
536 
537 #if __FreeBSD_version < 1400026
538 int
539 irdma_dealloc_pd(struct ib_pd *ibpd)
540 {
541 	struct irdma_pd *iwpd = to_iwpd(ibpd);
542 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
543 
544 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
545 	kfree(iwpd);
546 	return 0;
547 }
548 #endif
549 
/**
 * irdma_fill_ah_info - fill AH address info from resolved GID sockaddrs
 * @vnet: vnet for the IPv4 loopback lookup (used only under VIMAGE)
 * @ah_info: AH info to populate with source/destination addresses
 * @sgid_attr: source GID attributes (not consulted in this helper)
 * @sgid_addr: source address derived from the source GID
 * @dgid_addr: destination address derived from the destination GID
 * @dmac: destination MAC; overwritten with the mapped multicast MAC
 *	  when the destination address is multicast
 * @net_type: RDMA network type selecting IPv4 vs IPv6 handling
 */
static void
irdma_fill_ah_info(struct vnet *vnet, struct irdma_ah_info *ah_info,
		   const struct ib_gid_attr *sgid_attr,
		   struct sockaddr *sgid_addr, struct sockaddr *dgid_addr,
		   u8 *dmac, u8 net_type)
{
	if (net_type == RDMA_NETWORK_IPV4) {
		ah_info->ipv4_valid = true;
		/* Addresses are stored host-order in ah_info. */
		ah_info->dest_ip_addr[0] =
		    ntohl(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr);
		ah_info->src_ip_addr[0] =
		    ntohl(((struct sockaddr_in *)sgid_addr)->sin_addr.s_addr);
#ifdef VIMAGE
		/* The loopback check runs in the netdev's vnet context. */
		CURVNET_SET_QUIET(vnet);
		ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
						     ah_info->dest_ip_addr[0]);
		CURVNET_RESTORE();
#endif
		if (ipv4_is_multicast(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr)) {
			irdma_mcast_mac_v4(ah_info->dest_ip_addr, dmac);
		}
	} else {
		irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
				    ((struct sockaddr_in6 *)dgid_addr)->sin6_addr.__u6_addr.__u6_addr32);
		irdma_copy_ip_ntohl(ah_info->src_ip_addr,
				    ((struct sockaddr_in6 *)sgid_addr)->sin6_addr.__u6_addr.__u6_addr32);
		/* NOTE(review): unlike the IPv4 path, no vnet is pinned
		 * around the IPv6 loopback check — confirm intentional. */
		ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
						     ah_info->dest_ip_addr);
		if (rdma_is_multicast_addr(&((struct sockaddr_in6 *)dgid_addr)->sin6_addr)) {
			irdma_mcast_mac_v6(ah_info->dest_ip_addr, dmac);
		}
	}
}
583 
/**
 * irdma_create_ah_vlan_tag - resolve VLAN tag and ARP entry for an AH
 * @iwdev: irdma device
 * @ah_info: AH info holding the destination IP and traffic class
 * @sgid_attr: source GID attributes; its ndev supplies the VLAN id
 * @dmac: destination MAC for the ARP entry
 *
 * Returns 0 on success or -EINVAL if no ARP entry could be added.
 */
static int
irdma_create_ah_vlan_tag(struct irdma_device *iwdev,
			 struct irdma_ah_info *ah_info,
			 const struct ib_gid_attr *sgid_attr,
			 u8 *dmac)
{
	/* VLAN_N_VID acts as the "no VLAN" sentinel. */
	if (sgid_attr->ndev && is_vlan_dev(sgid_attr->ndev))
		ah_info->vlan_tag = vlan_dev_vlan_id(sgid_attr->ndev);
	else
		ah_info->vlan_tag = VLAN_N_VID;

	ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr, dmac);

	if (ah_info->dst_arpindex == -1)
		return -EINVAL;

	/* DCB VLAN mode requires a tag; fall back to priority-only tag 0. */
	if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
		ah_info->vlan_tag = 0;

	if (ah_info->vlan_tag < VLAN_N_VID) {
		ah_info->insert_vlan_tag = true;
		/* Fold the 802.1p priority derived from ToS into the tag. */
		ah_info->vlan_tag |=
		    (u16)rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
	}
	if (iwdev->roce_dcqcn_en) {
		/* DCQCN requires the ECN code point set in the ToS byte. */
		ah_info->tc_tos &= ~ECN_CODE_PT_MASK;
		ah_info->tc_tos |= ECN_CODE_PT_VAL;
	}

	return 0;
}
615 
616 static int
617 irdma_create_ah_wait(struct irdma_pci_f *rf,
618 		     struct irdma_sc_ah *sc_ah, bool sleep)
619 {
620 	if (!sleep) {
621 		int cnt = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms *
622 		CQP_TIMEOUT_THRESHOLD;
623 
624 		do {
625 			irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
626 			mdelay(1);
627 		} while (!sc_ah->ah_info.ah_valid && --cnt);
628 
629 		if (!cnt)
630 			return -ETIMEDOUT;
631 	}
632 	return 0;
633 }
634 
#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)

#if __FreeBSD_version >= 1400026
/**
 * irdma_create_ah - create address handle
 * @ib_ah: ptr to AH
 * @attr: address handle attributes
 * @flags: AH flags to wait
 * @udata: user data
 *
 * Resolves the source/destination GIDs into IP addresses, fills in the
 * hardware AH info, issues the CQP create and (for non-sleepable calls)
 * polls for its completion.  Returns 0 on success, error otherwise.
 */
int
irdma_create_ah(struct ib_ah *ib_ah,
		struct ib_ah_attr *attr, u32 flags,
		struct ib_udata *udata)
{
	struct irdma_pd *pd = to_iwpd(ib_ah->pd);
	struct irdma_ah *ah = container_of(ib_ah, struct irdma_ah, ibah);
	struct irdma_device *iwdev = to_iwdev(ib_ah->pd->device);
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;
	struct irdma_pci_f *rf = iwdev->rf;
	struct irdma_sc_ah *sc_ah;
	u32 ah_id = 0;
	struct irdma_ah_info *ah_info;
	/* Zero-init: the whole struct (including reserved bytes) is copied
	 * to userspace below, so uninitialized stack must not leak. */
	struct irdma_create_ah_resp uresp = {0};
	union {
		struct sockaddr saddr;
		struct sockaddr_in saddr_in;
		struct sockaddr_in6 saddr_in6;
	} sgid_addr, dgid_addr;
	int err;
	u8 dmac[ETH_ALEN];
	bool sleep = (flags & RDMA_CREATE_AH_SLEEPABLE) != 0;

	if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
		return -EINVAL;

	/* Reserve an AH index from the device-wide resource bitmap. */
	err = irdma_alloc_rsrc(rf, rf->allocated_ahs,
			       rf->max_ah, &ah_id, &rf->next_ah);

	if (err)
		return err;

	ah->pd = pd;
	sc_ah = &ah->sc_ah;
	sc_ah->ah_info.ah_idx = ah_id;
	sc_ah->ah_info.vsi = &iwdev->vsi;
	irdma_sc_init_ah(&rf->sc_dev, sc_ah);
	ah->sgid_index = attr->grh.sgid_index;
	memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid));
	rcu_read_lock();
	err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num,
				attr->grh.sgid_index, &sgid, &sgid_attr);
	rcu_read_unlock();
	if (err) {
		irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
			    "GID lookup at idx=%d with port=%d failed\n",
			    attr->grh.sgid_index, attr->port_num);
		err = -EINVAL;
		goto err_gid_l2;
	}
	/* Convert both GIDs to sockaddrs for the address fill below. */
	rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
	rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid);
	ah->av.attrs = *attr;
	ah->av.net_type = kc_rdma_gid_attr_network_type(sgid_attr,
							sgid_attr.gid_type,
							&sgid);

	/* Drop the reference taken by the cached-GID lookup. */
	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);

	ah->av.sgid_addr.saddr = sgid_addr.saddr;
	ah->av.dgid_addr.saddr = dgid_addr.saddr;
	ah_info = &sc_ah->ah_info;
	ah_info->ah_idx = ah_id;
	ah_info->pd_idx = pd->sc_pd.pd_id;
	ether_addr_copy(ah_info->mac_addr, IF_LLADDR(iwdev->netdev));

	if (attr->ah_flags & IB_AH_GRH) {
		ah_info->flow_label = attr->grh.flow_label;
		ah_info->hop_ttl = attr->grh.hop_limit;
		ah_info->tc_tos = attr->grh.traffic_class;
	}

	ether_addr_copy(dmac, attr->dmac);

	irdma_fill_ah_info(iwdev->netdev->if_vnet, ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr,
			   dmac, ah->av.net_type);

	err = irdma_create_ah_vlan_tag(iwdev, ah_info, &sgid_attr, dmac);
	if (err)
		goto err_gid_l2;

	err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
			      sleep, irdma_gsi_ud_qp_ah_cb, sc_ah);
	if (err) {
		irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
			    "CQP-OP Create AH fail");
		goto err_gid_l2;
	}

	err = irdma_create_ah_wait(rf, sc_ah, sleep);
	if (err) {
		irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
			    "CQP create AH timed out");
		goto err_gid_l2;
	}

	if (udata) {
		uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
		err = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (err) {
			/* Undo the hardware create before failing. */
			irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah,
					IRDMA_OP_AH_DESTROY, false, NULL, ah);
			goto err_gid_l2;
		}
	}

	return 0;
err_gid_l2:
	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);

	return err;
}
#endif
762 
763 void
764 irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr)
765 {
766 	ether_addr_copy(dmac, attr->dmac);
767 }
768 
/**
 * irdma_create_ah_stub - address handle creation stub
 *
 * Always fails with -ENOSYS.  The signature and error-return convention
 * differ between the pre- and post-1400026 FreeBSD verbs ABIs, hence the
 * interleaved preprocessor blocks.
 */
#if __FreeBSD_version < 1400026
struct ib_ah *
irdma_create_ah_stub(struct ib_pd *ibpd,
		     struct ib_ah_attr *attr,
		     struct ib_udata *udata)
#else
int
irdma_create_ah_stub(struct ib_ah *ib_ah,
		     struct ib_ah_attr *attr, u32 flags,
		     struct ib_udata *udata)
#endif
{
#if __FreeBSD_version >= 1400026
	return -ENOSYS;
#else
	return ERR_PTR(-ENOSYS);
#endif
}
787 
/**
 * irdma_destroy_ah_stub - address handle destruction stub
 *
 * No-op counterpart of irdma_create_ah_stub: the newer ABI returns void,
 * the older one reports -ENOSYS.
 */
#if __FreeBSD_version >= 1400026
void
irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags)
{
	return;
}
#else
int
irdma_destroy_ah_stub(struct ib_ah *ibah)
{
	return -ENOSYS;
}
#endif
801 
802 #if __FreeBSD_version < 1400026
803 /**
804  * irdma_create_ah - create address handle
805  * @ibpd: ptr to pd
806  * @attr: address handle attributes
807  * @udata: user data
808  *
809  * returns a pointer to an address handle
810  */
811 struct ib_ah *
812 irdma_create_ah(struct ib_pd *ibpd,
813 		struct ib_ah_attr *attr,
814 		struct ib_udata *udata)
815 {
816 	struct irdma_pd *pd = to_iwpd(ibpd);
817 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
818 	struct irdma_ah *ah;
819 	union ib_gid sgid;
820 	struct ib_gid_attr sgid_attr;
821 	struct irdma_pci_f *rf = iwdev->rf;
822 	struct irdma_sc_ah *sc_ah;
823 	u32 ah_id = 0;
824 	struct irdma_ah_info *ah_info;
825 	struct irdma_create_ah_resp uresp;
826 	union {
827 		struct sockaddr saddr;
828 		struct sockaddr_in saddr_in;
829 		struct sockaddr_in6 saddr_in6;
830 	} sgid_addr, dgid_addr;
831 	int err;
832 	u8 dmac[ETH_ALEN];
833 	bool sleep = udata ? true : false;
834 
835 	if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
836 		return ERR_PTR(-EINVAL);
837 
838 	err = irdma_alloc_rsrc(rf, rf->allocated_ahs,
839 			       rf->max_ah, &ah_id, &rf->next_ah);
840 
841 	if (err)
842 		return ERR_PTR(err);
843 
844 	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
845 	if (!ah) {
846 		irdma_free_rsrc(rf, rf->allocated_ahs, ah_id);
847 		return ERR_PTR(-ENOMEM);
848 	}
849 
850 	ah->pd = pd;
851 	sc_ah = &ah->sc_ah;
852 	sc_ah->ah_info.ah_idx = ah_id;
853 	sc_ah->ah_info.vsi = &iwdev->vsi;
854 	irdma_sc_init_ah(&rf->sc_dev, sc_ah);
855 	ah->sgid_index = attr->grh.sgid_index;
856 	memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid));
857 	rcu_read_lock();
858 	err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num,
859 				attr->grh.sgid_index, &sgid, &sgid_attr);
860 	rcu_read_unlock();
861 	if (err) {
862 		irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
863 			    "GID lookup at idx=%d with port=%d failed\n",
864 			    attr->grh.sgid_index, attr->port_num);
865 		err = -EINVAL;
866 		goto err_gid_l2;
867 	}
868 	rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
869 	rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid);
870 	ah->av.attrs = *attr;
871 	ah->av.net_type = kc_rdma_gid_attr_network_type(sgid_attr,
872 							sgid_attr.gid_type,
873 							&sgid);
874 
875 	if (sgid_attr.ndev)
876 		dev_put(sgid_attr.ndev);
877 
878 	ah->av.sgid_addr.saddr = sgid_addr.saddr;
879 	ah->av.dgid_addr.saddr = dgid_addr.saddr;
880 	ah_info = &sc_ah->ah_info;
881 	ah_info->ah_idx = ah_id;
882 	ah_info->pd_idx = pd->sc_pd.pd_id;
883 
884 	ether_addr_copy(ah_info->mac_addr, IF_LLADDR(iwdev->netdev));
885 	if (attr->ah_flags & IB_AH_GRH) {
886 		ah_info->flow_label = attr->grh.flow_label;
887 		ah_info->hop_ttl = attr->grh.hop_limit;
888 		ah_info->tc_tos = attr->grh.traffic_class;
889 	}
890 
891 	if (udata)
892 		ib_resolve_eth_dmac(ibpd->device, attr);
893 	irdma_ether_copy(dmac, attr);
894 
895 	irdma_fill_ah_info(iwdev->netdev->if_vnet, ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr,
896 			   dmac, ah->av.net_type);
897 
898 	err = irdma_create_ah_vlan_tag(iwdev, ah_info, &sgid_attr, dmac);
899 	if (err)
900 		goto err_gid_l2;
901 
902 	err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
903 			      sleep, irdma_gsi_ud_qp_ah_cb, sc_ah);
904 	if (err) {
905 		irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
906 			    "CQP-OP Create AH fail");
907 		goto err_gid_l2;
908 	}
909 
910 	err = irdma_create_ah_wait(rf, sc_ah, sleep);
911 	if (err) {
912 		irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
913 			    "CQP create AH timed out");
914 		goto err_gid_l2;
915 	}
916 
917 	if (udata) {
918 		uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
919 		err = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
920 		if (err) {
921 			irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah,
922 					IRDMA_OP_AH_DESTROY, false, NULL, ah);
923 			goto err_gid_l2;
924 		}
925 	}
926 
927 	return &ah->ibah;
928 err_gid_l2:
929 	kfree(ah);
930 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);
931 
932 	return ERR_PTR(err);
933 }
934 #endif
935 
/**
 * irdma_free_qp_rsrc - free up memory resources for qp
 * @iwqp: qp ptr (user or kernel)
 *
 * Detaches the QP from IEQ/QoS bookkeeping, returns its number to the
 * resource bitmap, and releases all DMA and kernel-mode buffers before
 * freeing the QP itself.
 */
void
irdma_free_qp_rsrc(struct irdma_qp *iwqp)
{
	struct irdma_device *iwdev = iwqp->iwdev;
	struct irdma_pci_f *rf = iwdev->rf;
	u32 qp_num = iwqp->ibqp.qp_num;

	irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
	irdma_dealloc_push_page(rf, &iwqp->sc_qp);
	if (iwqp->sc_qp.vsi) {
		irdma_qp_rem_qos(&iwqp->sc_qp);
		iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
					   iwqp->sc_qp.user_pri);
	}

	/* QP numbers <= 2 are presumably reserved/special (e.g. GSI) and
	 * not taken from the allocator — confirm before changing. */
	if (qp_num > 2)
		irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
	irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->q2_ctx_mem);
	irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->kqp.dma_mem);
	kfree(iwqp->kqp.sig_trk_mem);
	iwqp->kqp.sig_trk_mem = NULL;
	kfree(iwqp->kqp.sq_wrid_mem);
	kfree(iwqp->kqp.rq_wrid_mem);
	kfree(iwqp->sg_list);
	kfree(iwqp);
}
966 
967 /**
968  * irdma_create_qp - create qp
969  * @ibpd: ptr of pd
970  * @init_attr: attributes for qp
971  * @udata: user data for create qp
972  */
973 struct ib_qp *
974 irdma_create_qp(struct ib_pd *ibpd,
975 		struct ib_qp_init_attr *init_attr,
976 		struct ib_udata *udata)
977 {
978 #define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
979 #define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
980 	struct irdma_pd *iwpd = to_iwpd(ibpd);
981 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
982 	struct irdma_pci_f *rf = iwdev->rf;
983 	struct irdma_qp *iwqp;
984 	struct irdma_create_qp_resp uresp = {0};
985 	u32 qp_num = 0;
986 	int ret;
987 	int err_code;
988 	struct irdma_sc_qp *qp;
989 	struct irdma_sc_dev *dev = &rf->sc_dev;
990 	struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
991 	struct irdma_qp_init_info init_info = {{0}};
992 	struct irdma_qp_host_ctx_info *ctx_info;
993 
994 	err_code = irdma_validate_qp_attrs(init_attr, iwdev);
995 	if (err_code)
996 		return ERR_PTR(err_code);
997 
998 	if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
999 		      udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
1000 		return ERR_PTR(-EINVAL);
1001 
1002 	init_info.vsi = &iwdev->vsi;
1003 	init_info.qp_uk_init_info.uk_attrs = uk_attrs;
1004 	init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
1005 	init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
1006 	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
1007 	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
1008 	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
1009 
1010 	iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL);
1011 	if (!iwqp)
1012 		return ERR_PTR(-ENOMEM);
1013 
1014 	iwqp->sg_list = kcalloc(uk_attrs->max_hw_wq_frags, sizeof(*iwqp->sg_list),
1015 				GFP_KERNEL);
1016 	if (!iwqp->sg_list) {
1017 		kfree(iwqp);
1018 		return ERR_PTR(-ENOMEM);
1019 	}
1020 
1021 	qp = &iwqp->sc_qp;
1022 	qp->qp_uk.back_qp = iwqp;
1023 	qp->qp_uk.lock = &iwqp->lock;
1024 	qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
1025 
1026 	iwqp->iwdev = iwdev;
1027 	iwqp->q2_ctx_mem.size = IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE;
1028 	iwqp->q2_ctx_mem.va = irdma_allocate_dma_mem(dev->hw, &iwqp->q2_ctx_mem,
1029 						     iwqp->q2_ctx_mem.size,
1030 						     256);
1031 	if (!iwqp->q2_ctx_mem.va) {
1032 		kfree(iwqp->sg_list);
1033 		kfree(iwqp);
1034 		return ERR_PTR(-ENOMEM);
1035 	}
1036 
1037 	init_info.q2 = iwqp->q2_ctx_mem.va;
1038 	init_info.q2_pa = iwqp->q2_ctx_mem.pa;
1039 	init_info.host_ctx = (__le64 *) (init_info.q2 + IRDMA_Q2_BUF_SIZE);
1040 	init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
1041 
1042 	if (init_attr->qp_type == IB_QPT_GSI)
1043 		qp_num = 1;
1044 	else
1045 		err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
1046 					    &qp_num, &rf->next_qp);
1047 	if (err_code)
1048 		goto error;
1049 
1050 	iwqp->iwpd = iwpd;
1051 	iwqp->ibqp.qp_num = qp_num;
1052 	qp = &iwqp->sc_qp;
1053 	iwqp->iwscq = to_iwcq(init_attr->send_cq);
1054 	iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
1055 	iwqp->host_ctx.va = init_info.host_ctx;
1056 	iwqp->host_ctx.pa = init_info.host_ctx_pa;
1057 	iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
1058 
1059 	init_info.pd = &iwpd->sc_pd;
1060 	init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
1061 	if (!rdma_protocol_roce(&iwdev->ibdev, 1))
1062 		init_info.qp_uk_init_info.first_sq_wq = 1;
1063 	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
1064 	init_waitqueue_head(&iwqp->waitq);
1065 	init_waitqueue_head(&iwqp->mod_qp_waitq);
1066 
1067 	if (udata) {
1068 		init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
1069 		err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info, init_attr);
1070 	} else {
1071 		INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
1072 		init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
1073 		err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
1074 	}
1075 
1076 	if (err_code) {
1077 		irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
1078 			    "setup qp failed\n");
1079 		goto error;
1080 	}
1081 
1082 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
1083 		if (init_attr->qp_type == IB_QPT_RC) {
1084 			init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
1085 			init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
1086 			    IRDMA_WRITE_WITH_IMM |
1087 			    IRDMA_ROCE;
1088 		} else {
1089 			init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
1090 			init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
1091 			    IRDMA_ROCE;
1092 		}
1093 	} else {
1094 		init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
1095 		init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
1096 	}
1097 
1098 	ret = irdma_sc_qp_init(qp, &init_info);
1099 	if (ret) {
1100 		err_code = -EPROTO;
1101 		irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
1102 			    "qp_init fail\n");
1103 		goto error;
1104 	}
1105 
1106 	ctx_info = &iwqp->ctx_info;
1107 	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1108 	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1109 
1110 	if (rdma_protocol_roce(&iwdev->ibdev, 1))
1111 		irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
1112 	else
1113 		irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
1114 
1115 	err_code = irdma_cqp_create_qp_cmd(iwqp);
1116 	if (err_code)
1117 		goto error;
1118 
1119 	atomic_set(&iwqp->refcnt, 1);
1120 	spin_lock_init(&iwqp->lock);
1121 	spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
1122 	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
1123 	rf->qp_table[qp_num] = iwqp;
1124 
1125 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
1126 		if (dev->ws_add(&iwdev->vsi, 0)) {
1127 			irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
1128 			err_code = -EINVAL;
1129 			goto error;
1130 		}
1131 
1132 		irdma_qp_add_qos(&iwqp->sc_qp);
1133 	}
1134 
1135 	if (udata) {
1136 		/* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
1137 		if (udata->outlen < sizeof(uresp)) {
1138 			uresp.lsmm = 1;
1139 			uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
1140 		} else {
1141 			if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
1142 				uresp.lsmm = 1;
1143 		}
1144 		uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
1145 		uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
1146 		uresp.qp_id = qp_num;
1147 		uresp.qp_caps = qp->qp_uk.qp_caps;
1148 
1149 		err_code = ib_copy_to_udata(udata, &uresp,
1150 					    min(sizeof(uresp), udata->outlen));
1151 		if (err_code) {
1152 			irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
1153 				    "copy_to_udata failed\n");
1154 			kc_irdma_destroy_qp(&iwqp->ibqp, udata);
1155 			return ERR_PTR(err_code);
1156 		}
1157 	}
1158 
1159 	init_completion(&iwqp->free_qp);
1160 	return &iwqp->ibqp;
1161 
1162 error:
1163 	irdma_free_qp_rsrc(iwqp);
1164 
1165 	return ERR_PTR(err_code);
1166 }
1167 
1168 /**
1169  * irdma_destroy_qp - destroy qp
1170  * @ibqp: qp's ib pointer also to get to device's qp address
1171  * @udata: user data
1172  */
1173 #if __FreeBSD_version >= 1400026
1174 int
1175 irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1176 #else
1177 int
1178 irdma_destroy_qp(struct ib_qp *ibqp)
1179 #endif
1180 {
1181 	struct irdma_qp *iwqp = to_iwqp(ibqp);
1182 	struct irdma_device *iwdev = iwqp->iwdev;
1183 
1184 	if (iwqp->sc_qp.qp_uk.destroy_pending)
1185 		goto free_rsrc;
1186 	iwqp->sc_qp.qp_uk.destroy_pending = true;
1187 	if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
1188 		irdma_modify_qp_to_err(&iwqp->sc_qp);
1189 
1190 	irdma_qp_rem_ref(&iwqp->ibqp);
1191 	wait_for_completion(&iwqp->free_qp);
1192 	irdma_free_lsmm_rsrc(iwqp);
1193 	if (!iwdev->rf->reset &&
1194 	    irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp))
1195 		return -ENOTRECOVERABLE;
1196 free_rsrc:
1197 	if (!iwqp->user_mode) {
1198 		if (iwqp->iwscq) {
1199 			irdma_clean_cqes(iwqp, iwqp->iwscq);
1200 			if (iwqp->iwrcq != iwqp->iwscq)
1201 				irdma_clean_cqes(iwqp, iwqp->iwrcq);
1202 		}
1203 	}
1204 	irdma_remove_push_mmap_entries(iwqp);
1205 	irdma_free_qp_rsrc(iwqp);
1206 
1207 	return 0;
1208 }
1209 
1210 /**
1211  * irdma_create_cq - create cq
1212  * @ibcq: CQ allocated
1213  * @attr: attributes for cq
1214  * @udata: user data
1215  */
1216 #if __FreeBSD_version >= 1400026
1217 int
1218 irdma_create_cq(struct ib_cq *ibcq,
1219 		const struct ib_cq_init_attr *attr,
1220 		struct ib_udata *udata)
1221 #else
1222 struct ib_cq *
1223 irdma_create_cq(struct ib_device *ibdev,
1224 		const struct ib_cq_init_attr *attr,
1225 		struct ib_ucontext *context,
1226 		struct ib_udata *udata)
1227 #endif
1228 {
1229 #define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
1230 #define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
1231 #if __FreeBSD_version >= 1400026
1232 	struct ib_device *ibdev = ibcq->device;
1233 #endif
1234 	struct irdma_device *iwdev = to_iwdev(ibdev);
1235 	struct irdma_pci_f *rf = iwdev->rf;
1236 #if __FreeBSD_version >= 1400026
1237 	struct irdma_cq *iwcq = to_iwcq(ibcq);
1238 #else
1239 	struct irdma_cq *iwcq;
1240 #endif
1241 	u32 cq_num = 0;
1242 	struct irdma_sc_cq *cq;
1243 	struct irdma_sc_dev *dev = &rf->sc_dev;
1244 	struct irdma_cq_init_info info = {0};
1245 	int status;
1246 	struct irdma_cqp_request *cqp_request;
1247 	struct cqp_cmds_info *cqp_info;
1248 	struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
1249 	unsigned long flags;
1250 	int err_code;
1251 	int entries = attr->cqe;
1252 	bool cqe_64byte_ena;
1253 
1254 #if __FreeBSD_version >= 1400026
1255 	err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
1256 	if (err_code)
1257 		return err_code;
1258 
1259 	if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
1260 		      udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
1261 		return -EINVAL;
1262 #else
1263 	err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
1264 	if (err_code)
1265 		return ERR_PTR(err_code);
1266 
1267 	if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
1268 		      udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
1269 		return ERR_PTR(-EINVAL);
1270 
1271 	iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
1272 	if (!iwcq)
1273 		return ERR_PTR(-ENOMEM);
1274 #endif
1275 	err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
1276 				    &rf->next_cq);
1277 	if (err_code)
1278 #if __FreeBSD_version >= 1400026
1279 		return err_code;
1280 #else
1281 		goto error;
1282 #endif
1283 	cq = &iwcq->sc_cq;
1284 	cq->back_cq = iwcq;
1285 	atomic_set(&iwcq->refcnt, 1);
1286 	spin_lock_init(&iwcq->lock);
1287 	INIT_LIST_HEAD(&iwcq->resize_list);
1288 	INIT_LIST_HEAD(&iwcq->cmpl_generated);
1289 	info.dev = dev;
1290 	ukinfo->cq_size = max(entries, 4);
1291 	ukinfo->cq_id = cq_num;
1292 	cqe_64byte_ena = (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE) ? true : false;
1293 	ukinfo->avoid_mem_cflct = cqe_64byte_ena;
1294 	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
1295 	if (attr->comp_vector < rf->ceqs_count)
1296 		info.ceq_id = attr->comp_vector;
1297 	info.ceq_id_valid = true;
1298 	info.ceqe_mask = 1;
1299 	info.type = IRDMA_CQ_TYPE_IWARP;
1300 	info.vsi = &iwdev->vsi;
1301 
1302 	if (udata) {
1303 		struct irdma_ucontext *ucontext;
1304 		struct irdma_create_cq_req req = {0};
1305 		struct irdma_cq_mr *cqmr;
1306 		struct irdma_pbl *iwpbl;
1307 		struct irdma_pbl *iwpbl_shadow;
1308 		struct irdma_cq_mr *cqmr_shadow;
1309 
1310 		iwcq->user_mode = true;
1311 #if __FreeBSD_version >= 1400026
1312 		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
1313 #else
1314 		ucontext = to_ucontext(context);
1315 #endif
1316 
1317 		if (ib_copy_from_udata(&req, udata,
1318 				       min(sizeof(req), udata->inlen))) {
1319 			err_code = -EFAULT;
1320 			goto cq_free_rsrc;
1321 		}
1322 
1323 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1324 		iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
1325 				      &ucontext->cq_reg_mem_list);
1326 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1327 		if (!iwpbl) {
1328 			err_code = -EPROTO;
1329 			goto cq_free_rsrc;
1330 		}
1331 		iwcq->iwpbl = iwpbl;
1332 		iwcq->cq_mem_size = 0;
1333 		cqmr = &iwpbl->cq_mr;
1334 
1335 		if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
1336 		    IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
1337 			spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1338 			iwpbl_shadow = irdma_get_pbl((unsigned long)req.user_shadow_area,
1339 						     &ucontext->cq_reg_mem_list);
1340 			spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1341 
1342 			if (!iwpbl_shadow) {
1343 				err_code = -EPROTO;
1344 				goto cq_free_rsrc;
1345 			}
1346 			iwcq->iwpbl_shadow = iwpbl_shadow;
1347 			cqmr_shadow = &iwpbl_shadow->cq_mr;
1348 			info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
1349 			cqmr->split = true;
1350 		} else {
1351 			info.shadow_area_pa = cqmr->shadow;
1352 		}
1353 		if (iwpbl->pbl_allocated) {
1354 			info.virtual_map = true;
1355 			info.pbl_chunk_size = 1;
1356 			info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
1357 		} else {
1358 			info.cq_base_pa = cqmr->cq_pbl.addr;
1359 		}
1360 	} else {
1361 		/* Kmode allocations */
1362 		int rsize;
1363 
1364 		if (entries < 1 || entries > rf->max_cqe) {
1365 			err_code = -EINVAL;
1366 			goto cq_free_rsrc;
1367 		}
1368 
1369 		entries++;
1370 		if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1371 			entries *= 2;
1372 		ukinfo->cq_size = entries;
1373 
1374 		if (cqe_64byte_ena)
1375 			rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe);
1376 		else
1377 			rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
1378 		iwcq->kmem.size = round_up(rsize, IRDMA_HW_PAGE_SIZE);
1379 		iwcq->kmem.va = irdma_allocate_dma_mem(dev->hw, &iwcq->kmem,
1380 						       iwcq->kmem.size, IRDMA_HW_PAGE_SIZE);
1381 		if (!iwcq->kmem.va) {
1382 			err_code = -ENOMEM;
1383 			goto cq_free_rsrc;
1384 		}
1385 
1386 		iwcq->kmem_shadow.size = IRDMA_SHADOW_AREA_SIZE << 3;
1387 		iwcq->kmem_shadow.va = irdma_allocate_dma_mem(dev->hw,
1388 							      &iwcq->kmem_shadow,
1389 							      iwcq->kmem_shadow.size,
1390 							      64);
1391 
1392 		if (!iwcq->kmem_shadow.va) {
1393 			err_code = -ENOMEM;
1394 			goto cq_free_rsrc;
1395 		}
1396 		info.shadow_area_pa = iwcq->kmem_shadow.pa;
1397 		ukinfo->shadow_area = iwcq->kmem_shadow.va;
1398 		ukinfo->cq_base = iwcq->kmem.va;
1399 		info.cq_base_pa = iwcq->kmem.pa;
1400 	}
1401 
1402 	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1403 		info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
1404 						 (u32)IRDMA_MAX_CQ_READ_THRESH);
1405 	if (irdma_sc_cq_init(cq, &info)) {
1406 		irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
1407 			    "init cq fail\n");
1408 		err_code = -EPROTO;
1409 		goto cq_free_rsrc;
1410 	}
1411 
1412 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1413 	if (!cqp_request) {
1414 		err_code = -ENOMEM;
1415 		goto cq_free_rsrc;
1416 	}
1417 	cqp_info = &cqp_request->info;
1418 	cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
1419 	cqp_info->post_sq = 1;
1420 	cqp_info->in.u.cq_create.cq = cq;
1421 	cqp_info->in.u.cq_create.check_overflow = true;
1422 	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
1423 	status = irdma_handle_cqp_op(rf, cqp_request);
1424 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1425 	if (status) {
1426 		err_code = -ENOMEM;
1427 		goto cq_free_rsrc;
1428 	}
1429 
1430 	if (udata) {
1431 		struct irdma_create_cq_resp resp = {0};
1432 
1433 		resp.cq_id = info.cq_uk_init_info.cq_id;
1434 		resp.cq_size = info.cq_uk_init_info.cq_size;
1435 		if (ib_copy_to_udata(udata, &resp,
1436 				     min(sizeof(resp), udata->outlen))) {
1437 			irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
1438 				    "copy to user data\n");
1439 			err_code = -EPROTO;
1440 			goto cq_destroy;
1441 		}
1442 	}
1443 
1444 	rf->cq_table[cq_num] = iwcq;
1445 	init_completion(&iwcq->free_cq);
1446 
1447 #if __FreeBSD_version >= 1400026
1448 	return 0;
1449 #else
1450 	return &iwcq->ibcq;
1451 #endif
1452 cq_destroy:
1453 	irdma_cq_wq_destroy(rf, cq);
1454 cq_free_rsrc:
1455 	irdma_cq_free_rsrc(rf, iwcq);
1456 #if __FreeBSD_version >= 1400026
1457 	return err_code;
1458 #else
1459 error:
1460 	kfree(iwcq);
1461 	return ERR_PTR(err_code);
1462 #endif
1463 }
1464 
1465 /**
1466  * irdma_copy_user_pgaddrs - copy user page address to pble's os locally
1467  * @iwmr: iwmr for IB's user page addresses
1468  * @pbl: ple pointer to save 1 level or 0 level pble
1469  * @level: indicated level 0, 1 or 2
1470  */
1471 
1472 void
1473 irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
1474 			enum irdma_pble_level level)
1475 {
1476 	struct ib_umem *region = iwmr->region;
1477 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
1478 	int chunk_pages, entry, i;
1479 	struct scatterlist *sg;
1480 	u64 pg_addr = 0;
1481 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
1482 	struct irdma_pble_info *pinfo;
1483 	u32 idx = 0;
1484 	u32 pbl_cnt = 0;
1485 
1486 	pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
1487 	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
1488 		chunk_pages = DIV_ROUND_UP(sg_dma_len(sg), iwmr->page_size);
1489 		if (iwmr->type == IRDMA_MEMREG_TYPE_QP && !iwpbl->qp_mr.sq_page)
1490 			iwpbl->qp_mr.sq_page = sg_page(sg);
1491 		for (i = 0; i < chunk_pages; i++) {
1492 			pg_addr = sg_dma_address(sg) + (i * iwmr->page_size);
1493 			if ((entry + i) == 0)
1494 				*pbl = pg_addr & iwmr->page_msk;
1495 			else if (!(pg_addr & ~iwmr->page_msk))
1496 				*pbl = pg_addr;
1497 			else
1498 				continue;
1499 			if (++pbl_cnt == palloc->total_cnt)
1500 				break;
1501 			pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
1502 		}
1503 	}
1504 }
1505 
1506 /**
1507  * irdma_destroy_ah - Destroy address handle
1508  * @ibah: pointer to address handle
1509  * @ah_flags: destroy flags
1510  */
1511 
1512 #if __FreeBSD_version >= 1400026
1513 void
1514 irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
1515 {
1516 	struct irdma_device *iwdev = to_iwdev(ibah->device);
1517 	struct irdma_ah *ah = to_iwah(ibah);
1518 
1519 	irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
1520 			false, NULL, ah);
1521 
1522 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
1523 			ah->sc_ah.ah_info.ah_idx);
1524 }
1525 #endif
1526 
1527 #if __FreeBSD_version < 1400026
1528 int
1529 irdma_destroy_ah(struct ib_ah *ibah)
1530 {
1531 	struct irdma_device *iwdev = to_iwdev(ibah->device);
1532 	struct irdma_ah *ah = to_iwah(ibah);
1533 
1534 	irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
1535 			false, NULL, ah);
1536 
1537 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
1538 			ah->sc_ah.ah_info.ah_idx);
1539 
1540 	kfree(ah);
1541 	return 0;
1542 }
1543 #endif
1544 
/**
 * irdma_dereg_mr - deregister a memory region
 * @ib_mr: memory region to release
 * @udata: user data (new ABI only)
 */
#if __FreeBSD_version >= 1400026
int
irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
#else
int
irdma_dereg_mr(struct ib_mr *ib_mr)
#endif
{
	struct irdma_mr *iwmr = to_iwmr(ib_mr);
	struct irdma_device *iwdev = to_iwdev(ib_mr->device);
	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
	int ret;

	/* QP/CQ-type registrations only need their ucontext list entry
	 * removed; the HW dereg + stag free below is for MEM type only.
	 */
	if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
		if (iwmr->region) {
			struct irdma_ucontext *ucontext;
#if __FreeBSD_version >= 1400026

			ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);

#else
			struct ib_pd *ibpd = ib_mr->pd;

			ucontext = to_ucontext(ibpd->uobject->context);
#endif
			irdma_del_memlist(iwmr, ucontext);
		}
		goto done;
	}

	ret = irdma_hwdereg_mr(ib_mr);
	if (ret)
		return ret;

	irdma_free_stag(iwdev, iwmr->stag);
done:
	if (iwpbl->pbl_allocated)
		irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);

	if (iwmr->region)
		ib_umem_release(iwmr->region);

	kfree(iwmr);

	return 0;
}
1591 
1592 /*
1593  * irdma_rereg_user_mr - Re-Register a user memory region @ibmr: ib mem to access iwarp mr pointer @flags: bit mask to
1594  * indicate which of the attr's of MR modified @start: virtual start address @len: length of mr @virt: virtual address
1595  * @new access flags: bit mask of access flags @new_pd: ptr of pd @udata: user data
1596  */
1597 int
1598 irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len,
1599 		    u64 virt, int new_access, struct ib_pd *new_pd,
1600 		    struct ib_udata *udata)
1601 {
1602 	struct irdma_device *iwdev = to_iwdev(ib_mr->device);
1603 	struct irdma_mr *iwmr = to_iwmr(ib_mr);
1604 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
1605 	int ret;
1606 
1607 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
1608 		return -EINVAL;
1609 
1610 	if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
1611 		return -EOPNOTSUPP;
1612 
1613 	ret = irdma_hwdereg_mr(ib_mr);
1614 	if (ret)
1615 		return ret;
1616 
1617 	if (flags & IB_MR_REREG_ACCESS)
1618 		iwmr->access = new_access;
1619 
1620 	if (flags & IB_MR_REREG_PD) {
1621 		iwmr->ibmr.pd = new_pd;
1622 		iwmr->ibmr.device = new_pd->device;
1623 	}
1624 
1625 	if (flags & IB_MR_REREG_TRANS) {
1626 		if (iwpbl->pbl_allocated) {
1627 			irdma_free_pble(iwdev->rf->pble_rsrc,
1628 					&iwpbl->pble_alloc);
1629 			iwpbl->pbl_allocated = false;
1630 		}
1631 		if (iwmr->region) {
1632 			ib_umem_release(iwmr->region);
1633 			iwmr->region = NULL;
1634 		}
1635 
1636 		ib_mr = irdma_rereg_mr_trans(iwmr, start, len, virt, udata);
1637 		if (IS_ERR(ib_mr))
1638 			return PTR_ERR(ib_mr);
1639 
1640 	} else {
1641 		ret = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
1642 		if (ret)
1643 			return ret;
1644 	}
1645 
1646 	return 0;
1647 }
1648 
1649 int
1650 kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp, struct ib_qp_attr *attr,
1651 			  u16 *vlan_id)
1652 {
1653 	int ret;
1654 	union ib_gid sgid;
1655 	struct ib_gid_attr sgid_attr;
1656 	struct irdma_av *av = &iwqp->roce_ah.av;
1657 
1658 	ret = ib_get_cached_gid(iwqp->ibqp.device, attr->ah_attr.port_num,
1659 				attr->ah_attr.grh.sgid_index, &sgid,
1660 				&sgid_attr);
1661 	if (ret)
1662 		return ret;
1663 
1664 	if (sgid_attr.ndev) {
1665 		*vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
1666 		ether_addr_copy(iwqp->ctx_info.roce_info->mac_addr, IF_LLADDR(sgid_attr.ndev));
1667 	}
1668 
1669 	rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid);
1670 	dev_put(sgid_attr.ndev);
1671 	iwqp->sc_qp.user_pri = iwqp->ctx_info.user_pri;
1672 
1673 	return 0;
1674 }
1675 
#if __FreeBSD_version >= 1400026
/**
 * irdma_destroy_cq - destroy cq
 * @ib_cq: cq pointer
 * @udata: user data
 */
void
irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct irdma_device *iwdev = to_iwdev(ib_cq->device);
	struct irdma_cq *iwcq = to_iwcq(ib_cq);
	struct irdma_sc_cq *cq = &iwcq->sc_cq;
	struct irdma_sc_dev *dev = cq->dev;
	struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
	struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
	unsigned long flags;

	/* Flush software-generated completions and pending resizes. */
	spin_lock_irqsave(&iwcq->lock, flags);
	if (!list_empty(&iwcq->cmpl_generated))
		irdma_remove_cmpls_list(iwcq);
	if (!list_empty(&iwcq->resize_list))
		irdma_process_resize_list(iwcq, iwdev, NULL);
	spin_unlock_irqrestore(&iwcq->lock, flags);

	/* Drop our reference and wait until all other users are done. */
	irdma_cq_rem_ref(ib_cq);
	wait_for_completion(&iwcq->free_cq);

	irdma_cq_wq_destroy(iwdev->rf, cq);

	/* Purge CEQ entries that still point at this CQ. */
	spin_lock_irqsave(&iwceq->ce_lock, flags);
	irdma_sc_cleanup_ceqes(cq, ceq);
	spin_unlock_irqrestore(&iwceq->ce_lock, flags);
	irdma_cq_free_rsrc(iwdev->rf, iwcq);
}

#endif
#if __FreeBSD_version < 1400026
/**
 * irdma_destroy_cq - destroy cq
 * @ib_cq: cq pointer
 */
int
irdma_destroy_cq(struct ib_cq *ib_cq)
{
	struct irdma_device *iwdev = to_iwdev(ib_cq->device);
	struct irdma_cq *iwcq = to_iwcq(ib_cq);
	struct irdma_sc_cq *cq = &iwcq->sc_cq;
	struct irdma_sc_dev *dev = cq->dev;
	struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
	struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
	unsigned long flags;

	/* Flush software-generated completions and pending resizes. */
	spin_lock_irqsave(&iwcq->lock, flags);
	if (!list_empty(&iwcq->cmpl_generated))
		irdma_remove_cmpls_list(iwcq);
	if (!list_empty(&iwcq->resize_list))
		irdma_process_resize_list(iwcq, iwdev, NULL);
	spin_unlock_irqrestore(&iwcq->lock, flags);

	/* Drop our reference and wait until all other users are done. */
	irdma_cq_rem_ref(ib_cq);
	wait_for_completion(&iwcq->free_cq);

	irdma_cq_wq_destroy(iwdev->rf, cq);

	/* Purge CEQ entries that still point at this CQ. */
	spin_lock_irqsave(&iwceq->ce_lock, flags);
	irdma_sc_cleanup_ceqes(cq, ceq);
	spin_unlock_irqrestore(&iwceq->ce_lock, flags);

	irdma_cq_free_rsrc(iwdev->rf, iwcq);
	/* Old ABI: the driver owns the CQ container memory. */
	kfree(iwcq);

	return 0;
}

#endif
1751 /**
1752  * irdma_alloc_mw - Allocate memory window
1753  * @pd: Protection domain
1754  * @type: Window type
1755  * @udata: user data pointer
1756  */
1757 struct ib_mw *
1758 irdma_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
1759 	       struct ib_udata *udata)
1760 {
1761 	struct irdma_device *iwdev = to_iwdev(pd->device);
1762 	struct irdma_mr *iwmr;
1763 	int err_code;
1764 	u32 stag;
1765 
1766 	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1767 	if (!iwmr)
1768 		return ERR_PTR(-ENOMEM);
1769 
1770 	stag = irdma_create_stag(iwdev);
1771 	if (!stag) {
1772 		kfree(iwmr);
1773 		return ERR_PTR(-ENOMEM);
1774 	}
1775 
1776 	iwmr->stag = stag;
1777 	iwmr->ibmw.rkey = stag;
1778 	iwmr->ibmw.pd = pd;
1779 	iwmr->ibmw.type = type;
1780 	iwmr->ibmw.device = pd->device;
1781 
1782 	err_code = irdma_hw_alloc_mw(iwdev, iwmr);
1783 	if (err_code) {
1784 		irdma_free_stag(iwdev, stag);
1785 		kfree(iwmr);
1786 		return ERR_PTR(err_code);
1787 	}
1788 
1789 	return &iwmr->ibmw;
1790 }
1791 
1792 /**
1793  * kc_set_loc_seq_num_mss - Set local seq number and mss
1794  * @cm_node: cm node info
1795  */
1796 void
1797 kc_set_loc_seq_num_mss(struct irdma_cm_node *cm_node)
1798 {
1799 	struct timespec ts;
1800 
1801 	getnanotime(&ts);
1802 	cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
1803 	if (cm_node->iwdev->vsi.mtu > 1500 &&
1804 	    2 * cm_node->iwdev->vsi.mtu > cm_node->iwdev->rcv_wnd)
1805 		cm_node->tcp_cntxt.mss = (cm_node->ipv4) ?
1806 		    (1500 - IRDMA_MTU_TO_MSS_IPV4) :
1807 		    (1500 - IRDMA_MTU_TO_MSS_IPV6);
1808 	else
1809 		cm_node->tcp_cntxt.mss = (cm_node->ipv4) ?
1810 		    (cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV4) :
1811 		    (cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV6);
1812 }
1813 
#if __FreeBSD_version < 1400026
/* Tracking entry for one user VMA mapped onto device memory. */
struct irdma_vma_data {
	struct list_head list;	/* entry on irdma_ucontext vma_list */
	struct vm_area_struct *vma;	/* the tracked user VMA */
	struct mutex *vma_list_mutex;	/* protect the vma_list */
};
1820 
1821 /**
1822  * irdma_vma_open -
1823  * @vma: User VMA
1824  */
1825 static void
1826 irdma_vma_open(struct vm_area_struct *vma)
1827 {
1828 	vma->vm_ops = NULL;
1829 }
1830 
1831 /**
1832  * irdma_vma_close - Remove vma data from vma list
1833  * @vma: User VMA
1834  */
1835 static void
1836 irdma_vma_close(struct vm_area_struct *vma)
1837 {
1838 	struct irdma_vma_data *vma_data;
1839 
1840 	vma_data = vma->vm_private_data;
1841 	vma->vm_private_data = NULL;
1842 	vma_data->vma = NULL;
1843 	mutex_lock(vma_data->vma_list_mutex);
1844 	list_del(&vma_data->list);
1845 	mutex_unlock(vma_data->vma_list_mutex);
1846 	kfree(vma_data);
1847 }
1848 
/* VMA callbacks keeping the per-ucontext VMA tracking list in sync. */
static const struct vm_operations_struct irdma_vm_ops = {
	.open = irdma_vma_open,
	.close = irdma_vma_close
};
1853 
1854 /**
1855  * irdma_set_vma_data - Save vma data in context list
1856  * @vma: User VMA
1857  * @context: ib user context
1858  */
1859 static int
1860 irdma_set_vma_data(struct vm_area_struct *vma,
1861 		   struct irdma_ucontext *context)
1862 {
1863 	struct list_head *vma_head = &context->vma_list;
1864 	struct irdma_vma_data *vma_entry;
1865 
1866 	vma_entry = kzalloc(sizeof(*vma_entry), GFP_KERNEL);
1867 	if (!vma_entry)
1868 		return -ENOMEM;
1869 
1870 	vma->vm_private_data = vma_entry;
1871 	vma->vm_ops = &irdma_vm_ops;
1872 
1873 	vma_entry->vma = vma;
1874 	vma_entry->vma_list_mutex = &context->vma_list_mutex;
1875 
1876 	mutex_lock(&context->vma_list_mutex);
1877 	list_add(&vma_entry->list, vma_head);
1878 	mutex_unlock(&context->vma_list_mutex);
1879 
1880 	return 0;
1881 }
1882 
1883 /**
1884  * irdma_disassociate_ucontext - Disassociate user context
1885  * @context: ib user context
1886  */
1887 void
1888 irdma_disassociate_ucontext(struct ib_ucontext *context)
1889 {
1890 	struct irdma_ucontext *ucontext = to_ucontext(context);
1891 
1892 	struct irdma_vma_data *vma_data, *n;
1893 	struct vm_area_struct *vma;
1894 
1895 	mutex_lock(&ucontext->vma_list_mutex);
1896 	list_for_each_entry_safe(vma_data, n, &ucontext->vma_list, list) {
1897 		vma = vma_data->vma;
1898 		zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
1899 
1900 		vma->vm_ops = NULL;
1901 		list_del(&vma_data->list);
1902 		kfree(vma_data);
1903 	}
1904 	mutex_unlock(&ucontext->vma_list_mutex);
1905 }
1906 
1907 int
1908 rdma_user_mmap_io(struct ib_ucontext *context, struct vm_area_struct *vma,
1909 		  unsigned long pfn, unsigned long size, pgprot_t prot)
1910 {
1911 	if (io_remap_pfn_range(vma,
1912 			       vma->vm_start,
1913 			       pfn,
1914 			       size,
1915 			       prot))
1916 		return -EAGAIN;
1917 
1918 	return irdma_set_vma_data(vma, to_ucontext(context));
1919 }
#else
/**
 * irdma_disassociate_ucontext - Disassociate user context
 * @context: ib user context
 */
void
irdma_disassociate_ucontext(struct ib_ucontext *context)
{
	/* Intentionally empty: nothing to do on this ABI. */
}
#endif
1930 
1931 struct ib_device *
1932 ib_device_get_by_netdev(struct ifnet *netdev, int driver_id)
1933 {
1934 	struct irdma_device *iwdev;
1935 	struct irdma_handler *hdl;
1936 	unsigned long flags;
1937 
1938 	spin_lock_irqsave(&irdma_handler_lock, flags);
1939 	list_for_each_entry(hdl, &irdma_handlers, list) {
1940 		iwdev = hdl->iwdev;
1941 		if (netdev == iwdev->netdev) {
1942 			spin_unlock_irqrestore(&irdma_handler_lock,
1943 					       flags);
1944 			return &iwdev->ibdev;
1945 		}
1946 	}
1947 	spin_unlock_irqrestore(&irdma_handler_lock, flags);
1948 
1949 	return NULL;
1950 }
1951 
/* Compat shim mapping ib_unregister_device_put() onto a plain
 * ib_unregister_device() call.
 */
void
ib_unregister_device_put(struct ib_device *device)
{
	ib_unregister_device(device);
}
1957 
1958 /**
1959  * irdma_query_gid_roce - Query port GID for Roce
1960  * @ibdev: device pointer from stack
1961  * @port: port number
1962  * @index: Entry index
1963  * @gid: Global ID
1964  */
1965 int
1966 irdma_query_gid_roce(struct ib_device *ibdev, u8 port, int index,
1967 		     union ib_gid *gid)
1968 {
1969 	int ret;
1970 
1971 	ret = rdma_query_gid(ibdev, port, index, gid);
1972 	if (ret == -EAGAIN) {
1973 		memcpy(gid, &zgid, sizeof(*gid));
1974 		return 0;
1975 	}
1976 
1977 	return ret;
1978 }
1979 
1980 /**
1981  * irdma_modify_port - modify port attributes
1982  * @ibdev: device pointer from stack
1983  * @port: port number for query
1984  * @mask: Property mask
1985  * @props: returning device attributes
1986  */
1987 int
1988 irdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
1989 		  struct ib_port_modify *props)
1990 {
1991 	if (port > 1)
1992 		return -EINVAL;
1993 
1994 	return 0;
1995 }
1996 
1997 /**
1998  * irdma_query_pkey - Query partition key
1999  * @ibdev: device pointer from stack
2000  * @port: port number
2001  * @index: index of pkey
2002  * @pkey: pointer to store the pkey
2003  */
2004 int
2005 irdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
2006 		 u16 *pkey)
2007 {
2008 	if (index >= IRDMA_PKEY_TBL_SZ)
2009 		return -EINVAL;
2010 
2011 	*pkey = IRDMA_DEFAULT_PKEY;
2012 	return 0;
2013 }
2014 
2015 int
2016 irdma_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
2017 			  struct ib_port_immutable *immutable)
2018 {
2019 	struct ib_port_attr attr;
2020 	int err;
2021 
2022 	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2023 	err = ib_query_port(ibdev, port_num, &attr);
2024 	if (err)
2025 		return err;
2026 
2027 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2028 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
2029 	immutable->gid_tbl_len = attr.gid_tbl_len;
2030 
2031 	return 0;
2032 }
2033 
2034 int
2035 irdma_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
2036 			struct ib_port_immutable *immutable)
2037 {
2038 	struct ib_port_attr attr;
2039 	int err;
2040 
2041 	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
2042 	err = ib_query_port(ibdev, port_num, &attr);
2043 	if (err)
2044 		return err;
2045 	immutable->gid_tbl_len = 1;
2046 
2047 	return 0;
2048 }
2049 
2050 /**
2051  * irdma_get_eth_speed_and_width - Get IB port speed and width from netdev speed
2052  * @link_speed: netdev phy link speed
2053  * @active_speed: IB port speed
2054  * @active_width: IB port width
2055  */
2056 void
2057 irdma_get_eth_speed_and_width(u32 link_speed, u8 *active_speed,
2058 			      u8 *active_width)
2059 {
2060 	if (link_speed <= SPEED_1000) {
2061 		*active_width = IB_WIDTH_1X;
2062 		*active_speed = IB_SPEED_SDR;
2063 	} else if (link_speed <= SPEED_10000) {
2064 		*active_width = IB_WIDTH_1X;
2065 		*active_speed = IB_SPEED_FDR10;
2066 	} else if (link_speed <= SPEED_20000) {
2067 		*active_width = IB_WIDTH_4X;
2068 		*active_speed = IB_SPEED_DDR;
2069 	} else if (link_speed <= SPEED_25000) {
2070 		*active_width = IB_WIDTH_1X;
2071 		*active_speed = IB_SPEED_EDR;
2072 	} else if (link_speed <= SPEED_40000) {
2073 		*active_width = IB_WIDTH_4X;
2074 		*active_speed = IB_SPEED_FDR10;
2075 	} else {
2076 		*active_width = IB_WIDTH_4X;
2077 		*active_speed = IB_SPEED_EDR;
2078 	}
2079 }
2080 
2081 /**
2082  * irdma_query_port - get port attributes
2083  * @ibdev: device pointer from stack
2084  * @port: port number for query
2085  * @props: returning device attributes
2086  */
2087 int
2088 irdma_query_port(struct ib_device *ibdev, u8 port,
2089 		 struct ib_port_attr *props)
2090 {
2091 	struct irdma_device *iwdev = to_iwdev(ibdev);
2092 	struct ifnet *netdev = iwdev->netdev;
2093 
2094 	/* no need to zero out pros here. done by caller */
2095 
2096 	props->max_mtu = IB_MTU_4096;
2097 	props->active_mtu = ib_mtu_int_to_enum(netdev->if_mtu);
2098 	props->lid = 1;
2099 	props->lmc = 0;
2100 	props->sm_lid = 0;
2101 	props->sm_sl = 0;
2102 	if ((netdev->if_link_state == LINK_STATE_UP) && (netdev->if_drv_flags & IFF_DRV_RUNNING)) {
2103 		props->state = IB_PORT_ACTIVE;
2104 		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
2105 	} else {
2106 		props->state = IB_PORT_DOWN;
2107 		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
2108 	}
2109 	irdma_get_eth_speed_and_width(SPEED_100000, &props->active_speed,
2110 				      &props->active_width);
2111 
2112 	if (rdma_protocol_roce(ibdev, 1)) {
2113 		props->gid_tbl_len = 32;
2114 		kc_set_props_ip_gid_caps(props);
2115 		props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
2116 	} else {
2117 		props->gid_tbl_len = 1;
2118 	}
2119 	props->qkey_viol_cntr = 0;
2120 	props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
2121 	props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
2122 
2123 	return 0;
2124 }
2125 
/*
 * Counter names exposed through the rdma_hw_stats interface, indexed by
 * the IRDMA_HW_STAT_INDEX_* enum values. Order must match the layout of
 * struct irdma_dev_hw_stats, which irdma_get_hw_stats() memcpy()s
 * directly into the stats value array. Gen1 counters come first
 * (32-bit, then 64-bit), followed by the gen2-only counters.
 */
static const char *const irdma_hw_stat_names[] = {
	/* gen1 - 32-bit */
	[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
	[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
	[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
	[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
	[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
	[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
	[IRDMA_HW_STAT_INDEX_RXVLANERR] = "rxVlanErrors",
	/* gen1 - 64-bit */
	[IRDMA_HW_STAT_INDEX_IP4RXOCTS] = "ip4InOctets",
	[IRDMA_HW_STAT_INDEX_IP4RXPKTS] = "ip4InPkts",
	[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = "ip4InReasmRqd",
	[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = "ip4InMcastPkts",
	[IRDMA_HW_STAT_INDEX_IP4TXOCTS] = "ip4OutOctets",
	[IRDMA_HW_STAT_INDEX_IP4TXPKTS] = "ip4OutPkts",
	[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = "ip4OutSegRqd",
	[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = "ip4OutMcastPkts",
	[IRDMA_HW_STAT_INDEX_IP6RXOCTS] = "ip6InOctets",
	[IRDMA_HW_STAT_INDEX_IP6RXPKTS] = "ip6InPkts",
	[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = "ip6InReasmRqd",
	[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = "ip6InMcastPkts",
	[IRDMA_HW_STAT_INDEX_IP6TXOCTS] = "ip6OutOctets",
	[IRDMA_HW_STAT_INDEX_IP6TXPKTS] = "ip6OutPkts",
	[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = "ip6OutSegRqd",
	[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = "ip6OutMcastPkts",
	[IRDMA_HW_STAT_INDEX_RDMARXRDS] = "InRdmaReads",
	[IRDMA_HW_STAT_INDEX_RDMARXSNDS] = "InRdmaSends",
	[IRDMA_HW_STAT_INDEX_RDMARXWRS] = "InRdmaWrites",
	[IRDMA_HW_STAT_INDEX_RDMATXRDS] = "OutRdmaReads",
	[IRDMA_HW_STAT_INDEX_RDMATXSNDS] = "OutRdmaSends",
	[IRDMA_HW_STAT_INDEX_RDMATXWRS] = "OutRdmaWrites",
	[IRDMA_HW_STAT_INDEX_RDMAVBND] = "RdmaBnd",
	[IRDMA_HW_STAT_INDEX_RDMAVINV] = "RdmaInv",

	/* gen2 - 32-bit */
	[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = "cnpHandled",
	[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = "cnpIgnored",
	[IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = "cnpSent",
	/* gen2 - 64-bit */
	[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = "ip4InMcastOctets",
	[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = "ip4OutMcastOctets",
	[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = "ip6InMcastOctets",
	[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = "ip6OutMcastOctets",
	[IRDMA_HW_STAT_INDEX_UDPRXPKTS] = "RxUDP",
	[IRDMA_HW_STAT_INDEX_UDPTXPKTS] = "TxUDP",
	[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = "RxECNMrkd",
	[IRDMA_HW_STAT_INDEX_TCPRTXSEG] = "RetransSegs",
	[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = "InOptErrors",
	[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = "InProtoErrors",
	[IRDMA_HW_STAT_INDEX_TCPRXSEGS] = "InSegs",
	[IRDMA_HW_STAT_INDEX_TCPTXSEG] = "OutSegs",
};
2179 
2180 /**
2181  * irdma_alloc_hw_stats - Allocate a hw stats structure
2182  * @ibdev: device pointer from stack
2183  * @port_num: port number
2184  */
2185 struct rdma_hw_stats *
2186 irdma_alloc_hw_stats(struct ib_device *ibdev,
2187 		     u8 port_num)
2188 {
2189 	struct irdma_device *iwdev = to_iwdev(ibdev);
2190 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
2191 
2192 	int num_counters = dev->hw_attrs.max_stat_idx;
2193 	unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
2194 
2195 	return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
2196 					  lifespan);
2197 }
2198 
2199 /**
2200  * irdma_get_hw_stats - Populates the rdma_hw_stats structure
2201  * @ibdev: device pointer from stack
2202  * @stats: stats pointer from stack
2203  * @port_num: port number
2204  * @index: which hw counter the stack is requesting we update
2205  */
2206 int
2207 irdma_get_hw_stats(struct ib_device *ibdev,
2208 		   struct rdma_hw_stats *stats, u8 port_num,
2209 		   int index)
2210 {
2211 	struct irdma_device *iwdev = to_iwdev(ibdev);
2212 	struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
2213 
2214 	if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
2215 		irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
2216 
2217 	memcpy(&stats->value[0], hw_stats, sizeof(u64)* stats->num_counters);
2218 
2219 	return stats->num_counters;
2220 }
2221 
/**
 * irdma_query_gid - Query port GID
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: Entry index
 * @gid: Global ID
 *
 * Zeroes the raw GID and copies the netdev's link-level (MAC) address
 * into its leading bytes. @port and @index are not consulted, so every
 * entry reports the same MAC-based GID. Always returns 0.
 */
int
irdma_query_gid(struct ib_device *ibdev, u8 port, int index,
		union ib_gid *gid)
{
	struct irdma_device *iwdev = to_iwdev(ibdev);

	memset(gid->raw, 0, sizeof(gid->raw));
	ether_addr_copy(gid->raw, IF_LLADDR(iwdev->netdev));

	return 0;
}
2240 
2241 enum rdma_link_layer
2242 irdma_get_link_layer(struct ib_device *ibdev,
2243 		     u8 port_num)
2244 {
2245 	return IB_LINK_LAYER_ETHERNET;
2246 }
2247 
2248 inline enum ib_mtu
2249 ib_mtu_int_to_enum(int mtu)
2250 {
2251 	if (mtu >= 4096)
2252 		return IB_MTU_4096;
2253 	else if (mtu >= 2048)
2254 		return IB_MTU_2048;
2255 	else if (mtu >= 1024)
2256 		return IB_MTU_1024;
2257 	else if (mtu >= 512)
2258 		return IB_MTU_512;
2259 	else
2260 		return IB_MTU_256;
2261 }
2262 
2263 inline void
2264 kc_set_roce_uverbs_cmd_mask(struct irdma_device *iwdev)
2265 {
2266 	iwdev->ibdev.uverbs_cmd_mask |=
2267 	    BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST) |
2268 	    BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH) |
2269 	    BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH) |
2270 	    BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST);
2271 }
2272 
2273 inline void
2274 kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev)
2275 {
2276 	iwdev->ibdev.uverbs_cmd_mask =
2277 	    BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) |
2278 	    BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE) |
2279 	    BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT) |
2280 	    BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) |
2281 	    BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) |
2282 	    BIT_ULL(IB_USER_VERBS_CMD_REG_MR) |
2283 	    BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) |
2284 	    BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) |
2285 	    BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2286 	    BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) |
2287 	    BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ) |
2288 	    BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ) |
2289 	    BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
2290 	    BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP) |
2291 	    BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP) |
2292 	    BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) |
2293 	    BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ) |
2294 	    BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) |
2295 	    BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) |
2296 	    BIT_ULL(IB_USER_VERBS_CMD_BIND_MW) |
2297 	    BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) |
2298 	    BIT_ULL(IB_USER_VERBS_CMD_POST_RECV) |
2299 	    BIT_ULL(IB_USER_VERBS_CMD_POST_SEND);
2300 	iwdev->ibdev.uverbs_ex_cmd_mask =
2301 	    BIT_ULL(IB_USER_VERBS_EX_CMD_MODIFY_QP) |
2302 	    BIT_ULL(IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
2303 
2304 	if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
2305 		iwdev->ibdev.uverbs_ex_cmd_mask |= BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_CQ);
2306 }
2307