xref: /linux/drivers/infiniband/hw/irdma/verbs.c (revision db10cb9b)
1 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #include "main.h"
4 
5 /**
6  * irdma_query_device - get device attributes
7  * @ibdev: device pointer from stack
8  * @props: returning device attributes
9  * @udata: user data
10  */
11 static int irdma_query_device(struct ib_device *ibdev,
12 			      struct ib_device_attr *props,
13 			      struct ib_udata *udata)
14 {
15 	struct irdma_device *iwdev = to_iwdev(ibdev);
16 	struct irdma_pci_f *rf = iwdev->rf;
17 	struct pci_dev *pcidev = iwdev->rf->pcidev;
18 	struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;
19 
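	/* query_device defines no vendor-specific udata; reject any that is passed */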
20 	if (udata->inlen || udata->outlen)
21 		return -EINVAL;
22 
23 	memset(props, 0, sizeof(*props));
24 	addrconf_addr_eui48((u8 *)&props->sys_image_guid,
25 			    iwdev->netdev->dev_addr);
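	/* Firmware version: major in the upper 32 bits, minor in the lower 32 */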
26 	props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
27 			irdma_fw_minor_ver(&rf->sc_dev);
28 	props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
29 				  IB_DEVICE_MEM_MGT_EXTENSIONS;
30 	props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
31 	props->vendor_id = pcidev->vendor;
32 	props->vendor_part_id = pcidev->device;
33 
34 	props->hw_ver = rf->pcidev->revision;
35 	props->page_size_cap = hw_attrs->page_size_cap;
36 	props->max_mr_size = hw_attrs->max_mr_size;
37 	props->max_qp = rf->max_qp - rf->used_qps;
38 	props->max_qp_wr = hw_attrs->max_qp_wr;
39 	props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
40 	props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
41 	props->max_cq = rf->max_cq - rf->used_cqs;
42 	props->max_cqe = rf->max_cqe - 1;
43 	props->max_mr = rf->max_mr - rf->used_mrs;
44 	props->max_mw = props->max_mr;
45 	props->max_pd = rf->max_pd - rf->used_pds;
46 	props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
47 	props->max_qp_rd_atom = hw_attrs->max_hw_ird;
48 	props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
49 	if (rdma_protocol_roce(ibdev, 1)) {
50 		props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
51 		props->max_pkeys = IRDMA_PKEY_TBL_SZ;
52 	}
53 
54 	props->max_ah = rf->max_ah;
55 	props->max_mcast_grp = rf->max_mcg;
56 	props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
57 	props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
58 	props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
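	/* A CQE timestamp mask is only reported for GEN_2 and later hardware */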
59 #define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff
60 	if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
61 		props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK;
62 
63 	return 0;
64 }
65 
66 /**
67  * irdma_query_port - get port attributes
68  * @ibdev: device pointer from stack
69  * @port: port number for query
70  * @props: returning device attributes
71  */
72 static int irdma_query_port(struct ib_device *ibdev, u32 port,
73 			    struct ib_port_attr *props)
74 {
75 	struct irdma_device *iwdev = to_iwdev(ibdev);
76 	struct net_device *netdev = iwdev->netdev;
77 
78 	/* no need to zero out props here, it is done by the caller */
79 
80 	props->max_mtu = IB_MTU_4096;
81 	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
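	/* LID/LMC/SM fields have no meaning for RoCE/iWARP; report fixed values */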
82 	props->lid = 1;
83 	props->lmc = 0;
84 	props->sm_lid = 0;
85 	props->sm_sl = 0;
86 	if (netif_carrier_ok(netdev) && netif_running(netdev)) {
87 		props->state = IB_PORT_ACTIVE;
88 		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
89 	} else {
90 		props->state = IB_PORT_DOWN;
91 		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
92 	}
93 
94 	ib_get_eth_speed(ibdev, port, &props->active_speed,
95 			 &props->active_width);
96 
97 	if (rdma_protocol_roce(ibdev, 1)) {
98 		props->gid_tbl_len = 32;
99 		props->ip_gids = true;
100 		props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
101 	} else {
102 		props->gid_tbl_len = 1;
103 	}
104 	props->qkey_viol_cntr = 0;
105 	props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
106 	props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
107 
108 	return 0;
109 }
110 
111 /**
112  * irdma_disassociate_ucontext - Disassociate user context
113  * @context: ib user context
114  */
115 static void irdma_disassociate_ucontext(struct ib_ucontext *context)
116 {
117 }
118 
119 static int irdma_mmap_legacy(struct irdma_ucontext *ucontext,
120 			     struct vm_area_struct *vma)
121 {
122 	u64 pfn;
123 
124 	if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
125 		return -EINVAL;
126 
127 	vma->vm_private_data = ucontext;
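	/* PFN of the doorbell page: BAR0 base plus the HW doorbell register offset */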
128 	pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
129 	       pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
130 
131 	return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
132 				 pgprot_noncached(vma->vm_page_prot), NULL);
133 }
134 
135 static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
136 {
137 	struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);
138 
139 	kfree(entry);
140 }
141 
142 static struct rdma_user_mmap_entry*
143 irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
144 			     enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
145 {
146 	struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
147 	int ret;
148 
149 	if (!entry)
150 		return NULL;
151 
152 	entry->bar_offset = bar_offset;
153 	entry->mmap_flag = mmap_flag;
154 
155 	ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
156 					  &entry->rdma_entry, PAGE_SIZE);
157 	if (ret) {
158 		kfree(entry);
159 		return NULL;
160 	}
161 	*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
162 
163 	return &entry->rdma_entry;
164 }
165 
166 /**
167  * irdma_mmap - user memory map
168  * @context: context created during alloc
169  * @vma: kernel info for user memory map
170  */
171 static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
172 {
173 	struct rdma_user_mmap_entry *rdma_entry;
174 	struct irdma_user_mmap_entry *entry;
175 	struct irdma_ucontext *ucontext;
176 	u64 pfn;
177 	int ret;
178 
179 	ucontext = to_ucontext(context);
180 
181 	/* Legacy support for libi40iw with hard-coded mmap key */
182 	if (ucontext->legacy_mode)
183 		return irdma_mmap_legacy(ucontext, vma);
184 
185 	rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
186 	if (!rdma_entry) {
187 		ibdev_dbg(&ucontext->iwdev->ibdev,
188 			  "VERBS: pgoff[0x%lx] does not have valid entry\n",
189 			  vma->vm_pgoff);
190 		return -EINVAL;
191 	}
192 
193 	entry = to_irdma_mmap_entry(rdma_entry);
194 	ibdev_dbg(&ucontext->iwdev->ibdev,
195 		  "VERBS: bar_offset [0x%llx] mmap_flag [%d]\n",
196 		  entry->bar_offset, entry->mmap_flag);
197 
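	/* Convert the BAR offset stored in the mmap entry to a physical PFN */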
198 	pfn = (entry->bar_offset +
199 	      pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
200 
201 	switch (entry->mmap_flag) {
202 	case IRDMA_MMAP_IO_NC:
203 		ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
204 					pgprot_noncached(vma->vm_page_prot),
205 					rdma_entry);
206 		break;
207 	case IRDMA_MMAP_IO_WC:
208 		ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
209 					pgprot_writecombine(vma->vm_page_prot),
210 					rdma_entry);
211 		break;
212 	default:
213 		ret = -EINVAL;
214 	}
215 
216 	if (ret)
217 		ibdev_dbg(&ucontext->iwdev->ibdev,
218 			  "VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n",
219 			  entry->bar_offset, entry->mmap_flag, ret);
220 	rdma_user_mmap_entry_put(rdma_entry);
221 
222 	return ret;
223 }
224 
225 /**
226  * irdma_alloc_push_page - allocate a push page for qp
227  * @iwqp: qp pointer
228  */
229 static void irdma_alloc_push_page(struct irdma_qp *iwqp)
230 {
231 	struct irdma_cqp_request *cqp_request;
232 	struct cqp_cmds_info *cqp_info;
233 	struct irdma_device *iwdev = iwqp->iwdev;
234 	struct irdma_sc_qp *qp = &iwqp->sc_qp;
235 	int status;
236 
237 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
238 	if (!cqp_request)
239 		return;
240 
241 	cqp_info = &cqp_request->info;
242 	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
243 	cqp_info->post_sq = 1;
244 	cqp_info->in.u.manage_push_page.info.push_idx = 0;
245 	cqp_info->in.u.manage_push_page.info.qs_handle =
246 		qp->vsi->qos[qp->user_pri].qs_handle;
247 	cqp_info->in.u.manage_push_page.info.free_page = 0;
248 	cqp_info->in.u.manage_push_page.info.push_page_type = 0;
249 	cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
250 	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
251 
252 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
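	/* An index below max_hw_device_pages means a push page was allocated */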
253 	if (!status && cqp_request->compl_info.op_ret_val <
254 	    iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
255 		qp->push_idx = cqp_request->compl_info.op_ret_val;
256 		qp->push_offset = 0;
257 	}
258 
259 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
260 }
261 
262 /**
263  * irdma_alloc_ucontext - Allocate the user context data structure
264  * @uctx: uverbs context pointer
265  * @udata: user data
266  *
267  * This keeps track of all objects associated with a particular
268  * user-mode client.
269  */
270 static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
271 				struct ib_udata *udata)
272 {
273 #define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
274 #define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
275 	struct ib_device *ibdev = uctx->device;
276 	struct irdma_device *iwdev = to_iwdev(ibdev);
277 	struct irdma_alloc_ucontext_req req = {};
278 	struct irdma_alloc_ucontext_resp uresp = {};
279 	struct irdma_ucontext *ucontext = to_ucontext(uctx);
280 	struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
281 
282 	if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
283 	    udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
284 		return -EINVAL;
285 
286 	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
287 		return -EINVAL;
288 
289 	if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
290 		goto ver_error;
291 
292 	ucontext->iwdev = iwdev;
293 	ucontext->abi_ver = req.userspace_ver;
294 
295 	if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
296 		ucontext->use_raw_attrs = true;
297 
298 	/* GEN_1 legacy support with libi40iw */
299 	if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
300 		if (uk_attrs->hw_rev != IRDMA_GEN_1)
301 			return -EOPNOTSUPP;
302 
303 		ucontext->legacy_mode = true;
304 		uresp.max_qps = iwdev->rf->max_qp;
305 		uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
306 		uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
307 		uresp.kernel_ver = req.userspace_ver;
308 		if (ib_copy_to_udata(udata, &uresp,
309 				     min(sizeof(uresp), udata->outlen)))
310 			return -EFAULT;
311 	} else {
312 		u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
313 
314 		ucontext->db_mmap_entry =
315 			irdma_user_mmap_entry_insert(ucontext, bar_off,
316 						     IRDMA_MMAP_IO_NC,
317 						     &uresp.db_mmap_key);
318 		if (!ucontext->db_mmap_entry)
319 			return -ENOMEM;
320 
321 		uresp.kernel_ver = IRDMA_ABI_VER;
322 		uresp.feature_flags = uk_attrs->feature_flags;
323 		uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
324 		uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
325 		uresp.max_hw_inline = uk_attrs->max_hw_inline;
326 		uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
327 		uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
328 		uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
329 		uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
330 		uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
331 		uresp.hw_rev = uk_attrs->hw_rev;
332 		uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
333 		uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size;
334 		uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE;
335 		if (ib_copy_to_udata(udata, &uresp,
336 				     min(sizeof(uresp), udata->outlen))) {
337 			rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
338 			return -EFAULT;
339 		}
340 	}
341 
342 	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
343 	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
344 	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
345 	spin_lock_init(&ucontext->qp_reg_mem_list_lock);
346 
347 	return 0;
348 
349 ver_error:
350 	ibdev_err(&iwdev->ibdev,
351 		  "Invalid userspace driver version detected. Detected version %d, should be %d\n",
352 		  req.userspace_ver, IRDMA_ABI_VER);
353 	return -EINVAL;
354 }
355 
356 /**
357  * irdma_dealloc_ucontext - deallocate the user context data structure
358  * @context: user context created during alloc
359  */
360 static void irdma_dealloc_ucontext(struct ib_ucontext *context)
361 {
362 	struct irdma_ucontext *ucontext = to_ucontext(context);
363 
364 	rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
365 }
366 
367 /**
368  * irdma_alloc_pd - allocate protection domain
369  * @pd: PD pointer
370  * @udata: user data
371  */
372 static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
373 {
374 #define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
375 	struct irdma_pd *iwpd = to_iwpd(pd);
376 	struct irdma_device *iwdev = to_iwdev(pd->device);
377 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
378 	struct irdma_pci_f *rf = iwdev->rf;
379 	struct irdma_alloc_pd_resp uresp = {};
380 	struct irdma_sc_pd *sc_pd;
381 	u32 pd_id = 0;
382 	int err;
383 
384 	if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
385 		return -EINVAL;
386 
387 	err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
388 			       &rf->next_pd);
389 	if (err)
390 		return err;
391 
392 	sc_pd = &iwpd->sc_pd;
393 	if (udata) {
394 		struct irdma_ucontext *ucontext =
395 			rdma_udata_to_drv_context(udata, struct irdma_ucontext,
396 						  ibucontext);
397 		irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
398 		uresp.pd_id = pd_id;
399 		if (ib_copy_to_udata(udata, &uresp,
400 				     min(sizeof(uresp), udata->outlen))) {
401 			err = -EFAULT;
402 			goto error;
403 		}
404 	} else {
405 		irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
406 	}
407 
408 	return 0;
409 error:
410 	irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
411 
412 	return err;
413 }
414 
415 /**
416  * irdma_dealloc_pd - deallocate pd
417  * @ibpd: ptr of pd to be deallocated
418  * @udata: user data
419  */
420 static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
421 {
422 	struct irdma_pd *iwpd = to_iwpd(ibpd);
423 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
424 
425 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
426 
427 	return 0;
428 }
429 
430 /**
431  * irdma_get_pbl - Retrieve pbl from a list given a virtual
432  * address
433  * @va: user virtual address
434  * @pbl_list: pbl list to search in (QP's or CQ's)
435  */
436 static struct irdma_pbl *irdma_get_pbl(unsigned long va,
437 				       struct list_head *pbl_list)
438 {
439 	struct irdma_pbl *iwpbl;
440 
441 	list_for_each_entry (iwpbl, pbl_list, list) {
442 		if (iwpbl->user_base == va) {
443 			list_del(&iwpbl->list);
444 			iwpbl->on_list = false;
445 			return iwpbl;
446 		}
447 	}
448 
449 	return NULL;
450 }
451 
452 /**
453  * irdma_clean_cqes - clean cq entries for qp
454  * @iwqp: qp ptr (user or kernel)
455  * @iwcq: cq ptr
456  */
457 static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
458 {
459 	struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
460 	unsigned long flags;
461 
462 	spin_lock_irqsave(&iwcq->lock, flags);
463 	irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
464 	spin_unlock_irqrestore(&iwcq->lock, flags);
465 }
466 
467 static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
468 {
469 	if (iwqp->push_db_mmap_entry) {
470 		rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
471 		iwqp->push_db_mmap_entry = NULL;
472 	}
473 	if (iwqp->push_wqe_mmap_entry) {
474 		rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
475 		iwqp->push_wqe_mmap_entry = NULL;
476 	}
477 }
478 
479 static int irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
480 					 struct irdma_qp *iwqp,
481 					 u64 *push_wqe_mmap_key,
482 					 u64 *push_db_mmap_key)
483 {
484 	struct irdma_device *iwdev = ucontext->iwdev;
485 	u64 rsvd, bar_off;
486 
487 	rsvd = IRDMA_PF_BAR_RSVD;
488 	bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
489 	/* skip over db page */
490 	bar_off += IRDMA_HW_PAGE_SIZE;
491 	/* push wqe page */
492 	bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE;
493 	iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
494 					bar_off, IRDMA_MMAP_IO_WC,
495 					push_wqe_mmap_key);
496 	if (!iwqp->push_wqe_mmap_entry)
497 		return -ENOMEM;
498 
499 	/* push doorbell page */
500 	bar_off += IRDMA_HW_PAGE_SIZE;
501 	iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
502 					bar_off, IRDMA_MMAP_IO_NC,
503 					push_db_mmap_key);
504 	if (!iwqp->push_db_mmap_entry) {
505 		rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
506 		return -ENOMEM;
507 	}
508 
509 	return 0;
510 }
511 
512 /**
513  * irdma_destroy_qp - destroy qp
514  * @ibqp: qp's ib pointer, also used to get to the device's qp address
515  * @udata: user data
516  */
517 static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
518 {
519 	struct irdma_qp *iwqp = to_iwqp(ibqp);
520 	struct irdma_device *iwdev = iwqp->iwdev;
521 
522 	iwqp->sc_qp.qp_uk.destroy_pending = true;
523 
524 	if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
525 		irdma_modify_qp_to_err(&iwqp->sc_qp);
526 
527 	if (!iwqp->user_mode)
528 		cancel_delayed_work_sync(&iwqp->dwork_flush);
529 
530 	if (!iwqp->user_mode) {
531 		if (iwqp->iwscq) {
532 			irdma_clean_cqes(iwqp, iwqp->iwscq);
533 			if (iwqp->iwrcq != iwqp->iwscq)
534 				irdma_clean_cqes(iwqp, iwqp->iwrcq);
535 		}
536 	}
537 
538 	irdma_qp_rem_ref(&iwqp->ibqp);
539 	wait_for_completion(&iwqp->free_qp);
540 	irdma_free_lsmm_rsrc(iwqp);
541 	irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
542 
543 	irdma_remove_push_mmap_entries(iwqp);
544 	irdma_free_qp_rsrc(iwqp);
545 
546 	return 0;
547 }
548 
549 /**
550  * irdma_setup_virt_qp - setup for allocation of virtual qp
551  * @iwdev: irdma device
552  * @iwqp: qp ptr
553  * @init_info: initialize info to return
554  */
555 static void irdma_setup_virt_qp(struct irdma_device *iwdev,
556 			       struct irdma_qp *iwqp,
557 			       struct irdma_qp_init_info *init_info)
558 {
559 	struct irdma_pbl *iwpbl = iwqp->iwpbl;
560 	struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
561 
562 	iwqp->page = qpmr->sq_page;
563 	init_info->shadow_area_pa = qpmr->shadow;
564 	if (iwpbl->pbl_allocated) {
565 		init_info->virtual_map = true;
566 		init_info->sq_pa = qpmr->sq_pbl.idx;
567 		init_info->rq_pa = qpmr->rq_pbl.idx;
568 	} else {
569 		init_info->sq_pa = qpmr->sq_pbl.addr;
570 		init_info->rq_pa = qpmr->rq_pbl.addr;
571 	}
572 }
573 
574 /**
575  * irdma_setup_umode_qp - setup sq and rq size in user mode qp
576  * @udata: udata
577  * @iwdev: iwarp device
578  * @iwqp: qp ptr (user or kernel)
579  * @info: initialize info to return
580  * @init_attr: Initial QP create attributes
581  */
582 static int irdma_setup_umode_qp(struct ib_udata *udata,
583 				struct irdma_device *iwdev,
584 				struct irdma_qp *iwqp,
585 				struct irdma_qp_init_info *info,
586 				struct ib_qp_init_attr *init_attr)
587 {
588 	struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata,
589 				struct irdma_ucontext, ibucontext);
590 	struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
591 	struct irdma_create_qp_req req;
592 	unsigned long flags;
593 	int ret;
594 
595 	ret = ib_copy_from_udata(&req, udata,
596 				 min(sizeof(req), udata->inlen));
597 	if (ret) {
598 		ibdev_dbg(&iwdev->ibdev, "VERBS: ib_copy_from_udata fail\n");
599 		return ret;
600 	}
601 
602 	iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
603 	iwqp->user_mode = 1;
604 	if (req.user_wqe_bufs) {
605 		info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
606 		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
607 		iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
608 					    &ucontext->qp_reg_mem_list);
609 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
610 
611 		if (!iwqp->iwpbl) {
612 			ret = -ENODATA;
613 			ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
614 			return ret;
615 		}
616 	}
617 
618 	if (!ucontext->use_raw_attrs) {
619 		/**
620 		 * Maintain backward compat with older ABI which passes sq and
621 		 * rq depth in quanta in cap.max_send_wr and cap.max_recv_wr.
622 		 * There is no way to compute the correct value of
623 		 * iwqp->max_send_wr/max_recv_wr in the kernel.
624 		 */
625 		iwqp->max_send_wr = init_attr->cap.max_send_wr;
626 		iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
627 		ukinfo->sq_size = init_attr->cap.max_send_wr;
628 		ukinfo->rq_size = init_attr->cap.max_recv_wr;
629 		irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift,
630 				       &ukinfo->rq_shift);
631 	} else {
632 		ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
633 						   &ukinfo->sq_shift);
634 		if (ret)
635 			return ret;
636 
637 		ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
638 						   &ukinfo->rq_shift);
639 		if (ret)
640 			return ret;
641 
642 		iwqp->max_send_wr =
643 			(ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
644 		iwqp->max_recv_wr =
645 			(ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
646 		ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
647 		ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
648 	}
649 
650 	irdma_setup_virt_qp(iwdev, iwqp, info);
651 
652 	return 0;
653 }
654 
655 /**
656  * irdma_setup_kmode_qp - setup initialization for kernel mode qp
657  * @iwdev: iwarp device
658  * @iwqp: qp ptr (user or kernel)
659  * @info: initialize info to return
660  * @init_attr: Initial QP create attributes
661  */
662 static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
663 				struct irdma_qp *iwqp,
664 				struct irdma_qp_init_info *info,
665 				struct ib_qp_init_attr *init_attr)
666 {
667 	struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
668 	u32 size;
669 	int status;
670 	struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
671 
672 	status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
673 					      &ukinfo->sq_shift);
674 	if (status)
675 		return status;
676 
677 	status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
678 					      &ukinfo->rq_shift);
679 	if (status)
680 		return status;
681 
682 	iwqp->kqp.sq_wrid_mem =
683 		kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
684 	if (!iwqp->kqp.sq_wrid_mem)
685 		return -ENOMEM;
686 
687 	iwqp->kqp.rq_wrid_mem =
688 		kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
689 
690 	if (!iwqp->kqp.rq_wrid_mem) {
691 		kfree(iwqp->kqp.sq_wrid_mem);
692 		iwqp->kqp.sq_wrid_mem = NULL;
693 		return -ENOMEM;
694 	}
695 
696 	ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
697 	ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
698 
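	/* One DMA buffer holds the SQ WQEs, then the RQ WQEs, then the shadow area */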
699 	size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
700 	size += (IRDMA_SHADOW_AREA_SIZE << 3);
701 
702 	mem->size = ALIGN(size, 256);
703 	mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
704 				     &mem->pa, GFP_KERNEL);
705 	if (!mem->va) {
706 		kfree(iwqp->kqp.sq_wrid_mem);
707 		iwqp->kqp.sq_wrid_mem = NULL;
708 		kfree(iwqp->kqp.rq_wrid_mem);
709 		iwqp->kqp.rq_wrid_mem = NULL;
710 		return -ENOMEM;
711 	}
712 
713 	ukinfo->sq = mem->va;
714 	info->sq_pa = mem->pa;
715 	ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
716 	info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
717 	ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
718 	info->shadow_area_pa =
719 		info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
720 	ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
721 	ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
722 	ukinfo->qp_id = iwqp->ibqp.qp_num;
723 
724 	iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
725 	iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
726 	init_attr->cap.max_send_wr = iwqp->max_send_wr;
727 	init_attr->cap.max_recv_wr = iwqp->max_recv_wr;
728 
729 	return 0;
730 }
731 
732 static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
733 {
734 	struct irdma_pci_f *rf = iwqp->iwdev->rf;
735 	struct irdma_cqp_request *cqp_request;
736 	struct cqp_cmds_info *cqp_info;
737 	struct irdma_create_qp_info *qp_info;
738 	int status;
739 
740 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
741 	if (!cqp_request)
742 		return -ENOMEM;
743 
744 	cqp_info = &cqp_request->info;
745 	qp_info = &cqp_request->info.in.u.qp_create.info;
746 	memset(qp_info, 0, sizeof(*qp_info));
747 	qp_info->mac_valid = true;
748 	qp_info->cq_num_valid = true;
749 	qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
750 
751 	cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
752 	cqp_info->post_sq = 1;
753 	cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
754 	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
755 	status = irdma_handle_cqp_op(rf, cqp_request);
756 	irdma_put_cqp_request(&rf->cqp, cqp_request);
757 
758 	return status;
759 }
760 
761 static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
762 					       struct irdma_qp_host_ctx_info *ctx_info)
763 {
764 	struct irdma_device *iwdev = iwqp->iwdev;
765 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
766 	struct irdma_roce_offload_info *roce_info;
767 	struct irdma_udp_offload_info *udp_info;
768 
769 	udp_info = &iwqp->udp_info;
770 	udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
771 	udp_info->cwnd = iwdev->roce_cwnd;
772 	udp_info->rexmit_thresh = 2;
773 	udp_info->rnr_nak_thresh = 2;
774 	udp_info->src_port = 0xc000;
775 	udp_info->dst_port = ROCE_V2_UDP_DPORT;
776 	roce_info = &iwqp->roce_info;
777 	ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);
778 
779 	roce_info->rd_en = true;
780 	roce_info->wr_rdresp_en = true;
781 	roce_info->bind_en = true;
782 	roce_info->dcqcn_en = false;
783 	roce_info->rtomin = 5;
784 
785 	roce_info->ack_credits = iwdev->roce_ackcreds;
786 	roce_info->ird_size = dev->hw_attrs.max_hw_ird;
787 	roce_info->ord_size = dev->hw_attrs.max_hw_ord;
788 
789 	if (!iwqp->user_mode) {
790 		roce_info->priv_mode_en = true;
791 		roce_info->fast_reg_en = true;
792 		roce_info->udprivcq_en = true;
793 	}
794 	roce_info->roce_tver = 0;
795 
796 	ctx_info->roce_info = &iwqp->roce_info;
797 	ctx_info->udp_info = &iwqp->udp_info;
798 	irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
799 }
800 
801 static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
802 					     struct irdma_qp_host_ctx_info *ctx_info)
803 {
804 	struct irdma_device *iwdev = iwqp->iwdev;
805 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
806 	struct irdma_iwarp_offload_info *iwarp_info;
807 
808 	iwarp_info = &iwqp->iwarp_info;
809 	ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);
810 	iwarp_info->rd_en = true;
811 	iwarp_info->wr_rdresp_en = true;
812 	iwarp_info->bind_en = true;
813 	iwarp_info->ecn_en = true;
814 	iwarp_info->rtomin = 5;
815 
816 	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
817 		iwarp_info->ib_rd_en = true;
818 	if (!iwqp->user_mode) {
819 		iwarp_info->priv_mode_en = true;
820 		iwarp_info->fast_reg_en = true;
821 	}
822 	iwarp_info->ddp_ver = 1;
823 	iwarp_info->rdmap_ver = 1;
824 
825 	ctx_info->iwarp_info = &iwqp->iwarp_info;
826 	ctx_info->iwarp_info_valid = true;
827 	irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
828 	ctx_info->iwarp_info_valid = false;
829 }
830 
831 static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
832 				   struct irdma_device *iwdev)
833 {
834 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
835 	struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
836 
837 	if (init_attr->create_flags)
838 		return -EOPNOTSUPP;
839 
840 	if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
841 	    init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
842 	    init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
843 		return -EINVAL;
844 
845 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
846 		if (init_attr->qp_type != IB_QPT_RC &&
847 		    init_attr->qp_type != IB_QPT_UD &&
848 		    init_attr->qp_type != IB_QPT_GSI)
849 			return -EOPNOTSUPP;
850 	} else {
851 		if (init_attr->qp_type != IB_QPT_RC)
852 			return -EOPNOTSUPP;
853 	}
854 
855 	return 0;
856 }
857 
858 static void irdma_flush_worker(struct work_struct *work)
859 {
860 	struct delayed_work *dwork = to_delayed_work(work);
861 	struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
862 
863 	irdma_generate_flush_completions(iwqp);
864 }
865 
866 /**
867  * irdma_create_qp - create qp
868  * @ibqp: ptr of qp
869  * @init_attr: attributes for qp
870  * @udata: user data for create qp
871  */
872 static int irdma_create_qp(struct ib_qp *ibqp,
873 			   struct ib_qp_init_attr *init_attr,
874 			   struct ib_udata *udata)
875 {
876 #define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
877 #define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
878 	struct ib_pd *ibpd = ibqp->pd;
879 	struct irdma_pd *iwpd = to_iwpd(ibpd);
880 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
881 	struct irdma_pci_f *rf = iwdev->rf;
882 	struct irdma_qp *iwqp = to_iwqp(ibqp);
883 	struct irdma_create_qp_resp uresp = {};
884 	u32 qp_num = 0;
885 	int err_code;
886 	struct irdma_sc_qp *qp;
887 	struct irdma_sc_dev *dev = &rf->sc_dev;
888 	struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
889 	struct irdma_qp_init_info init_info = {};
890 	struct irdma_qp_host_ctx_info *ctx_info;
891 
892 	err_code = irdma_validate_qp_attrs(init_attr, iwdev);
893 	if (err_code)
894 		return err_code;
895 
896 	if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
897 		      udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
898 		return -EINVAL;
899 
900 	init_info.vsi = &iwdev->vsi;
901 	init_info.qp_uk_init_info.uk_attrs = uk_attrs;
902 	init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
903 	init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
904 	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
905 	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
906 	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
907 
908 	qp = &iwqp->sc_qp;
909 	qp->qp_uk.back_qp = iwqp;
910 	qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
911 
912 	iwqp->iwdev = iwdev;
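	/* One DMA allocation holds the Q2 buffer followed by the QP host context */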
913 	iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE,
914 				      256);
915 	iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device,
916 						 iwqp->q2_ctx_mem.size,
917 						 &iwqp->q2_ctx_mem.pa,
918 						 GFP_KERNEL);
919 	if (!iwqp->q2_ctx_mem.va)
920 		return -ENOMEM;
921 
922 	init_info.q2 = iwqp->q2_ctx_mem.va;
923 	init_info.q2_pa = iwqp->q2_ctx_mem.pa;
924 	init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE);
925 	init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
926 
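	/* The GSI QP always uses QP number 1; other QPs get an id from irdma_alloc_rsrc() */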
927 	if (init_attr->qp_type == IB_QPT_GSI)
928 		qp_num = 1;
929 	else
930 		err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
931 					    &qp_num, &rf->next_qp);
932 	if (err_code)
933 		goto error;
934 
935 	iwqp->iwpd = iwpd;
936 	iwqp->ibqp.qp_num = qp_num;
937 	qp = &iwqp->sc_qp;
938 	iwqp->iwscq = to_iwcq(init_attr->send_cq);
939 	iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
940 	iwqp->host_ctx.va = init_info.host_ctx;
941 	iwqp->host_ctx.pa = init_info.host_ctx_pa;
942 	iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
943 
944 	init_info.pd = &iwpd->sc_pd;
945 	init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
946 	if (!rdma_protocol_roce(&iwdev->ibdev, 1))
947 		init_info.qp_uk_init_info.first_sq_wq = 1;
948 	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
949 	init_waitqueue_head(&iwqp->waitq);
950 	init_waitqueue_head(&iwqp->mod_qp_waitq);
951 
952 	if (udata) {
953 		init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
954 		err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info,
955 						init_attr);
956 	} else {
957 		INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
958 		init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
959 		err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
960 	}
961 
962 	if (err_code) {
963 		ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n");
964 		goto error;
965 	}
966 
967 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
968 		if (init_attr->qp_type == IB_QPT_RC) {
969 			init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
970 			init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
971 							    IRDMA_WRITE_WITH_IMM |
972 							    IRDMA_ROCE;
973 		} else {
974 			init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
975 			init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
976 							    IRDMA_ROCE;
977 		}
978 	} else {
979 		init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
980 		init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
981 	}
982 
983 	if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)
984 		init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE;
985 
986 	err_code = irdma_sc_qp_init(qp, &init_info);
987 	if (err_code) {
988 		ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n");
989 		goto error;
990 	}
991 
992 	ctx_info = &iwqp->ctx_info;
993 	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
994 	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
995 
996 	if (rdma_protocol_roce(&iwdev->ibdev, 1))
997 		irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
998 	else
999 		irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
1000 
1001 	err_code = irdma_cqp_create_qp_cmd(iwqp);
1002 	if (err_code)
1003 		goto error;
1004 
1005 	refcount_set(&iwqp->refcnt, 1);
1006 	spin_lock_init(&iwqp->lock);
1007 	spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
1008 	iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1009 	rf->qp_table[qp_num] = iwqp;
1010 
1011 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
1012 		if (dev->ws_add(&iwdev->vsi, 0)) {
1013 			irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
1014 			err_code = -EINVAL;
1015 			goto error;
1016 		}
1017 
1018 		irdma_qp_add_qos(&iwqp->sc_qp);
1019 	}
1020 
1021 	if (udata) {
1022 		/* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
1023 		if (udata->outlen < sizeof(uresp)) {
1024 			uresp.lsmm = 1;
1025 			uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
1026 		} else {
1027 			if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
1028 				uresp.lsmm = 1;
1029 		}
1030 		uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
1031 		uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
1032 		uresp.qp_id = qp_num;
1033 		uresp.qp_caps = qp->qp_uk.qp_caps;
1034 
1035 		err_code = ib_copy_to_udata(udata, &uresp,
1036 					    min(sizeof(uresp), udata->outlen));
1037 		if (err_code) {
1038 			ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n");
1039 			irdma_destroy_qp(&iwqp->ibqp, udata);
1040 			return err_code;
1041 		}
1042 	}
1043 
1044 	init_completion(&iwqp->free_qp);
1045 	return 0;
1046 
1047 error:
1048 	irdma_free_qp_rsrc(iwqp);
1049 	return err_code;
1050 }
1051 
1052 static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
1053 {
1054 	int acc_flags = 0;
1055 
1056 	if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
1057 		if (iwqp->roce_info.wr_rdresp_en) {
1058 			acc_flags |= IB_ACCESS_LOCAL_WRITE;
1059 			acc_flags |= IB_ACCESS_REMOTE_WRITE;
1060 		}
1061 		if (iwqp->roce_info.rd_en)
1062 			acc_flags |= IB_ACCESS_REMOTE_READ;
1063 		if (iwqp->roce_info.bind_en)
1064 			acc_flags |= IB_ACCESS_MW_BIND;
1065 	} else {
1066 		if (iwqp->iwarp_info.wr_rdresp_en) {
1067 			acc_flags |= IB_ACCESS_LOCAL_WRITE;
1068 			acc_flags |= IB_ACCESS_REMOTE_WRITE;
1069 		}
1070 		if (iwqp->iwarp_info.rd_en)
1071 			acc_flags |= IB_ACCESS_REMOTE_READ;
1072 		if (iwqp->iwarp_info.bind_en)
1073 			acc_flags |= IB_ACCESS_MW_BIND;
1074 	}
1075 	return acc_flags;
1076 }
1077 
1078 /**
1079  * irdma_query_qp - query qp attributes
1080  * @ibqp: qp pointer
1081  * @attr: attributes pointer
1082  * @attr_mask: Not used
1083  * @init_attr: qp attributes to return
1084  */
1085 static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1086 			  int attr_mask, struct ib_qp_init_attr *init_attr)
1087 {
1088 	struct irdma_qp *iwqp = to_iwqp(ibqp);
1089 	struct irdma_sc_qp *qp = &iwqp->sc_qp;
1090 
1091 	memset(attr, 0, sizeof(*attr));
1092 	memset(init_attr, 0, sizeof(*init_attr));
1093 
1094 	attr->qp_state = iwqp->ibqp_state;
1095 	attr->cur_qp_state = iwqp->ibqp_state;
1096 	attr->cap.max_send_wr = iwqp->max_send_wr;
1097 	attr->cap.max_recv_wr = iwqp->max_recv_wr;
1098 	attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
1099 	attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
1100 	attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
1101 	attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
1102 	attr->port_num = 1;
1103 	if (rdma_protocol_roce(ibqp->device, 1)) {
1104 		attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
1105 		attr->qkey = iwqp->roce_info.qkey;
1106 		attr->rq_psn = iwqp->udp_info.epsn;
1107 		attr->sq_psn = iwqp->udp_info.psn_nxt;
1108 		attr->dest_qp_num = iwqp->roce_info.dest_qp;
1109 		attr->pkey_index = iwqp->roce_info.p_key;
1110 		attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
1111 		attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
1112 		attr->max_rd_atomic = iwqp->roce_info.ord_size;
1113 		attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
1114 	}
1115 
1116 	init_attr->event_handler = iwqp->ibqp.event_handler;
1117 	init_attr->qp_context = iwqp->ibqp.qp_context;
1118 	init_attr->send_cq = iwqp->ibqp.send_cq;
1119 	init_attr->recv_cq = iwqp->ibqp.recv_cq;
1120 	init_attr->cap = attr->cap;
1121 
1122 	return 0;
1123 }
1124 
1125 /**
1126  * irdma_query_pkey - Query partition key
1127  * @ibdev: device pointer from stack
1128  * @port: port number
1129  * @index: index of pkey
1130  * @pkey: pointer to store the pkey
1131  */
1132 static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1133 			    u16 *pkey)
1134 {
1135 	if (index >= IRDMA_PKEY_TBL_SZ)
1136 		return -EINVAL;
1137 
1138 	*pkey = IRDMA_DEFAULT_PKEY;
1139 	return 0;
1140 }
1141 
1142 static u8 irdma_roce_get_vlan_prio(const struct ib_gid_attr *attr, u8 prio)
1143 {
1144 	struct net_device *ndev;
1145 
1146 	rcu_read_lock();
1147 	ndev = rcu_dereference(attr->ndev);
1148 	if (!ndev)
1149 		goto exit;
1150 	if (is_vlan_dev(ndev)) {
1151 		u16 vlan_qos = vlan_dev_get_egress_qos_mask(ndev, prio);
1152 
1153 		prio = (vlan_qos & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1154 	}
1155 exit:
1156 	rcu_read_unlock();
1157 	return prio;
1158 }
1159 
1160 /**
1161  * irdma_modify_qp_roce - modify qp request
1162  * @ibqp: qp's pointer for modify
1163  * @attr: access attributes
1164  * @attr_mask: state mask
1165  * @udata: user data
1166  */
1167 int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1168 			 int attr_mask, struct ib_udata *udata)
1169 {
1170 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
1171 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
1172 	struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
1173 	struct irdma_qp *iwqp = to_iwqp(ibqp);
1174 	struct irdma_device *iwdev = iwqp->iwdev;
1175 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1176 	struct irdma_qp_host_ctx_info *ctx_info;
1177 	struct irdma_roce_offload_info *roce_info;
1178 	struct irdma_udp_offload_info *udp_info;
1179 	struct irdma_modify_qp_info info = {};
1180 	struct irdma_modify_qp_resp uresp = {};
1181 	struct irdma_modify_qp_req ureq = {};
1182 	unsigned long flags;
1183 	u8 issue_modify_qp = 0;
1184 	int ret = 0;
1185 
1186 	ctx_info = &iwqp->ctx_info;
1187 	roce_info = &iwqp->roce_info;
1188 	udp_info = &iwqp->udp_info;
1189 
1190 	if (udata) {
1191 		/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
1192 		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
1193 		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
1194 			return -EINVAL;
1195 	}
1196 
1197 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1198 		return -EOPNOTSUPP;
1199 
1200 	if (attr_mask & IB_QP_DEST_QPN)
1201 		roce_info->dest_qp = attr->dest_qp_num;
1202 
1203 	if (attr_mask & IB_QP_PKEY_INDEX) {
1204 		ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
1205 				       &roce_info->p_key);
1206 		if (ret)
1207 			return ret;
1208 	}
1209 
1210 	if (attr_mask & IB_QP_QKEY)
1211 		roce_info->qkey = attr->qkey;
1212 
1213 	if (attr_mask & IB_QP_PATH_MTU)
1214 		udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);
1215 
1216 	if (attr_mask & IB_QP_SQ_PSN) {
1217 		udp_info->psn_nxt = attr->sq_psn;
1218 		udp_info->lsn = 0xffff;
1219 		udp_info->psn_una = attr->sq_psn;
1220 		udp_info->psn_max = attr->sq_psn;
1221 	}
1222 
1223 	if (attr_mask & IB_QP_RQ_PSN)
1224 		udp_info->epsn = attr->rq_psn;
1225 
1226 	if (attr_mask & IB_QP_RNR_RETRY)
1227 		udp_info->rnr_nak_thresh = attr->rnr_retry;
1228 
1229 	if (attr_mask & IB_QP_RETRY_CNT)
1230 		udp_info->rexmit_thresh = attr->retry_cnt;
1231 
1232 	ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;
1233 
1234 	if (attr_mask & IB_QP_AV) {
1235 		struct irdma_av *av = &iwqp->roce_ah.av;
1236 		const struct ib_gid_attr *sgid_attr =
1237 				attr->ah_attr.grh.sgid_attr;
1238 		u16 vlan_id = VLAN_N_VID;
1239 		u32 local_ip[4];
1240 
1241 		memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
1242 		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
1243 			udp_info->ttl = attr->ah_attr.grh.hop_limit;
1244 			udp_info->flow_label = attr->ah_attr.grh.flow_label;
1245 			udp_info->tos = attr->ah_attr.grh.traffic_class;
1246 			udp_info->src_port =
1247 				rdma_get_udp_sport(udp_info->flow_label,
1248 						   ibqp->qp_num,
1249 						   roce_info->dest_qp);
1250 			irdma_qp_rem_qos(&iwqp->sc_qp);
1251 			dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
1252 			if (iwqp->sc_qp.vsi->dscp_mode)
1253 				ctx_info->user_pri =
1254 				iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
1255 			else
1256 				ctx_info->user_pri = rt_tos2priority(udp_info->tos);
1257 		}
1258 		ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
1259 					      ctx_info->roce_info->mac_addr);
1260 		if (ret)
1261 			return ret;
1262 		ctx_info->user_pri = irdma_roce_get_vlan_prio(sgid_attr,
1263 							      ctx_info->user_pri);
1264 		if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
1265 			return -ENOMEM;
1266 		iwqp->sc_qp.user_pri = ctx_info->user_pri;
1267 		irdma_qp_add_qos(&iwqp->sc_qp);
1268 
1269 		if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
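		/* No VLAN on the GID but DCB VLAN mode is on: use VLAN 0 so user_pri is still tagged */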
1270 			vlan_id = 0;
1271 		if (vlan_id < VLAN_N_VID) {
1272 			udp_info->insert_vlan_tag = true;
1273 			udp_info->vlan_tag = vlan_id |
1274 				ctx_info->user_pri << VLAN_PRIO_SHIFT;
1275 		} else {
1276 			udp_info->insert_vlan_tag = false;
1277 		}
1278 
1279 		av->attrs = attr->ah_attr;
1280 		rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
1281 		rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
1282 		av->net_type = rdma_gid_attr_network_type(sgid_attr);
1283 		if (av->net_type == RDMA_NETWORK_IPV6) {
1284 			__be32 *daddr =
1285 				av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
1286 			__be32 *saddr =
1287 				av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
1288 
1289 			irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
1290 			irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);
1291 
1292 			udp_info->ipv4 = false;
1293 			irdma_copy_ip_ntohl(local_ip, daddr);
1294 
1295 		} else if (av->net_type == RDMA_NETWORK_IPV4) {
1296 			__be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
1297 			__be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
1298 
1299 			local_ip[0] = ntohl(daddr);
1300 
1301 			udp_info->ipv4 = true;
1302 			udp_info->dest_ip_addr[0] = 0;
1303 			udp_info->dest_ip_addr[1] = 0;
1304 			udp_info->dest_ip_addr[2] = 0;
1305 			udp_info->dest_ip_addr[3] = local_ip[0];
1306 
1307 			udp_info->local_ipaddr[0] = 0;
1308 			udp_info->local_ipaddr[1] = 0;
1309 			udp_info->local_ipaddr[2] = 0;
1310 			udp_info->local_ipaddr[3] = ntohl(saddr);
1311 		}
1312 		udp_info->arp_idx =
1313 			irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
1314 				      attr->ah_attr.roce.dmac);
1315 	}
1316 
1317 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1318 		if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
1319 			ibdev_err(&iwdev->ibdev,
1320 				  "rd_atomic = %d, above max_hw_ord=%d\n",
1321 				  attr->max_rd_atomic,
1322 				  dev->hw_attrs.max_hw_ord);
1323 			return -EINVAL;
1324 		}
1325 		if (attr->max_rd_atomic)
1326 			roce_info->ord_size = attr->max_rd_atomic;
1327 		info.ord_valid = true;
1328 	}
1329 
1330 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1331 		if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
1332 			ibdev_err(&iwdev->ibdev,
1333 				  "dest_rd_atomic = %d, above max_hw_ird=%d\n",
1334 				   attr->max_dest_rd_atomic,
1335 				   dev->hw_attrs.max_hw_ird);
1336 			return -EINVAL;
1337 		}
1338 		if (attr->max_dest_rd_atomic)
1339 			roce_info->ird_size = attr->max_dest_rd_atomic;
1340 	}
1341 
1342 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
1343 		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1344 			roce_info->wr_rdresp_en = true;
1345 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1346 			roce_info->wr_rdresp_en = true;
1347 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1348 			roce_info->rd_en = true;
1349 	}
1350 
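	/* Wait for any in-flight HW modify-QP on this QP to complete */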
1351 	wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1352 
1353 	ibdev_dbg(&iwdev->ibdev,
1354 		  "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
1355 		  __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1356 		  iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);
1357 
1358 	spin_lock_irqsave(&iwqp->lock, flags);
1359 	if (attr_mask & IB_QP_STATE) {
1360 		if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
1361 					iwqp->ibqp.qp_type, attr_mask)) {
1362 			ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
1363 				   iwqp->ibqp.qp_num, iwqp->ibqp_state,
1364 				   attr->qp_state);
1365 			ret = -EINVAL;
1366 			goto exit;
1367 		}
1368 		info.curr_iwarp_state = iwqp->iwarp_state;
1369 
1370 		switch (attr->qp_state) {
1371 		case IB_QPS_INIT:
1372 			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1373 				ret = -EINVAL;
1374 				goto exit;
1375 			}
1376 
1377 			if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1378 				info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1379 				issue_modify_qp = 1;
1380 			}
1381 			break;
1382 		case IB_QPS_RTR:
1383 			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1384 				ret = -EINVAL;
1385 				goto exit;
1386 			}
1387 			info.arp_cache_idx_valid = true;
1388 			info.cq_num_valid = true;
1389 			info.next_iwarp_state = IRDMA_QP_STATE_RTR;
1390 			issue_modify_qp = 1;
1391 			break;
1392 		case IB_QPS_RTS:
1393 			if (iwqp->ibqp_state < IB_QPS_RTR ||
1394 			    iwqp->ibqp_state == IB_QPS_ERR) {
1395 				ret = -EINVAL;
1396 				goto exit;
1397 			}
1398 
1399 			info.arp_cache_idx_valid = true;
1400 			info.cq_num_valid = true;
1401 			info.ord_valid = true;
1402 			info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1403 			issue_modify_qp = 1;
1404 			if (iwdev->push_mode && udata &&
1405 			    iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1406 			    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1407 				spin_unlock_irqrestore(&iwqp->lock, flags);
1408 				irdma_alloc_push_page(iwqp);
1409 				spin_lock_irqsave(&iwqp->lock, flags);
1410 			}
1411 			break;
1412 		case IB_QPS_SQD:
1413 			if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
1414 				goto exit;
1415 
1416 			if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
1417 				ret = -EINVAL;
1418 				goto exit;
1419 			}
1420 
1421 			info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1422 			issue_modify_qp = 1;
1423 			break;
1424 		case IB_QPS_SQE:
1425 		case IB_QPS_ERR:
1426 		case IB_QPS_RESET:
1427 			if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
1428 				spin_unlock_irqrestore(&iwqp->lock, flags);
1429 				info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1430 				irdma_hw_modify_qp(iwdev, iwqp, &info, true);
1431 				spin_lock_irqsave(&iwqp->lock, flags);
1432 			}
1433 
1434 			if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1435 				spin_unlock_irqrestore(&iwqp->lock, flags);
1436 				if (udata && udata->inlen) {
1437 					if (ib_copy_from_udata(&ureq, udata,
1438 					    min(sizeof(ureq), udata->inlen)))
1439 						return -EINVAL;
1440 
1441 					irdma_flush_wqes(iwqp,
1442 					    (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1443 					    (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1444 					    IRDMA_REFLUSH);
1445 				}
1446 				return 0;
1447 			}
1448 
1449 			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1450 			issue_modify_qp = 1;
1451 			break;
1452 		default:
1453 			ret = -EINVAL;
1454 			goto exit;
1455 		}
1456 
1457 		iwqp->ibqp_state = attr->qp_state;
1458 	}
1459 
1460 	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1461 	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1462 	irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1463 	spin_unlock_irqrestore(&iwqp->lock, flags);
1464 
1465 	if (attr_mask & IB_QP_STATE) {
1466 		if (issue_modify_qp) {
1467 			ctx_info->rem_endpoint_idx = udp_info->arp_idx;
1468 			if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1469 				return -EINVAL;
1470 			spin_lock_irqsave(&iwqp->lock, flags);
1471 			if (iwqp->iwarp_state == info.curr_iwarp_state) {
1472 				iwqp->iwarp_state = info.next_iwarp_state;
1473 				iwqp->ibqp_state = attr->qp_state;
1474 			}
1475 			if (iwqp->ibqp_state > IB_QPS_RTS &&
1476 			    !iwqp->flush_issued) {
1477 				spin_unlock_irqrestore(&iwqp->lock, flags);
1478 				irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
1479 						       IRDMA_FLUSH_RQ |
1480 						       IRDMA_FLUSH_WAIT);
1481 				iwqp->flush_issued = 1;
1482 			} else {
1483 				spin_unlock_irqrestore(&iwqp->lock, flags);
1484 			}
1485 		} else {
1486 			iwqp->ibqp_state = attr->qp_state;
1487 		}
1488 		if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1489 			struct irdma_ucontext *ucontext;
1490 
1491 			ucontext = rdma_udata_to_drv_context(udata,
1492 					struct irdma_ucontext, ibucontext);
1493 			if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1494 			    !iwqp->push_wqe_mmap_entry &&
1495 			    !irdma_setup_push_mmap_entries(ucontext, iwqp,
1496 				&uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1497 				uresp.push_valid = 1;
1498 				uresp.push_offset = iwqp->sc_qp.push_offset;
1499 			}
1500 			ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1501 					       udata->outlen));
1502 			if (ret) {
1503 				irdma_remove_push_mmap_entries(iwqp);
1504 				ibdev_dbg(&iwdev->ibdev,
1505 					  "VERBS: copy_to_udata failed\n");
1506 				return ret;
1507 			}
1508 		}
1509 	}
1510 
1511 	return 0;
1512 exit:
1513 	spin_unlock_irqrestore(&iwqp->lock, flags);
1514 
1515 	return ret;
1516 }
1517 
1518 /**
1519  * irdma_modify_qp - modify qp request
1520  * @ibqp: qp's pointer for modify
1521  * @attr: access attributes
1522  * @attr_mask: state mask
1523  * @udata: user data
1524  */
1525 int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1526 		    struct ib_udata *udata)
1527 {
1528 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
1529 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
1530 	struct irdma_qp *iwqp = to_iwqp(ibqp);
1531 	struct irdma_device *iwdev = iwqp->iwdev;
1532 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1533 	struct irdma_qp_host_ctx_info *ctx_info;
1534 	struct irdma_tcp_offload_info *tcp_info;
1535 	struct irdma_iwarp_offload_info *offload_info;
1536 	struct irdma_modify_qp_info info = {};
1537 	struct irdma_modify_qp_resp uresp = {};
1538 	struct irdma_modify_qp_req ureq = {};
1539 	u8 issue_modify_qp = 0;
1540 	u8 dont_wait = 0;
1541 	int err;
1542 	unsigned long flags;
1543 
1544 	if (udata) {
1545 		/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
1546 		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
1547 		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
1548 			return -EINVAL;
1549 	}
1550 
1551 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1552 		return -EOPNOTSUPP;
1553 
1554 	ctx_info = &iwqp->ctx_info;
1555 	offload_info = &iwqp->iwarp_info;
1556 	tcp_info = &iwqp->tcp_info;
1557 	wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1558 	ibdev_dbg(&iwdev->ibdev,
1559 		  "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
1560 		  __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1561 		  iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq,
1562 		  iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
1563 
1564 	spin_lock_irqsave(&iwqp->lock, flags);
1565 	if (attr_mask & IB_QP_STATE) {
1566 		info.curr_iwarp_state = iwqp->iwarp_state;
1567 		switch (attr->qp_state) {
1568 		case IB_QPS_INIT:
1569 		case IB_QPS_RTR:
1570 			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1571 				err = -EINVAL;
1572 				goto exit;
1573 			}
1574 
1575 			if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1576 				info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1577 				issue_modify_qp = 1;
1578 			}
1579 			if (iwdev->push_mode && udata &&
1580 			    iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1581 			    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1582 				spin_unlock_irqrestore(&iwqp->lock, flags);
1583 				irdma_alloc_push_page(iwqp);
1584 				spin_lock_irqsave(&iwqp->lock, flags);
1585 			}
1586 			break;
1587 		case IB_QPS_RTS:
1588 			if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
1589 			    !iwqp->cm_id) {
1590 				err = -EINVAL;
1591 				goto exit;
1592 			}
1593 
1594 			issue_modify_qp = 1;
1595 			iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
1596 			iwqp->hte_added = 1;
1597 			info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1598 			info.tcp_ctx_valid = true;
1599 			info.ord_valid = true;
1600 			info.arp_cache_idx_valid = true;
1601 			info.cq_num_valid = true;
1602 			break;
1603 		case IB_QPS_SQD:
1604 			if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
1605 				err = 0;
1606 				goto exit;
1607 			}
1608 
1609 			if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
1610 			    iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
1611 				err = 0;
1612 				goto exit;
1613 			}
1614 
1615 			if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
1616 				err = -EINVAL;
1617 				goto exit;
1618 			}
1619 
1620 			info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
1621 			issue_modify_qp = 1;
1622 			break;
1623 		case IB_QPS_SQE:
1624 			if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
1625 				err = -EINVAL;
1626 				goto exit;
1627 			}
1628 
1629 			info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
1630 			issue_modify_qp = 1;
1631 			break;
1632 		case IB_QPS_ERR:
1633 		case IB_QPS_RESET:
1634 			if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1635 				spin_unlock_irqrestore(&iwqp->lock, flags);
1636 				if (udata && udata->inlen) {
1637 					if (ib_copy_from_udata(&ureq, udata,
1638 					    min(sizeof(ureq), udata->inlen)))
1639 						return -EINVAL;
1640 
1641 					irdma_flush_wqes(iwqp,
1642 					    (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1643 					    (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1644 					    IRDMA_REFLUSH);
1645 				}
1646 				return 0;
1647 			}
1648 
1649 			if (iwqp->sc_qp.term_flags) {
1650 				spin_unlock_irqrestore(&iwqp->lock, flags);
1651 				irdma_terminate_del_timer(&iwqp->sc_qp);
1652 				spin_lock_irqsave(&iwqp->lock, flags);
1653 			}
1654 			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1655 			if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
1656 			    iwdev->iw_status &&
1657 			    iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
1658 				info.reset_tcp_conn = true;
1659 			else
1660 				dont_wait = 1;
1661 
1662 			issue_modify_qp = 1;
1663 			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1664 			break;
1665 		default:
1666 			err = -EINVAL;
1667 			goto exit;
1668 		}
1669 
1670 		iwqp->ibqp_state = attr->qp_state;
1671 	}
1672 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
1673 		ctx_info->iwarp_info_valid = true;
1674 		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1675 			offload_info->wr_rdresp_en = true;
1676 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1677 			offload_info->wr_rdresp_en = true;
1678 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1679 			offload_info->rd_en = true;
1680 	}
1681 
1682 	if (ctx_info->iwarp_info_valid) {
1683 		ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1684 		ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1685 		irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1686 	}
1687 	spin_unlock_irqrestore(&iwqp->lock, flags);
1688 
1689 	if (attr_mask & IB_QP_STATE) {
1690 		if (issue_modify_qp) {
1691 			ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
1692 			if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1693 				return -EINVAL;
1694 		}
1695 
1696 		spin_lock_irqsave(&iwqp->lock, flags);
1697 		if (iwqp->iwarp_state == info.curr_iwarp_state) {
1698 			iwqp->iwarp_state = info.next_iwarp_state;
1699 			iwqp->ibqp_state = attr->qp_state;
1700 		}
1701 		spin_unlock_irqrestore(&iwqp->lock, flags);
1702 	}
1703 
1704 	if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
1705 		if (dont_wait) {
1706 			if (iwqp->hw_tcp_state) {
1707 				spin_lock_irqsave(&iwqp->lock, flags);
1708 				iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
1709 				iwqp->last_aeq = IRDMA_AE_RESET_SENT;
1710 				spin_unlock_irqrestore(&iwqp->lock, flags);
1711 			}
1712 			irdma_cm_disconn(iwqp);
1713 		} else {
1714 			int close_timer_started;
1715 
1716 			spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
1717 
1718 			if (iwqp->cm_node) {
1719 				refcount_inc(&iwqp->cm_node->refcnt);
1720 				spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1721 				close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
1722 				if (iwqp->cm_id && close_timer_started == 1)
1723 					irdma_schedule_cm_timer(iwqp->cm_node,
1724 						(struct irdma_puda_buf *)iwqp,
1725 						IRDMA_TIMER_TYPE_CLOSE, 1, 0);
1726 
1727 				irdma_rem_ref_cm_node(iwqp->cm_node);
1728 			} else {
1729 				spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1730 			}
1731 		}
1732 	}
1733 	if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
1734 	    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1735 		struct irdma_ucontext *ucontext;
1736 
1737 		ucontext = rdma_udata_to_drv_context(udata,
1738 					struct irdma_ucontext, ibucontext);
1739 		if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1740 		    !iwqp->push_wqe_mmap_entry &&
1741 		    !irdma_setup_push_mmap_entries(ucontext, iwqp,
1742 			&uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1743 			uresp.push_valid = 1;
1744 			uresp.push_offset = iwqp->sc_qp.push_offset;
1745 		}
1746 
1747 		err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1748 				       udata->outlen));
1749 		if (err) {
1750 			irdma_remove_push_mmap_entries(iwqp);
1751 			ibdev_dbg(&iwdev->ibdev,
1752 				  "VERBS: copy_to_udata failed\n");
1753 			return err;
1754 		}
1755 	}
1756 
1757 	return 0;
1758 exit:
1759 	spin_unlock_irqrestore(&iwqp->lock, flags);
1760 
1761 	return err;
1762 }
1763 
1764 /**
1765  * irdma_cq_free_rsrc - free up resources for cq
1766  * @rf: RDMA PCI function
1767  * @iwcq: cq ptr
1768  */
1769 static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
1770 {
1771 	struct irdma_sc_cq *cq = &iwcq->sc_cq;
1772 
1773 	if (!iwcq->user_mode) {
1774 		dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size,
1775 				  iwcq->kmem.va, iwcq->kmem.pa);
1776 		iwcq->kmem.va = NULL;
1777 		dma_free_coherent(rf->sc_dev.hw->device,
1778 				  iwcq->kmem_shadow.size,
1779 				  iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa);
1780 		iwcq->kmem_shadow.va = NULL;
1781 	}
1782 
1783 	irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
1784 }
1785 
1786 /**
1787  * irdma_free_cqbuf - worker to free a cq buffer
1788  * @work: provides access to the cq buffer to free
1789  */
1790 static void irdma_free_cqbuf(struct work_struct *work)
1791 {
1792 	struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
1793 
1794 	dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size,
1795 			  cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa);
1796 	cq_buf->kmem_buf.va = NULL;
1797 	kfree(cq_buf);
1798 }
1799 
1800 /**
1801  * irdma_process_resize_list - remove resized cq buffers from the resize_list
1802  * @iwcq: cq which owns the resize_list
1803  * @iwdev: irdma device
1804  * @lcqe_buf: the buffer where the last cqe is received
1805  */
1806 static int irdma_process_resize_list(struct irdma_cq *iwcq,
1807 				     struct irdma_device *iwdev,
1808 				     struct irdma_cq_buf *lcqe_buf)
1809 {
1810 	struct list_head *tmp_node, *list_node;
1811 	struct irdma_cq_buf *cq_buf;
1812 	int cnt = 0;
1813 
1814 	list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
1815 		cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
1816 		if (cq_buf == lcqe_buf)
1817 			return cnt;
1818 
1819 		list_del(&cq_buf->list);
1820 		queue_work(iwdev->cleanup_wq, &cq_buf->work);
1821 		cnt++;
1822 	}
1823 
1824 	return cnt;
1825 }
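
/*
 * Example: after two back-to-back resizes the resize_list holds both retired
 * CQ buffers in order. irdma_destroy_cq() passes lcqe_buf == NULL, so both
 * are queued on iwdev->cleanup_wq and 2 is returned; a caller still draining
 * the newer buffer passes it as lcqe_buf so it is kept on the list and only
 * the older buffer is queued for freeing.
 */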
1826 
1827 /**
1828  * irdma_destroy_cq - destroy cq
1829  * @ib_cq: cq pointer
1830  * @udata: user data
1831  */
1832 static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
1833 {
1834 	struct irdma_device *iwdev = to_iwdev(ib_cq->device);
1835 	struct irdma_cq *iwcq = to_iwcq(ib_cq);
1836 	struct irdma_sc_cq *cq = &iwcq->sc_cq;
1837 	struct irdma_sc_dev *dev = cq->dev;
1838 	struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
1839 	struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
1840 	unsigned long flags;
1841 
1842 	spin_lock_irqsave(&iwcq->lock, flags);
1843 	if (!list_empty(&iwcq->cmpl_generated))
1844 		irdma_remove_cmpls_list(iwcq);
1845 	if (!list_empty(&iwcq->resize_list))
1846 		irdma_process_resize_list(iwcq, iwdev, NULL);
1847 	spin_unlock_irqrestore(&iwcq->lock, flags);
1848 
1849 	irdma_cq_rem_ref(ib_cq);
1850 	wait_for_completion(&iwcq->free_cq);
1851 
1852 	irdma_cq_wq_destroy(iwdev->rf, cq);
1853 
1854 	spin_lock_irqsave(&iwceq->ce_lock, flags);
1855 	irdma_sc_cleanup_ceqes(cq, ceq);
1856 	spin_unlock_irqrestore(&iwceq->ce_lock, flags);
1857 	irdma_cq_free_rsrc(iwdev->rf, iwcq);
1858 
1859 	return 0;
1860 }
1861 
1862 /**
1863  * irdma_resize_cq - resize cq
1864  * @ibcq: cq to be resized
1865  * @entries: desired cq size
1866  * @udata: user data
1867  */
1868 static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
1869 			   struct ib_udata *udata)
1870 {
1871 #define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
1872 	struct irdma_cq *iwcq = to_iwcq(ibcq);
1873 	struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
1874 	struct irdma_cqp_request *cqp_request;
1875 	struct cqp_cmds_info *cqp_info;
1876 	struct irdma_modify_cq_info *m_info;
1877 	struct irdma_modify_cq_info info = {};
1878 	struct irdma_dma_mem kmem_buf;
1879 	struct irdma_cq_mr *cqmr_buf;
1880 	struct irdma_pbl *iwpbl_buf;
1881 	struct irdma_device *iwdev;
1882 	struct irdma_pci_f *rf;
1883 	struct irdma_cq_buf *cq_buf = NULL;
1884 	unsigned long flags;
1885 	int ret;
1886 
1887 	iwdev = to_iwdev(ibcq->device);
1888 	rf = iwdev->rf;
1889 
1890 	if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
1891 	    IRDMA_FEATURE_CQ_RESIZE))
1892 		return -EOPNOTSUPP;
1893 
1894 	if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
1895 		return -EINVAL;
1896 
1897 	if (entries > rf->max_cqe)
1898 		return -EINVAL;
1899 
1900 	if (!iwcq->user_mode) {
1901 		entries++;
1902 		if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1903 			entries *= 2;
1904 	}
1905 
1906 	info.cq_size = max(entries, 4);
1907 
1908 	if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
1909 		return 0;
1910 
1911 	if (udata) {
1912 		struct irdma_resize_cq_req req = {};
1913 		struct irdma_ucontext *ucontext =
1914 			rdma_udata_to_drv_context(udata, struct irdma_ucontext,
1915 						  ibucontext);
1916 
1917 		/* CQ resize not supported with legacy GEN_1 libi40iw */
1918 		if (ucontext->legacy_mode)
1919 			return -EOPNOTSUPP;
1920 
1921 		if (ib_copy_from_udata(&req, udata,
1922 				       min(sizeof(req), udata->inlen)))
1923 			return -EINVAL;
1924 
1925 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1926 		iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
1927 					  &ucontext->cq_reg_mem_list);
1928 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1929 
1930 		if (!iwpbl_buf)
1931 			return -ENOMEM;
1932 
1933 		cqmr_buf = &iwpbl_buf->cq_mr;
1934 		if (iwpbl_buf->pbl_allocated) {
1935 			info.virtual_map = true;
1936 			info.pbl_chunk_size = 1;
1937 			info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
1938 		} else {
1939 			info.cq_pa = cqmr_buf->cq_pbl.addr;
1940 		}
1941 	} else {
1942 		/* Kmode CQ resize */
1943 		int rsize;
1944 
1945 		rsize = info.cq_size * sizeof(struct irdma_cqe);
1946 		kmem_buf.size = ALIGN(round_up(rsize, 256), 256);
1947 		kmem_buf.va = dma_alloc_coherent(dev->hw->device,
1948 						 kmem_buf.size, &kmem_buf.pa,
1949 						 GFP_KERNEL);
1950 		if (!kmem_buf.va)
1951 			return -ENOMEM;
1952 
1953 		info.cq_base = kmem_buf.va;
1954 		info.cq_pa = kmem_buf.pa;
1955 		cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);
1956 		if (!cq_buf) {
1957 			ret = -ENOMEM;
1958 			goto error;
1959 		}
1960 	}
1961 
1962 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1963 	if (!cqp_request) {
1964 		ret = -ENOMEM;
1965 		goto error;
1966 	}
1967 
1968 	info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
1969 	info.cq_resize = true;
1970 
1971 	cqp_info = &cqp_request->info;
1972 	m_info = &cqp_info->in.u.cq_modify.info;
1973 	memcpy(m_info, &info, sizeof(*m_info));
1974 
1975 	cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
1976 	cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
1977 	cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
1978 	cqp_info->post_sq = 1;
1979 	ret = irdma_handle_cqp_op(rf, cqp_request);
1980 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1981 	if (ret)
1982 		goto error;
1983 
1984 	spin_lock_irqsave(&iwcq->lock, flags);
1985 	if (cq_buf) {
1986 		cq_buf->kmem_buf = iwcq->kmem;
1987 		cq_buf->hw = dev->hw;
1988 		memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
1989 		INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
1990 		list_add_tail(&cq_buf->list, &iwcq->resize_list);
1991 		iwcq->kmem = kmem_buf;
1992 	}
1993 
1994 	irdma_sc_cq_resize(&iwcq->sc_cq, &info);
1995 	ibcq->cqe = info.cq_size - 1;
1996 	spin_unlock_irqrestore(&iwcq->lock, flags);
1997 
1998 	return 0;
1999 error:
2000 	if (!udata) {
2001 		dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va,
2002 				  kmem_buf.pa);
2003 		kmem_buf.va = NULL;
2004 	}
2005 	kfree(cq_buf);
2006 
2007 	return ret;
2008 }
2009 
2010 static inline int cq_validate_flags(u32 flags, u8 hw_rev)
2011 {
2012 	/* GEN1 does not support CQ create flags */
2013 	if (hw_rev == IRDMA_GEN_1)
2014 		return flags ? -EOPNOTSUPP : 0;
2015 
2016 	return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
2017 }
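
/*
 * Example: cq_validate_flags(IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION,
 * IRDMA_GEN_2) returns 0; the same flag on IRDMA_GEN_1, or any other flag
 * bit on any generation, yields -EOPNOTSUPP.
 */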
2018 
2019 /**
2020  * irdma_create_cq - create cq
2021  * @ibcq: CQ allocated
2022  * @attr: attributes for cq
2023  * @udata: user data
2024  */
2025 static int irdma_create_cq(struct ib_cq *ibcq,
2026 			   const struct ib_cq_init_attr *attr,
2027 			   struct ib_udata *udata)
2028 {
2029 #define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
2030 #define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
2031 	struct ib_device *ibdev = ibcq->device;
2032 	struct irdma_device *iwdev = to_iwdev(ibdev);
2033 	struct irdma_pci_f *rf = iwdev->rf;
2034 	struct irdma_cq *iwcq = to_iwcq(ibcq);
2035 	u32 cq_num = 0;
2036 	struct irdma_sc_cq *cq;
2037 	struct irdma_sc_dev *dev = &rf->sc_dev;
2038 	struct irdma_cq_init_info info = {};
2039 	struct irdma_cqp_request *cqp_request;
2040 	struct cqp_cmds_info *cqp_info;
2041 	struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
2042 	unsigned long flags;
2043 	int err_code;
2044 	int entries = attr->cqe;
2045 
2046 	err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
2047 	if (err_code)
2048 		return err_code;
2049 
2050 	if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
2051 		      udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
2052 		return -EINVAL;
2053 
2054 	err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
2055 				    &rf->next_cq);
2056 	if (err_code)
2057 		return err_code;
2058 
2059 	cq = &iwcq->sc_cq;
2060 	cq->back_cq = iwcq;
2061 	refcount_set(&iwcq->refcnt, 1);
2062 	spin_lock_init(&iwcq->lock);
2063 	INIT_LIST_HEAD(&iwcq->resize_list);
2064 	INIT_LIST_HEAD(&iwcq->cmpl_generated);
2065 	info.dev = dev;
2066 	ukinfo->cq_size = max(entries, 4);
2067 	ukinfo->cq_id = cq_num;
2068 	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
2069 	if (attr->comp_vector < rf->ceqs_count)
2070 		info.ceq_id = attr->comp_vector;
2071 	info.ceq_id_valid = true;
2072 	info.ceqe_mask = 1;
2073 	info.type = IRDMA_CQ_TYPE_IWARP;
2074 	info.vsi = &iwdev->vsi;
2075 
2076 	if (udata) {
2077 		struct irdma_ucontext *ucontext;
2078 		struct irdma_create_cq_req req = {};
2079 		struct irdma_cq_mr *cqmr;
2080 		struct irdma_pbl *iwpbl;
2081 		struct irdma_pbl *iwpbl_shadow;
2082 		struct irdma_cq_mr *cqmr_shadow;
2083 
2084 		iwcq->user_mode = true;
2085 		ucontext =
2086 			rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2087 						  ibucontext);
2088 		if (ib_copy_from_udata(&req, udata,
2089 				       min(sizeof(req), udata->inlen))) {
2090 			err_code = -EFAULT;
2091 			goto cq_free_rsrc;
2092 		}
2093 
2094 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2095 		iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
2096 				      &ucontext->cq_reg_mem_list);
2097 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2098 		if (!iwpbl) {
2099 			err_code = -EPROTO;
2100 			goto cq_free_rsrc;
2101 		}
2102 
2103 		iwcq->iwpbl = iwpbl;
2104 		iwcq->cq_mem_size = 0;
2105 		cqmr = &iwpbl->cq_mr;
2106 
2107 		if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
2108 		    IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
2109 			spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2110 			iwpbl_shadow = irdma_get_pbl(
2111 					(unsigned long)req.user_shadow_area,
2112 					&ucontext->cq_reg_mem_list);
2113 			spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2114 
2115 			if (!iwpbl_shadow) {
2116 				err_code = -EPROTO;
2117 				goto cq_free_rsrc;
2118 			}
2119 			iwcq->iwpbl_shadow = iwpbl_shadow;
2120 			cqmr_shadow = &iwpbl_shadow->cq_mr;
2121 			info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
2122 			cqmr->split = true;
2123 		} else {
2124 			info.shadow_area_pa = cqmr->shadow;
2125 		}
2126 		if (iwpbl->pbl_allocated) {
2127 			info.virtual_map = true;
2128 			info.pbl_chunk_size = 1;
2129 			info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
2130 		} else {
2131 			info.cq_base_pa = cqmr->cq_pbl.addr;
2132 		}
2133 	} else {
2134 		/* Kmode allocations */
2135 		int rsize;
2136 
2137 		if (entries < 1 || entries > rf->max_cqe) {
2138 			err_code = -EINVAL;
2139 			goto cq_free_rsrc;
2140 		}
2141 
2142 		entries++;
2143 		if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
2144 			entries *= 2;
2145 		ukinfo->cq_size = entries;
2146 
2147 		rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
2148 		iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);
2149 		iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
2150 						   iwcq->kmem.size,
2151 						   &iwcq->kmem.pa, GFP_KERNEL);
2152 		if (!iwcq->kmem.va) {
2153 			err_code = -ENOMEM;
2154 			goto cq_free_rsrc;
2155 		}
2156 
2157 		iwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3,
2158 					       64);
2159 		iwcq->kmem_shadow.va = dma_alloc_coherent(dev->hw->device,
2160 							  iwcq->kmem_shadow.size,
2161 							  &iwcq->kmem_shadow.pa,
2162 							  GFP_KERNEL);
2163 		if (!iwcq->kmem_shadow.va) {
2164 			err_code = -ENOMEM;
2165 			goto cq_free_rsrc;
2166 		}
2167 		info.shadow_area_pa = iwcq->kmem_shadow.pa;
2168 		ukinfo->shadow_area = iwcq->kmem_shadow.va;
2169 		ukinfo->cq_base = iwcq->kmem.va;
2170 		info.cq_base_pa = iwcq->kmem.pa;
2171 	}
2172 
2173 	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
2174 		info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
2175 						 (u32)IRDMA_MAX_CQ_READ_THRESH);
2176 
2177 	if (irdma_sc_cq_init(cq, &info)) {
2178 		ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
2179 		err_code = -EPROTO;
2180 		goto cq_free_rsrc;
2181 	}
2182 
2183 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2184 	if (!cqp_request) {
2185 		err_code = -ENOMEM;
2186 		goto cq_free_rsrc;
2187 	}
2188 
2189 	cqp_info = &cqp_request->info;
2190 	cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
2191 	cqp_info->post_sq = 1;
2192 	cqp_info->in.u.cq_create.cq = cq;
2193 	cqp_info->in.u.cq_create.check_overflow = true;
2194 	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
2195 	err_code = irdma_handle_cqp_op(rf, cqp_request);
2196 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2197 	if (err_code)
2198 		goto cq_free_rsrc;
2199 
2200 	if (udata) {
2201 		struct irdma_create_cq_resp resp = {};
2202 
2203 		resp.cq_id = info.cq_uk_init_info.cq_id;
2204 		resp.cq_size = info.cq_uk_init_info.cq_size;
2205 		if (ib_copy_to_udata(udata, &resp,
2206 				     min(sizeof(resp), udata->outlen))) {
2207 			ibdev_dbg(&iwdev->ibdev,
2208 				  "VERBS: copy_to_udata failed\n");
2209 			err_code = -EPROTO;
2210 			goto cq_destroy;
2211 		}
2212 	}
2213 	rf->cq_table[cq_num] = iwcq;
2214 	init_completion(&iwcq->free_cq);
2215 
2216 	return 0;
2217 cq_destroy:
2218 	irdma_cq_wq_destroy(rf, cq);
2219 cq_free_rsrc:
2220 	irdma_cq_free_rsrc(rf, iwcq);
2221 
2222 	return err_code;
2223 }
2224 
2225 /**
2226  * irdma_get_mr_access - get hw MR access permissions from IB access flags
2227  * @access: IB access flags
2228  */
2229 static inline u16 irdma_get_mr_access(int access)
2230 {
2231 	u16 hw_access = 0;
2232 
2233 	hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
2234 		     IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
2235 	hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
2236 		     IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
2237 	hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
2238 		     IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
2239 	hw_access |= (access & IB_ACCESS_MW_BIND) ?
2240 		     IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
2241 	hw_access |= (access & IB_ZERO_BASED) ?
2242 		     IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
2243 	hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
2244 
2245 	return hw_access;
2246 }
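
/*
 * Example: access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ maps to
 * IRDMA_ACCESS_FLAGS_LOCALWRITE | IRDMA_ACCESS_FLAGS_REMOTEREAD |
 * IRDMA_ACCESS_FLAGS_LOCALREAD; local read is always granted regardless of
 * the IB flags requested.
 */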
2247 
2248 /**
2249  * irdma_free_stag - free stag resource
2250  * @iwdev: irdma device
2251  * @stag: stag to free
2252  */
2253 static void irdma_free_stag(struct irdma_device *iwdev, u32 stag)
2254 {
2255 	u32 stag_idx;
2256 
2257 	stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
2258 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
2259 }
2260 
2261 /**
2262  * irdma_create_stag - create random stag
2263  * @iwdev: irdma device
2264  */
2265 static u32 irdma_create_stag(struct irdma_device *iwdev)
2266 {
2267 	u32 stag = 0;
2268 	u32 stag_index = 0;
2269 	u32 next_stag_index;
2270 	u32 driver_key;
2271 	u32 random;
2272 	u8 consumer_key;
2273 	int ret;
2274 
2275 	get_random_bytes(&random, sizeof(random));
2276 	consumer_key = (u8)random;
2277 
2278 	driver_key = random & ~iwdev->rf->mr_stagmask;
2279 	next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
2280 	next_stag_index %= iwdev->rf->max_mr;
2281 
2282 	ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
2283 			       iwdev->rf->max_mr, &stag_index,
2284 			       &next_stag_index);
2285 	if (ret)
2286 		return stag;
2287 	stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
2288 	stag |= driver_key;
2289 	stag += (u32)consumer_key;
2290 
2291 	return stag;
2292 }
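
/*
 * The stag assembled above packs three fields derived from one random
 * value: the allocated MR resource index shifted up by
 * IRDMA_CQPSQ_STAG_IDX_S, a driver key taken from the random bits outside
 * mr_stagmask, and an 8-bit consumer key added on top. A return of 0 means
 * the resource index allocation failed.
 */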
2293 
2294 /**
2295  * irdma_next_pbl_addr - Get next pbl address
2296  * @pbl: pointer to a pble
2297  * @pbl: pointer to the current pble entry
2298  * @pinfo: pointer to the current pble info chunk
2299  * @idx: index within the current pble chunk
2300 static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
2301 				       u32 *idx)
2302 {
2303 	*idx += 1;
2304 	if (!(*pinfo) || *idx != (*pinfo)->cnt)
2305 		return ++pbl;
2306 	*idx = 0;
2307 	(*pinfo)++;
2308 
2309 	return (*pinfo)->addr;
2310 }
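
/*
 * Illustrative walk: for a level-2 allocation each leaf holds
 * (*pinfo)->cnt addresses, so the helper advances within the current leaf
 * and, once idx reaches that count, resets idx and returns the base of the
 * next leaf. For level 1, pinfo is NULL and the pointer simply advances
 * through one flat array.
 */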
2311 
2312 /**
2313  * irdma_copy_user_pgaddrs - copy user page addresses to pbles
2314  * @iwmr: iwmr for IB's user page addresses
2315  * @pbl: pble pointer to save level 1 or level 0 pble
2316  * @level: indicates level 0, 1 or 2
2317  */
2318 static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
2319 				    enum irdma_pble_level level)
2320 {
2321 	struct ib_umem *region = iwmr->region;
2322 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2323 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2324 	struct irdma_pble_info *pinfo;
2325 	struct ib_block_iter biter;
2326 	u32 idx = 0;
2327 	u32 pbl_cnt = 0;
2328 
2329 	pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
2330 
2331 	if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
2332 		iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl);
2333 
2334 	rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
2335 		*pbl = rdma_block_iter_dma_address(&biter);
2336 		if (++pbl_cnt == palloc->total_cnt)
2337 			break;
2338 		pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
2339 	}
2340 }
2341 
2342 /**
2343  * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
2344  * @arr: lvl1 pbl array
2345  * @npages: page count
2346  * @pg_size: page size
2347  *
2348  */
2349 static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
2350 {
2351 	u32 pg_idx;
2352 
2353 	for (pg_idx = 0; pg_idx < npages; pg_idx++) {
2354 		if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
2355 			return false;
2356 	}
2357 
2358 	return true;
2359 }
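
/*
 * Example: arr = { 0x10000, 0x11000, 0x12000 } with pg_size = 0x1000 is
 * contiguous since each entry equals arr[0] + pg_idx * pg_size; changing
 * the last entry to 0x13000 would make the check fail.
 */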
2360 
2361 /**
2362  * irdma_check_mr_contiguous - check if MR is physically contiguous
2363  * @palloc: pbl allocation struct
2364  * @pg_size: page size
2365  */
2366 static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
2367 				      u32 pg_size)
2368 {
2369 	struct irdma_pble_level2 *lvl2 = &palloc->level2;
2370 	struct irdma_pble_info *leaf = lvl2->leaf;
2371 	u64 *arr = NULL;
2372 	u64 *start_addr = NULL;
2373 	int i;
2374 	bool ret;
2375 
2376 	if (palloc->level == PBLE_LEVEL_1) {
2377 		arr = palloc->level1.addr;
2378 		ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
2379 						 pg_size);
2380 		return ret;
2381 	}
2382 
2383 	start_addr = leaf->addr;
2384 
2385 	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
2386 		arr = leaf->addr;
2387 		if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
2388 			return false;
2389 		ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
2390 		if (!ret)
2391 			return false;
2392 	}
2393 
2394 	return true;
2395 }
2396 
2397 /**
2398  * irdma_setup_pbles - copy user page addresses to pbles
2399  * @rf: RDMA PCI function
2400  * @iwmr: mr pointer for this memory registration
2401  * @lvl: requested pble levels
2402  */
2403 static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
2404 			     u8 lvl)
2405 {
2406 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2407 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2408 	struct irdma_pble_info *pinfo;
2409 	u64 *pbl;
2410 	int status;
2411 	enum irdma_pble_level level = PBLE_LEVEL_1;
2412 
2413 	if (lvl) {
2414 		status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
2415 					lvl);
2416 		if (status)
2417 			return status;
2418 
2419 		iwpbl->pbl_allocated = true;
2420 		level = palloc->level;
2421 		pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
2422 						  palloc->level2.leaf;
2423 		pbl = pinfo->addr;
2424 	} else {
2425 		pbl = iwmr->pgaddrmem;
2426 	}
2427 
2428 	irdma_copy_user_pgaddrs(iwmr, pbl, level);
2429 
2430 	if (lvl)
2431 		iwmr->pgaddrmem[0] = *pbl;
2432 
2433 	return 0;
2434 }
2435 
2436 /**
2437  * irdma_handle_q_mem - handle memory for qp and cq
2438  * @iwdev: irdma device
2439  * @req: information for q memory management
2440  * @iwpbl: pble struct
2441  * @lvl: pble level mask
2442  */
2443 static int irdma_handle_q_mem(struct irdma_device *iwdev,
2444 			      struct irdma_mem_reg_req *req,
2445 			      struct irdma_pbl *iwpbl, u8 lvl)
2446 {
2447 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2448 	struct irdma_mr *iwmr = iwpbl->iwmr;
2449 	struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
2450 	struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
2451 	struct irdma_hmc_pble *hmc_p;
2452 	u64 *arr = iwmr->pgaddrmem;
2453 	u32 pg_size, total;
2454 	int err = 0;
2455 	bool ret = true;
2456 
2457 	pg_size = iwmr->page_size;
2458 	err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
2459 	if (err)
2460 		return err;
2461 
2462 	if (lvl)
2463 		arr = palloc->level1.addr;
2464 
2465 	switch (iwmr->type) {
2466 	case IRDMA_MEMREG_TYPE_QP:
2467 		total = req->sq_pages + req->rq_pages;
2468 		hmc_p = &qpmr->sq_pbl;
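		/* the shadow area page immediately follows the SQ and RQ pages */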
2469 		qpmr->shadow = (dma_addr_t)arr[total];
2470 
2471 		if (lvl) {
2472 			ret = irdma_check_mem_contiguous(arr, req->sq_pages,
2473 							 pg_size);
2474 			if (ret)
2475 				ret = irdma_check_mem_contiguous(&arr[req->sq_pages],
2476 								 req->rq_pages,
2477 								 pg_size);
2478 		}
2479 
2480 		if (!ret) {
2481 			hmc_p->idx = palloc->level1.idx;
2482 			hmc_p = &qpmr->rq_pbl;
2483 			hmc_p->idx = palloc->level1.idx + req->sq_pages;
2484 		} else {
2485 			hmc_p->addr = arr[0];
2486 			hmc_p = &qpmr->rq_pbl;
2487 			hmc_p->addr = arr[req->sq_pages];
2488 		}
2489 		break;
2490 	case IRDMA_MEMREG_TYPE_CQ:
2491 		hmc_p = &cqmr->cq_pbl;
2492 
2493 		if (!cqmr->split)
2494 			cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
2495 
2496 		if (lvl)
2497 			ret = irdma_check_mem_contiguous(arr, req->cq_pages,
2498 							 pg_size);
2499 
2500 		if (!ret)
2501 			hmc_p->idx = palloc->level1.idx;
2502 		else
2503 			hmc_p->addr = arr[0];
2504 		break;
2505 	default:
2506 		ibdev_dbg(&iwdev->ibdev, "VERBS: MR type error\n");
2507 		err = -EINVAL;
2508 	}
2509 
2510 	if (lvl && ret) {
2511 		irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2512 		iwpbl->pbl_allocated = false;
2513 	}
2514 
2515 	return err;
2516 }
2517 
2518 /**
2519  * irdma_hw_alloc_mw - create the hw memory window
2520  * @iwdev: irdma device
2521  * @iwmr: pointer to memory window info
2522  */
2523 static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
2524 {
2525 	struct irdma_mw_alloc_info *info;
2526 	struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2527 	struct irdma_cqp_request *cqp_request;
2528 	struct cqp_cmds_info *cqp_info;
2529 	int status;
2530 
2531 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2532 	if (!cqp_request)
2533 		return -ENOMEM;
2534 
2535 	cqp_info = &cqp_request->info;
2536 	info = &cqp_info->in.u.mw_alloc.info;
2537 	memset(info, 0, sizeof(*info));
2538 	if (iwmr->ibmw.type == IB_MW_TYPE_1)
2539 		info->mw_wide = true;
2540 
2541 	info->page_size = PAGE_SIZE;
2542 	info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2543 	info->pd_id = iwpd->sc_pd.pd_id;
2544 	info->remote_access = true;
2545 	cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
2546 	cqp_info->post_sq = 1;
2547 	cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
2548 	cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
2549 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2550 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2551 
2552 	return status;
2553 }
2554 
2555 /**
2556  * irdma_alloc_mw - Allocate memory window
2557  * @ibmw: Memory Window
2558  * @udata: user data pointer
2559  */
2560 static int irdma_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
2561 {
2562 	struct irdma_device *iwdev = to_iwdev(ibmw->device);
2563 	struct irdma_mr *iwmr = to_iwmw(ibmw);
2564 	int err_code;
2565 	u32 stag;
2566 
2567 	stag = irdma_create_stag(iwdev);
2568 	if (!stag)
2569 		return -ENOMEM;
2570 
2571 	iwmr->stag = stag;
2572 	ibmw->rkey = stag;
2573 
2574 	err_code = irdma_hw_alloc_mw(iwdev, iwmr);
2575 	if (err_code) {
2576 		irdma_free_stag(iwdev, stag);
2577 		return err_code;
2578 	}
2579 
2580 	return 0;
2581 }
2582 
2583 /**
2584  * irdma_dealloc_mw - Dealloc memory window
2585  * @ibmw: memory window structure.
2586  */
2587 static int irdma_dealloc_mw(struct ib_mw *ibmw)
2588 {
2589 	struct ib_pd *ibpd = ibmw->pd;
2590 	struct irdma_pd *iwpd = to_iwpd(ibpd);
2591 	struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
2592 	struct irdma_device *iwdev = to_iwdev(ibmw->device);
2593 	struct irdma_cqp_request *cqp_request;
2594 	struct cqp_cmds_info *cqp_info;
2595 	struct irdma_dealloc_stag_info *info;
2596 
2597 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2598 	if (!cqp_request)
2599 		return -ENOMEM;
2600 
2601 	cqp_info = &cqp_request->info;
2602 	info = &cqp_info->in.u.dealloc_stag.info;
2603 	memset(info, 0, sizeof(*info));
2604 	info->pd_id = iwpd->sc_pd.pd_id;
2605 	info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
2606 	info->mr = false;
2607 	cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
2608 	cqp_info->post_sq = 1;
2609 	cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
2610 	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2611 	irdma_handle_cqp_op(iwdev->rf, cqp_request);
2612 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2613 	irdma_free_stag(iwdev, iwmr->stag);
2614 
2615 	return 0;
2616 }
2617 
2618 /**
2619  * irdma_hw_alloc_stag - cqp command to allocate stag
2620  * @iwdev: irdma device
2621  * @iwmr: irdma mr pointer
2622  */
2623 static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
2624 			       struct irdma_mr *iwmr)
2625 {
2626 	struct irdma_allocate_stag_info *info;
2627 	struct ib_pd *pd = iwmr->ibmr.pd;
2628 	struct irdma_pd *iwpd = to_iwpd(pd);
2629 	int status;
2630 	struct irdma_cqp_request *cqp_request;
2631 	struct cqp_cmds_info *cqp_info;
2632 
2633 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2634 	if (!cqp_request)
2635 		return -ENOMEM;
2636 
2637 	cqp_info = &cqp_request->info;
2638 	info = &cqp_info->in.u.alloc_stag.info;
2639 	memset(info, 0, sizeof(*info));
2640 	info->page_size = PAGE_SIZE;
2641 	info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2642 	info->pd_id = iwpd->sc_pd.pd_id;
2643 	info->total_len = iwmr->len;
2644 	info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
2645 	info->remote_access = true;
2646 	cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
2647 	cqp_info->post_sq = 1;
2648 	cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
2649 	cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
2650 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2651 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2652 
2653 	return status;
2654 }
2655 
2656 /**
2657  * irdma_alloc_mr - register stag for fast memory registration
2658  * @pd: ibpd pointer
2659  * @mr_type: memory type for stag registration
2660  * @max_num_sg: max number of pages
2661  */
2662 static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
2663 				    u32 max_num_sg)
2664 {
2665 	struct irdma_device *iwdev = to_iwdev(pd->device);
2666 	struct irdma_pble_alloc *palloc;
2667 	struct irdma_pbl *iwpbl;
2668 	struct irdma_mr *iwmr;
2669 	u32 stag;
2670 	int err_code;
2671 
2672 	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2673 	if (!iwmr)
2674 		return ERR_PTR(-ENOMEM);
2675 
2676 	stag = irdma_create_stag(iwdev);
2677 	if (!stag) {
2678 		err_code = -ENOMEM;
2679 		goto err;
2680 	}
2681 
2682 	iwmr->stag = stag;
2683 	iwmr->ibmr.rkey = stag;
2684 	iwmr->ibmr.lkey = stag;
2685 	iwmr->ibmr.pd = pd;
2686 	iwmr->ibmr.device = pd->device;
2687 	iwpbl = &iwmr->iwpbl;
2688 	iwpbl->iwmr = iwmr;
2689 	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
2690 	palloc = &iwpbl->pble_alloc;
2691 	iwmr->page_cnt = max_num_sg;
2692 	/* Use system PAGE_SIZE as the sg page sizes are unknown at this point */
2693 	iwmr->len = max_num_sg * PAGE_SIZE;
2694 	err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
2695 				  false);
2696 	if (err_code)
2697 		goto err_get_pble;
2698 
2699 	err_code = irdma_hw_alloc_stag(iwdev, iwmr);
2700 	if (err_code)
2701 		goto err_alloc_stag;
2702 
2703 	iwpbl->pbl_allocated = true;
2704 
2705 	return &iwmr->ibmr;
2706 err_alloc_stag:
2707 	irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2708 err_get_pble:
2709 	irdma_free_stag(iwdev, stag);
2710 err:
2711 	kfree(iwmr);
2712 
2713 	return ERR_PTR(err_code);
2714 }
2715 
2716 /**
2717  * irdma_set_page - populate pbl list for fmr
2718  * @ibmr: ib mem to access iwarp mr pointer
2719  * @addr: page dma address for pbl list
2720  */
2721 static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
2722 {
2723 	struct irdma_mr *iwmr = to_iwmr(ibmr);
2724 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2725 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2726 	u64 *pbl;
2727 
2728 	if (unlikely(iwmr->npages == iwmr->page_cnt))
2729 		return -ENOMEM;
2730 
2731 	if (palloc->level == PBLE_LEVEL_2) {
2732 		struct irdma_pble_info *palloc_info =
2733 			palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);
2734 
2735 		palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
2736 	} else {
2737 		pbl = palloc->level1.addr;
2738 		pbl[iwmr->npages] = addr;
2739 	}
2740 	iwmr->npages++;
2741 
2742 	return 0;
2743 }
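
/*
 * For a level-2 pble tree the address written above lands in leaf
 * (npages >> PBLE_512_SHIFT) at slot (npages & (PBLE_PER_PAGE - 1)),
 * assuming each leaf holds PBLE_PER_PAGE entries; for level 1 all
 * addresses go into one flat array indexed by npages.
 */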
2744 
2745 /**
2746  * irdma_map_mr_sg - map sg list for fmr
2747  * @ibmr: ib mem to access iwarp mr pointer
2748  * @sg: scatter gather list
2749  * @sg_nents: number of sg entries
2750  * @sg_offset: byte offset into the sg list
2751  */
2752 static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2753 			   int sg_nents, unsigned int *sg_offset)
2754 {
2755 	struct irdma_mr *iwmr = to_iwmr(ibmr);
2756 
2757 	iwmr->npages = 0;
2758 
2759 	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
2760 }
2761 
2762 /**
2763  * irdma_hwreg_mr - send cqp command for memory registration
2764  * @iwdev: irdma device
2765  * @iwmr: irdma mr pointer
2766  * @access: access for MR
2767  */
2768 static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
2769 			  u16 access)
2770 {
2771 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2772 	struct irdma_reg_ns_stag_info *stag_info;
2773 	struct ib_pd *pd = iwmr->ibmr.pd;
2774 	struct irdma_pd *iwpd = to_iwpd(pd);
2775 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2776 	struct irdma_cqp_request *cqp_request;
2777 	struct cqp_cmds_info *cqp_info;
2778 	int ret;
2779 
2780 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2781 	if (!cqp_request)
2782 		return -ENOMEM;
2783 
2784 	cqp_info = &cqp_request->info;
2785 	stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
2786 	memset(stag_info, 0, sizeof(*stag_info));
2787 	stag_info->va = iwpbl->user_base;
2788 	stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2789 	stag_info->stag_key = (u8)iwmr->stag;
2790 	stag_info->total_len = iwmr->len;
2791 	stag_info->access_rights = irdma_get_mr_access(access);
2792 	stag_info->pd_id = iwpd->sc_pd.pd_id;
2793 	stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
2794 	if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
2795 		stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
2796 	else
2797 		stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;
2798 	stag_info->page_size = iwmr->page_size;
2799 
2800 	if (iwpbl->pbl_allocated) {
2801 		if (palloc->level == PBLE_LEVEL_1) {
2802 			stag_info->first_pm_pbl_index = palloc->level1.idx;
2803 			stag_info->chunk_size = 1;
2804 		} else {
2805 			stag_info->first_pm_pbl_index = palloc->level2.root.idx;
2806 			stag_info->chunk_size = 3;
2807 		}
2808 	} else {
2809 		stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
2810 	}
2811 
2812 	cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;
2813 	cqp_info->post_sq = 1;
2814 	cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
2815 	cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
2816 	ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2817 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2818 
2819 	return ret;
2820 }
2821 
2822 static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
2823 {
2824 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2825 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2826 	u32 stag;
2827 	u8 lvl;
2828 	int err;
2829 
2830 	lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0;
2831 
2832 	err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
2833 	if (err)
2834 		return err;
2835 
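	/*
	 * If the region turns out to be physically contiguous, the pble
	 * backing is unnecessary: free it so irdma_hwreg_mr() registers the
	 * MR using the single physical address kept in iwmr->pgaddrmem.
	 */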
2836 	if (lvl) {
2837 		err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
2838 						iwmr->page_size);
2839 		if (err) {
2840 			irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
2841 			iwpbl->pbl_allocated = false;
2842 		}
2843 	}
2844 
2845 	stag = irdma_create_stag(iwdev);
2846 	if (!stag) {
2847 		err = -ENOMEM;
2848 		goto free_pble;
2849 	}
2850 
2851 	iwmr->stag = stag;
2852 	iwmr->ibmr.rkey = stag;
2853 	iwmr->ibmr.lkey = stag;
2854 	err = irdma_hwreg_mr(iwdev, iwmr, access);
2855 	if (err)
2856 		goto err_hwreg;
2857 
2858 	return 0;
2859 
2860 err_hwreg:
2861 	irdma_free_stag(iwdev, stag);
2862 
2863 free_pble:
2864 	if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
2865 		irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
2866 
2867 	return err;
2868 }
2869 
2870 static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
2871 					 struct ib_pd *pd, u64 virt,
2872 					 enum irdma_memreg_type reg_type)
2873 {
2874 	struct irdma_device *iwdev = to_iwdev(pd->device);
2875 	struct irdma_pbl *iwpbl;
2876 	struct irdma_mr *iwmr;
2877 	unsigned long pgsz_bitmap;
2878 
2879 	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2880 	if (!iwmr)
2881 		return ERR_PTR(-ENOMEM);
2882 
2883 	iwpbl = &iwmr->iwpbl;
2884 	iwpbl->iwmr = iwmr;
2885 	iwmr->region = region;
2886 	iwmr->ibmr.pd = pd;
2887 	iwmr->ibmr.device = pd->device;
2888 	iwmr->ibmr.iova = virt;
2889 	iwmr->type = reg_type;
2890 
2891 	pgsz_bitmap = (reg_type == IRDMA_MEMREG_TYPE_MEM) ?
2892 		iwdev->rf->sc_dev.hw_attrs.page_size_cap : PAGE_SIZE;
2893 
2894 	iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt);
2895 	if (unlikely(!iwmr->page_size)) {
2896 		kfree(iwmr);
2897 		return ERR_PTR(-EOPNOTSUPP);
2898 	}
2899 
2900 	iwmr->len = region->length;
2901 	iwpbl->user_base = virt;
2902 	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
2903 
2904 	return iwmr;
2905 }
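
/*
 * Note on the page-size choice above: IRDMA_MEMREG_TYPE_MEM registrations
 * may use any HW-supported size advertised in hw_attrs.page_size_cap,
 * while QP and CQ registrations are limited to the system PAGE_SIZE; a
 * umem that fits no allowed page size is rejected with -EOPNOTSUPP.
 */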
2906 
2907 static void irdma_free_iwmr(struct irdma_mr *iwmr)
2908 {
2909 	kfree(iwmr);
2910 }
2911 
2912 static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
2913 				     struct ib_udata *udata,
2914 				     struct irdma_mr *iwmr)
2915 {
2916 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2917 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2918 	struct irdma_ucontext *ucontext = NULL;
2919 	unsigned long flags;
2920 	u32 total;
2921 	int err;
2922 	u8 lvl;
2923 
2924 	total = req.sq_pages + req.rq_pages + 1;
2925 	if (total > iwmr->page_cnt)
2926 		return -EINVAL;
2927 
2928 	total = req.sq_pages + req.rq_pages;
2929 	lvl = total > 2 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
2930 	err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
2931 	if (err)
2932 		return err;
2933 
2934 	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2935 					     ibucontext);
2936 	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2937 	list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
2938 	iwpbl->on_list = true;
2939 	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2940 
2941 	return 0;
2942 }
2943 
2944 static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
2945 				     struct ib_udata *udata,
2946 				     struct irdma_mr *iwmr)
2947 {
2948 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2949 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2950 	struct irdma_ucontext *ucontext = NULL;
2951 	u8 shadow_pgcnt = 1;
2952 	unsigned long flags;
2953 	u32 total;
2954 	int err;
2955 	u8 lvl;
2956 
2957 	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
2958 		shadow_pgcnt = 0;
2959 	total = req.cq_pages + shadow_pgcnt;
2960 	if (total > iwmr->page_cnt)
2961 		return -EINVAL;
2962 
2963 	lvl = req.cq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
2964 	err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
2965 	if (err)
2966 		return err;
2967 
2968 	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2969 					     ibucontext);
2970 	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2971 	list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
2972 	iwpbl->on_list = true;
2973 	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2974 
2975 	return 0;
2976 }
2977 
2978 /**
2979  * irdma_reg_user_mr - Register a user memory region
2980  * @pd: ptr of pd
2981  * @start: virtual start address
2982  * @len: length of mr
2983  * @virt: virtual address
2984  * @access: access of mr
2985  * @udata: user data
2986  */
2987 static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
2988 				       u64 virt, int access,
2989 				       struct ib_udata *udata)
2990 {
2991 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
2992 	struct irdma_device *iwdev = to_iwdev(pd->device);
2993 	struct irdma_mem_reg_req req = {};
2994 	struct ib_umem *region = NULL;
2995 	struct irdma_mr *iwmr = NULL;
2996 	int err;
2997 
2998 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
2999 		return ERR_PTR(-EINVAL);
3000 
3001 	if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
3002 		return ERR_PTR(-EINVAL);
3003 
3004 	region = ib_umem_get(pd->device, start, len, access);
3005 
3006 	if (IS_ERR(region)) {
3007 		ibdev_dbg(&iwdev->ibdev,
3008 			  "VERBS: Failed to create ib_umem region\n");
3009 		return (struct ib_mr *)region;
3010 	}
3011 
3012 	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
3013 		ib_umem_release(region);
3014 		return ERR_PTR(-EFAULT);
3015 	}
3016 
3017 	iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type);
3018 	if (IS_ERR(iwmr)) {
3019 		ib_umem_release(region);
3020 		return (struct ib_mr *)iwmr;
3021 	}
3022 
3023 	switch (req.reg_type) {
3024 	case IRDMA_MEMREG_TYPE_QP:
3025 		err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
3026 		if (err)
3027 			goto error;
3028 
3029 		break;
3030 	case IRDMA_MEMREG_TYPE_CQ:
3031 		err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
3032 		if (err)
3033 			goto error;
3034 		break;
3035 	case IRDMA_MEMREG_TYPE_MEM:
3036 		err = irdma_reg_user_mr_type_mem(iwmr, access);
3037 		if (err)
3038 			goto error;
3039 
3040 		break;
3041 	default:
3042 		err = -EINVAL;
3043 		goto error;
3044 	}
3045 
3046 	return &iwmr->ibmr;
3047 error:
3048 	ib_umem_release(region);
3049 	irdma_free_iwmr(iwmr);
3050 
3051 	return ERR_PTR(err);
3052 }
3053 
3054 static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
3055 					      u64 len, u64 virt,
3056 					      int fd, int access,
3057 					      struct ib_udata *udata)
3058 {
3059 	struct irdma_device *iwdev = to_iwdev(pd->device);
3060 	struct ib_umem_dmabuf *umem_dmabuf;
3061 	struct irdma_mr *iwmr;
3062 	int err;
3063 
3064 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
3065 		return ERR_PTR(-EINVAL);
3066 
3067 	umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len, fd, access);
3068 	if (IS_ERR(umem_dmabuf)) {
3069 		err = PTR_ERR(umem_dmabuf);
3070 		ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
3071 		return ERR_PTR(err);
3072 	}
3073 
3074 	iwmr = irdma_alloc_iwmr(&umem_dmabuf->umem, pd, virt, IRDMA_MEMREG_TYPE_MEM);
3075 	if (IS_ERR(iwmr)) {
3076 		err = PTR_ERR(iwmr);
3077 		goto err_release;
3078 	}
3079 
3080 	err = irdma_reg_user_mr_type_mem(iwmr, access);
3081 	if (err)
3082 		goto err_iwmr;
3083 
3084 	return &iwmr->ibmr;
3085 
3086 err_iwmr:
3087 	irdma_free_iwmr(iwmr);
3088 
3089 err_release:
3090 	ib_umem_release(&umem_dmabuf->umem);
3091 
3092 	return ERR_PTR(err);
3093 }
3094 
3095 /**
3096  * irdma_reg_phys_mr - register kernel physical memory
3097  * @pd: ibpd pointer
3098  * @addr: physical address of memory to register
3099  * @size: size of memory to register
3100  * @access: Access rights
3101  * @iova_start: start of virtual address for physical buffers
3102  */
3103 struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
3104 				u64 *iova_start)
3105 {
3106 	struct irdma_device *iwdev = to_iwdev(pd->device);
3107 	struct irdma_pbl *iwpbl;
3108 	struct irdma_mr *iwmr;
3109 	u32 stag;
3110 	int ret;
3111 
3112 	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
3113 	if (!iwmr)
3114 		return ERR_PTR(-ENOMEM);
3115 
3116 	iwmr->ibmr.pd = pd;
3117 	iwmr->ibmr.device = pd->device;
3118 	iwpbl = &iwmr->iwpbl;
3119 	iwpbl->iwmr = iwmr;
3120 	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
3121 	iwpbl->user_base = *iova_start;
3122 	stag = irdma_create_stag(iwdev);
3123 	if (!stag) {
3124 		ret = -ENOMEM;
3125 		goto err;
3126 	}
3127 
3128 	iwmr->stag = stag;
3129 	iwmr->ibmr.iova = *iova_start;
3130 	iwmr->ibmr.rkey = stag;
3131 	iwmr->ibmr.lkey = stag;
3132 	iwmr->page_cnt = 1;
3133 	iwmr->pgaddrmem[0] = addr;
3134 	iwmr->len = size;
3135 	iwmr->page_size = SZ_4K;
3136 	ret = irdma_hwreg_mr(iwdev, iwmr, access);
3137 	if (ret) {
3138 		irdma_free_stag(iwdev, stag);
3139 		goto err;
3140 	}
3141 
3142 	return &iwmr->ibmr;
3143 
3144 err:
3145 	kfree(iwmr);
3146 
3147 	return ERR_PTR(ret);
3148 }
3149 
3150 /**
3151  * irdma_get_dma_mr - register physical mem
3152  * @pd: ptr of pd
3153  * @acc: access for memory
3154  */
3155 static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc)
3156 {
3157 	u64 kva = 0;
3158 
3159 	return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
3160 }
3161 
3162 /**
3163  * irdma_del_memlist - delete pbl list entries for CQ/QP
3164  * @iwmr: iwmr for IB's user page addresses
3165  * @ucontext: ptr to user context
3166  */
3167 static void irdma_del_memlist(struct irdma_mr *iwmr,
3168 			      struct irdma_ucontext *ucontext)
3169 {
3170 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3171 	unsigned long flags;
3172 
3173 	switch (iwmr->type) {
3174 	case IRDMA_MEMREG_TYPE_CQ:
3175 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
3176 		if (iwpbl->on_list) {
3177 			iwpbl->on_list = false;
3178 			list_del(&iwpbl->list);
3179 		}
3180 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
3181 		break;
3182 	case IRDMA_MEMREG_TYPE_QP:
3183 		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
3184 		if (iwpbl->on_list) {
3185 			iwpbl->on_list = false;
3186 			list_del(&iwpbl->list);
3187 		}
3188 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
3189 		break;
3190 	default:
3191 		break;
3192 	}
3193 }
3194 
3195 /**
3196  * irdma_dereg_mr - deregister mr
3197  * @ib_mr: mr ptr for dereg
3198  * @udata: user data
3199  */
3200 static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3201 {
3202 	struct ib_pd *ibpd = ib_mr->pd;
3203 	struct irdma_pd *iwpd = to_iwpd(ibpd);
3204 	struct irdma_mr *iwmr = to_iwmr(ib_mr);
3205 	struct irdma_device *iwdev = to_iwdev(ib_mr->device);
3206 	struct irdma_dealloc_stag_info *info;
3207 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3208 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
3209 	struct irdma_cqp_request *cqp_request;
3210 	struct cqp_cmds_info *cqp_info;
3211 	int status;
3212 
3213 	if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
3214 		if (iwmr->region) {
3215 			struct irdma_ucontext *ucontext;
3216 
3217 			ucontext = rdma_udata_to_drv_context(udata,
3218 						struct irdma_ucontext,
3219 						ibucontext);
3220 			irdma_del_memlist(iwmr, ucontext);
3221 		}
3222 		goto done;
3223 	}
3224 
3225 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3226 	if (!cqp_request)
3227 		return -ENOMEM;
3228 
3229 	cqp_info = &cqp_request->info;
3230 	info = &cqp_info->in.u.dealloc_stag.info;
3231 	memset(info, 0, sizeof(*info));
3232 	info->pd_id = iwpd->sc_pd.pd_id;
3233 	info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
3234 	info->mr = true;
3235 	if (iwpbl->pbl_allocated)
3236 		info->dealloc_pbl = true;
3237 
3238 	cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
3239 	cqp_info->post_sq = 1;
3240 	cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
3241 	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
3242 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3243 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3244 	if (status)
3245 		return status;
3246 
3247 	irdma_free_stag(iwdev, iwmr->stag);
3248 done:
3249 	if (iwpbl->pbl_allocated)
3250 		irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
3251 	ib_umem_release(iwmr->region);
3252 	kfree(iwmr);
3253 
3254 	return 0;
3255 }
3256 
3257 /**
3258  * irdma_post_send - post send wr for kernel application
3259  * @ibqp: qp ptr for wr
3260  * @ib_wr: work request ptr
3261  * @bad_wr: return of bad wr if err
3262  */
3263 static int irdma_post_send(struct ib_qp *ibqp,
3264 			   const struct ib_send_wr *ib_wr,
3265 			   const struct ib_send_wr **bad_wr)
3266 {
3267 	struct irdma_qp *iwqp;
3268 	struct irdma_qp_uk *ukqp;
3269 	struct irdma_sc_dev *dev;
3270 	struct irdma_post_sq_info info;
3271 	int err = 0;
3272 	unsigned long flags;
3273 	bool inv_stag;
3274 	struct irdma_ah *ah;
3275 
3276 	iwqp = to_iwqp(ibqp);
3277 	ukqp = &iwqp->sc_qp.qp_uk;
3278 	dev = &iwqp->iwdev->rf->sc_dev;
3279 
3280 	spin_lock_irqsave(&iwqp->lock, flags);
3281 	while (ib_wr) {
3282 		memset(&info, 0, sizeof(info));
3283 		inv_stag = false;
3284 		info.wr_id = (ib_wr->wr_id);
3285 		if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
3286 			info.signaled = true;
3287 		if (ib_wr->send_flags & IB_SEND_FENCE)
3288 			info.read_fence = true;
3289 		switch (ib_wr->opcode) {
3290 		case IB_WR_SEND_WITH_IMM:
3291 			if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
3292 				info.imm_data_valid = true;
3293 				info.imm_data = ntohl(ib_wr->ex.imm_data);
3294 			} else {
3295 				err = -EINVAL;
3296 				break;
3297 			}
3298 			fallthrough;
3299 		case IB_WR_SEND:
3300 		case IB_WR_SEND_WITH_INV:
3301 			if (ib_wr->opcode == IB_WR_SEND ||
3302 			    ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
3303 				if (ib_wr->send_flags & IB_SEND_SOLICITED)
3304 					info.op_type = IRDMA_OP_TYPE_SEND_SOL;
3305 				else
3306 					info.op_type = IRDMA_OP_TYPE_SEND;
3307 			} else {
3308 				if (ib_wr->send_flags & IB_SEND_SOLICITED)
3309 					info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
3310 				else
3311 					info.op_type = IRDMA_OP_TYPE_SEND_INV;
3312 				info.stag_to_inv = ib_wr->ex.invalidate_rkey;
3313 			}
3314 
3315 			info.op.send.num_sges = ib_wr->num_sge;
3316 			info.op.send.sg_list = ib_wr->sg_list;
3317 			if (iwqp->ibqp.qp_type == IB_QPT_UD ||
3318 			    iwqp->ibqp.qp_type == IB_QPT_GSI) {
3319 				ah = to_iwah(ud_wr(ib_wr)->ah);
3320 				info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
3321 				info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
3322 				info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
3323 			}
3324 
3325 			if (ib_wr->send_flags & IB_SEND_INLINE)
3326 				err = irdma_uk_inline_send(ukqp, &info, false);
3327 			else
3328 				err = irdma_uk_send(ukqp, &info, false);
3329 			break;
3330 		case IB_WR_RDMA_WRITE_WITH_IMM:
3331 			if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
3332 				info.imm_data_valid = true;
3333 				info.imm_data = ntohl(ib_wr->ex.imm_data);
3334 			} else {
3335 				err = -EINVAL;
3336 				break;
3337 			}
3338 			fallthrough;
3339 		case IB_WR_RDMA_WRITE:
3340 			if (ib_wr->send_flags & IB_SEND_SOLICITED)
3341 				info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
3342 			else
3343 				info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
3344 
3345 			info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
3346 			info.op.rdma_write.lo_sg_list = ib_wr->sg_list;
3347 			info.op.rdma_write.rem_addr.addr =
3348 				rdma_wr(ib_wr)->remote_addr;
3349 			info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
3350 			if (ib_wr->send_flags & IB_SEND_INLINE)
3351 				err = irdma_uk_inline_rdma_write(ukqp, &info, false);
3352 			else
3353 				err = irdma_uk_rdma_write(ukqp, &info, false);
3354 			break;
3355 		case IB_WR_RDMA_READ_WITH_INV:
3356 			inv_stag = true;
3357 			fallthrough;
3358 		case IB_WR_RDMA_READ:
3359 			if (ib_wr->num_sge >
3360 			    dev->hw_attrs.uk_attrs.max_hw_read_sges) {
3361 				err = -EINVAL;
3362 				break;
3363 			}
3364 			info.op_type = IRDMA_OP_TYPE_RDMA_READ;
3365 			info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
3366 			info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
3367 			info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
3368 			info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
3369 			err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
3370 			break;
3371 		case IB_WR_LOCAL_INV:
3372 			info.op_type = IRDMA_OP_TYPE_INV_STAG;
3373 			info.local_fence = info.read_fence;
3374 			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
3375 			err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
3376 			break;
3377 		case IB_WR_REG_MR: {
3378 			struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
3379 			struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
3380 			struct irdma_fast_reg_stag_info stag_info = {};
3381 
3382 			stag_info.signaled = info.signaled;
3383 			stag_info.read_fence = info.read_fence;
3384 			stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
3385 			stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
3386 			stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
3387 			stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
3388 			stag_info.wr_id = ib_wr->wr_id;
3389 			stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
3390 			stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
3391 			stag_info.total_len = iwmr->ibmr.length;
3392 			stag_info.reg_addr_pa = *palloc->level1.addr;
3393 			stag_info.first_pm_pbl_index = palloc->level1.idx;
3394 			stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
3395 			if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)
3396 				stag_info.chunk_size = 1;
3397 			err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
3398 							true);
3399 			break;
3400 		}
3401 		default:
3402 			err = -EINVAL;
3403 			ibdev_dbg(&iwqp->iwdev->ibdev,
3404 				  "VERBS: post_send bad opcode = 0x%x\n",
3405 				  ib_wr->opcode);
3406 			break;
3407 		}
3408 
3409 		if (err)
3410 			break;
3411 		ib_wr = ib_wr->next;
3412 	}
3413 
3414 	if (!iwqp->flush_issued) {
3415 		if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
3416 			irdma_uk_qp_post_wr(ukqp);
3417 		spin_unlock_irqrestore(&iwqp->lock, flags);
3418 	} else {
3419 		spin_unlock_irqrestore(&iwqp->lock, flags);
3420 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
3421 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
3422 	}
3423 	if (err)
3424 		*bad_wr = ib_wr;
3425 
3426 	return err;
3427 }
3428 
3429 /**
3430  * irdma_post_recv - post receive wr for kernel application
3431  * @ibqp: ib qp pointer
3432  * @ib_wr: work request for receive
3433  * @bad_wr: bad wr caused an error
3434  */
3435 static int irdma_post_recv(struct ib_qp *ibqp,
3436 			   const struct ib_recv_wr *ib_wr,
3437 			   const struct ib_recv_wr **bad_wr)
3438 {
3439 	struct irdma_qp *iwqp;
3440 	struct irdma_qp_uk *ukqp;
3441 	struct irdma_post_rq_info post_recv = {};
3442 	unsigned long flags;
3443 	int err = 0;
3444 
3445 	iwqp = to_iwqp(ibqp);
3446 	ukqp = &iwqp->sc_qp.qp_uk;
3447 
3448 	spin_lock_irqsave(&iwqp->lock, flags);
3449 	while (ib_wr) {
3450 		post_recv.num_sges = ib_wr->num_sge;
3451 		post_recv.wr_id = ib_wr->wr_id;
3452 		post_recv.sg_list = ib_wr->sg_list;
3453 		err = irdma_uk_post_receive(ukqp, &post_recv);
3454 		if (err) {
3455 			ibdev_dbg(&iwqp->iwdev->ibdev,
3456 				  "VERBS: post_recv err %d\n", err);
3457 			goto out;
3458 		}
3459 
3460 		ib_wr = ib_wr->next;
3461 	}
3462 
3463 out:
3464 	spin_unlock_irqrestore(&iwqp->lock, flags);
3465 	if (iwqp->flush_issued)
3466 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
3467 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
3468 
3469 	if (err)
3470 		*bad_wr = ib_wr;
3471 
3472 	return err;
3473 }
3474 
3475 /**
3476  * irdma_flush_err_to_ib_wc_status - map flush error code to IB wc status
3477  * @opcode: iwarp flush code
3478  */
3479 static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
3480 {
3481 	switch (opcode) {
3482 	case FLUSH_PROT_ERR:
3483 		return IB_WC_LOC_PROT_ERR;
3484 	case FLUSH_REM_ACCESS_ERR:
3485 		return IB_WC_REM_ACCESS_ERR;
3486 	case FLUSH_LOC_QP_OP_ERR:
3487 		return IB_WC_LOC_QP_OP_ERR;
3488 	case FLUSH_REM_OP_ERR:
3489 		return IB_WC_REM_OP_ERR;
3490 	case FLUSH_LOC_LEN_ERR:
3491 		return IB_WC_LOC_LEN_ERR;
3492 	case FLUSH_GENERAL_ERR:
3493 		return IB_WC_WR_FLUSH_ERR;
3494 	case FLUSH_RETRY_EXC_ERR:
3495 		return IB_WC_RETRY_EXC_ERR;
3496 	case FLUSH_MW_BIND_ERR:
3497 		return IB_WC_MW_BIND_ERR;
3498 	case FLUSH_REM_INV_REQ_ERR:
3499 		return IB_WC_REM_INV_REQ_ERR;
3500 	case FLUSH_FATAL_ERR:
3501 	default:
3502 		return IB_WC_FATAL_ERR;
3503 	}
3504 }
3505 
3506 /**
3507  * irdma_process_cqe - process cqe info
3508  * @entry: processed cqe
3509  * @cq_poll_info: cqe info
3510  */
3511 static void irdma_process_cqe(struct ib_wc *entry,
3512 			      struct irdma_cq_poll_info *cq_poll_info)
3513 {
3514 	struct irdma_sc_qp *qp;
3515 
3516 	entry->wc_flags = 0;
3517 	entry->pkey_index = 0;
3518 	entry->wr_id = cq_poll_info->wr_id;
3519 
3520 	qp = cq_poll_info->qp_handle;
3521 	entry->qp = qp->qp_uk.back_qp;
3522 
3523 	if (cq_poll_info->error) {
3524 		entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
3525 				irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR;
3526 
3527 		entry->vendor_err = cq_poll_info->major_err << 16 |
3528 				    cq_poll_info->minor_err;
3529 	} else {
3530 		entry->status = IB_WC_SUCCESS;
3531 		if (cq_poll_info->imm_valid) {
3532 			entry->ex.imm_data = htonl(cq_poll_info->imm_data);
3533 			entry->wc_flags |= IB_WC_WITH_IMM;
3534 		}
3535 		if (cq_poll_info->ud_smac_valid) {
3536 			ether_addr_copy(entry->smac, cq_poll_info->ud_smac);
3537 			entry->wc_flags |= IB_WC_WITH_SMAC;
3538 		}
3539 
3540 		if (cq_poll_info->ud_vlan_valid) {
3541 			u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
3542 
3543 			entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
3544 			if (vlan) {
3545 				entry->vlan_id = vlan;
3546 				entry->wc_flags |= IB_WC_WITH_VLAN;
3547 			}
3548 		} else {
3549 			entry->sl = 0;
3550 		}
3551 	}
3552 
3553 	if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
3554 		set_ib_wc_op_sq(cq_poll_info, entry);
3555 	} else {
3556 		set_ib_wc_op_rq(cq_poll_info, entry,
3557 				qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM);
3558 		if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
3559 		    cq_poll_info->stag_invalid_set) {
3560 			entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
3561 			entry->wc_flags |= IB_WC_WITH_INVALIDATE;
3562 		}
3563 	}
3564 
3565 	if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
3566 		entry->src_qp = cq_poll_info->ud_src_qpn;
3567 		entry->slid = 0;
3568 		entry->wc_flags |=
3569 			(IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
3570 		entry->network_hdr_type = cq_poll_info->ipv4 ?
3571 						  RDMA_NETWORK_IPV4 :
3572 						  RDMA_NETWORK_IPV6;
3573 	} else {
3574 		entry->src_qp = cq_poll_info->qp_id;
3575 	}
3576 
3577 	entry->byte_len = cq_poll_info->bytes_xfered;
3578 }
3579 
3580 /**
3581  * irdma_poll_one - poll one entry of the CQ
3582  * @ukcq: ukcq to poll
3583  * @cur_cqe: current CQE info to be filled in
3584  * @entry: ib_wc object to be filled for non-extended CQ or NULL for extended CQ
3585  *
3586  * Returns the internal irdma device error code or 0 on success
3587  */
3588 static inline int irdma_poll_one(struct irdma_cq_uk *ukcq,
3589 				 struct irdma_cq_poll_info *cur_cqe,
3590 				 struct ib_wc *entry)
3591 {
3592 	int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
3593 
3594 	if (ret)
3595 		return ret;
3596 
3597 	irdma_process_cqe(entry, cur_cqe);
3598 
3599 	return 0;
3600 }
3601 
3602 /**
3603  * __irdma_poll_cq - poll cq for completion (kernel apps)
3604  * @iwcq: cq to poll
3605  * @num_entries: number of entries to poll
3606  * @entry: array of ib_wc structures to fill with completed entries
3607  */
3608 static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
3609 {
3610 	struct list_head *tmp_node, *list_node;
3611 	struct irdma_cq_buf *last_buf = NULL;
3612 	struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
3613 	struct irdma_cq_buf *cq_buf;
3614 	int ret;
3615 	struct irdma_device *iwdev;
3616 	struct irdma_cq_uk *ukcq;
3617 	bool cq_new_cqe = false;
3618 	int resized_bufs = 0;
3619 	int npolled = 0;
3620 
3621 	iwdev = to_iwdev(iwcq->ibcq.device);
3622 	ukcq = &iwcq->sc_cq.cq_uk;
3623 
3624 	/* go through the list of previously resized CQ buffers */
3625 	list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
3626 		cq_buf = container_of(list_node, struct irdma_cq_buf, list);
3627 		while (npolled < num_entries) {
3628 			ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled);
3629 			if (!ret) {
3630 				++npolled;
3631 				cq_new_cqe = true;
3632 				continue;
3633 			}
3634 			if (ret == -ENOENT)
3635 				break;
3636 			/* QP using the CQ is destroyed. Skip reporting this CQE */
3637 			if (ret == -EFAULT) {
3638 				cq_new_cqe = true;
3639 				continue;
3640 			}
3641 			goto error;
3642 		}
3643 
3644 		/* save the resized CQ buffer which received the last cqe */
3645 		if (cq_new_cqe)
3646 			last_buf = cq_buf;
3647 		cq_new_cqe = false;
3648 	}
3649 
3650 	/* check the current CQ for new cqes */
3651 	while (npolled < num_entries) {
3652 		ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
3653 		if (ret == -ENOENT) {
3654 			ret = irdma_generated_cmpls(iwcq, cur_cqe);
3655 			if (!ret)
3656 				irdma_process_cqe(entry + npolled, cur_cqe);
3657 		}
3658 		if (!ret) {
3659 			++npolled;
3660 			cq_new_cqe = true;
3661 			continue;
3662 		}
3663 
3664 		if (ret == -ENOENT)
3665 			break;
3666 		/* QP using the CQ is destroyed. Skip reporting this CQE */
3667 		if (ret == -EFAULT) {
3668 			cq_new_cqe = true;
3669 			continue;
3670 		}
3671 		goto error;
3672 	}
3673 
3674 	if (cq_new_cqe)
3675 		/* all previous CQ resizes are complete */
3676 		resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
3677 	else if (last_buf)
3678 		/* only CQ resizes up to the last_buf are complete */
3679 		resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
3680 	if (resized_bufs)
3681 		/* report to the HW the number of complete CQ resizes */
3682 		irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
3683 
3684 	return npolled;
3685 error:
3686 	ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
3687 		  __func__, ret);
3688 
3689 	return ret;
3690 }
3691 
3692 /**
3693  * irdma_poll_cq - poll cq for completion (kernel apps)
3694  * @ibcq: cq to poll
3695  * @num_entries: number of entries to poll
3696  * @entry: array of ib_wc structures to fill with completed entries
3697  */
3698 static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
3699 			 struct ib_wc *entry)
3700 {
3701 	struct irdma_cq *iwcq;
3702 	unsigned long flags;
3703 	int ret;
3704 
3705 	iwcq = to_iwcq(ibcq);
3706 
3707 	spin_lock_irqsave(&iwcq->lock, flags);
3708 	ret = __irdma_poll_cq(iwcq, num_entries, entry);
3709 	spin_unlock_irqrestore(&iwcq->lock, flags);
3710 
3711 	return ret;
3712 }
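
/*
 * Illustrative sketch only, not part of this driver: a typical consumer
 * drains the CQ through the .poll_cq hook above, usually from its CQ
 * completion handler. cq is a placeholder and process_wc() is a
 * placeholder callback assumed for the example.
 *
 *	struct ib_wc wc[8];
 *	int n, i;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			process_wc(&wc[i]);
 */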
3713 
3714 /**
3715  * irdma_req_notify_cq - arm a cq for a kernel application
3716  * @ibcq: cq to arm
3717  * @notify_flags: notification flags
3718  */
3719 static int irdma_req_notify_cq(struct ib_cq *ibcq,
3720 			       enum ib_cq_notify_flags notify_flags)
3721 {
3722 	struct irdma_cq *iwcq;
3723 	struct irdma_cq_uk *ukcq;
3724 	unsigned long flags;
3725 	enum irdma_cmpl_notify cq_notify;
3726 	bool promo_event = false;
3727 	int ret = 0;
3728 
3729 	cq_notify = notify_flags == IB_CQ_SOLICITED ?
3730 		    IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
3731 	iwcq = to_iwcq(ibcq);
3732 	ukcq = &iwcq->sc_cq.cq_uk;
3733 
3734 	spin_lock_irqsave(&iwcq->lock, flags);
3735 	/* Only promote to arm the CQ for any event if the last arm event was solicited. */
3736 	if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
3737 		promo_event = true;
3738 
3739 	if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) {
3740 		iwcq->last_notify = cq_notify;
3741 		irdma_uk_cq_request_notification(ukcq, cq_notify);
3742 	}
3743 
3744 	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3745 	    (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
3746 		ret = 1;
3747 	spin_unlock_irqrestore(&iwcq->lock, flags);
3748 
3749 	return ret;
3750 }
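
/*
 * Illustrative sketch only, not part of this driver: the standard
 * arm-then-repoll pattern used against the .req_notify_cq hook above.
 * With IB_CQ_REPORT_MISSED_EVENTS a positive return means completions
 * may already be pending, so the consumer polls again before sleeping.
 * cq is a placeholder and drain_cq() is a placeholder poll loop assumed
 * for the example.
 *
 *	do {
 *		drain_cq(cq);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */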
3751 
3752 static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
3753 				     struct ib_port_immutable *immutable)
3754 {
3755 	struct ib_port_attr attr;
3756 	int err;
3757 
3758 	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3759 	err = ib_query_port(ibdev, port_num, &attr);
3760 	if (err)
3761 		return err;
3762 
3763 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
3764 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
3765 	immutable->gid_tbl_len = attr.gid_tbl_len;
3766 
3767 	return 0;
3768 }
3769 
3770 static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
3771 				   struct ib_port_immutable *immutable)
3772 {
3773 	struct ib_port_attr attr;
3774 	int err;
3775 
3776 	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
3777 	err = ib_query_port(ibdev, port_num, &attr);
3778 	if (err)
3779 		return err;
3780 	immutable->gid_tbl_len = attr.gid_tbl_len;
3781 
3782 	return 0;
3783 }
3784 
3785 static const struct rdma_stat_desc irdma_hw_stat_names[] = {
3786 	/* gen1 - 32-bit */
3787 	[IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name		= "ip4InDiscards",
3788 	[IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name		= "ip4InTruncatedPkts",
3789 	[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name		= "ip4OutNoRoutes",
3790 	[IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name		= "ip6InDiscards",
3791 	[IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name		= "ip6InTruncatedPkts",
3792 	[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name		= "ip6OutNoRoutes",
3793 	[IRDMA_HW_STAT_INDEX_TCPRTXSEG].name		= "tcpRetransSegs",
3794 	[IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name		= "tcpInOptErrors",
3795 	[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name	= "tcpInProtoErrors",
3796 	[IRDMA_HW_STAT_INDEX_RXVLANERR].name		= "rxVlanErrors",
3797 	/* gen1 - 64-bit */
3798 	[IRDMA_HW_STAT_INDEX_IP4RXOCTS].name		= "ip4InOctets",
3799 	[IRDMA_HW_STAT_INDEX_IP4RXPKTS].name		= "ip4InPkts",
3800 	[IRDMA_HW_STAT_INDEX_IP4RXFRAGS].name		= "ip4InReasmRqd",
3801 	[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS].name		= "ip4InMcastPkts",
3802 	[IRDMA_HW_STAT_INDEX_IP4TXOCTS].name		= "ip4OutOctets",
3803 	[IRDMA_HW_STAT_INDEX_IP4TXPKTS].name		= "ip4OutPkts",
3804 	[IRDMA_HW_STAT_INDEX_IP4TXFRAGS].name		= "ip4OutSegRqd",
3805 	[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS].name		= "ip4OutMcastPkts",
3806 	[IRDMA_HW_STAT_INDEX_IP6RXOCTS].name		= "ip6InOctets",
3807 	[IRDMA_HW_STAT_INDEX_IP6RXPKTS].name		= "ip6InPkts",
3808 	[IRDMA_HW_STAT_INDEX_IP6RXFRAGS].name		= "ip6InReasmRqd",
3809 	[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS].name		= "ip6InMcastPkts",
3810 	[IRDMA_HW_STAT_INDEX_IP6TXOCTS].name		= "ip6OutOctets",
3811 	[IRDMA_HW_STAT_INDEX_IP6TXPKTS].name		= "ip6OutPkts",
3812 	[IRDMA_HW_STAT_INDEX_IP6TXFRAGS].name		= "ip6OutSegRqd",
3813 	[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS].name		= "ip6OutMcastPkts",
3814 	[IRDMA_HW_STAT_INDEX_TCPRXSEGS].name		= "tcpInSegs",
3815 	[IRDMA_HW_STAT_INDEX_TCPTXSEG].name		= "tcpOutSegs",
3816 	[IRDMA_HW_STAT_INDEX_RDMARXRDS].name		= "iwInRdmaReads",
3817 	[IRDMA_HW_STAT_INDEX_RDMARXSNDS].name		= "iwInRdmaSends",
3818 	[IRDMA_HW_STAT_INDEX_RDMARXWRS].name		= "iwInRdmaWrites",
3819 	[IRDMA_HW_STAT_INDEX_RDMATXRDS].name		= "iwOutRdmaReads",
3820 	[IRDMA_HW_STAT_INDEX_RDMATXSNDS].name		= "iwOutRdmaSends",
3821 	[IRDMA_HW_STAT_INDEX_RDMATXWRS].name		= "iwOutRdmaWrites",
3822 	[IRDMA_HW_STAT_INDEX_RDMAVBND].name		= "iwRdmaBnd",
3823 	[IRDMA_HW_STAT_INDEX_RDMAVINV].name		= "iwRdmaInv",
3824 
3825 	/* gen2 - 32-bit */
3826 	[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name	= "cnpHandled",
3827 	[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name	= "cnpIgnored",
3828 	[IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name		= "cnpSent",
3829 	/* gen2 - 64-bit */
3830 	[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS].name		= "ip4InMcastOctets",
3831 	[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS].name		= "ip4OutMcastOctets",
3832 	[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS].name		= "ip6InMcastOctets",
3833 	[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS].name		= "ip6OutMcastOctets",
3834 	[IRDMA_HW_STAT_INDEX_UDPRXPKTS].name		= "RxUDP",
3835 	[IRDMA_HW_STAT_INDEX_UDPTXPKTS].name		= "TxUDP",
3836 	[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS].name	= "RxECNMrkd",
3837 
3838 };
3839 
3840 static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
3841 {
3842 	struct irdma_device *iwdev = to_iwdev(dev);
3843 
3844 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u",
3845 		 irdma_fw_major_ver(&iwdev->rf->sc_dev),
3846 		 irdma_fw_minor_ver(&iwdev->rf->sc_dev));
3847 }
3848 
3849 /**
3850  * irdma_alloc_hw_port_stats - Allocate a hw stats structure
3851  * @ibdev: device pointer from stack
3852  * @port_num: port number
3853  */
3854 static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
3855 						       u32 port_num)
3856 {
3857 	struct irdma_device *iwdev = to_iwdev(ibdev);
3858 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
3859 
3860 	int num_counters = dev->hw_attrs.max_stat_idx;
3861 	unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
3862 
3863 	return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
3864 					  lifespan);
3865 }
3866 
3867 /**
3868  * irdma_get_hw_stats - Populates the rdma_hw_stats structure
3869  * @ibdev: device pointer from stack
3870  * @stats: stats pointer from stack
3871  * @port_num: port number
3872  * @index: which hw counter the stack is requesting we update
3873  */
3874 static int irdma_get_hw_stats(struct ib_device *ibdev,
3875 			      struct rdma_hw_stats *stats, u32 port_num,
3876 			      int index)
3877 {
3878 	struct irdma_device *iwdev = to_iwdev(ibdev);
3879 	struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
3880 
3881 	if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
3882 		irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
3883 	else
3884 		irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat);
3885 
3886 	memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters);
3887 
3888 	return stats->num_counters;
3889 }
3890 
3891 /**
3892  * irdma_query_gid - Query port GID
3893  * @ibdev: device pointer from stack
3894  * @port: port number
3895  * @index: Entry index
3896  * @gid: Global ID
3897  */
3898 static int irdma_query_gid(struct ib_device *ibdev, u32 port, int index,
3899 			   union ib_gid *gid)
3900 {
3901 	struct irdma_device *iwdev = to_iwdev(ibdev);
3902 
3903 	memset(gid->raw, 0, sizeof(gid->raw));
3904 	ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
3905 
3906 	return 0;
3907 }
3908 
3909 /**
3910  * mcast_list_add - Add a new mcast item to list
3911  * @rf: RDMA PCI function
3912  * @new_elem: pointer to element to add
3913  */
3914 static void mcast_list_add(struct irdma_pci_f *rf,
3915 			   struct mc_table_list *new_elem)
3916 {
3917 	list_add(&new_elem->list, &rf->mc_qht_list.list);
3918 }
3919 
3920 /**
3921  * mcast_list_del - Remove an mcast item from list
3922  * @mc_qht_elem: pointer to mcast table list element
3923  */
3924 static void mcast_list_del(struct mc_table_list *mc_qht_elem)
3925 {
3926 	if (mc_qht_elem)
3927 		list_del(&mc_qht_elem->list);
3928 }
3929 
3930 /**
3931  * mcast_list_lookup_ip - Search mcast list for address
3932  * @rf: RDMA PCI function
3933  * @ip_mcast: pointer to mcast IP address
3934  */
3935 static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf,
3936 						  u32 *ip_mcast)
3937 {
3938 	struct mc_table_list *mc_qht_el;
3939 	struct list_head *pos, *q;
3940 
3941 	list_for_each_safe(pos, q, &rf->mc_qht_list.list) {
3942 		mc_qht_el = list_entry(pos, struct mc_table_list, list);
3943 		if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,
3944 			    sizeof(mc_qht_el->mc_info.dest_ip)))
3945 			return mc_qht_el;
3946 	}
3947 
3948 	return NULL;
3949 }
3950 
3951 /**
3952  * irdma_mcast_cqp_op - perform a mcast cqp operation
3953  * @iwdev: irdma device
3954  * @mc_grp_ctx: mcast group info
3955  * @op: operation
3956  *
3957  * returns error status
3958  */
3959 static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
3960 			      struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)
3961 {
3962 	struct cqp_cmds_info *cqp_info;
3963 	struct irdma_cqp_request *cqp_request;
3964 	int status;
3965 
3966 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3967 	if (!cqp_request)
3968 		return -ENOMEM;
3969 
3970 	cqp_request->info.in.u.mc_create.info = *mc_grp_ctx;
3971 	cqp_info = &cqp_request->info;
3972 	cqp_info->cqp_cmd = op;
3973 	cqp_info->post_sq = 1;
3974 	cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
3975 	cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
3976 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3977 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3978 
3979 	return status;
3980 }
3981 
3982 /**
3983  * irdma_mcast_mac - Get the multicast MAC for an IP address
3984  * @ip_addr: IPv4 or IPv6 address
3985  * @mac: pointer to result MAC address
3986  * @ipv4: flag indicating IPv4 or IPv6
3987  *
3988  */
3989 void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4)
3990 {
3991 	u8 *ip = (u8 *)ip_addr;
3992 
3993 	if (ipv4) {
3994 		unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, 0x00,
3995 						0x00, 0x00};
3996 
3997 		mac4[3] = ip[2] & 0x7F;
3998 		mac4[4] = ip[1];
3999 		mac4[5] = ip[0];
4000 		ether_addr_copy(mac, mac4);
4001 	} else {
4002 		unsigned char mac6[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00,
4003 						0x00, 0x00};
4004 
4005 		mac6[2] = ip[3];
4006 		mac6[3] = ip[2];
4007 		mac6[4] = ip[1];
4008 		mac6[5] = ip[0];
4009 		ether_addr_copy(mac, mac6);
4010 	}
4011 }
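
/*
 * Example, assuming a little-endian host: the IPv4 group 224.1.2.3
 * arrives here as the host-order value 0xE0010203 in ip_addr[0], and the
 * byte-wise copy above produces the RFC 1112 mapping 01:00:5e:01:02:03
 * (the low 23 bits of the group address placed in the low 23 bits of the
 * MAC).
 */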
4012 
4013 /**
4014  * irdma_attach_mcast - attach a qp to a multicast group
4015  * @ibqp: ptr to qp
4016  * @ibgid: pointer to global ID
4017  * @lid: local ID
4018  *
4019  * returns error status
4020  */
4021 static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
4022 {
4023 	struct irdma_qp *iwqp = to_iwqp(ibqp);
4024 	struct irdma_device *iwdev = iwqp->iwdev;
4025 	struct irdma_pci_f *rf = iwdev->rf;
4026 	struct mc_table_list *mc_qht_elem;
4027 	struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
4028 	unsigned long flags;
4029 	u32 ip_addr[4] = {};
4030 	u32 mgn;
4031 	u32 no_mgs;
4032 	int ret = 0;
4033 	bool ipv4;
4034 	u16 vlan_id;
4035 	union irdma_sockaddr sgid_addr;
4036 	unsigned char dmac[ETH_ALEN];
4037 
4038 	rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
4039 
4040 	if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
4041 		irdma_copy_ip_ntohl(ip_addr,
4042 				    sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4043 		irdma_get_vlan_mac_ipv6(ip_addr, &vlan_id, NULL);
4044 		ipv4 = false;
4045 		ibdev_dbg(&iwdev->ibdev,
4046 			  "VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
4047 			  ip_addr);
4048 		irdma_mcast_mac(ip_addr, dmac, false);
4049 	} else {
4050 		ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4051 		ipv4 = true;
4052 		vlan_id = irdma_get_vlan_ipv4(ip_addr);
4053 		irdma_mcast_mac(ip_addr, dmac, true);
4054 		ibdev_dbg(&iwdev->ibdev,
4055 			  "VERBS: qp_id=%d, IP4address=%pI4, MAC=%pM\n",
4056 			  ibqp->qp_num, ip_addr, dmac);
4057 	}
4058 
4059 	spin_lock_irqsave(&rf->qh_list_lock, flags);
4060 	mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
4061 	if (!mc_qht_elem) {
4062 		struct irdma_dma_mem *dma_mem_mc;
4063 
4064 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4065 		mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL);
4066 		if (!mc_qht_elem)
4067 			return -ENOMEM;
4068 
4069 		mc_qht_elem->mc_info.ipv4_valid = ipv4;
4070 		memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,
4071 		       sizeof(mc_qht_elem->mc_info.dest_ip));
4072 		ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,
4073 				       &mgn, &rf->next_mcg);
4074 		if (ret) {
4075 			kfree(mc_qht_elem);
4076 			return -ENOMEM;
4077 		}
4078 
4079 		mc_qht_elem->mc_info.mgn = mgn;
4080 		dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;
4081 		dma_mem_mc->size = ALIGN(sizeof(u64) * IRDMA_MAX_MGS_PER_CTX,
4082 					 IRDMA_HW_PAGE_SIZE);
4083 		dma_mem_mc->va = dma_alloc_coherent(rf->hw.device,
4084 						    dma_mem_mc->size,
4085 						    &dma_mem_mc->pa,
4086 						    GFP_KERNEL);
4087 		if (!dma_mem_mc->va) {
4088 			irdma_free_rsrc(rf, rf->allocated_mcgs, mgn);
4089 			kfree(mc_qht_elem);
4090 			return -ENOMEM;
4091 		}
4092 
4093 		mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;
4094 		memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,
4095 		       sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));
4096 		mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;
4097 		mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
4098 		if (vlan_id < VLAN_N_VID)
4099 			mc_qht_elem->mc_grp_ctx.vlan_valid = true;
4100 		mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id;
4101 		mc_qht_elem->mc_grp_ctx.qs_handle =
4102 			iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
4103 		ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
4104 
4105 		spin_lock_irqsave(&rf->qh_list_lock, flags);
4106 		mcast_list_add(rf, mc_qht_elem);
4107 	} else {
4108 		if (mc_qht_elem->mc_grp_ctx.no_of_mgs ==
4109 		    IRDMA_MAX_MGS_PER_CTX) {
4110 			spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4111 			return -ENOMEM;
4112 		}
4113 	}
4114 
4115 	mcg_info.qp_id = iwqp->ibqp.qp_num;
4116 	no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;
4117 	irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4118 	spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4119 
4120 	/* Only if there is a change do we need to modify or create */
4121 	if (!no_mgs) {
4122 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4123 					 IRDMA_OP_MC_CREATE);
4124 	} else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4125 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4126 					 IRDMA_OP_MC_MODIFY);
4127 	} else {
4128 		return 0;
4129 	}
4130 
4131 	if (ret)
4132 		goto error;
4133 
4134 	return 0;
4135 
4136 error:
4137 	irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4138 	if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4139 		mcast_list_del(mc_qht_elem);
4140 		dma_free_coherent(rf->hw.device,
4141 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
4142 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
4143 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
4144 		mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
4145 		irdma_free_rsrc(rf, rf->allocated_mcgs,
4146 				mc_qht_elem->mc_grp_ctx.mg_id);
4147 		kfree(mc_qht_elem);
4148 	}
4149 
4150 	return ret;
4151 }
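
/*
 * Illustrative sketch only, not part of this driver: a kernel ULP reaches
 * the attach path above via the core verbs API; for RoCE the group is
 * identified by its GID alone and the LID argument is unused. qp is a
 * placeholder and mgid is assumed to already hold the multicast GID
 * (e.g. the IPv4-mapped form of the group address). The matching
 * irdma_detach_mcast() below is reached through ib_detach_mcast() in the
 * same way.
 *
 *	int ret = ib_attach_mcast(qp, &mgid, 0);
 */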
4152 
4153 /**
4154  * irdma_detach_mcast - detach a qp from a multicast group
4155  * @ibqp: ptr to qp
4156  * @ibgid: pointer to global ID
4157  * @lid: local ID
4158  *
4159  * returns error status
4160  */
4161 static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
4162 {
4163 	struct irdma_qp *iwqp = to_iwqp(ibqp);
4164 	struct irdma_device *iwdev = iwqp->iwdev;
4165 	struct irdma_pci_f *rf = iwdev->rf;
4166 	u32 ip_addr[4] = {};
4167 	struct mc_table_list *mc_qht_elem;
4168 	struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
4169 	int ret;
4170 	unsigned long flags;
4171 	union irdma_sockaddr sgid_addr;
4172 
4173 	rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
4174 	if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
4175 		irdma_copy_ip_ntohl(ip_addr,
4176 				    sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4177 	else
4178 		ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4179 
4180 	spin_lock_irqsave(&rf->qh_list_lock, flags);
4181 	mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
4182 	if (!mc_qht_elem) {
4183 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4184 		ibdev_dbg(&iwdev->ibdev,
4185 			  "VERBS: address not found MCG\n");
4186 		return 0;
4187 	}
4188 
4189 	mcg_info.qp_id = iwqp->ibqp.qp_num;
4190 	irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4191 	if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4192 		mcast_list_del(mc_qht_elem);
4193 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4194 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4195 					 IRDMA_OP_MC_DESTROY);
4196 		if (ret) {
4197 			ibdev_dbg(&iwdev->ibdev,
4198 				  "VERBS: failed MC_DESTROY MCG\n");
4199 			spin_lock_irqsave(&rf->qh_list_lock, flags);
4200 			mcast_list_add(rf, mc_qht_elem);
4201 			spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4202 			return -EAGAIN;
4203 		}
4204 
4205 		dma_free_coherent(rf->hw.device,
4206 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
4207 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
4208 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
4209 		mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
4210 		irdma_free_rsrc(rf, rf->allocated_mcgs,
4211 				mc_qht_elem->mc_grp_ctx.mg_id);
4212 		kfree(mc_qht_elem);
4213 	} else {
4214 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4215 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4216 					 IRDMA_OP_MC_MODIFY);
4217 		if (ret) {
4218 			ibdev_dbg(&iwdev->ibdev,
4219 				  "VERBS: failed Modify MCG\n");
4220 			return ret;
4221 		}
4222 	}
4223 
4224 	return 0;
4225 }
4226 
4227 static int irdma_create_hw_ah(struct irdma_device *iwdev, struct irdma_ah *ah, bool sleep)
4228 {
4229 	struct irdma_pci_f *rf = iwdev->rf;
4230 	int err;
4231 
4232 	err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah->sc_ah.ah_info.ah_idx,
4233 			       &rf->next_ah);
4234 	if (err)
4235 		return err;
4236 
4237 	err = irdma_ah_cqp_op(rf, &ah->sc_ah, IRDMA_OP_AH_CREATE, sleep,
4238 			      irdma_gsi_ud_qp_ah_cb, &ah->sc_ah);
4239 
4240 	if (err) {
4241 		ibdev_dbg(&iwdev->ibdev, "VERBS: CQP-OP Create AH fail");
4242 		goto err_ah_create;
4243 	}
4244 
4245 	if (!sleep) {
4246 		int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
4247 
4248 		do {
4249 			irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
4250 			mdelay(1);
4251 		} while (!ah->sc_ah.ah_info.ah_valid && --cnt);
4252 
4253 		if (!cnt) {
4254 			ibdev_dbg(&iwdev->ibdev, "VERBS: CQP create AH timed out");
4255 			err = -ETIMEDOUT;
4256 			goto err_ah_create;
4257 		}
4258 	}
4259 	return 0;
4260 
4261 err_ah_create:
4262 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah->sc_ah.ah_info.ah_idx);
4263 
4264 	return err;
4265 }
4266 
4267 static int irdma_setup_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr)
4268 {
4269 	struct irdma_pd *pd = to_iwpd(ibah->pd);
4270 	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
4271 	struct rdma_ah_attr *ah_attr = attr->ah_attr;
4272 	const struct ib_gid_attr *sgid_attr;
4273 	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4274 	struct irdma_pci_f *rf = iwdev->rf;
4275 	struct irdma_sc_ah *sc_ah;
4276 	struct irdma_ah_info *ah_info;
4277 	union irdma_sockaddr sgid_addr, dgid_addr;
4278 	int err;
4279 	u8 dmac[ETH_ALEN];
4280 
4281 	ah->pd = pd;
4282 	sc_ah = &ah->sc_ah;
4283 	sc_ah->ah_info.vsi = &iwdev->vsi;
4284 	irdma_sc_init_ah(&rf->sc_dev, sc_ah);
4285 	ah->sgid_index = ah_attr->grh.sgid_index;
4286 	sgid_attr = ah_attr->grh.sgid_attr;
4287 	memcpy(&ah->dgid, &ah_attr->grh.dgid, sizeof(ah->dgid));
4288 	rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
4289 	rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid);
4290 	ah->av.attrs = *ah_attr;
4291 	ah->av.net_type = rdma_gid_attr_network_type(sgid_attr);
4292 	ah_info = &sc_ah->ah_info;
4293 	ah_info->pd_idx = pd->sc_pd.pd_id;
4294 	if (ah_attr->ah_flags & IB_AH_GRH) {
4295 		ah_info->flow_label = ah_attr->grh.flow_label;
4296 		ah_info->hop_ttl = ah_attr->grh.hop_limit;
4297 		ah_info->tc_tos = ah_attr->grh.traffic_class;
4298 	}
4299 
4300 	ether_addr_copy(dmac, ah_attr->roce.dmac);
4301 	if (ah->av.net_type == RDMA_NETWORK_IPV4) {
4302 		ah_info->ipv4_valid = true;
4303 		ah_info->dest_ip_addr[0] =
4304 			ntohl(dgid_addr.saddr_in.sin_addr.s_addr);
4305 		ah_info->src_ip_addr[0] =
4306 			ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4307 		ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
4308 						     ah_info->dest_ip_addr[0]);
4309 		if (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr)) {
4310 			ah_info->do_lpbk = true;
4311 			irdma_mcast_mac(ah_info->dest_ip_addr, dmac, true);
4312 		}
4313 	} else {
4314 		irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
4315 				    dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4316 		irdma_copy_ip_ntohl(ah_info->src_ip_addr,
4317 				    sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4318 		ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
4319 						     ah_info->dest_ip_addr);
4320 		if (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr)) {
4321 			ah_info->do_lpbk = true;
4322 			irdma_mcast_mac(ah_info->dest_ip_addr, dmac, false);
4323 		}
4324 	}
4325 
4326 	err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag,
4327 				      ah_info->mac_addr);
4328 	if (err)
4329 		return err;
4330 
4331 	ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr,
4332 					      ah_info->ipv4_valid, dmac);
4333 
4334 	if (ah_info->dst_arpindex == -1)
4335 		return -EINVAL;
4336 
4337 	if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
4338 		ah_info->vlan_tag = 0;
4339 
4340 	if (ah_info->vlan_tag < VLAN_N_VID) {
4341 		u8 prio = rt_tos2priority(ah_info->tc_tos);
4342 
4343 		prio = irdma_roce_get_vlan_prio(sgid_attr, prio);
4344 
4345 		ah_info->vlan_tag |= (u16)prio << VLAN_PRIO_SHIFT;
4346 		ah_info->insert_vlan_tag = true;
4347 	}
4348 
4349 	return 0;
4350 }
4351 
4352 /**
4353  * irdma_ah_exists - Check for existing identical AH
4354  * @iwdev: irdma device
4355  * @new_ah: AH to check for
4356  *
4357  * returns true if AH is found, false if not found.
4358  */
4359 static bool irdma_ah_exists(struct irdma_device *iwdev,
4360 			    struct irdma_ah *new_ah)
4361 {
4362 	struct irdma_ah *ah;
4363 	u32 key = new_ah->sc_ah.ah_info.dest_ip_addr[0] ^
4364 		  new_ah->sc_ah.ah_info.dest_ip_addr[1] ^
4365 		  new_ah->sc_ah.ah_info.dest_ip_addr[2] ^
4366 		  new_ah->sc_ah.ah_info.dest_ip_addr[3];
4367 
4368 	hash_for_each_possible(iwdev->ah_hash_tbl, ah, list, key) {
4369 		/* Set ah_valid and ah_id the same so memcmp can work */
4370 		new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx;
4371 		new_ah->sc_ah.ah_info.ah_valid = ah->sc_ah.ah_info.ah_valid;
4372 		if (!memcmp(&ah->sc_ah.ah_info, &new_ah->sc_ah.ah_info,
4373 			    sizeof(ah->sc_ah.ah_info))) {
4374 			refcount_inc(&ah->refcnt);
4375 			new_ah->parent_ah = ah;
4376 			return true;
4377 		}
4378 	}
4379 
4380 	return false;
4381 }
4382 
4383 /**
4384  * irdma_destroy_ah - Destroy address handle
4385  * @ibah: pointer to address handle
4386  * @ah_flags: destroy flags, e.g. RDMA_DESTROY_AH_SLEEPABLE
4387  */
4388 static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
4389 {
4390 	struct irdma_device *iwdev = to_iwdev(ibah->device);
4391 	struct irdma_ah *ah = to_iwah(ibah);
4392 
4393 	if ((ah_flags & RDMA_DESTROY_AH_SLEEPABLE) && ah->parent_ah) {
4394 		mutex_lock(&iwdev->ah_tbl_lock);
4395 		if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) {
4396 			mutex_unlock(&iwdev->ah_tbl_lock);
4397 			return 0;
4398 		}
4399 		hash_del(&ah->parent_ah->list);
4400 		kfree(ah->parent_ah);
4401 		mutex_unlock(&iwdev->ah_tbl_lock);
4402 	}
4403 
4404 	irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
4405 			false, NULL, ah);
4406 
4407 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
4408 			ah->sc_ah.ah_info.ah_idx);
4409 
4410 	return 0;
4411 }
4412 
4413 /**
4414  * irdma_create_user_ah - create user address handle
4415  * @ibah: address handle
4416  * @attr: address handle attributes
4417  * @udata: User data
4418  *
4419  * returns 0 on success, error otherwise
4420  */
4421 static int irdma_create_user_ah(struct ib_ah *ibah,
4422 				struct rdma_ah_init_attr *attr,
4423 				struct ib_udata *udata)
4424 {
4425 #define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
4426 	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
4427 	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4428 	struct irdma_create_ah_resp uresp;
4429 	struct irdma_ah *parent_ah;
4430 	int err;
4431 
4432 	if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
4433 		return -EINVAL;
4434 
4435 	err = irdma_setup_ah(ibah, attr);
4436 	if (err)
4437 		return err;
4438 	mutex_lock(&iwdev->ah_tbl_lock);
4439 	if (!irdma_ah_exists(iwdev, ah)) {
4440 		err = irdma_create_hw_ah(iwdev, ah, true);
4441 		if (err) {
4442 			mutex_unlock(&iwdev->ah_tbl_lock);
4443 			return err;
4444 		}
4445 		/* Add new AH to list */
4446 		parent_ah = kmemdup(ah, sizeof(*ah), GFP_KERNEL);
4447 		if (parent_ah) {
4448 			u32 key = parent_ah->sc_ah.ah_info.dest_ip_addr[0] ^
4449 				  parent_ah->sc_ah.ah_info.dest_ip_addr[1] ^
4450 				  parent_ah->sc_ah.ah_info.dest_ip_addr[2] ^
4451 				  parent_ah->sc_ah.ah_info.dest_ip_addr[3];
4452 
4453 			ah->parent_ah = parent_ah;
4454 			hash_add(iwdev->ah_hash_tbl, &parent_ah->list, key);
4455 			refcount_set(&parent_ah->refcnt, 1);
4456 		}
4457 	}
4458 	mutex_unlock(&iwdev->ah_tbl_lock);
4459 
4460 	uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
4461 	err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
4462 	if (err)
4463 		irdma_destroy_ah(ibah, attr->flags);
4464 
4465 	return err;
4466 }
4467 
4468 /**
4469  * irdma_create_ah - create address handle
4470  * @ibah: address handle
4471  * @attr: address handle attributes
4472  * @udata: NULL
4473  *
4474  * returns 0 on success, error otherwise
4475  */
4476 static int irdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr,
4477 			   struct ib_udata *udata)
4478 {
4479 	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
4480 	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4481 	int err;
4482 
4483 	err = irdma_setup_ah(ibah, attr);
4484 	if (err)
4485 		return err;
4486 	err = irdma_create_hw_ah(iwdev, ah, attr->flags & RDMA_CREATE_AH_SLEEPABLE);
4487 
4488 	return err;
4489 }
4490 
4491 /**
4492  * irdma_query_ah - Query address handle
4493  * @ibah: pointer to address handle
4494  * @ah_attr: address handle attributes
4495  */
4496 static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
4497 {
4498 	struct irdma_ah *ah = to_iwah(ibah);
4499 
4500 	memset(ah_attr, 0, sizeof(*ah_attr));
4501 	if (ah->av.attrs.ah_flags & IB_AH_GRH) {
4502 		ah_attr->ah_flags = IB_AH_GRH;
4503 		ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;
4504 		ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
4505 		ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
4506 		ah_attr->grh.sgid_index = ah->sgid_index;
4507 		memcpy(&ah_attr->grh.dgid, &ah->dgid,
4508 		       sizeof(ah_attr->grh.dgid));
4509 	}
4510 
4511 	return 0;
4512 }
4513 
4514 static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
4515 						 u32 port_num)
4516 {
4517 	return IB_LINK_LAYER_ETHERNET;
4518 }
4519 
4520 static const struct ib_device_ops irdma_roce_dev_ops = {
4521 	.attach_mcast = irdma_attach_mcast,
4522 	.create_ah = irdma_create_ah,
4523 	.create_user_ah = irdma_create_user_ah,
4524 	.destroy_ah = irdma_destroy_ah,
4525 	.detach_mcast = irdma_detach_mcast,
4526 	.get_link_layer = irdma_get_link_layer,
4527 	.get_port_immutable = irdma_roce_port_immutable,
4528 	.modify_qp = irdma_modify_qp_roce,
4529 	.query_ah = irdma_query_ah,
4530 	.query_pkey = irdma_query_pkey,
4531 };
4532 
4533 static const struct ib_device_ops irdma_iw_dev_ops = {
4534 	.get_port_immutable = irdma_iw_port_immutable,
4535 	.iw_accept = irdma_accept,
4536 	.iw_add_ref = irdma_qp_add_ref,
4537 	.iw_connect = irdma_connect,
4538 	.iw_create_listen = irdma_create_listen,
4539 	.iw_destroy_listen = irdma_destroy_listen,
4540 	.iw_get_qp = irdma_get_qp,
4541 	.iw_reject = irdma_reject,
4542 	.iw_rem_ref = irdma_qp_rem_ref,
4543 	.modify_qp = irdma_modify_qp,
4544 	.query_gid = irdma_query_gid,
4545 };
4546 
4547 static const struct ib_device_ops irdma_dev_ops = {
4548 	.owner = THIS_MODULE,
4549 	.driver_id = RDMA_DRIVER_IRDMA,
4550 	.uverbs_abi_ver = IRDMA_ABI_VER,
4551 
4552 	.alloc_hw_port_stats = irdma_alloc_hw_port_stats,
4553 	.alloc_mr = irdma_alloc_mr,
4554 	.alloc_mw = irdma_alloc_mw,
4555 	.alloc_pd = irdma_alloc_pd,
4556 	.alloc_ucontext = irdma_alloc_ucontext,
4557 	.create_cq = irdma_create_cq,
4558 	.create_qp = irdma_create_qp,
4559 	.dealloc_driver = irdma_ib_dealloc_device,
4560 	.dealloc_mw = irdma_dealloc_mw,
4561 	.dealloc_pd = irdma_dealloc_pd,
4562 	.dealloc_ucontext = irdma_dealloc_ucontext,
4563 	.dereg_mr = irdma_dereg_mr,
4564 	.destroy_cq = irdma_destroy_cq,
4565 	.destroy_qp = irdma_destroy_qp,
4566 	.disassociate_ucontext = irdma_disassociate_ucontext,
4567 	.get_dev_fw_str = irdma_get_dev_fw_str,
4568 	.get_dma_mr = irdma_get_dma_mr,
4569 	.get_hw_stats = irdma_get_hw_stats,
4570 	.map_mr_sg = irdma_map_mr_sg,
4571 	.mmap = irdma_mmap,
4572 	.mmap_free = irdma_mmap_free,
4573 	.poll_cq = irdma_poll_cq,
4574 	.post_recv = irdma_post_recv,
4575 	.post_send = irdma_post_send,
4576 	.query_device = irdma_query_device,
4577 	.query_port = irdma_query_port,
4578 	.query_qp = irdma_query_qp,
4579 	.reg_user_mr = irdma_reg_user_mr,
4580 	.reg_user_mr_dmabuf = irdma_reg_user_mr_dmabuf,
4581 	.req_notify_cq = irdma_req_notify_cq,
4582 	.resize_cq = irdma_resize_cq,
4583 	INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd),
4584 	INIT_RDMA_OBJ_SIZE(ib_ucontext, irdma_ucontext, ibucontext),
4585 	INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah),
4586 	INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
4587 	INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
4588 	INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp),
4589 };
4590 
4591 /**
4592  * irdma_init_roce_device - initialization of roce rdma device
4593  * @iwdev: irdma device
4594  */
4595 static void irdma_init_roce_device(struct irdma_device *iwdev)
4596 {
4597 	iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
4598 	addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
4599 			    iwdev->netdev->dev_addr);
4600 	ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
4601 }
4602 
4603 /**
4604  * irdma_init_iw_device - initialization of iwarp rdma device
4605  * @iwdev: irdma device
4606  */
4607 static void irdma_init_iw_device(struct irdma_device *iwdev)
4608 {
4609 	struct net_device *netdev = iwdev->netdev;
4610 
4611 	iwdev->ibdev.node_type = RDMA_NODE_RNIC;
4612 	addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
4613 			    netdev->dev_addr);
4614 	memcpy(iwdev->ibdev.iw_ifname, netdev->name,
4615 	       sizeof(iwdev->ibdev.iw_ifname));
4616 	ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops);
4617 }
4618 
4619 /**
4620  * irdma_init_rdma_device - initialization of rdma device
4621  * @iwdev: irdma device
4622  */
4623 static void irdma_init_rdma_device(struct irdma_device *iwdev)
4624 {
4625 	struct pci_dev *pcidev = iwdev->rf->pcidev;
4626 
4627 	if (iwdev->roce_mode)
4628 		irdma_init_roce_device(iwdev);
4629 	else
4630 		irdma_init_iw_device(iwdev);
4631 
4632 	iwdev->ibdev.phys_port_cnt = 1;
4633 	iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
4634 	iwdev->ibdev.dev.parent = &pcidev->dev;
4635 	ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops);
4636 }
4637 
4638 /**
4639  * irdma_port_ibevent - indicate port event
4640  * @iwdev: irdma device
4641  */
4642 void irdma_port_ibevent(struct irdma_device *iwdev)
4643 {
4644 	struct ib_event event;
4645 
4646 	event.device = &iwdev->ibdev;
4647 	event.element.port_num = 1;
4648 	event.event =
4649 		iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
4650 	ib_dispatch_event(&event);
4651 }
4652 
4653 /**
4654  * irdma_ib_unregister_device - unregister rdma device from IB
4655  * core
4656  * @iwdev: irdma device
4657  */
4658 void irdma_ib_unregister_device(struct irdma_device *iwdev)
4659 {
4660 	iwdev->iw_status = 0;
4661 	irdma_port_ibevent(iwdev);
4662 	ib_unregister_device(&iwdev->ibdev);
4663 }
4664 
4665 /**
4666  * irdma_ib_register_device - register irdma device to IB core
4667  * @iwdev: irdma device
4668  */
4669 int irdma_ib_register_device(struct irdma_device *iwdev)
4670 {
4671 	int ret;
4672 
4673 	irdma_init_rdma_device(iwdev);
4674 
4675 	ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1);
4676 	if (ret)
4677 		goto error;
4678 	dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX);
4679 	ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device);
4680 	if (ret)
4681 		goto error;
4682 
4683 	iwdev->iw_status = 1;
4684 	irdma_port_ibevent(iwdev);
4685 
4686 	return 0;
4687 
4688 error:
4689 	if (ret)
4690 		ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device fail\n");
4691 
4692 	return ret;
4693 }
4694 
4695 /**
4696  * irdma_ib_dealloc_device - deallocate ib device resources
4697  * @ibdev: ib device
4698  *
4699  * callback from ibdev dealloc_driver to deallocate resources
4700  * under irdma device
4701  */
4702 void irdma_ib_dealloc_device(struct ib_device *ibdev)
4703 {
4704 	struct irdma_device *iwdev = to_iwdev(ibdev);
4705 
4706 	irdma_rt_deinit_hw(iwdev);
4707 	irdma_ctrl_deinit_hw(iwdev->rf);
4708 	kfree(iwdev->rf);
4709 }
4710