xref: /freebsd/sys/dev/irdma/irdma_utils.c (revision e3aa18ad)
1 /*-
2  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
3  *
4  * Copyright (c) 2015 - 2021 Intel Corporation
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenFabrics.org BSD license below:
11  *
12  *   Redistribution and use in source and binary forms, with or
13  *   without modification, are permitted provided that the following
14  *   conditions are met:
15  *
16  *    - Redistributions of source code must retain the above
17  *	copyright notice, this list of conditions and the following
18  *	disclaimer.
19  *
20  *    - Redistributions in binary form must reproduce the above
21  *	copyright notice, this list of conditions and the following
22  *	disclaimer in the documentation and/or other materials
23  *	provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 /*$FreeBSD$*/
35 
36 #include "irdma_main.h"
37 
38 LIST_HEAD(irdma_handlers);
39 DEFINE_SPINLOCK(irdma_handler_lock);
40 
41 /**
42  * irdma_arp_table -manage arp table
43  * @rf: RDMA PCI function
44  * @ip_addr: ip address for device
45  * @mac_addr: mac address ptr
46  * @action: modify, delete or add
47  */
48 int
49 irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, u8 *mac_addr,
50 		u32 action)
51 {
52 	unsigned long flags;
53 	int arp_index;
54 	u32 ip[4] = {};
55 
56 	memcpy(ip, ip_addr, sizeof(ip));
57 
58 	spin_lock_irqsave(&rf->arp_lock, flags);
59 	for (arp_index = 0; (u32)arp_index < rf->arp_table_size; arp_index++) {
60 		if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip)))
61 			break;
62 	}
63 
64 	switch (action) {
65 	case IRDMA_ARP_ADD:
66 		if (arp_index != rf->arp_table_size) {
67 			arp_index = -1;
68 			break;
69 		}
70 
71 		arp_index = 0;
72 		if (irdma_alloc_rsrc(rf, rf->allocated_arps, rf->arp_table_size,
73 				     (u32 *)&arp_index, &rf->next_arp_index)) {
74 			arp_index = -1;
75 			break;
76 		}
77 
78 		memcpy(rf->arp_table[arp_index].ip_addr, ip,
79 		       sizeof(rf->arp_table[arp_index].ip_addr));
80 		ether_addr_copy(rf->arp_table[arp_index].mac_addr, mac_addr);
81 		break;
82 	case IRDMA_ARP_RESOLVE:
83 		if (arp_index == rf->arp_table_size)
84 			arp_index = -1;
85 		break;
86 	case IRDMA_ARP_DELETE:
87 		if (arp_index == rf->arp_table_size) {
88 			arp_index = -1;
89 			break;
90 		}
91 
92 		memset(rf->arp_table[arp_index].ip_addr, 0,
93 		       sizeof(rf->arp_table[arp_index].ip_addr));
94 		eth_zero_addr(rf->arp_table[arp_index].mac_addr);
95 		irdma_free_rsrc(rf, rf->allocated_arps, arp_index);
96 		break;
97 	default:
98 		arp_index = -1;
99 		break;
100 	}
101 
102 	spin_unlock_irqrestore(&rf->arp_lock, flags);
103 	return arp_index;
104 }
105 
106 /**
107  * irdma_add_arp - add a new arp entry if needed
108  * @rf: RDMA function
109  * @ip: IP address
110  * @mac: MAC address
111  */
112 int
113 irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, u8 *mac)
114 {
115 	int arpidx;
116 
117 	arpidx = irdma_arp_table(rf, &ip[0], NULL, IRDMA_ARP_RESOLVE);
118 	if (arpidx >= 0) {
119 		if (ether_addr_equal(rf->arp_table[arpidx].mac_addr, mac))
120 			return arpidx;
121 
122 		irdma_manage_arp_cache(rf, rf->arp_table[arpidx].mac_addr, ip,
123 				       IRDMA_ARP_DELETE);
124 	}
125 
126 	irdma_manage_arp_cache(rf, mac, ip, IRDMA_ARP_ADD);
127 
128 	return irdma_arp_table(rf, ip, NULL, IRDMA_ARP_RESOLVE);
129 }
130 
131 /**
132  * irdma_alloc_and_get_cqp_request - get cqp struct
133  * @cqp: device cqp ptr
134  * @wait: cqp to be used in wait mode
135  */
136 struct irdma_cqp_request *
137 irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
138 				bool wait)
139 {
140 	struct irdma_cqp_request *cqp_request = NULL;
141 	unsigned long flags;
142 
143 	spin_lock_irqsave(&cqp->req_lock, flags);
144 	if (!list_empty(&cqp->cqp_avail_reqs)) {
145 		cqp_request = list_entry(cqp->cqp_avail_reqs.next,
146 					 struct irdma_cqp_request, list);
147 		list_del_init(&cqp_request->list);
148 	}
149 	spin_unlock_irqrestore(&cqp->req_lock, flags);
150 	if (!cqp_request) {
151 		cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
152 		if (cqp_request) {
153 			cqp_request->dynamic = true;
154 			if (wait)
155 				init_waitqueue_head(&cqp_request->waitq);
156 		}
157 	}
158 	if (!cqp_request) {
159 		irdma_debug(cqp->sc_cqp.dev, IRDMA_DEBUG_ERR,
160 			    "CQP Request Fail: No Memory");
161 		return NULL;
162 	}
163 
164 	cqp_request->waiting = wait;
165 	atomic_set(&cqp_request->refcnt, 1);
166 	memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info));
167 
168 	return cqp_request;
169 }
170 
/**
 * irdma_get_cqp_request - increase refcount for cqp_request
 * @cqp_request: pointer to cqp_request instance
 *
 * Pairs with irdma_put_cqp_request(); the final put recycles or
 * frees the request.
 */
static inline void
irdma_get_cqp_request(struct irdma_cqp_request *cqp_request)
{
	atomic_inc(&cqp_request->refcnt);
}
180 
181 /**
182  * irdma_free_cqp_request - free cqp request
183  * @cqp: cqp ptr
184  * @cqp_request: to be put back in cqp list
185  */
186 void
187 irdma_free_cqp_request(struct irdma_cqp *cqp,
188 		       struct irdma_cqp_request *cqp_request)
189 {
190 	unsigned long flags;
191 
192 	if (cqp_request->dynamic) {
193 		kfree(cqp_request);
194 	} else {
195 		cqp_request->request_done = false;
196 		cqp_request->callback_fcn = NULL;
197 		cqp_request->waiting = false;
198 
199 		spin_lock_irqsave(&cqp->req_lock, flags);
200 		list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
201 		spin_unlock_irqrestore(&cqp->req_lock, flags);
202 	}
203 	wake_up(&cqp->remove_wq);
204 }
205 
/**
 * irdma_put_cqp_request - dec ref count and free if 0
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 *
 * Drops one reference; the last reference releases the request via
 * irdma_free_cqp_request() (recycle or kfree).
 */
void
irdma_put_cqp_request(struct irdma_cqp *cqp,
		      struct irdma_cqp_request *cqp_request)
{
	if (atomic_dec_and_test(&cqp_request->refcnt))
		irdma_free_cqp_request(cqp, cqp_request);
}
218 
/**
 * irdma_free_pending_cqp_request -free pending cqp request objs
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 *
 * Teardown helper: fails any blocked waiter with a faked errored
 * completion, gives other reference holders a bounded time to drop
 * their references, then releases ours.
 */
static void
irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
			       struct irdma_cqp_request *cqp_request)
{
	if (cqp_request->waiting) {
		/* unblock the waiter with an error completion */
		cqp_request->compl_info.error = true;
		cqp_request->request_done = true;
		wake_up(&cqp_request->waitq);
	}
	/* wait until only our reference remains (timeout in jiffies) */
	wait_event_timeout(cqp->remove_wq,
			   atomic_read(&cqp_request->refcnt) == 1, 1000);
	irdma_put_cqp_request(cqp, cqp_request);
}
237 
/**
 * irdma_cleanup_pending_cqp_op - clean-up cqp with no
 * completions
 * @rf: RDMA PCI function
 *
 * Fails every CQP request still posted on the SQ ring plus every
 * request queued on the software backlog list, so nothing waits
 * forever on completions that will never arrive.
 */
void
irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_cqp *cqp = &rf->cqp;
	struct irdma_cqp_request *cqp_request = NULL;
	struct cqp_cmds_info *pcmdinfo = NULL;
	u32 i, pending_work, wqe_idx;

	/* walk the in-flight portion of the SQ ring from the tail */
	pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
	wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
	for (i = 0; i < pending_work; i++) {
		/* scratch_array holds the request pointer stashed at post time */
		cqp_request = (struct irdma_cqp_request *)(unsigned long)
		    cqp->scratch_array[wqe_idx];
		if (cqp_request)
			irdma_free_pending_cqp_request(cqp, cqp_request);
		wqe_idx = (wqe_idx + 1) % IRDMA_RING_SIZE(cqp->sc_cqp.sq_ring);
	}

	/* drain commands that never made it onto the ring */
	while (!list_empty(&dev->cqp_cmd_head)) {
		pcmdinfo = irdma_remove_cqp_head(dev);
		cqp_request =
		    container_of(pcmdinfo, struct irdma_cqp_request, info);
		if (cqp_request)
			irdma_free_pending_cqp_request(cqp, cqp_request);
	}
}
270 
/**
 * irdma_wait_event - wait for completion
 * @rf: RDMA PCI function
 * @cqp_request: cqp request to wait
 *
 * Polls the CCQ and sleeps in slices until the request completes.
 * If the CQP makes no forward progress for CQP_TIMEOUT_THRESHOLD
 * consecutive slices, a function reset is requested and -ETIMEDOUT
 * returned. Completed-with-error requests yield -EIO.
 */
static int
irdma_wait_event(struct irdma_pci_f *rf,
		 struct irdma_cqp_request *cqp_request)
{
	struct irdma_cqp_timeout cqp_timeout = {0};
	bool cqp_error = false;
	int err_code = 0;

	cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
	do {
		/* reap any ready CQP completions before sleeping */
		irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
		if (wait_event_timeout(cqp_request->waitq,
				       cqp_request->request_done,
				       msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
			break;

		/* bumps cqp_timeout.count only if no commands completed */
		irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);

		if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
			continue;

		/* CQP is stuck - request a reset once per function */
		if (!rf->reset) {
			rf->reset = true;
			rf->gen_ops.request_reset(rf);
		}
		return -ETIMEDOUT;
	} while (1);

	cqp_error = cqp_request->compl_info.error;
	if (cqp_error) {
		err_code = -EIO;
		/* maj 0xFFFF / min 0x8029: presumably a fatal firmware
		 * condition requiring reset - TODO confirm against HW spec
		 */
		if (cqp_request->compl_info.maj_err_code == 0xFFFF &&
		    cqp_request->compl_info.min_err_code == 0x8029) {
			if (!rf->reset) {
				rf->reset = true;
				rf->gen_ops.request_reset(rf);
			}
		}
	}

	return err_code;
}
318 
319 static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
320 	[IRDMA_OP_CEQ_DESTROY] = "Destroy CEQ Cmd",
321 	[IRDMA_OP_AEQ_DESTROY] = "Destroy AEQ Cmd",
322 	[IRDMA_OP_DELETE_ARP_CACHE_ENTRY] = "Delete ARP Cache Cmd",
323 	[IRDMA_OP_MANAGE_APBVT_ENTRY] = "Manage APBV Table Entry Cmd",
324 	[IRDMA_OP_CEQ_CREATE] = "CEQ Create Cmd",
325 	[IRDMA_OP_AEQ_CREATE] = "AEQ Destroy Cmd",
326 	[IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY] = "Manage Quad Hash Table Entry Cmd",
327 	[IRDMA_OP_QP_MODIFY] = "Modify QP Cmd",
328 	[IRDMA_OP_QP_UPLOAD_CONTEXT] = "Upload Context Cmd",
329 	[IRDMA_OP_CQ_CREATE] = "Create CQ Cmd",
330 	[IRDMA_OP_CQ_DESTROY] = "Destroy CQ Cmd",
331 	[IRDMA_OP_QP_CREATE] = "Create QP Cmd",
332 	[IRDMA_OP_QP_DESTROY] = "Destroy QP Cmd",
333 	[IRDMA_OP_ALLOC_STAG] = "Allocate STag Cmd",
334 	[IRDMA_OP_MR_REG_NON_SHARED] = "Register Non-Shared MR Cmd",
335 	[IRDMA_OP_DEALLOC_STAG] = "Deallocate STag Cmd",
336 	[IRDMA_OP_MW_ALLOC] = "Allocate Memory Window Cmd",
337 	[IRDMA_OP_QP_FLUSH_WQES] = "Flush QP Cmd",
338 	[IRDMA_OP_ADD_ARP_CACHE_ENTRY] = "Add ARP Cache Cmd",
339 	[IRDMA_OP_MANAGE_PUSH_PAGE] = "Manage Push Page Cmd",
340 	[IRDMA_OP_UPDATE_PE_SDS] = "Update PE SDs Cmd",
341 	[IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd",
342 	[IRDMA_OP_SUSPEND] = "Suspend QP Cmd",
343 	[IRDMA_OP_RESUME] = "Resume QP Cmd",
344 	[IRDMA_OP_MANAGE_VF_PBLE_BP] = "Manage VF PBLE Backing Pages Cmd",
345 	[IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd",
346 	[IRDMA_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd",
347 	[IRDMA_OP_AH_CREATE] = "Create Address Handle Cmd",
348 	[IRDMA_OP_AH_MODIFY] = "Modify Address Handle Cmd",
349 	[IRDMA_OP_AH_DESTROY] = "Destroy Address Handle Cmd",
350 	[IRDMA_OP_MC_CREATE] = "Create Multicast Group Cmd",
351 	[IRDMA_OP_MC_DESTROY] = "Destroy Multicast Group Cmd",
352 	[IRDMA_OP_MC_MODIFY] = "Modify Multicast Group Cmd",
353 	[IRDMA_OP_STATS_ALLOCATE] = "Add Statistics Instance Cmd",
354 	[IRDMA_OP_STATS_FREE] = "Free Statistics Instance Cmd",
355 	[IRDMA_OP_STATS_GATHER] = "Gather Statistics Cmd",
356 	[IRDMA_OP_WS_ADD_NODE] = "Add Work Scheduler Node Cmd",
357 	[IRDMA_OP_WS_MODIFY_NODE] = "Modify Work Scheduler Node Cmd",
358 	[IRDMA_OP_WS_DELETE_NODE] = "Delete Work Scheduler Node Cmd",
359 	[IRDMA_OP_SET_UP_MAP] = "Set UP-UP Mapping Cmd",
360 	[IRDMA_OP_GEN_AE] = "Generate AE Cmd",
361 	[IRDMA_OP_QUERY_RDMA_FEATURES] = "RDMA Get Features Cmd",
362 	[IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY] = "Allocate Local MAC Entry Cmd",
363 	[IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd",
364 	[IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd",
365 	[IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd",
366 };
367 
/* CQP (maj, min) error code pairs that occur during normal operation
 * and are therefore logged at debug level instead of being reported
 * as critical by irdma_cqp_crit_err().
 */
static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
	{0xffff, 0x8006, "Flush No Wqe Pending"},
	{0xffff, 0x8007, "Modify QP Bad Close"},
	{0xffff, 0x8009, "LLP Closed"},
	{0xffff, 0x800a, "Reset Not Sent"}
};
374 
375 /**
376  * irdma_cqp_crit_err - check if CQP error is critical
377  * @dev: pointer to dev structure
378  * @cqp_cmd: code for last CQP operation
379  * @maj_err_code: major error code
 * @min_err_code: minor error code
381  */
382 bool
383 irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
384 		   u16 maj_err_code, u16 min_err_code)
385 {
386 	int i;
387 
388 	for (i = 0; i < ARRAY_SIZE(irdma_noncrit_err_list); ++i) {
389 		if (maj_err_code == irdma_noncrit_err_list[i].maj &&
390 		    min_err_code == irdma_noncrit_err_list[i].min) {
391 			irdma_debug(dev, IRDMA_DEBUG_CQP,
392 				    "[%s Error][%s] maj=0x%x min=0x%x\n",
393 				    irdma_noncrit_err_list[i].desc,
394 				    irdma_cqp_cmd_names[cqp_cmd],
395 				    maj_err_code,
396 				    min_err_code);
397 			return false;
398 		}
399 	}
400 	return true;
401 }
402 
/**
 * irdma_handle_cqp_op - process cqp command
 * @rf: RDMA PCI function
 * @cqp_request: cqp request to process
 *
 * Submits @cqp_request to the CQP and, for waiting requests, blocks
 * until completion. Returns 0 on success or a negative errno.
 * Critical CQP errors are logged; the extra reference taken here is
 * dropped on all error paths except when irdma_wait_event() already
 * consumed it.
 */
int
irdma_handle_cqp_op(struct irdma_pci_f *rf,
		    struct irdma_cqp_request *cqp_request)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct cqp_cmds_info *info = &cqp_request->info;
	int status;
	bool put_cqp_request = true;

	/* no point posting work while the function is resetting */
	if (rf->reset)
		return -EBUSY;

	irdma_get_cqp_request(cqp_request);
	status = irdma_process_cqp_cmd(dev, info);
	if (status)
		goto err;

	if (cqp_request->waiting) {
		/* the completion/timeout path owns the reference now */
		put_cqp_request = false;
		status = irdma_wait_event(rf, cqp_request);
		if (status)
			goto err;
	}

	return 0;

err:
	/* only escalate errors not on the known-benign list */
	if (irdma_cqp_crit_err(dev, info->cqp_cmd,
			       cqp_request->compl_info.maj_err_code,
			       cqp_request->compl_info.min_err_code))
		irdma_dev_err(dev,
			      "[%s Error][op_code=%d] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x\n",
			      irdma_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, status, cqp_request->waiting,
			      cqp_request->compl_info.error, cqp_request->compl_info.maj_err_code,
			      cqp_request->compl_info.min_err_code);

	if (put_cqp_request)
		irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}
449 
/**
 * irdma_qp_add_ref - take a reference on the irdma qp
 * @ibqp: ib qp whose underlying irdma qp refcount is incremented
 */
void
irdma_qp_add_ref(struct ib_qp *ibqp)
{
	struct irdma_qp *iwqp = to_iwqp(ibqp);

	atomic_inc(&iwqp->refcnt);
}
457 
/**
 * irdma_qp_rem_ref - drop a reference on the irdma qp
 * @ibqp: ib qp to dereference
 *
 * The last reference removes the qp from the function's qp table
 * (under qptable_lock) and completes free_qp so the destroy path
 * can proceed.
 */
void
irdma_qp_rem_ref(struct ib_qp *ibqp)
{
	struct irdma_qp *iwqp = to_iwqp(ibqp);
	struct irdma_device *iwdev = iwqp->iwdev;
	unsigned long flags;

	spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
	if (!atomic_dec_and_test(&iwqp->refcnt)) {
		spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
		return;
	}

	/* last reference: unpublish before signalling the destroyer */
	iwdev->rf->qp_table[iwqp->ibqp.qp_num] = NULL;
	spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
	complete(&iwqp->free_qp);
}
475 
/**
 * irdma_cq_add_ref - take a reference on the irdma cq
 * @ibcq: ib cq whose underlying irdma cq refcount is incremented
 */
void
irdma_cq_add_ref(struct ib_cq *ibcq)
{
	struct irdma_cq *iwcq = to_iwcq(ibcq);

	atomic_inc(&iwcq->refcnt);
}
483 
/**
 * irdma_cq_rem_ref - drop a reference on the irdma cq
 * @ibcq: ib cq to dereference
 *
 * The last reference removes the cq from the function's cq table
 * (under cqtable_lock) and completes free_cq so the destroy path
 * can proceed.
 */
void
irdma_cq_rem_ref(struct ib_cq *ibcq)
{
	struct irdma_cq *iwcq = to_iwcq(ibcq);
	struct irdma_pci_f *rf = container_of(iwcq->sc_cq.dev, struct irdma_pci_f, sc_dev);
	unsigned long flags;

	spin_lock_irqsave(&rf->cqtable_lock, flags);
	if (!atomic_dec_and_test(&iwcq->refcnt)) {
		spin_unlock_irqrestore(&rf->cqtable_lock, flags);
		return;
	}

	/* last reference: unpublish before signalling the destroyer */
	rf->cq_table[iwcq->cq_num] = NULL;
	spin_unlock_irqrestore(&rf->cqtable_lock, flags);
	complete(&iwcq->free_cq);
}
501 
502 struct ib_device *
503 irdma_get_ibdev(struct irdma_sc_dev *dev)
504 {
505 	return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev;
506 }
507 
508 /**
509  * irdma_get_qp - get qp address
510  * @device: iwarp device
511  * @qpn: qp number
512  */
513 struct ib_qp *
514 irdma_get_qp(struct ib_device *device, int qpn)
515 {
516 	struct irdma_device *iwdev = to_iwdev(device);
517 
518 	if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp)
519 		return NULL;
520 
521 	return &iwdev->rf->qp_table[qpn]->ibqp;
522 }
523 
524 /**
525  * irdma_get_hw_addr - return hw addr
526  * @par: points to shared dev
527  */
528 u8 __iomem * irdma_get_hw_addr(void *par){
529 	struct irdma_sc_dev *dev = par;
530 
531 	return dev->hw->hw_addr;
532 }
533 
534 /**
535  * irdma_remove_cqp_head - return head entry and remove
536  * @dev: device
537  */
538 void *
539 irdma_remove_cqp_head(struct irdma_sc_dev *dev)
540 {
541 	struct list_head *entry;
542 	struct list_head *list = &dev->cqp_cmd_head;
543 
544 	if (list_empty(list))
545 		return NULL;
546 
547 	entry = list->next;
548 	list_del(entry);
549 
550 	return entry;
551 }
552 
553 /**
554  * irdma_cqp_sds_cmd - create cqp command for sd
555  * @dev: hardware control device structure
556  * @sdinfo: information for sd cqp
557  *
558  */
559 int
560 irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
561 		  struct irdma_update_sds_info *sdinfo)
562 {
563 	struct irdma_cqp_request *cqp_request;
564 	struct cqp_cmds_info *cqp_info;
565 	struct irdma_pci_f *rf = dev_to_rf(dev);
566 	int status;
567 
568 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
569 	if (!cqp_request)
570 		return -ENOMEM;
571 
572 	cqp_info = &cqp_request->info;
573 	memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
574 	       sizeof(cqp_info->in.u.update_pe_sds.info));
575 	cqp_info->cqp_cmd = IRDMA_OP_UPDATE_PE_SDS;
576 	cqp_info->post_sq = 1;
577 	cqp_info->in.u.update_pe_sds.dev = dev;
578 	cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
579 
580 	status = irdma_handle_cqp_op(rf, cqp_request);
581 	irdma_put_cqp_request(&rf->cqp, cqp_request);
582 
583 	return status;
584 }
585 
586 /**
587  * irdma_cqp_qp_suspend_resume - cqp command for suspend/resume
588  * @qp: hardware control qp
589  * @op: suspend or resume
590  */
591 int
592 irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 op)
593 {
594 	struct irdma_sc_dev *dev = qp->dev;
595 	struct irdma_cqp_request *cqp_request;
596 	struct irdma_sc_cqp *cqp = dev->cqp;
597 	struct cqp_cmds_info *cqp_info;
598 	struct irdma_pci_f *rf = dev_to_rf(dev);
599 	int status;
600 
601 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
602 	if (!cqp_request)
603 		return -ENOMEM;
604 
605 	cqp_info = &cqp_request->info;
606 	cqp_info->cqp_cmd = op;
607 	cqp_info->in.u.suspend_resume.cqp = cqp;
608 	cqp_info->in.u.suspend_resume.qp = qp;
609 	cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
610 
611 	status = irdma_handle_cqp_op(rf, cqp_request);
612 	irdma_put_cqp_request(&rf->cqp, cqp_request);
613 
614 	return status;
615 }
616 
617 /**
618  * irdma_term_modify_qp - modify qp for term message
619  * @qp: hardware control qp
620  * @next_state: qp's next state
621  * @term: terminate code
622  * @term_len: length
623  */
624 void
625 irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term,
626 		     u8 term_len)
627 {
628 	struct irdma_qp *iwqp;
629 
630 	iwqp = qp->qp_uk.back_qp;
631 	irdma_next_iw_state(iwqp, next_state, 0, term, term_len);
632 };
633 
/**
 * irdma_terminate_done - after terminate is completed
 * @qp: hardware control qp
 * @timeout_occurred: indicates if terminate timer expired
 *
 * Marks terminate as done exactly once (guarded by iwqp->lock); the
 * first caller moves the qp to ERROR state and kicks off CM
 * disconnect. The terminate timer is only cancelled when we got
 * here before it fired.
 */
void
irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
{
	struct irdma_qp *iwqp;
	u8 hte = 0;
	bool first_time;
	unsigned long flags;

	iwqp = qp->qp_uk.back_qp;
	spin_lock_irqsave(&iwqp->lock, flags);
	if (iwqp->hte_added) {
		/* remember to remove the qp from the hash table */
		iwqp->hte_added = 0;
		hte = 1;
	}
	/* only the first caller past this point does the teardown work */
	first_time = !(qp->term_flags & IRDMA_TERM_DONE);
	qp->term_flags |= IRDMA_TERM_DONE;
	spin_unlock_irqrestore(&iwqp->lock, flags);
	if (first_time) {
		if (!timeout_occurred)
			irdma_terminate_del_timer(qp);

		irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, hte, 0, 0);
		irdma_cm_disconn(iwqp);
	}
}
664 
/**
 * irdma_terminate_timeout - timer callback for an expired terminate
 * @t: timer embedded in the irdma qp
 *
 * Finishes the terminate sequence with timeout_occurred set and
 * drops the qp reference taken by irdma_terminate_start_timer().
 */
static void
irdma_terminate_timeout(struct timer_list *t)
{
	struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer);
	struct irdma_sc_qp *qp = &iwqp->sc_qp;

	irdma_terminate_done(qp, 1);
	irdma_qp_rem_ref(&iwqp->ibqp);
}
674 
675 /**
676  * irdma_terminate_start_timer - start terminate timeout
677  * @qp: hardware control qp
678  */
679 void
680 irdma_terminate_start_timer(struct irdma_sc_qp *qp)
681 {
682 	struct irdma_qp *iwqp;
683 
684 	iwqp = qp->qp_uk.back_qp;
685 	irdma_qp_add_ref(&iwqp->ibqp);
686 	timer_setup(&iwqp->terminate_timer, irdma_terminate_timeout, 0);
687 	iwqp->terminate_timer.expires = jiffies + HZ;
688 
689 	add_timer(&iwqp->terminate_timer);
690 }
691 
692 /**
693  * irdma_terminate_del_timer - delete terminate timeout
694  * @qp: hardware control qp
695  */
696 void
697 irdma_terminate_del_timer(struct irdma_sc_qp *qp)
698 {
699 	struct irdma_qp *iwqp;
700 	int ret;
701 
702 	iwqp = qp->qp_uk.back_qp;
703 	ret = irdma_del_timer_compat(&iwqp->terminate_timer);
704 	if (ret)
705 		irdma_qp_rem_ref(&iwqp->ibqp);
706 }
707 
708 /**
709  * irdma_cqp_query_fpm_val_cmd - send cqp command for fpm
710  * @dev: function device struct
711  * @val_mem: buffer for fpm
712  * @hmc_fn_id: function id for fpm
713  */
714 int
715 irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
716 			    struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
717 {
718 	struct irdma_cqp_request *cqp_request;
719 	struct cqp_cmds_info *cqp_info;
720 	struct irdma_pci_f *rf = dev_to_rf(dev);
721 	int status;
722 
723 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
724 	if (!cqp_request)
725 		return -ENOMEM;
726 
727 	cqp_info = &cqp_request->info;
728 	cqp_request->param = NULL;
729 	cqp_info->in.u.query_fpm_val.cqp = dev->cqp;
730 	cqp_info->in.u.query_fpm_val.fpm_val_pa = val_mem->pa;
731 	cqp_info->in.u.query_fpm_val.fpm_val_va = val_mem->va;
732 	cqp_info->in.u.query_fpm_val.hmc_fn_id = hmc_fn_id;
733 	cqp_info->cqp_cmd = IRDMA_OP_QUERY_FPM_VAL;
734 	cqp_info->post_sq = 1;
735 	cqp_info->in.u.query_fpm_val.scratch = (uintptr_t)cqp_request;
736 
737 	status = irdma_handle_cqp_op(rf, cqp_request);
738 	irdma_put_cqp_request(&rf->cqp, cqp_request);
739 
740 	return status;
741 }
742 
743 /**
744  * irdma_cqp_commit_fpm_val_cmd - commit fpm values in hw
745  * @dev: hardware control device structure
746  * @val_mem: buffer with fpm values
747  * @hmc_fn_id: function id for fpm
748  */
749 int
750 irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
751 			     struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
752 {
753 	struct irdma_cqp_request *cqp_request;
754 	struct cqp_cmds_info *cqp_info;
755 	struct irdma_pci_f *rf = dev_to_rf(dev);
756 	int status;
757 
758 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
759 	if (!cqp_request)
760 		return -ENOMEM;
761 
762 	cqp_info = &cqp_request->info;
763 	cqp_request->param = NULL;
764 	cqp_info->in.u.commit_fpm_val.cqp = dev->cqp;
765 	cqp_info->in.u.commit_fpm_val.fpm_val_pa = val_mem->pa;
766 	cqp_info->in.u.commit_fpm_val.fpm_val_va = val_mem->va;
767 	cqp_info->in.u.commit_fpm_val.hmc_fn_id = hmc_fn_id;
768 	cqp_info->cqp_cmd = IRDMA_OP_COMMIT_FPM_VAL;
769 	cqp_info->post_sq = 1;
770 	cqp_info->in.u.commit_fpm_val.scratch = (uintptr_t)cqp_request;
771 
772 	status = irdma_handle_cqp_op(rf, cqp_request);
773 	irdma_put_cqp_request(&rf->cqp, cqp_request);
774 
775 	return status;
776 }
777 
778 /**
779  * irdma_cqp_cq_create_cmd - create a cq for the cqp
780  * @dev: device pointer
781  * @cq: pointer to created cq
782  */
783 int
784 irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
785 {
786 	struct irdma_pci_f *rf = dev_to_rf(dev);
787 	struct irdma_cqp *iwcqp = &rf->cqp;
788 	struct irdma_cqp_request *cqp_request;
789 	struct cqp_cmds_info *cqp_info;
790 	int status;
791 
792 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
793 	if (!cqp_request)
794 		return -ENOMEM;
795 
796 	cqp_info = &cqp_request->info;
797 	cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
798 	cqp_info->post_sq = 1;
799 	cqp_info->in.u.cq_create.cq = cq;
800 	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
801 
802 	status = irdma_handle_cqp_op(rf, cqp_request);
803 	irdma_put_cqp_request(iwcqp, cqp_request);
804 
805 	return status;
806 }
807 
808 /**
809  * irdma_cqp_qp_create_cmd - create a qp for the cqp
810  * @dev: device pointer
811  * @qp: pointer to created qp
812  */
813 int
814 irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
815 {
816 	struct irdma_pci_f *rf = dev_to_rf(dev);
817 	struct irdma_cqp *iwcqp = &rf->cqp;
818 	struct irdma_cqp_request *cqp_request;
819 	struct cqp_cmds_info *cqp_info;
820 	struct irdma_create_qp_info *qp_info;
821 	int status;
822 
823 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
824 	if (!cqp_request)
825 		return -ENOMEM;
826 
827 	cqp_info = &cqp_request->info;
828 	qp_info = &cqp_request->info.in.u.qp_create.info;
829 	memset(qp_info, 0, sizeof(*qp_info));
830 	qp_info->cq_num_valid = true;
831 	qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;
832 	cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
833 	cqp_info->post_sq = 1;
834 	cqp_info->in.u.qp_create.qp = qp;
835 	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
836 
837 	status = irdma_handle_cqp_op(rf, cqp_request);
838 	irdma_put_cqp_request(iwcqp, cqp_request);
839 
840 	return status;
841 }
842 
843 /**
844  * irdma_dealloc_push_page - free a push page for qp
845  * @rf: RDMA PCI function
846  * @qp: hardware control qp
847  */
848 void
849 irdma_dealloc_push_page(struct irdma_pci_f *rf,
850 			struct irdma_sc_qp *qp)
851 {
852 	struct irdma_cqp_request *cqp_request;
853 	struct cqp_cmds_info *cqp_info;
854 	int status;
855 
856 	if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX)
857 		return;
858 
859 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
860 	if (!cqp_request)
861 		return;
862 
863 	cqp_info = &cqp_request->info;
864 	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
865 	cqp_info->post_sq = 1;
866 	cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
867 	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
868 	cqp_info->in.u.manage_push_page.info.free_page = 1;
869 	cqp_info->in.u.manage_push_page.info.push_page_type = 0;
870 	cqp_info->in.u.manage_push_page.cqp = &rf->cqp.sc_cqp;
871 	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
872 	status = irdma_handle_cqp_op(rf, cqp_request);
873 	if (!status)
874 		qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
875 	irdma_put_cqp_request(&rf->cqp, cqp_request);
876 }
877 
878 /**
879  * irdma_cq_wq_destroy - send cq destroy cqp
880  * @rf: RDMA PCI function
881  * @cq: hardware control cq
882  */
883 void
884 irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
885 {
886 	struct irdma_cqp_request *cqp_request;
887 	struct cqp_cmds_info *cqp_info;
888 
889 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
890 	if (!cqp_request)
891 		return;
892 
893 	cqp_info = &cqp_request->info;
894 	cqp_info->cqp_cmd = IRDMA_OP_CQ_DESTROY;
895 	cqp_info->post_sq = 1;
896 	cqp_info->in.u.cq_destroy.cq = cq;
897 	cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
898 
899 	irdma_handle_cqp_op(rf, cqp_request);
900 	irdma_put_cqp_request(&rf->cqp, cqp_request);
901 }
902 
903 /**
904  * irdma_hw_modify_qp_callback - handle state for modifyQPs that don't wait
905  * @cqp_request: modify QP completion
906  */
907 static void
908 irdma_hw_modify_qp_callback(struct irdma_cqp_request *cqp_request)
909 {
910 	struct cqp_cmds_info *cqp_info;
911 	struct irdma_qp *iwqp;
912 
913 	cqp_info = &cqp_request->info;
914 	iwqp = cqp_info->in.u.qp_modify.qp->qp_uk.back_qp;
915 	atomic_dec(&iwqp->hw_mod_qp_pend);
916 	wake_up(&iwqp->mod_qp_waitq);
917 }
918 
/**
 * irdma_hw_modify_qp - setup cqp for modify qp
 * @iwdev: RDMA device
 * @iwqp: qp ptr (user or kernel)
 * @info: info for modify qp
 * @wait: flag to wait or not for modify qp completion
 *
 * Posts an IRDMA_OP_QP_MODIFY. On failure (iWARP only), attempts to
 * push the qp toward a safe state: either generating a bad-close AE
 * (non-waiting path) or re-issuing the modify with next state ERROR
 * and a TCP reset (waiting path). Returns 0 on success or a
 * negative errno.
 */
int
irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
		   struct irdma_modify_qp_info *info, bool wait)
{
	int status;
	struct irdma_pci_f *rf = iwdev->rf;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_modify_qp_info *m_info;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
	if (!cqp_request)
		return -ENOMEM;

	if (!wait) {
		/* completion callback accounts for the in-flight modify */
		cqp_request->callback_fcn = irdma_hw_modify_qp_callback;
		atomic_inc(&iwqp->hw_mod_qp_pend);
	}
	cqp_info = &cqp_request->info;
	m_info = &cqp_info->in.u.qp_modify.info;
	memcpy(m_info, info, sizeof(*m_info));
	cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
	cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);
	if (status) {
		/* error recovery below applies to iWARP only */
		if (rdma_protocol_roce(&iwdev->ibdev, 1))
			return status;

		switch (m_info->next_iwarp_state) {
			struct irdma_gen_ae_info ae_info;

		case IRDMA_QP_STATE_RTS:
		case IRDMA_QP_STATE_IDLE:
		case IRDMA_QP_STATE_TERMINATE:
		case IRDMA_QP_STATE_CLOSING:
			if (info->curr_iwarp_state == IRDMA_QP_STATE_IDLE)
				irdma_send_reset(iwqp->cm_node);
			else
				iwqp->sc_qp.term_flags = IRDMA_TERM_DONE;
			if (!wait) {
				/* fire an async event to force a bad close */
				ae_info.ae_code = IRDMA_AE_BAD_CLOSE;
				ae_info.ae_src = 0;
				irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false);
			} else {
				/* retry the modify, forcing ERROR state and
				 * tearing down the TCP connection
				 */
				cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp,
									      wait);
				if (!cqp_request)
					return -ENOMEM;

				cqp_info = &cqp_request->info;
				m_info = &cqp_info->in.u.qp_modify.info;
				memcpy(m_info, info, sizeof(*m_info));
				cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
				cqp_info->post_sq = 1;
				cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
				cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
				m_info->next_iwarp_state = IRDMA_QP_STATE_ERROR;
				m_info->reset_tcp_conn = true;
				irdma_handle_cqp_op(rf, cqp_request);
				irdma_put_cqp_request(&rf->cqp, cqp_request);
			}
			break;
		case IRDMA_QP_STATE_ERROR:
		default:
			break;
		}
	}

	return status;
}
999 
/**
 * irdma_cqp_cq_destroy_cmd - destroy the cqp cq
 * @dev: device pointer
 * @cq: pointer to cq
 *
 * Thin wrapper that resolves the PCI function and delegates to
 * irdma_cq_wq_destroy().
 */
void
irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
{
	irdma_cq_wq_destroy(dev_to_rf(dev), cq);
}
1012 
1013 /**
1014  * irdma_cqp_qp_destroy_cmd - destroy the cqp
1015  * @dev: device pointer
1016  * @qp: pointer to qp
1017  */
1018 int
1019 irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
1020 {
1021 	struct irdma_pci_f *rf = dev_to_rf(dev);
1022 	struct irdma_cqp *iwcqp = &rf->cqp;
1023 	struct irdma_cqp_request *cqp_request;
1024 	struct cqp_cmds_info *cqp_info;
1025 	int status;
1026 
1027 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
1028 	if (!cqp_request)
1029 		return -ENOMEM;
1030 
1031 	cqp_info = &cqp_request->info;
1032 	memset(cqp_info, 0, sizeof(*cqp_info));
1033 	cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
1034 	cqp_info->post_sq = 1;
1035 	cqp_info->in.u.qp_destroy.qp = qp;
1036 	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
1037 	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
1038 
1039 	status = irdma_handle_cqp_op(rf, cqp_request);
1040 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1041 
1042 	return status;
1043 }
1044 
1045 /**
1046  * irdma_ieq_mpa_crc_ae - generate AE for crc error
1047  * @dev: hardware control device structure
1048  * @qp: hardware control qp
1049  */
1050 void
1051 irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
1052 {
1053 	struct irdma_gen_ae_info info = {0};
1054 	struct irdma_pci_f *rf = dev_to_rf(dev);
1055 
1056 	irdma_debug(dev, IRDMA_DEBUG_AEQ, "Generate MPA CRC AE\n");
1057 	info.ae_code = IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR;
1058 	info.ae_src = IRDMA_AE_SOURCE_RQ;
1059 	irdma_gen_ae(rf, qp, &info, false);
1060 }
1061 
1062 /**
1063  * irdma_ieq_get_qp - get qp based on quad in puda buffer
1064  * @dev: hardware control device structure
1065  * @buf: receive puda buffer on exception q
1066  */
1067 struct irdma_sc_qp *
1068 irdma_ieq_get_qp(struct irdma_sc_dev *dev,
1069 		 struct irdma_puda_buf *buf)
1070 {
1071 	struct irdma_qp *iwqp;
1072 	struct irdma_cm_node *cm_node;
1073 	struct irdma_device *iwdev = buf->vsi->back_vsi;
1074 	u32 loc_addr[4] = {0};
1075 	u32 rem_addr[4] = {0};
1076 	u16 loc_port, rem_port;
1077 	struct ip6_hdr *ip6h;
1078 	struct ip *iph = (struct ip *)buf->iph;
1079 	struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
1080 
1081 	if (iph->ip_v == 4) {
1082 		loc_addr[0] = ntohl(iph->ip_dst.s_addr);
1083 		rem_addr[0] = ntohl(iph->ip_src.s_addr);
1084 	} else {
1085 		ip6h = (struct ip6_hdr *)buf->iph;
1086 		irdma_copy_ip_ntohl(loc_addr, ip6h->ip6_dst.__u6_addr.__u6_addr32);
1087 		irdma_copy_ip_ntohl(rem_addr, ip6h->ip6_src.__u6_addr.__u6_addr32);
1088 	}
1089 	loc_port = ntohs(tcph->th_dport);
1090 	rem_port = ntohs(tcph->th_sport);
1091 	cm_node = irdma_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
1092 				  loc_addr, buf->vlan_valid ? buf->vlan_id : 0xFFFF);
1093 	if (!cm_node)
1094 		return NULL;
1095 
1096 	iwqp = cm_node->iwqp;
1097 	irdma_rem_ref_cm_node(cm_node);
1098 
1099 	return &iwqp->sc_qp;
1100 }
1101 
1102 /**
1103  * irdma_send_ieq_ack - ACKs for duplicate or OOO partials FPDUs
1104  * @qp: qp ptr
1105  */
1106 void
1107 irdma_send_ieq_ack(struct irdma_sc_qp *qp)
1108 {
1109 	struct irdma_cm_node *cm_node = ((struct irdma_qp *)qp->qp_uk.back_qp)->cm_node;
1110 	struct irdma_puda_buf *buf = qp->pfpdu.lastrcv_buf;
1111 	struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
1112 
1113 	cm_node->tcp_cntxt.rcv_nxt = qp->pfpdu.nextseqnum;
1114 	cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->th_ack);
1115 
1116 	irdma_send_ack(cm_node);
1117 }
1118 
1119 /**
1120  * irdma_puda_ieq_get_ah_info - get AH info from IEQ buffer
1121  * @qp: qp pointer
1122  * @ah_info: AH info pointer
1123  */
1124 void
1125 irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
1126 			   struct irdma_ah_info *ah_info)
1127 {
1128 	struct irdma_puda_buf *buf = qp->pfpdu.ah_buf;
1129 	struct ip *iph;
1130 	struct ip6_hdr *ip6h;
1131 
1132 	memset(ah_info, 0, sizeof(*ah_info));
1133 	ah_info->do_lpbk = true;
1134 	ah_info->vlan_tag = buf->vlan_id;
1135 	ah_info->insert_vlan_tag = buf->vlan_valid;
1136 	ah_info->ipv4_valid = buf->ipv4;
1137 	ah_info->vsi = qp->vsi;
1138 
1139 	if (buf->smac_valid)
1140 		ether_addr_copy(ah_info->mac_addr, buf->smac);
1141 
1142 	if (buf->ipv4) {
1143 		ah_info->ipv4_valid = true;
1144 		iph = (struct ip *)buf->iph;
1145 		ah_info->hop_ttl = iph->ip_ttl;
1146 		ah_info->tc_tos = iph->ip_tos;
1147 		ah_info->dest_ip_addr[0] = ntohl(iph->ip_dst.s_addr);
1148 		ah_info->src_ip_addr[0] = ntohl(iph->ip_src.s_addr);
1149 	} else {
1150 		ip6h = (struct ip6_hdr *)buf->iph;
1151 		ah_info->hop_ttl = ip6h->ip6_hops;
1152 		ah_info->tc_tos = ip6h->ip6_vfc;
1153 		irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
1154 				    ip6h->ip6_dst.__u6_addr.__u6_addr32);
1155 		irdma_copy_ip_ntohl(ah_info->src_ip_addr,
1156 				    ip6h->ip6_src.__u6_addr.__u6_addr32);
1157 	}
1158 
1159 	ah_info->dst_arpindex = irdma_arp_table(dev_to_rf(qp->dev),
1160 						ah_info->dest_ip_addr,
1161 						NULL, IRDMA_ARP_RESOLVE);
1162 }
1163 
1164 /**
1165  * irdma_gen1_ieq_update_tcpip_info - update tcpip in the buffer
1166  * @buf: puda to update
1167  * @len: length of buffer
1168  * @seqnum: seq number for tcp
1169  */
1170 static void
1171 irdma_gen1_ieq_update_tcpip_info(struct irdma_puda_buf *buf,
1172 				 u16 len, u32 seqnum)
1173 {
1174 	struct tcphdr *tcph;
1175 	struct ip *iph;
1176 	u16 iphlen;
1177 	u16 pktsize;
1178 	u8 *addr = buf->mem.va;
1179 
1180 	iphlen = (buf->ipv4) ? 20 : 40;
1181 	iph = (struct ip *)(addr + buf->maclen);
1182 	tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
1183 	pktsize = len + buf->tcphlen + iphlen;
1184 	iph->ip_len = htons(pktsize);
1185 	tcph->th_seq = htonl(seqnum);
1186 }
1187 
1188 /**
1189  * irdma_ieq_update_tcpip_info - update tcpip in the buffer
1190  * @buf: puda to update
1191  * @len: length of buffer
1192  * @seqnum: seq number for tcp
1193  */
1194 void
1195 irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
1196 			    u32 seqnum)
1197 {
1198 	struct tcphdr *tcph;
1199 	u8 *addr;
1200 
1201 	if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1202 		return irdma_gen1_ieq_update_tcpip_info(buf, len, seqnum);
1203 
1204 	addr = buf->mem.va;
1205 	tcph = (struct tcphdr *)addr;
1206 	tcph->th_seq = htonl(seqnum);
1207 }
1208 
1209 /**
1210  * irdma_gen1_puda_get_tcpip_info - get tcpip info from puda
1211  * buffer
1212  * @info: to get information
1213  * @buf: puda buffer
1214  */
1215 static int
1216 irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
1217 			       struct irdma_puda_buf *buf)
1218 {
1219 	struct ip *iph;
1220 	struct ip6_hdr *ip6h;
1221 	struct tcphdr *tcph;
1222 	u16 iphlen;
1223 	u16 pkt_len;
1224 	u8 *mem = buf->mem.va;
1225 	struct ether_header *ethh = buf->mem.va;
1226 
1227 	if (ethh->ether_type == htons(0x8100)) {
1228 		info->vlan_valid = true;
1229 		buf->vlan_id = ntohs(((struct ether_vlan_header *)ethh)->evl_tag) &
1230 		    EVL_VLID_MASK;
1231 	}
1232 
1233 	buf->maclen = (info->vlan_valid) ? 18 : 14;
1234 	iphlen = (info->l3proto) ? 40 : 20;
1235 	buf->ipv4 = (info->l3proto) ? false : true;
1236 	buf->iph = mem + buf->maclen;
1237 	iph = (struct ip *)buf->iph;
1238 	buf->tcph = buf->iph + iphlen;
1239 	tcph = (struct tcphdr *)buf->tcph;
1240 
1241 	if (buf->ipv4) {
1242 		pkt_len = ntohs(iph->ip_len);
1243 	} else {
1244 		ip6h = (struct ip6_hdr *)buf->iph;
1245 		pkt_len = ntohs(ip6h->ip6_plen) + iphlen;
1246 	}
1247 
1248 	buf->totallen = pkt_len + buf->maclen;
1249 
1250 	if (info->payload_len < buf->totallen) {
1251 		irdma_debug(buf->vsi->dev, IRDMA_DEBUG_ERR,
1252 			    "payload_len = 0x%x totallen expected0x%x\n",
1253 			    info->payload_len, buf->totallen);
1254 		return -EINVAL;
1255 	}
1256 
1257 	buf->tcphlen = tcph->th_off << 2;
1258 	buf->datalen = pkt_len - iphlen - buf->tcphlen;
1259 	buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
1260 	buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
1261 	buf->seqnum = ntohl(tcph->th_seq);
1262 
1263 	return 0;
1264 }
1265 
1266 /**
1267  * irdma_puda_get_tcpip_info - get tcpip info from puda buffer
1268  * @info: to get information
1269  * @buf: puda buffer
1270  */
1271 int
1272 irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
1273 			  struct irdma_puda_buf *buf)
1274 {
1275 	struct tcphdr *tcph;
1276 	u32 pkt_len;
1277 	u8 *mem;
1278 
1279 	if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1280 		return irdma_gen1_puda_get_tcpip_info(info, buf);
1281 
1282 	mem = buf->mem.va;
1283 	buf->vlan_valid = info->vlan_valid;
1284 	if (info->vlan_valid)
1285 		buf->vlan_id = info->vlan;
1286 
1287 	buf->ipv4 = info->ipv4;
1288 	if (buf->ipv4)
1289 		buf->iph = mem + IRDMA_IPV4_PAD;
1290 	else
1291 		buf->iph = mem;
1292 
1293 	buf->tcph = mem + IRDMA_TCP_OFFSET;
1294 	tcph = (struct tcphdr *)buf->tcph;
1295 	pkt_len = info->payload_len;
1296 	buf->totallen = pkt_len;
1297 	buf->tcphlen = tcph->th_off << 2;
1298 	buf->datalen = pkt_len - IRDMA_TCP_OFFSET - buf->tcphlen;
1299 	buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
1300 	buf->hdrlen = IRDMA_TCP_OFFSET + buf->tcphlen;
1301 	buf->seqnum = ntohl(tcph->th_seq);
1302 
1303 	if (info->smac_valid) {
1304 		ether_addr_copy(buf->smac, info->smac);
1305 		buf->smac_valid = true;
1306 	}
1307 
1308 	return 0;
1309 }
1310 
1311 /**
1312  * irdma_hw_stats_timeout - Stats timer-handler which updates all HW stats
1313  * @t: timer_list pointer
1314  */
1315 static void
1316 irdma_hw_stats_timeout(struct timer_list *t)
1317 {
1318 	struct irdma_vsi_pestat *pf_devstat =
1319 	from_timer(pf_devstat, t, stats_timer);
1320 	struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;
1321 
1322 	if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1323 		irdma_cqp_gather_stats_cmd(sc_vsi->dev, sc_vsi->pestat, false);
1324 
1325 	mod_timer(&pf_devstat->stats_timer,
1326 		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
1327 }
1328 
1329 /**
1330  * irdma_hw_stats_start_timer - Start periodic stats timer
1331  * @vsi: vsi structure pointer
1332  */
1333 void
1334 irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi)
1335 {
1336 	struct irdma_vsi_pestat *devstat = vsi->pestat;
1337 
1338 	timer_setup(&devstat->stats_timer, irdma_hw_stats_timeout, 0);
1339 	mod_timer(&devstat->stats_timer,
1340 		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
1341 }
1342 
1343 /**
1344  * irdma_hw_stats_stop_timer - Delete periodic stats timer
1345  * @vsi: pointer to vsi structure
1346  */
1347 void
1348 irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi)
1349 {
1350 	struct irdma_vsi_pestat *devstat = vsi->pestat;
1351 
1352 	del_timer_sync(&devstat->stats_timer);
1353 }
1354 
1355 /**
1356  * irdma_process_stats - Checking for wrap and update stats
1357  * @pestat: stats structure pointer
1358  */
1359 static inline void
1360 irdma_process_stats(struct irdma_vsi_pestat *pestat)
1361 {
1362 	sc_vsi_update_stats(pestat->vsi);
1363 }
1364 
1365 /**
1366  * irdma_process_cqp_stats - Checking for wrap and update stats
1367  * @cqp_request: cqp_request structure pointer
1368  */
1369 static void
1370 irdma_process_cqp_stats(struct irdma_cqp_request *cqp_request)
1371 {
1372 	struct irdma_vsi_pestat *pestat = cqp_request->param;
1373 
1374 	irdma_process_stats(pestat);
1375 }
1376 
1377 /**
1378  * irdma_cqp_gather_stats_cmd - Gather stats
1379  * @dev: pointer to device structure
1380  * @pestat: pointer to stats info
1381  * @wait: flag to wait or not wait for stats
1382  */
1383 int
1384 irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
1385 			   struct irdma_vsi_pestat *pestat, bool wait)
1386 {
1387 
1388 	struct irdma_pci_f *rf = dev_to_rf(dev);
1389 	struct irdma_cqp *iwcqp = &rf->cqp;
1390 	struct irdma_cqp_request *cqp_request;
1391 	struct cqp_cmds_info *cqp_info;
1392 	int status;
1393 
1394 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
1395 	if (!cqp_request)
1396 		return -ENOMEM;
1397 
1398 	cqp_info = &cqp_request->info;
1399 	memset(cqp_info, 0, sizeof(*cqp_info));
1400 	cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
1401 	cqp_info->post_sq = 1;
1402 	cqp_info->in.u.stats_gather.info = pestat->gather_info;
1403 	cqp_info->in.u.stats_gather.scratch = (uintptr_t)cqp_request;
1404 	cqp_info->in.u.stats_gather.cqp = &rf->cqp.sc_cqp;
1405 	cqp_request->param = pestat;
1406 	if (!wait)
1407 		cqp_request->callback_fcn = irdma_process_cqp_stats;
1408 	status = irdma_handle_cqp_op(rf, cqp_request);
1409 	if (wait)
1410 		irdma_process_stats(pestat);
1411 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1412 
1413 	return status;
1414 }
1415 
1416 /**
1417  * irdma_cqp_stats_inst_cmd - Allocate/free stats instance
1418  * @vsi: pointer to vsi structure
1419  * @cmd: command to allocate or free
1420  * @stats_info: pointer to allocate stats info
1421  */
1422 int
1423 irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
1424 			 struct irdma_stats_inst_info *stats_info)
1425 {
1426 	struct irdma_pci_f *rf = dev_to_rf(vsi->dev);
1427 	struct irdma_cqp *iwcqp = &rf->cqp;
1428 	struct irdma_cqp_request *cqp_request;
1429 	struct cqp_cmds_info *cqp_info;
1430 	int status;
1431 	bool wait = false;
1432 
1433 	if (cmd == IRDMA_OP_STATS_ALLOCATE)
1434 		wait = true;
1435 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
1436 	if (!cqp_request)
1437 		return -ENOMEM;
1438 
1439 	cqp_info = &cqp_request->info;
1440 	memset(cqp_info, 0, sizeof(*cqp_info));
1441 	cqp_info->cqp_cmd = cmd;
1442 	cqp_info->post_sq = 1;
1443 	cqp_info->in.u.stats_manage.info = *stats_info;
1444 	cqp_info->in.u.stats_manage.scratch = (uintptr_t)cqp_request;
1445 	cqp_info->in.u.stats_manage.cqp = &rf->cqp.sc_cqp;
1446 	status = irdma_handle_cqp_op(rf, cqp_request);
1447 	if (wait)
1448 		stats_info->stats_idx = cqp_request->compl_info.op_ret_val;
1449 	irdma_put_cqp_request(iwcqp, cqp_request);
1450 
1451 	return status;
1452 }
1453 
1454 /**
1455  * irdma_cqp_ceq_cmd - Create/Destroy CEQ's after CEQ 0
1456  * @dev: pointer to device info
1457  * @sc_ceq: pointer to ceq structure
1458  * @op: Create or Destroy
1459  */
1460 int
1461 irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
1462 		  u8 op)
1463 {
1464 	struct irdma_cqp_request *cqp_request;
1465 	struct cqp_cmds_info *cqp_info;
1466 	struct irdma_pci_f *rf = dev_to_rf(dev);
1467 	int status;
1468 
1469 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1470 	if (!cqp_request)
1471 		return -ENOMEM;
1472 
1473 	cqp_info = &cqp_request->info;
1474 	cqp_info->post_sq = 1;
1475 	cqp_info->cqp_cmd = op;
1476 	cqp_info->in.u.ceq_create.ceq = sc_ceq;
1477 	cqp_info->in.u.ceq_create.scratch = (uintptr_t)cqp_request;
1478 
1479 	status = irdma_handle_cqp_op(rf, cqp_request);
1480 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1481 
1482 	return status;
1483 }
1484 
1485 /**
1486  * irdma_cqp_aeq_cmd - Create/Destroy AEQ
1487  * @dev: pointer to device info
1488  * @sc_aeq: pointer to aeq structure
1489  * @op: Create or Destroy
1490  */
1491 int
1492 irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
1493 		  u8 op)
1494 {
1495 	struct irdma_cqp_request *cqp_request;
1496 	struct cqp_cmds_info *cqp_info;
1497 	struct irdma_pci_f *rf = dev_to_rf(dev);
1498 	int status;
1499 
1500 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1501 	if (!cqp_request)
1502 		return -ENOMEM;
1503 
1504 	cqp_info = &cqp_request->info;
1505 	cqp_info->post_sq = 1;
1506 	cqp_info->cqp_cmd = op;
1507 	cqp_info->in.u.aeq_create.aeq = sc_aeq;
1508 	cqp_info->in.u.aeq_create.scratch = (uintptr_t)cqp_request;
1509 
1510 	status = irdma_handle_cqp_op(rf, cqp_request);
1511 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1512 
1513 	return status;
1514 }
1515 
1516 /**
1517  * irdma_cqp_ws_node_cmd - Add/modify/delete ws node
1518  * @dev: pointer to device structure
1519  * @cmd: Add, modify or delete
1520  * @node_info: pointer to ws node info
1521  */
1522 int
1523 irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
1524 		      struct irdma_ws_node_info *node_info)
1525 {
1526 	struct irdma_pci_f *rf = dev_to_rf(dev);
1527 	struct irdma_cqp *iwcqp = &rf->cqp;
1528 	struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
1529 	struct irdma_cqp_request *cqp_request;
1530 	struct cqp_cmds_info *cqp_info;
1531 	int status;
1532 	bool poll;
1533 
1534 	if (!rf->sc_dev.ceq_valid)
1535 		poll = true;
1536 	else
1537 		poll = false;
1538 
1539 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, !poll);
1540 	if (!cqp_request)
1541 		return -ENOMEM;
1542 
1543 	cqp_info = &cqp_request->info;
1544 	memset(cqp_info, 0, sizeof(*cqp_info));
1545 	cqp_info->cqp_cmd = cmd;
1546 	cqp_info->post_sq = 1;
1547 	cqp_info->in.u.ws_node.info = *node_info;
1548 	cqp_info->in.u.ws_node.cqp = cqp;
1549 	cqp_info->in.u.ws_node.scratch = (uintptr_t)cqp_request;
1550 	status = irdma_handle_cqp_op(rf, cqp_request);
1551 	if (status)
1552 		goto exit;
1553 
1554 	if (poll) {
1555 		struct irdma_ccq_cqe_info compl_info;
1556 
1557 		status = irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_WORK_SCHED_NODE,
1558 						       &compl_info);
1559 		node_info->qs_handle = compl_info.op_ret_val;
1560 		irdma_debug(cqp->dev, IRDMA_DEBUG_DCB,
1561 			    "opcode=%d, compl_info.retval=%d\n",
1562 			    compl_info.op_code, compl_info.op_ret_val);
1563 	} else {
1564 		node_info->qs_handle = cqp_request->compl_info.op_ret_val;
1565 	}
1566 
1567 exit:
1568 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1569 
1570 	return status;
1571 }
1572 
1573 /**
1574  * irdma_cqp_up_map_cmd - Set the up-up mapping
1575  * @dev: pointer to device structure
1576  * @cmd: map command
1577  * @map_info: pointer to up map info
1578  */
1579 int
1580 irdma_cqp_up_map_cmd(struct irdma_sc_dev *dev, u8 cmd,
1581 		     struct irdma_up_info *map_info)
1582 {
1583 	struct irdma_pci_f *rf = dev_to_rf(dev);
1584 	struct irdma_cqp *iwcqp = &rf->cqp;
1585 	struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
1586 	struct irdma_cqp_request *cqp_request;
1587 	struct cqp_cmds_info *cqp_info;
1588 	int status;
1589 
1590 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, false);
1591 	if (!cqp_request)
1592 		return -ENOMEM;
1593 
1594 	cqp_info = &cqp_request->info;
1595 	memset(cqp_info, 0, sizeof(*cqp_info));
1596 	cqp_info->cqp_cmd = cmd;
1597 	cqp_info->post_sq = 1;
1598 	cqp_info->in.u.up_map.info = *map_info;
1599 	cqp_info->in.u.up_map.cqp = cqp;
1600 	cqp_info->in.u.up_map.scratch = (uintptr_t)cqp_request;
1601 
1602 	status = irdma_handle_cqp_op(rf, cqp_request);
1603 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1604 
1605 	return status;
1606 }
1607 
1608 /**
1609  * irdma_ah_cqp_op - perform an AH cqp operation
1610  * @rf: RDMA PCI function
1611  * @sc_ah: address handle
1612  * @cmd: AH operation
1613  * @wait: wait if true
1614  * @callback_fcn: Callback function on CQP op completion
1615  * @cb_param: parameter for callback function
1616  *
1617  * returns errno
1618  */
1619 int
1620 irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
1621 		bool wait,
1622 		void (*callback_fcn) (struct irdma_cqp_request *),
1623 		void *cb_param)
1624 {
1625 	struct irdma_cqp_request *cqp_request;
1626 	struct cqp_cmds_info *cqp_info;
1627 	int status;
1628 
1629 	if (cmd != IRDMA_OP_AH_CREATE && cmd != IRDMA_OP_AH_DESTROY)
1630 		return -EINVAL;
1631 
1632 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
1633 	if (!cqp_request)
1634 		return -ENOMEM;
1635 
1636 	cqp_info = &cqp_request->info;
1637 	cqp_info->cqp_cmd = cmd;
1638 	cqp_info->post_sq = 1;
1639 	if (cmd == IRDMA_OP_AH_CREATE) {
1640 		cqp_info->in.u.ah_create.info = sc_ah->ah_info;
1641 		cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request;
1642 		cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp;
1643 	} else if (cmd == IRDMA_OP_AH_DESTROY) {
1644 		cqp_info->in.u.ah_destroy.info = sc_ah->ah_info;
1645 		cqp_info->in.u.ah_destroy.scratch = (uintptr_t)cqp_request;
1646 		cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp;
1647 	}
1648 
1649 	if (!wait) {
1650 		cqp_request->callback_fcn = callback_fcn;
1651 		cqp_request->param = cb_param;
1652 	}
1653 	status = irdma_handle_cqp_op(rf, cqp_request);
1654 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1655 
1656 	if (status)
1657 		return -ENOMEM;
1658 
1659 	if (wait)
1660 		sc_ah->ah_info.ah_valid = (cmd == IRDMA_OP_AH_CREATE);
1661 
1662 	return 0;
1663 }
1664 
1665 /**
1666  * irdma_ieq_ah_cb - callback after creation of AH for IEQ
1667  * @cqp_request: pointer to cqp_request of create AH
1668  */
1669 static void
1670 irdma_ieq_ah_cb(struct irdma_cqp_request *cqp_request)
1671 {
1672 	struct irdma_sc_qp *qp = cqp_request->param;
1673 	struct irdma_sc_ah *sc_ah = qp->pfpdu.ah;
1674 	unsigned long flags;
1675 
1676 	spin_lock_irqsave(&qp->pfpdu.lock, flags);
1677 	if (!cqp_request->compl_info.op_ret_val) {
1678 		sc_ah->ah_info.ah_valid = true;
1679 		irdma_ieq_process_fpdus(qp, qp->vsi->ieq);
1680 	} else {
1681 		sc_ah->ah_info.ah_valid = false;
1682 		irdma_ieq_cleanup_qp(qp->vsi->ieq, qp);
1683 	}
1684 	spin_unlock_irqrestore(&qp->pfpdu.lock, flags);
1685 }
1686 
1687 /**
1688  * irdma_ilq_ah_cb - callback after creation of AH for ILQ
1689  * @cqp_request: pointer to cqp_request of create AH
1690  */
1691 static void
1692 irdma_ilq_ah_cb(struct irdma_cqp_request *cqp_request)
1693 {
1694 	struct irdma_cm_node *cm_node = cqp_request->param;
1695 	struct irdma_sc_ah *sc_ah = cm_node->ah;
1696 
1697 	sc_ah->ah_info.ah_valid = !cqp_request->compl_info.op_ret_val;
1698 	irdma_add_conn_est_qh(cm_node);
1699 }
1700 
1701 /**
1702  * irdma_puda_create_ah - create AH for ILQ/IEQ qp's
1703  * @dev: device pointer
1704  * @ah_info: Address handle info
1705  * @wait: When true will wait for operation to complete
1706  * @type: ILQ/IEQ
1707  * @cb_param: Callback param when not waiting
1708  * @ah_ret: Returned pointer to address handle if created
1709  *
1710  */
1711 int
1712 irdma_puda_create_ah(struct irdma_sc_dev *dev,
1713 		     struct irdma_ah_info *ah_info, bool wait,
1714 		     enum puda_rsrc_type type, void *cb_param,
1715 		     struct irdma_sc_ah **ah_ret)
1716 {
1717 	struct irdma_sc_ah *ah;
1718 	struct irdma_pci_f *rf = dev_to_rf(dev);
1719 	int err;
1720 
1721 	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
1722 	*ah_ret = ah;
1723 	if (!ah)
1724 		return -ENOMEM;
1725 
1726 	err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah,
1727 			       &ah_info->ah_idx, &rf->next_ah);
1728 	if (err)
1729 		goto err_free;
1730 
1731 	ah->dev = dev;
1732 	ah->ah_info = *ah_info;
1733 
1734 	if (type == IRDMA_PUDA_RSRC_TYPE_ILQ)
1735 		err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
1736 				      irdma_ilq_ah_cb, cb_param);
1737 	else
1738 		err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
1739 				      irdma_ieq_ah_cb, cb_param);
1740 
1741 	if (err)
1742 		goto error;
1743 	return 0;
1744 
1745 error:
1746 	irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
1747 err_free:
1748 	kfree(ah);
1749 	*ah_ret = NULL;
1750 	return -ENOMEM;
1751 }
1752 
1753 /**
1754  * irdma_puda_free_ah - free a puda address handle
1755  * @dev: device pointer
1756  * @ah: The address handle to free
1757  */
1758 void
1759 irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
1760 {
1761 	struct irdma_pci_f *rf = dev_to_rf(dev);
1762 
1763 	if (!ah)
1764 		return;
1765 
1766 	if (ah->ah_info.ah_valid) {
1767 		irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_DESTROY, false, NULL, NULL);
1768 		irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
1769 	}
1770 
1771 	kfree(ah);
1772 }
1773 
1774 /**
1775  * irdma_gsi_ud_qp_ah_cb - callback after creation of AH for GSI/ID QP
1776  * @cqp_request: pointer to cqp_request of create AH
1777  */
1778 void
1779 irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request)
1780 {
1781 	struct irdma_sc_ah *sc_ah = cqp_request->param;
1782 
1783 	if (!cqp_request->compl_info.op_ret_val)
1784 		sc_ah->ah_info.ah_valid = true;
1785 	else
1786 		sc_ah->ah_info.ah_valid = false;
1787 }
1788 
1789 /**
1790  * irdma_prm_add_pble_mem - add moemory to pble resources
1791  * @pprm: pble resource manager
1792  * @pchunk: chunk of memory to add
1793  */
1794 int
1795 irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
1796 		       struct irdma_chunk *pchunk)
1797 {
1798 	u64 sizeofbitmap;
1799 
1800 	if (pchunk->size & 0xfff)
1801 		return -EINVAL;
1802 
1803 	sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
1804 
1805 	pchunk->bitmapmem.size = sizeofbitmap >> 3;
1806 	pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_ATOMIC);
1807 
1808 	if (!pchunk->bitmapmem.va)
1809 		return -ENOMEM;
1810 
1811 	pchunk->bitmapbuf = pchunk->bitmapmem.va;
1812 	bitmap_zero(pchunk->bitmapbuf, sizeofbitmap);
1813 
1814 	pchunk->sizeofbitmap = sizeofbitmap;
1815 	/* each pble is 8 bytes hence shift by 3 */
1816 	pprm->total_pble_alloc += pchunk->size >> 3;
1817 	pprm->free_pble_cnt += pchunk->size >> 3;
1818 
1819 	return 0;
1820 }
1821 
1822 /**
1823  * irdma_prm_get_pbles - get pble's from prm
1824  * @pprm: pble resource manager
1825  * @chunkinfo: nformation about chunk where pble's were acquired
1826  * @mem_size: size of pble memory needed
1827  * @vaddr: returns virtual address of pble memory
1828  * @fpm_addr: returns fpm address of pble memory
1829  */
1830 int
1831 irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
1832 		    struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
1833 		    u64 **vaddr, u64 *fpm_addr)
1834 {
1835 	u64 bits_needed;
1836 	u64 bit_idx = PBLE_INVALID_IDX;
1837 	struct irdma_chunk *pchunk = NULL;
1838 	struct list_head *chunk_entry = (&pprm->clist)->next;
1839 	u32 offset;
1840 	unsigned long flags;
1841 	*vaddr = NULL;
1842 	*fpm_addr = 0;
1843 
1844 	bits_needed = DIV_ROUND_UP_ULL(mem_size, BIT_ULL(pprm->pble_shift));
1845 
1846 	spin_lock_irqsave(&pprm->prm_lock, flags);
1847 	while (chunk_entry != &pprm->clist) {
1848 		pchunk = (struct irdma_chunk *)chunk_entry;
1849 		bit_idx = bitmap_find_next_zero_area(pchunk->bitmapbuf,
1850 						     pchunk->sizeofbitmap, 0,
1851 						     bits_needed, 0);
1852 		if (bit_idx < pchunk->sizeofbitmap)
1853 			break;
1854 
1855 		/* list.next used macro */
1856 		chunk_entry = (&pchunk->list)->next;
1857 	}
1858 
1859 	if (!pchunk || bit_idx >= pchunk->sizeofbitmap) {
1860 		spin_unlock_irqrestore(&pprm->prm_lock, flags);
1861 		return -ENOMEM;
1862 	}
1863 
1864 	bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed);
1865 	offset = bit_idx << pprm->pble_shift;
1866 	*vaddr = (u64 *)((u8 *)pchunk->vaddr + offset);
1867 	*fpm_addr = pchunk->fpm_addr + offset;
1868 
1869 	chunkinfo->pchunk = pchunk;
1870 	chunkinfo->bit_idx = bit_idx;
1871 	chunkinfo->bits_used = bits_needed;
1872 	/* 3 is sizeof pble divide */
1873 	pprm->free_pble_cnt -= chunkinfo->bits_used << (pprm->pble_shift - 3);
1874 	spin_unlock_irqrestore(&pprm->prm_lock, flags);
1875 
1876 	return 0;
1877 }
1878 
1879 /**
1880  * irdma_prm_return_pbles - return pbles back to prm
1881  * @pprm: pble resource manager
1882  * @chunkinfo: chunk where pble's were acquired and to be freed
1883  */
1884 void
1885 irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
1886 		       struct irdma_pble_chunkinfo *chunkinfo)
1887 {
1888 	unsigned long flags;
1889 
1890 	spin_lock_irqsave(&pprm->prm_lock, flags);
1891 	pprm->free_pble_cnt += chunkinfo->bits_used << (pprm->pble_shift - 3);
1892 	bitmap_clear(chunkinfo->pchunk->bitmapbuf, chunkinfo->bit_idx,
1893 		     chunkinfo->bits_used);
1894 	spin_unlock_irqrestore(&pprm->prm_lock, flags);
1895 }
1896 
1897 int
1898 irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t * pg_dma,
1899 		       u32 pg_cnt)
1900 {
1901 	struct page *vm_page;
1902 	int i;
1903 	u8 *addr;
1904 
1905 	addr = (u8 *)(uintptr_t)va;
1906 	for (i = 0; i < pg_cnt; i++) {
1907 		vm_page = vmalloc_to_page(addr);
1908 		if (!vm_page)
1909 			goto err;
1910 
1911 		pg_dma[i] = dma_map_page(hw_to_dev(hw), vm_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
1912 		if (dma_mapping_error(hw_to_dev(hw), pg_dma[i]))
1913 			goto err;
1914 
1915 		addr += PAGE_SIZE;
1916 	}
1917 
1918 	return 0;
1919 
1920 err:
1921 	irdma_unmap_vm_page_list(hw, pg_dma, i);
1922 	return -ENOMEM;
1923 }
1924 
1925 void
1926 irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t * pg_dma, u32 pg_cnt)
1927 {
1928 	int i;
1929 
1930 	for (i = 0; i < pg_cnt; i++)
1931 		dma_unmap_page(hw_to_dev(hw), pg_dma[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
1932 }
1933 
1934 /**
1935  * irdma_pble_free_paged_mem - free virtual paged memory
1936  * @chunk: chunk to free with paged memory
1937  */
1938 void
1939 irdma_pble_free_paged_mem(struct irdma_chunk *chunk)
1940 {
1941 	if (!chunk->pg_cnt)
1942 		goto done;
1943 
1944 	irdma_unmap_vm_page_list(chunk->dev->hw, chunk->dmainfo.dmaaddrs,
1945 				 chunk->pg_cnt);
1946 
1947 done:
1948 	kfree(chunk->dmainfo.dmaaddrs);
1949 	chunk->dmainfo.dmaaddrs = NULL;
1950 	vfree(chunk->vaddr);
1951 	chunk->vaddr = NULL;
1952 	chunk->type = 0;
1953 }
1954 
1955 /**
1956  * irdma_pble_get_paged_mem -allocate paged memory for pbles
1957  * @chunk: chunk to add for paged memory
1958  * @pg_cnt: number of pages needed
1959  */
1960 int
1961 irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt)
1962 {
1963 	u32 size;
1964 	void *va;
1965 
1966 	chunk->dmainfo.dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
1967 	if (!chunk->dmainfo.dmaaddrs)
1968 		return -ENOMEM;
1969 
1970 	size = PAGE_SIZE * pg_cnt;
1971 	va = vmalloc(size);
1972 	if (!va)
1973 		goto err;
1974 
1975 	if (irdma_map_vm_page_list(chunk->dev->hw, va, chunk->dmainfo.dmaaddrs,
1976 				   pg_cnt)) {
1977 		vfree(va);
1978 		goto err;
1979 	}
1980 	chunk->vaddr = va;
1981 	chunk->size = size;
1982 	chunk->pg_cnt = pg_cnt;
1983 	chunk->type = PBLE_SD_PAGED;
1984 
1985 	return 0;
1986 err:
1987 	kfree(chunk->dmainfo.dmaaddrs);
1988 	chunk->dmainfo.dmaaddrs = NULL;
1989 
1990 	return -ENOMEM;
1991 }
1992 
1993 /**
1994  * irdma_alloc_ws_node_id - Allocate a tx scheduler node ID
1995  * @dev: device pointer
1996  */
1997 u16
1998 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev)
1999 {
2000 	struct irdma_pci_f *rf = dev_to_rf(dev);
2001 	u32 next = 1;
2002 	u32 node_id;
2003 
2004 	if (irdma_alloc_rsrc(rf, rf->allocated_ws_nodes, rf->max_ws_node_id,
2005 			     &node_id, &next))
2006 		return IRDMA_WS_NODE_INVALID;
2007 
2008 	return (u16)node_id;
2009 }
2010 
2011 /**
2012  * irdma_free_ws_node_id - Free a tx scheduler node ID
2013  * @dev: device pointer
2014  * @node_id: Work scheduler node ID
2015  */
2016 void
2017 irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id)
2018 {
2019 	struct irdma_pci_f *rf = dev_to_rf(dev);
2020 
2021 	irdma_free_rsrc(rf, rf->allocated_ws_nodes, (u32)node_id);
2022 }
2023 
2024 /**
2025  * irdma_modify_qp_to_err - Modify a QP to error
2026  * @sc_qp: qp structure
2027  */
2028 void
2029 irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
2030 {
2031 	struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
2032 	struct ib_qp_attr attr;
2033 
2034 	if (qp->iwdev->rf->reset)
2035 		return;
2036 	attr.qp_state = IB_QPS_ERR;
2037 
2038 	if (rdma_protocol_roce(qp->ibqp.device, 1))
2039 		irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
2040 	else
2041 		irdma_modify_qp(&qp->ibqp, &attr, IB_QP_STATE, NULL);
2042 }
2043 
2044 void
2045 irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
2046 {
2047 	struct ib_event ibevent;
2048 
2049 	if (!iwqp->ibqp.event_handler)
2050 		return;
2051 
2052 	switch (event) {
2053 	case IRDMA_QP_EVENT_CATASTROPHIC:
2054 		ibevent.event = IB_EVENT_QP_FATAL;
2055 		break;
2056 	case IRDMA_QP_EVENT_ACCESS_ERR:
2057 		ibevent.event = IB_EVENT_QP_ACCESS_ERR;
2058 		break;
2059 	}
2060 	ibevent.device = iwqp->ibqp.device;
2061 	ibevent.element.qp = &iwqp->ibqp;
2062 	iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
2063 }
2064 
/*
 * clear_qp_ctx_addr - scrub address fields from a raw QP context before
 * it is dumped to the debug log.
 *
 * NOTE(review): the byte offsets (272, 296, 312, 368) and the bit
 * ranges preserved by each mask appear to follow the hardware QP
 * context layout — confirm against the irdma context definitions
 * before changing any of them.
 */
static void
clear_qp_ctx_addr(__le64 * ctx)
{
	u64 tmp;

	/* offset 272: keep only bits 63:58, zero the rest */
	get_64bit_val(ctx, 272, &tmp);
	tmp &= GENMASK_ULL(63, 58);
	set_64bit_val(ctx, 272, tmp);

	/* offset 296: keep only bits 7:0 */
	get_64bit_val(ctx, 296, &tmp);
	tmp &= GENMASK_ULL(7, 0);
	set_64bit_val(ctx, 296, tmp);

	/* offset 312: keep only bits 7:0 */
	get_64bit_val(ctx, 312, &tmp);
	tmp &= GENMASK_ULL(7, 0);
	set_64bit_val(ctx, 312, tmp);

	/* offset 368: clear entirely */
	set_64bit_val(ctx, 368, 0);
}
2084 
2085 /**
2086  * irdma_upload_qp_context - upload raw QP context
2087  * @iwqp: QP pointer
2088  * @freeze: freeze QP
2089  * @raw: raw context flag
2090  */
2091 int
2092 irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw)
2093 {
2094 	struct irdma_dma_mem dma_mem;
2095 	struct irdma_sc_dev *dev;
2096 	struct irdma_sc_qp *qp;
2097 	struct irdma_cqp *iwcqp;
2098 	struct irdma_cqp_request *cqp_request;
2099 	struct cqp_cmds_info *cqp_info;
2100 	struct irdma_upload_context_info *info;
2101 	struct irdma_pci_f *rf;
2102 	int ret;
2103 	u32 *ctx;
2104 
2105 	rf = iwqp->iwdev->rf;
2106 	if (!rf)
2107 		return -EINVAL;
2108 
2109 	qp = &iwqp->sc_qp;
2110 	dev = &rf->sc_dev;
2111 	iwcqp = &rf->cqp;
2112 
2113 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2114 	if (!cqp_request)
2115 		return -EINVAL;
2116 
2117 	cqp_info = &cqp_request->info;
2118 	info = &cqp_info->in.u.qp_upload_context.info;
2119 	memset(info, 0, sizeof(struct irdma_upload_context_info));
2120 	cqp_info->cqp_cmd = IRDMA_OP_QP_UPLOAD_CONTEXT;
2121 	cqp_info->post_sq = 1;
2122 	cqp_info->in.u.qp_upload_context.dev = dev;
2123 	cqp_info->in.u.qp_upload_context.scratch = (uintptr_t)cqp_request;
2124 
2125 	dma_mem.size = PAGE_SIZE;
2126 	dma_mem.va = irdma_allocate_dma_mem(dev->hw, &dma_mem, dma_mem.size, PAGE_SIZE);
2127 	if (!dma_mem.va) {
2128 		irdma_put_cqp_request(&rf->cqp, cqp_request);
2129 		return -ENOMEM;
2130 	}
2131 
2132 	ctx = dma_mem.va;
2133 	info->buf_pa = dma_mem.pa;
2134 	info->raw_format = raw;
2135 	info->freeze_qp = freeze;
2136 	info->qp_type = qp->qp_uk.qp_type;	/* 1 is iWARP and 2 UDA */
2137 	info->qp_id = qp->qp_uk.qp_id;
2138 	ret = irdma_handle_cqp_op(rf, cqp_request);
2139 	if (ret)
2140 		goto error;
2141 
2142 	irdma_debug(dev, IRDMA_DEBUG_QP, "PRINT CONTXT QP [%d]\n", info->qp_id);
2143 	{
2144 		u32 i, j;
2145 
2146 		clear_qp_ctx_addr(dma_mem.va);
2147 		for (i = 0, j = 0; i < 32; i++, j += 4)
2148 			irdma_debug(dev, IRDMA_DEBUG_QP,
2149 				    "%d:\t [%08X %08x %08X %08X]\n",
2150 				    (j * 4), ctx[j], ctx[j + 1], ctx[j + 2],
2151 				    ctx[j + 3]);
2152 	}
2153 error:
2154 	irdma_put_cqp_request(iwcqp, cqp_request);
2155 	irdma_free_dma_mem(dev->hw, &dma_mem);
2156 
2157 	return ret;
2158 }
2159 
2160 bool
2161 irdma_cq_empty(struct irdma_cq *iwcq)
2162 {
2163 	struct irdma_cq_uk *ukcq;
2164 	u64 qword3;
2165 	__le64 *cqe;
2166 	u8 polarity;
2167 
2168 	ukcq = &iwcq->sc_cq.cq_uk;
2169 	cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
2170 	get_64bit_val(cqe, 24, &qword3);
2171 	polarity = (u8)RS_64(qword3, IRDMA_CQ_VALID);
2172 
2173 	return polarity != ukcq->polarity;
2174 }
2175 
2176 void
2177 irdma_remove_cmpls_list(struct irdma_cq *iwcq)
2178 {
2179 	struct irdma_cmpl_gen *cmpl_node;
2180 	struct list_head *tmp_node, *list_node;
2181 
2182 	list_for_each_safe(list_node, tmp_node, &iwcq->cmpl_generated) {
2183 		cmpl_node = list_entry(list_node, struct irdma_cmpl_gen, list);
2184 		list_del(&cmpl_node->list);
2185 		kfree(cmpl_node);
2186 	}
2187 }
2188 
2189 int
2190 irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info)
2191 {
2192 	struct irdma_cmpl_gen *cmpl;
2193 
2194 	if (!iwcq || list_empty(&iwcq->cmpl_generated))
2195 		return -ENOENT;
2196 	cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, struct irdma_cmpl_gen, list);
2197 	list_del(&cmpl->list);
2198 	memcpy(cq_poll_info, &cmpl->cpi, sizeof(*cq_poll_info));
2199 	kfree(cmpl);
2200 
2201 	irdma_debug(iwcq->sc_cq.dev, IRDMA_DEBUG_VERBS,
2202 		    "%s: Poll artificially generated completion for QP 0x%X, op %u, wr_id=0x%lx\n",
2203 		    __func__, cq_poll_info->qp_id, cq_poll_info->op_type, cq_poll_info->wr_id);
2204 
2205 	return 0;
2206 }
2207 
2208 /**
2209  * irdma_set_cpi_common_values - fill in values for polling info struct
2210  * @cpi: resulting structure of cq_poll_info type
2211  * @qp: QPair
2212  * @qp_num: id of the QP
2213  */
2214 static void
2215 irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi,
2216 			    struct irdma_qp_uk *qp, u32 qp_num)
2217 {
2218 	cpi->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
2219 	cpi->error = 1;
2220 	cpi->major_err = IRDMA_FLUSH_MAJOR_ERR;
2221 	cpi->minor_err = FLUSH_GENERAL_ERR;
2222 	cpi->qp_handle = (irdma_qp_handle) (uintptr_t)qp;
2223 	cpi->qp_id = qp_num;
2224 }
2225 
2226 static inline void
2227 irdma_comp_handler(struct irdma_cq *cq)
2228 {
2229 	if (cq->sc_cq.cq_uk.armed && cq->ibcq.comp_handler)
2230 		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
2231 }
2232 
2233 /**
2234  * irdma_generate_flush_completions - generate completion from WRs
2235  * @iwqp: pointer to QP
2236  */
2237 void
2238 irdma_generate_flush_completions(struct irdma_qp *iwqp)
2239 {
2240 	struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
2241 	struct irdma_ring *sq_ring = &qp->sq_ring;
2242 	struct irdma_ring *rq_ring = &qp->rq_ring;
2243 	struct irdma_cmpl_gen *cmpl;
2244 	__le64 *sw_wqe;
2245 	u64 wqe_qword;
2246 	u32 wqe_idx;
2247 	u8 compl_generated = 0;
2248 	unsigned long flags;
2249 
2250 #define SQ_COMPL_GENERATED (0x01)
2251 #define RQ_COMPL_GENERATED (0x02)
2252 
2253 	spin_lock_irqsave(&iwqp->iwscq->lock, flags);
2254 	if (irdma_cq_empty(iwqp->iwscq)) {
2255 		while (IRDMA_RING_MORE_WORK(*sq_ring)) {
2256 			cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
2257 			if (!cmpl) {
2258 				spin_unlock_irqrestore(&iwqp->iwscq->lock, flags);
2259 				return;
2260 			}
2261 
2262 			wqe_idx = sq_ring->tail;
2263 			irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);
2264 
2265 			cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
2266 			cmpl->cpi.signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
2267 			sw_wqe = qp->sq_base[wqe_idx].elem;
2268 			get_64bit_val(sw_wqe, IRDMA_BYTE_24, &wqe_qword);
2269 			cmpl->cpi.op_type = (u8)RS_64(wqe_qword, IRDMAQPSQ_OPCODE);
2270 			/* remove the SQ WR by moving SQ tail */
2271 			IRDMA_RING_SET_TAIL(*sq_ring, sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
2272 
2273 			irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_DEV,
2274 				    "%s: adding wr_id = 0x%lx SQ Completion to list qp_id=%d\n", __func__, cmpl->cpi.wr_id, qp->qp_id);
2275 			list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
2276 			compl_generated |= SQ_COMPL_GENERATED;
2277 		}
2278 	} else {
2279 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, IRDMA_FLUSH_DELAY_MS / 2);
2280 	}
2281 	spin_unlock_irqrestore(&iwqp->iwscq->lock, flags);
2282 
2283 	spin_lock_irqsave(&iwqp->iwrcq->lock, flags);
2284 	if (irdma_cq_empty(iwqp->iwrcq)) {
2285 		while (IRDMA_RING_MORE_WORK(*rq_ring)) {
2286 			cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
2287 			if (!cmpl) {
2288 				spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags);
2289 				return;
2290 			}
2291 
2292 			wqe_idx = rq_ring->tail;
2293 			irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);
2294 
2295 			cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx];
2296 			cmpl->cpi.signaled = 1;
2297 			cmpl->cpi.op_type = IRDMA_OP_TYPE_REC;
2298 			/* remove the RQ WR by moving RQ tail */
2299 			IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
2300 			irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_DEV,
2301 				    "%s: adding wr_id = 0x%lx RQ Completion to list qp_id=%d, wqe_idx=%d\n",
2302 				    __func__, cmpl->cpi.wr_id, qp->qp_id, wqe_idx);
2303 			list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated);
2304 
2305 			compl_generated |= RQ_COMPL_GENERATED;
2306 		}
2307 	} else {
2308 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, IRDMA_FLUSH_DELAY_MS / 2);
2309 	}
2310 	spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags);
2311 
2312 	if (iwqp->iwscq == iwqp->iwrcq) {
2313 		if (compl_generated)
2314 			irdma_comp_handler(iwqp->iwscq);
2315 		return;
2316 	}
2317 	if (compl_generated & SQ_COMPL_GENERATED)
2318 		irdma_comp_handler(iwqp->iwscq);
2319 	if (compl_generated & RQ_COMPL_GENERATED)
2320 		irdma_comp_handler(iwqp->iwrcq);
2321 	if (compl_generated)
2322 		irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_VERBS,
2323 			    "0x%X (SQ 0x1, RQ 0x2, both 0x3) completions generated for QP %d\n",
2324 			    compl_generated, iwqp->ibqp.qp_num);
2325 }
2326