xref: /freebsd/sys/dev/irdma/irdma_uk.c (revision e0c4386e)
/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2023 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "osdep.h"
#include "irdma_defs.h"
#include "irdma_user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void
irdma_set_fragment(__le64 * wqe, u32 offset, struct irdma_sge *sge,
		   u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid));
	}
}

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void
irdma_set_fragment_gen_1(__le64 * wqe, u32 offset,
			 struct irdma_sge *sge, u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + IRDMA_BYTE_8, 0);
	}
}

/**
 * irdma_nop_hdr - format header section of a NOP WQE
 * @qp: hw qp ptr
 */
static inline u64 irdma_nop_hdr(struct irdma_qp_uk *qp) {
	return FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, false) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
}

/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static int
irdma_nop_1(struct irdma_qp_uk *qp)
{
	__le64 *wqe;
	u32 wqe_idx;

	if (!qp->sq_ring.head)
		return -EINVAL;

	wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;

	qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;

	set_64bit_val(wqe, IRDMA_BYTE_0, 0);
	set_64bit_val(wqe, IRDMA_BYTE_8, 0);
	set_64bit_val(wqe, IRDMA_BYTE_16, 0);

	/* make sure WQE is written before valid bit is set */
	irdma_wmb();

	set_64bit_val(wqe, IRDMA_BYTE_24, irdma_nop_hdr(qp));

	return 0;
}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: wqe_idx
 */
void
irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
	__le64 *wqe;
	u32 wqe_idx;

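	/*
	 * The SQ is cleared one 128-WQE (4 KB) chunk ahead of the producer
	 * index.  Each chunk is pre-filled with the inverse of the valid-bit
	 * polarity that will be in effect when it is reached (the polarity
	 * flips at the ring wrap), so stale entries are never seen as valid.
	 */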
	if (!(qp_wqe_idx & 0x7F)) {
		wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
		wqe = qp->sq_base[wqe_idx].elem;
		if (wqe_idx)
			memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
		else
			memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
	}
}

/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void
irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	/* valid bit is written and loads completed before reading shadow */
	irdma_mb();

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, IRDMA_BYTE_0, &temp);

	hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
	sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
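	/*
	 * Ring the doorbell only if new WQEs were posted and the hardware
	 * has not already consumed past them.  The two inner branches
	 * handle the cases where the software head has and has not wrapped
	 * around the ring since the last doorbell.
	 */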
	if (sw_sq_head != qp->initial_ring.head) {
		if (qp->push_dropped) {
			db_wr32(qp->qp_id, qp->wqe_alloc_db);
			qp->push_dropped = false;
		} else if (sw_sq_head != hw_sq_tail) {
			if (sw_sq_head > qp->initial_ring.head) {
				if (hw_sq_tail >= qp->initial_ring.head &&
				    hw_sq_tail < sw_sq_head)
					db_wr32(qp->qp_id, qp->wqe_alloc_db);
			} else {
				if (hw_sq_tail >= qp->initial_ring.head ||
				    hw_sq_tail < sw_sq_head)
					db_wr32(qp->qp_id, qp->wqe_alloc_db);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}

/**
 * irdma_qp_ring_push_db -  ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void
irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
{
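	/* wqe_idx >> 3: the push doorbell is addressed in 256-byte (8-quanta) push page units */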
	set_32bit_val(qp->push_db, 0,
		      FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
	qp->initial_ring.head = qp->sq_ring.head;
	qp->push_mode = true;
	qp->push_dropped = false;
}

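/**
 * irdma_qp_push_wqe - copy WQE to push page and ring push doorbell
 * @qp: hw qp ptr
 * @wqe: wqe to copy
 * @quanta: size of WQE in quanta
 * @wqe_idx: wqe index
 * @post_sq: flag to post sq
 */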
void
irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 * wqe, u16 quanta,
		  u32 wqe_idx, bool post_sq)
{
	__le64 *push;

	if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
	    IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
	    !qp->push_mode) {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	} else {
		push = (__le64 *) ((uintptr_t)qp->push_wqe +
				   (wqe_idx & 0x7) * 0x20);
		irdma_memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
		irdma_qp_ring_push_db(qp, wqe_idx);
	}
}

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: (in/out) ptr to size of WR in quanta. Modified if padding is needed
 * @total_size: size of WR in bytes
 * @info: info on WR
 */
__le64 *
irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
			   u16 *quanta, u32 total_size,
			   struct irdma_post_sq_info *info)
{
	__le64 *wqe;
	__le64 *wqe_0 = NULL;
	u32 nop_wqe_idx;
	u16 avail_quanta, wqe_quanta = *quanta;
	u16 i;

	avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
	     qp->uk_attrs->max_hw_sq_chunk);

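	/*
	 * avail_quanta is the room left in the current hardware SQ chunk.
	 * A WQE may not straddle a max_hw_sq_chunk boundary, so when the
	 * WR does not fit, the rest of the chunk is padded with NOPs.
	 */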
	if (*quanta <= avail_quanta) {
		/* WR fits in current chunk */
		if (*quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;
	} else {
		/* Need to pad with NOP */
		if (*quanta + avail_quanta >
		    IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;

		nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
		for (i = 0; i < avail_quanta; i++) {
			irdma_nop_1(qp);
			IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
		}
		if (qp->push_db && info->push_wqe)
			irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
					  avail_quanta, nop_wqe_idx, true);
	}

	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;

	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, *quanta);

	irdma_clr_wqes(qp, *wqe_idx);

	wqe = qp->sq_base[*wqe_idx].elem;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && wqe_quanta == 1 &&
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
		wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
		wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID,
						  qp->swqe_polarity ? 0 : 1));
	}
	qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
	qp->sq_wrtrk_array[*wqe_idx].quanta = wqe_quanta;
	qp->sq_wrtrk_array[*wqe_idx].signaled = info->signaled;

	return wqe;
}

/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *
irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
	__le64 *wqe;
	int ret_code;

	if (IRDMA_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;

	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is the number of 32-byte quanta in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;

	return wqe;
}

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
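 *
 * A minimal caller sketch (illustrative only; wr_id, sge and the remote
 * address values are placeholders, field names are from irdma_user.h):
 *	struct irdma_post_sq_info info = {0};
 *	info.wr_id = wr_id;
 *	info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
 *	info.signaled = true;
 *	info.op.rdma_write.lo_sg_list = &sge;
 *	info.op.rdma_write.num_lo_sges = 1;
 *	info.op.rdma_write.rem_addr.tag_off = remote_va;
 *	info.op.rdma_write.rem_addr.stag = remote_stag;
 *	err = irdma_uk_rdma_write(qp, &info, true);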
 */
int
irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		    bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	read_fence |= info->read_fence;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_lo_sges + 1;
	else
		frag_cnt = op_info->num_lo_sges;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return -ENOSPC;

	qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));

	if (info->imm_data_valid) {
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0,
					    op_info->lo_sg_list,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = IRDMA_BYTE_32; i < op_info->num_lo_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if the fragment count is even, set the valid bit in the next (unused) fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
	    FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
int
irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		   bool inv_stag, bool post_sq)
{
	struct irdma_rdma_read *op_info;
	int ret_code;
	u32 i, byte_off, total_size = 0;
	bool local_fence = false;
	bool ord_fence = false;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u32 wqe_idx;
	u16 quanta;
	u64 hdr;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_read;
	if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return -ENOSPC;

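	/* force a read fence after every rd_fence_rate RDMA reads */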
	if (qp->rd_fence_rate && (qp->ord_cnt++ == qp->rd_fence_rate)) {
		ord_fence = true;
		qp->ord_cnt = 0;
	}

	qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
	addl_frag_cnt = op_info->num_lo_sges > 1 ?
	    (op_info->num_lo_sges - 1) : 0;
	local_fence |= info->local_fence;

	qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, op_info->lo_sg_list,
				    qp->swqe_polarity);
	for (i = 1, byte_off = IRDMA_BYTE_32; i < op_info->num_lo_sges; ++i) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if the fragment count is even, set the valid bit in the next (unused) fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
	    !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE,
		       (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE,
		       info->read_fence || ord_fence ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
	      bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 i, wqe_idx, total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].len;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_sges + 1;
	else
		frag_cnt = op_info->num_sges;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return -ENOSPC;

	read_fence |= info->read_fence;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	if (info->imm_data_valid) {
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0,
					    frag_cnt ? op_info->sg_list : NULL,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = IRDMA_BYTE_32; i < op_info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
					    qp->swqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if the fragment count is even, set the valid bit in the next (unused) fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	    FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
		       (info->imm_data_valid ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	    FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: compatibility parameter (unused for GEN_1)
 */
static void
irdma_copy_inline_data_gen_1(u8 *wqe, struct irdma_sge *sge_list,
			     u32 num_sges, u8 polarity)
{
	u32 quanta_bytes_remaining = 16;
	u32 i;

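	/*
	 * GEN_1 layout: the first quanta carries 16 bytes of data ahead of
	 * the header quadwords at offsets 16-31; every later 32-byte quanta
	 * is entirely data, hence the 16 -> 32 reset below.
	 */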
	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
		u32 sge_len = sge_list[i].len;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			irdma_memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				/* Remaining inline bytes reside after hdr */
				wqe += 16;
				quanta_bytes_remaining = 32;
			}
		}
	}
}

/**
 * irdma_inline_data_size_to_quanta_gen_1 - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size) {
	return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGE's
 * @polarity: polarity of wqe valid bit
 */
static void
irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list,
		       u32 num_sges, u8 polarity)
{
	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
	u32 quanta_bytes_remaining = 8;
	u32 i;
	bool first_quanta = true;

	wqe += 8;

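	/*
	 * GEN_2 layout: the first quanta carries 8 data bytes at offsets
	 * 8-15, after the first header quadword; each later 32-byte quanta
	 * holds 31 data bytes plus a trailing per-quanta valid byte.
	 */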
	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
		u32 sge_len = sge_list[i].len;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			irdma_memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				quanta_bytes_remaining = 31;

				/* Remaining inline bytes reside after hdr */
				if (first_quanta) {
					first_quanta = false;
					wqe += 16;
				} else {
					*wqe = inline_valid;
					wqe++;
				}
			}
		}
	}
	if (!first_quanta && quanta_bytes_remaining < 31)
		*(wqe + quanta_bytes_remaining) = inline_valid;
}

/**
 * irdma_inline_data_size_to_quanta - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
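 *
 * The thresholds follow from the GEN_2 inline layout above: the first
 * quanta holds 8 data bytes and each additional quanta holds 31, i.e.
 * capacity = 8 + 31 * (n - 1).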
 */
static u16 irdma_inline_data_size_to_quanta(u32 data_size) {
	if (data_size <= 8)
		return IRDMA_QP_WQE_MIN_QUANTA;
	else if (data_size <= 39)
		return 2;
	else if (data_size <= 70)
		return 3;
	else if (data_size <= 101)
		return 4;
	else if (data_size <= 132)
		return 5;
	else if (data_size <= 163)
		return 6;
	else if (data_size <= 194)
		return 7;
	else
		return 8;
}

/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
			   struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u64 hdr = 0;
	u32 wqe_idx;
	bool read_fence = false;
	u16 quanta;
	u32 i, total_size = 0;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.rdma_write;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	if (unlikely(total_size > qp->max_inline_data))
		return -EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return -ENOSPC;

	qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
	read_fence |= info->read_fence;
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));

	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
					op_info->num_lo_sges, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_inline_send(struct irdma_qp_uk *qp,
		     struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool read_fence = false;
	u16 quanta;
	u32 i, total_size = 0;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.send;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
		return -EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].len;

	if (unlikely(total_size > qp->max_inline_data))
		return -EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));

	read_fence |= info->read_fence;
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	    FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
		       (info->imm_data_valid ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	    FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
					op_info->num_sges, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info,
			       bool post_sq)
{
	__le64 *wqe;
	struct irdma_inv_local_stag *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool local_fence = false;
	struct irdma_sge sge = {0};
	u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inv_local_stag;
	local_fence = info->local_fence;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, info);
	if (!wqe)
		return -ENOSPC;

	sge.stag = op_info->target_stag;
	qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, &sge, 0);

	set_64bit_val(wqe, IRDMA_BYTE_16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
int
irdma_uk_post_receive(struct irdma_qp_uk *qp,
		      struct irdma_post_rq_info *info)
{
	u32 wqe_idx, i, byte_off;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u64 hdr;

	if (qp->max_rq_frag_cnt < info->num_sges)
		return -EINVAL;

	wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
	if (!wqe)
		return -ENOSPC;

	qp->rq_wrid_array[wqe_idx] = info->wr_id;
	addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
	qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, info->sg_list,
				    qp->rwqe_polarity);

	for (i = 1, byte_off = IRDMA_BYTE_32; i < info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
					    qp->rwqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if the fragment count is even, set the valid bit in the next (unused) fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
	    info->num_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->rwqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, IRDMA_BYTE_16, 0);
	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	return 0;
}

/**
 * irdma_uk_cq_resize - reset the cq buffer info
 * @cq: cq to resize
 * @cq_base: new cq buffer addr
 * @cq_size: number of cqes
 */
void
irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{
	cq->cq_base = cq_base;
	cq->cq_size = cq_size;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
 * @cq: cq to resize
 * @cq_cnt: the count of the resized cq buffers
 */
void
irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se;
	u8 arm_next;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, IRDMA_BYTE_32, &temp_val);

	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	sw_cq_sel += cq_cnt;

	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);

	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
	    FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, IRDMA_BYTE_32, temp_val);
}

/**
 * irdma_uk_cq_request_notification - cq notification request (door bell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
void
irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				 enum irdma_cmpl_notify cq_notify)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se = 0;
	u8 arm_next = 0;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, IRDMA_BYTE_32, &temp_val);
	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_seq_num++;
	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next_se |= 1;
	if (cq_notify == IRDMA_CQ_COMPL_EVENT)
		arm_next = 1;
	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
	    FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, IRDMA_BYTE_32, temp_val);

	irdma_wmb();		/* make sure the shadow area is updated before the doorbell is rung */

	db_wr32(cq->cq_id, cq->cqe_alloc_db);
}

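/**
 * irdma_check_rq_cqe - resync RQ consumer index on a missed completion
 * @qp: hw qp ptr
 * @array_idx: (in/out) RQ wrid array index from the CQE; reset to the
 *	       expected index when a gap is detected
 *
 * Returns -1 if the CQE does not match the next expected RQ entry,
 * 0 otherwise.
 */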
static int
irdma_check_rq_cqe(struct irdma_qp_uk *qp, u32 *array_idx)
{
	u32 exp_idx = (qp->last_rx_cmpl_idx + 1) % qp->rq_size;

	if (*array_idx != exp_idx) {
		*array_idx = exp_idx;
		qp->last_rx_cmpl_idx = exp_idx;

		return -1;
	}

	qp->last_rx_cmpl_idx = *array_idx;

	return 0;
}

/**
 * irdma_skip_duplicate_flush_cmpl - check last cmpl and update wqe if needed
 *
 * @ring: sq/rq ring
 * @flush_seen: information if flush for specific ring was already seen
 * @comp_status: completion status
 * @wqe_idx: new value of WQE index returned if there is more work on ring
 */
static inline int
irdma_skip_duplicate_flush_cmpl(struct irdma_ring ring, u8 flush_seen,
				enum irdma_cmpl_status comp_status,
				u32 *wqe_idx)
{
	if (flush_seen) {
		if (IRDMA_RING_MORE_WORK(ring))
			*wqe_idx = ring.tail;
		else
			return -ENOENT;
	}

	return 0;
}

/**
 * irdma_detect_unsignaled_cmpls - check if unsignaled cmpl is to be reported
 * @cq: hw cq
 * @qp: hw qp
 * @info: cq poll information collected
 * @wqe_idx: index of the WR in SQ ring
 */
static int
irdma_detect_unsignaled_cmpls(struct irdma_cq_uk *cq,
			      struct irdma_qp_uk *qp,
			      struct irdma_cq_poll_info *info,
			      u32 wqe_idx)
{
	u64 qword0, qword1, qword2, qword3;
	__le64 *cqe, *wqe;
	int i;
	u32 widx;

	if (qp->sq_wrtrk_array[wqe_idx].signaled == 0) {
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
		irdma_pr_err("%p %d %d\n", cqe, cq->cq_ring.head, wqe_idx);
		for (i = -10; i <= 10; i++) {
			IRDMA_GET_CQ_ELEM_AT_OFFSET(cq, i + cq->cq_ring.size, cqe);
			get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
			get_64bit_val(cqe, IRDMA_BYTE_8, &qword1);
			get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);
			get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
			widx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
			irdma_pr_err("%d %04x %p %016lx %016lx %016lx %016lx ",
				     i, widx, cqe, qword0, qword1, qword2, qword3);
			if ((u8)FIELD_GET(IRDMA_CQ_SQ, qword3)) {
				irdma_pr_err("%lx %x %x %x ",
					     qp->sq_wrtrk_array[widx].wrid, qp->sq_wrtrk_array[widx].wr_len,
					     qp->sq_wrtrk_array[widx].quanta, qp->sq_wrtrk_array[widx].signaled);
				wqe = qp->sq_base[widx].elem;
				get_64bit_val(wqe, IRDMA_BYTE_0, &qword0);
				get_64bit_val(wqe, IRDMA_BYTE_8, &qword1);
				get_64bit_val(wqe, IRDMA_BYTE_16, &qword2);
				get_64bit_val(wqe, IRDMA_BYTE_24, &qword3);

				irdma_pr_err("%016lx %016lx %016lx %016lx \n",
					     qword0, qword1, qword2, qword3);
			} else {
				irdma_pr_err("\n");
			}
		}
		return -ENOENT;
	}

	return 0;
}

/**
 * irdma_uk_cq_poll_cmpl - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
int
irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
		      struct irdma_cq_poll_info *info)
{
	u64 comp_ctx, qword0, qword2, qword3;
	__le64 *cqe;
	struct irdma_qp_uk *qp;
	struct irdma_ring *pring = NULL;
	u32 wqe_idx;
	int ret_code;
	bool move_cq_head = true;
	u8 polarity;
	bool ext_valid;
	__le64 *ext_cqe;

	if (cq->avoid_mem_cflct)
		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
	else
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);

	get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
	if (polarity != cq->polarity)
		return -ENOENT;

	/* Ensure CQE contents are read after valid bit is checked */
	rmb();

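	/*
	 * An extended CQE occupies a second 32-byte slot.  Unless
	 * avoid_mem_cflct is set, the extension lives in the next ring
	 * entry, so its own valid bit (with polarity flipped across a
	 * ring wrap) must be checked before its contents are used.
	 */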
	ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
	if (ext_valid) {
		u64 qword6, qword7;
		u32 peek_head;

		if (cq->avoid_mem_cflct) {
			ext_cqe = (__le64 *) ((u8 *)cqe + 32);
			get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
		} else {
			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
			ext_cqe = cq->cq_base[peek_head].buf;
			get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
			if (!peek_head)
				polarity ^= 1;
		}
		if (polarity != cq->polarity)
			return -ENOENT;

		/* Ensure ext CQE contents are read after ext valid bit is checked */
		rmb();

		info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
		if (info->imm_valid) {
			u64 qword4;

			get_64bit_val(ext_cqe, IRDMA_BYTE_0, &qword4);
			info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
		}
		info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
		info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
		if (info->ud_smac_valid || info->ud_vlan_valid) {
			get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
			if (info->ud_vlan_valid)
				info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
			if (info->ud_smac_valid) {
				info->ud_smac[5] = qword6 & 0xFF;
				info->ud_smac[4] = (qword6 >> 8) & 0xFF;
				info->ud_smac[3] = (qword6 >> 16) & 0xFF;
				info->ud_smac[2] = (qword6 >> 24) & 0xFF;
				info->ud_smac[1] = (qword6 >> 32) & 0xFF;
				info->ud_smac[0] = (qword6 >> 40) & 0xFF;
			}
		}
	} else {
		info->imm_valid = false;
		info->ud_smac_valid = false;
		info->ud_vlan_valid = false;
	}

	info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
	info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
	info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
	get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
	qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
	if (info->error) {
		info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
		info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
		switch (info->major_err) {
		case IRDMA_FLUSH_MAJOR_ERR:
			/* Set the minor error to the standard flush error code for remaining cqes */
			if (info->minor_err != FLUSH_GENERAL_ERR) {
				qword3 &= ~IRDMA_CQ_MINERR;
				qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
				set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
			}
			info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
			break;
		default:
			info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
			break;
		}
	} else {
		info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
	}

	get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
	get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);

	info->stat.raw = (u32)FIELD_GET(IRDMACQ_TCPSQN_ROCEPSN_RTT_TS, qword0);
	info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
	info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);

	info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
	if (!qp || qp->destroy_pending) {
		ret_code = -EFAULT;
		goto exit;
	}
	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
	info->qp_handle = (irdma_qp_handle) (irdma_uintptr) qp;
	info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);

	if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
		u32 array_idx;

		ret_code = irdma_skip_duplicate_flush_cmpl(qp->rq_ring,
							   qp->rq_flush_seen,
							   info->comp_status,
							   &wqe_idx);
		if (ret_code != 0)
			goto exit;

		array_idx = wqe_idx / qp->rq_wqe_size_multiplier;

		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
		    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
				ret_code = -ENOENT;
				goto exit;
			}

			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
			info->signaled = 1;
			array_idx = qp->rq_ring.tail;
		} else {
			info->wr_id = qp->rq_wrid_array[array_idx];
			info->signaled = 1;
			if (irdma_check_rq_cqe(qp, &array_idx)) {
				info->wr_id = qp->rq_wrid_array[array_idx];
				info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
				IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
				return 0;
			}
		}

		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

		if (qword3 & IRDMACQ_STAG) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
		} else {
			info->stag_invalid_set = false;
		}
		IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
			qp->rq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
				qp->rq_flush_complete = true;
			else
				move_cq_head = false;
		}
		pring = &qp->rq_ring;
	} else {		/* q_type is IRDMA_CQE_QTYPE_SQ */
		if (qp->first_sq_wq) {
			if (wqe_idx + 1 >= qp->conn_wqes)
				qp->first_sq_wq = false;

			if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
				IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
				IRDMA_RING_MOVE_TAIL(cq->cq_ring);
				set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
					      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
				memset(info, 0,
				       sizeof(struct irdma_cq_poll_info));
				return irdma_uk_cq_poll_cmpl(cq, info);
			}
		}
		/* cease posting push mode on push drop */
		if (info->push_dropped) {
			qp->push_mode = false;
			qp->push_dropped = true;
		}
		ret_code = irdma_skip_duplicate_flush_cmpl(qp->sq_ring,
							   qp->sq_flush_seen,
							   info->comp_status,
							   &wqe_idx);
		if (ret_code != 0)
			goto exit;
		if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			info->signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
			if (!info->comp_status)
				info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
			ret_code = irdma_detect_unsignaled_cmpls(cq, qp, info, wqe_idx);
			if (ret_code != 0)
				goto exit;
			info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
			IRDMA_RING_SET_TAIL(qp->sq_ring,
					    wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
		} else {
			unsigned long flags;

			spin_lock_irqsave(qp->lock, flags);
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
				spin_unlock_irqrestore(qp->lock, flags);
				ret_code = -ENOENT;
				goto exit;
			}

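			/*
			 * A flush completion stands in for every WQE still
			 * on the SQ; skip over padding NOPs so the wr_id of
			 * the first real WR is reported.
			 */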
			do {
				__le64 *sw_wqe;
				u64 wqe_qword;
				u32 tail;

				tail = qp->sq_ring.tail;
				sw_wqe = qp->sq_base[tail].elem;
				get_64bit_val(sw_wqe, IRDMA_BYTE_24,
					      &wqe_qword);
				info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
							      wqe_qword);
				IRDMA_RING_SET_TAIL(qp->sq_ring,
						    tail + qp->sq_wrtrk_array[tail].quanta);
				if (info->op_type != IRDMAQP_OP_NOP) {
					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
					info->signaled = qp->sq_wrtrk_array[tail].signaled;
					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
					break;
				}
			} while (1);

			if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
			    info->minor_err == FLUSH_PROT_ERR)
				info->minor_err = FLUSH_MW_BIND_ERR;
			qp->sq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
				qp->sq_flush_complete = true;
			spin_unlock_irqrestore(qp->lock, flags);
		}
		pring = &qp->sq_ring;
	}

	ret_code = 0;

exit:
	if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
		if (pring && IRDMA_RING_MORE_WORK(*pring))
			move_cq_head = false;
	}

	if (move_cq_head) {
		IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
		if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
			cq->polarity ^= 1;

		if (ext_valid && !cq->avoid_mem_cflct) {
			IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
			if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
				cq->polarity ^= 1;
		}

		IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		if (!cq->avoid_mem_cflct && ext_valid)
			IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
			      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
	} else {
		qword3 &= ~IRDMA_CQ_WQEIDX;
		qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
		set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
	}

	return ret_code;
}

/**
 * irdma_round_up_wq - return round up qp wq depth
 * @wqdepth: wq depth in quanta to round up
 */
static int
irdma_round_up_wq(u32 wqdepth)
{
	int scount = 1;

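	/* round up to the next power of two by smearing the high bit right */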
	for (wqdepth--; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;

	return ++wqdepth;
}

/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs and inline data size.
 * For 1 SGE or inline data <= 8, shift = 0 (wqe size of 32 bytes).
 * For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe size of 64 bytes).
 * For 4-7 SGEs or inline data <= 101, shift = 2 (wqe size of 128 bytes).
 * Otherwise, shift = 3 (wqe size of 256 bytes).
 */
void
irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
		    u32 inline_data, u8 *shift)
{
	*shift = 0;
	if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
		if (sge > 1 || inline_data > 8) {
			if (sge < 4 && inline_data <= 39)
				*shift = 1;
			else if (sge < 8 && inline_data <= 101)
				*shift = 2;
			else
				*shift = 3;
		}
	} else if (sge > 1 || inline_data > 16) {
		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
	}
}

/**
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
int
irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth)
{
	*sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);

	if (*sqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
		*sqdepth = uk_attrs->min_hw_wq_size << shift;
	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
		return -EINVAL;

	return 0;
}

/**
 * irdma_get_rqdepth - get RQ/SRQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: RQ/SRQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ/SRQ
 */
int
irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth)
{
	*rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);

	if (*rqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
		*rqdepth = uk_attrs->min_hw_wq_size << shift;
	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
		return -EINVAL;

	return 0;
}

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
	.iw_copy_inline_data = irdma_copy_inline_data,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
	.iw_set_fragment = irdma_set_fragment,
};

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
	.iw_set_fragment = irdma_set_fragment_gen_1,
};

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete
 * connection.
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 */
static void
irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
			    struct irdma_qp_uk_init_info *info)
{
	u16 move_cnt = 1;

	if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE)
		move_cnt = 3;

	qp->conn_wqes = move_cnt;
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}

/**
 * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
 * @ukinfo: qp initialization info
 * @sq_shift: Returns shift of SQ
 * @rq_shift: Returns shift of RQ
 */
void
irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
		       u8 *rq_shift)
{
	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;

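	/* GEN_2 reserves one extra fragment slot for immediate data */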
	irdma_get_wqe_shift(ukinfo->uk_attrs,
			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
			    ukinfo->max_sq_frag_cnt,
			    ukinfo->max_inline_data, sq_shift);

	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
			    rq_shift);

	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
		if (ukinfo->abi_ver > 4)
			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	}
}

/**
 * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
 * @ukinfo: qp initialization info
 * @sq_depth: Returns depth of SQ
 * @sq_shift: Returns shift of SQ
 */
int
irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
			     u32 *sq_depth, u8 *sq_shift)
{
	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;
	int status;

	irdma_get_wqe_shift(ukinfo->uk_attrs,
			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
			    ukinfo->max_sq_frag_cnt,
			    ukinfo->max_inline_data, sq_shift);
	status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
				   *sq_shift, sq_depth);

	return status;
}

/**
 * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
 * @ukinfo: qp initialization info
 * @rq_depth: Returns depth of RQ
 * @rq_shift: Returns shift of RQ
 */
int
irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
			     u32 *rq_depth, u8 *rq_shift)
{
	int status;

	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
			    rq_shift);

	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
		if (ukinfo->abi_ver > 4)
			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	}

	status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
				   *rq_shift, rq_depth);

	return status;
}

/**
 * irdma_uk_qp_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the vars used in both user and kernel mode.
 * The size of the wqe depends on the maximum number of fragments
 * allowed. The size of the wqe * the number of wqes should then equal
 * the amount of memory allocated for the sq and rq.
 */
int
irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
	int ret_code = 0;
	u32 sq_ring_size;

	qp->uk_attrs = info->uk_attrs;
	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
		return -EINVAL;

	qp->qp_caps = info->qp_caps;
	qp->sq_base = info->sq;
	qp->rq_base = info->rq;
	qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
	qp->shadow_area = info->shadow_area;
	qp->sq_wrtrk_array = info->sq_wrtrk_array;

	qp->rq_wrid_array = info->rq_wrid_array;
	qp->wqe_alloc_db = info->wqe_alloc_db;
	qp->last_rx_cmpl_idx = 0xffffffff;
	qp->rd_fence_rate = info->rd_fence_rate;
	qp->qp_id = info->qp_id;
	qp->sq_size = info->sq_size;
	qp->push_mode = false;
	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
	sq_ring_size = qp->sq_size << info->sq_shift;
	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
	IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
	if (info->first_sq_wq) {
		irdma_setup_connection_wqes(qp, info);
		qp->swqe_polarity = 1;
		qp->first_sq_wq = true;
	} else {
		qp->swqe_polarity = 0;
	}
	qp->swqe_polarity_deferred = 1;
	qp->rwqe_polarity = 0;
	qp->rq_size = info->rq_size;
	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
	qp->max_inline_data = info->max_inline_data;
	qp->rq_wqe_size = info->rq_shift;
	IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
	qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
	else
		qp->wqe_ops = iw_wqe_uk_ops;
	return ret_code;
}

/**
 * irdma_uk_cq_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
int
irdma_uk_cq_init(struct irdma_cq_uk *cq, struct irdma_cq_uk_init_info *info)
{
	cq->cq_base = info->cq_base;
	cq->cq_id = info->cq_id;
	cq->cq_size = info->cq_size;
	cq->cqe_alloc_db = info->cqe_alloc_db;
	cq->cq_ack_db = info->cq_ack_db;
	cq->shadow_area = info->shadow_area;
	cq->avoid_mem_cflct = info->avoid_mem_cflct;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;

	return 0;
}

/**
 * irdma_uk_clean_cq - clean cq entries
 * @q: completion context
 * @cq: cq to clean
 */
int
irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
{
	__le64 *cqe;
	u64 qword3, comp_ctx;
	u32 cq_head;
	u8 polarity, temp;

	cq_head = cq->cq_ring.head;
	temp = cq->polarity;
	do {
		if (cq->avoid_mem_cflct)
			cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
		else
			cqe = cq->cq_base[cq_head].buf;
		get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

		if (polarity != temp)
			break;

		/* Ensure CQE contents are read after valid bit is checked */
		rmb();

		get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
		if ((void *)(irdma_uintptr) comp_ctx == q)
			set_64bit_val(cqe, IRDMA_BYTE_8, 0);

		cq_head = (cq_head + 1) % cq->cq_ring.size;
		if (!cq_head)
			temp ^= 1;
	} while (true);
	return 0;
}

/**
 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @quanta: quanta for frag_cnt
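 *
 * The first quanta holds the WQE header plus one fragment; every
 * additional quanta holds two more fragments.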
 */
int
irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*quanta = IRDMA_QP_WQE_MIN_QUANTA;
		break;
	case 2:
	case 3:
		*quanta = 2;
		break;
	case 4:
	case 5:
		*quanta = 3;
		break;
	case 6:
	case 7:
		*quanta = 4;
		break;
	case 8:
	case 9:
		*quanta = 5;
		break;
	case 10:
	case 11:
		*quanta = 6;
		break;
	case 12:
	case 13:
		*quanta = 7;
		break;
	case 14:
	case 15:		/* when immediate data is present */
		*quanta = 8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size in bytes given frag_cnt
 */
int
irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = 32;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	case 8:
	case 9:
	case 10:
	case 11:
	case 12:
	case 13:
	case 14:
		*wqe_size = 256;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1829