xref: /freebsd/sys/dev/irdma/irdma_type.h (revision 2a58b312)
1 /*-
2  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
3  *
4  * Copyright (c) 2015 - 2022 Intel Corporation
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenFabrics.org BSD license below:
11  *
12  *   Redistribution and use in source and binary forms, with or
13  *   without modification, are permitted provided that the following
14  *   conditions are met:
15  *
16  *    - Redistributions of source code must retain the above
17  *	copyright notice, this list of conditions and the following
18  *	disclaimer.
19  *
20  *    - Redistributions in binary form must reproduce the above
21  *	copyright notice, this list of conditions and the following
22  *	disclaimer in the documentation and/or other materials
23  *	provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 /*$FreeBSD$*/
35 
36 #ifndef IRDMA_TYPE_H
37 #define IRDMA_TYPE_H
38 
39 #include "osdep.h"
40 
41 #include "irdma.h"
42 #include "irdma_user.h"
43 #include "irdma_hmc.h"
44 #include "irdma_uda.h"
45 #include "irdma_ws.h"
/* Bit flags selecting which debug-trace categories are enabled
 * (compared against irdma_sc_dev.debug_mask).
 */
enum irdma_debug_flag {
	IRDMA_DEBUG_NONE	= 0,
	IRDMA_DEBUG_ERR		= 1U << 0,
	IRDMA_DEBUG_INIT	= 1U << 1,
	IRDMA_DEBUG_DEV		= 1U << 2,
	IRDMA_DEBUG_CM		= 1U << 3,
	IRDMA_DEBUG_VERBS	= 1U << 4,
	IRDMA_DEBUG_PUDA	= 1U << 5,
	IRDMA_DEBUG_ILQ		= 1U << 6,
	IRDMA_DEBUG_IEQ		= 1U << 7,
	IRDMA_DEBUG_QP		= 1U << 8,
	IRDMA_DEBUG_CQ		= 1U << 9,
	IRDMA_DEBUG_MR		= 1U << 10,
	IRDMA_DEBUG_PBLE	= 1U << 11,
	IRDMA_DEBUG_WQE		= 1U << 12,
	IRDMA_DEBUG_AEQ		= 1U << 13,
	IRDMA_DEBUG_CQP		= 1U << 14,
	IRDMA_DEBUG_HMC		= 1U << 15,
	IRDMA_DEBUG_USER	= 1U << 16,
	IRDMA_DEBUG_VIRT	= 1U << 17,
	IRDMA_DEBUG_DCB		= 1U << 18,
	/* bits 19-22 are currently unassigned */
	IRDMA_DEBUG_CQE		= 1U << 23,
	IRDMA_DEBUG_CLNT	= 1U << 24,
	IRDMA_DEBUG_WS		= 1U << 25,
	IRDMA_DEBUG_STATS	= 1U << 26,
	IRDMA_DEBUG_ALL		= 0xFFFFFFFF,
};
73 
/* HW page-size selector encoded into MR/WQE contexts. */
enum irdma_page_size {
	IRDMA_PAGE_SIZE_4K = 0,	/* 4 KiB */
	IRDMA_PAGE_SIZE_2M = 1,	/* 2 MiB */
	IRDMA_PAGE_SIZE_1G = 2,	/* 1 GiB */
};
79 
/* Flags for the hdrct byte of a terminate header (struct irdma_terminate_hdr). */
enum irdma_hdrct_flags {
	DDP_LEN_FLAG  = 1U << 7,	/* 0x80 */
	DDP_HDR_FLAG  = 1U << 6,	/* 0x40 */
	RDMA_HDR_FLAG = 1U << 5,	/* 0x20 */
};
85 
/* Protocol layer originating a terminate message. */
enum irdma_term_layers {
	LAYER_RDMA = 0,	/* RDMAP layer */
	LAYER_DDP,	/* DDP layer (1) */
	LAYER_MPA,	/* MPA layer (2) */
};
91 
/* Terminate error types; interpretation depends on the originating layer,
 * so RDMAP_* and DDP_* values intentionally overlap.
 */
enum irdma_term_error_types {
	RDMAP_REMOTE_PROT = 1,
	RDMAP_REMOTE_OP   = 2,
	DDP_CATASTROPHIC  = 0,
	DDP_TAGGED_BUF    = 1,
	DDP_UNTAGGED_BUF  = 2,
	DDP_LLP		  = 3,
};
100 
/* RDMAP-layer terminate error codes (codes match the RDMAP terminate
 * message format of RFC 5040 — confirm against the spec when changing).
 */
enum irdma_term_rdma_errors {
	RDMAP_INV_STAG		  = 0x00,
	RDMAP_INV_BOUNDS	  = 0x01,
	RDMAP_ACCESS		  = 0x02,
	RDMAP_UNASSOC_STAG	  = 0x03,
	RDMAP_TO_WRAP		  = 0x04,
	RDMAP_INV_RDMAP_VER       = 0x05,
	RDMAP_UNEXPECTED_OP       = 0x06,
	RDMAP_CATASTROPHIC_LOCAL  = 0x07,
	RDMAP_CATASTROPHIC_GLOBAL = 0x08,
	RDMAP_CANT_INV_STAG       = 0x09,
	RDMAP_UNSPECIFIED	  = 0xff,
};
114 
/* DDP-layer terminate error codes. Codes are scoped by the error type
 * (tagged vs untagged vs catastrophic), so values legitimately repeat:
 * e.g. DDP_CATASTROPHIC_LOCAL and DDP_TAGGED_INV_STAG are both 0x00.
 */
enum irdma_term_ddp_errors {
	DDP_CATASTROPHIC_LOCAL      = 0x00,
	DDP_TAGGED_INV_STAG	    = 0x00,
	DDP_TAGGED_BOUNDS	    = 0x01,
	DDP_TAGGED_UNASSOC_STAG     = 0x02,
	DDP_TAGGED_TO_WRAP	    = 0x03,
	DDP_TAGGED_INV_DDP_VER      = 0x04,
	DDP_UNTAGGED_INV_QN	    = 0x01,
	DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
	DDP_UNTAGGED_INV_MSN_RANGE  = 0x03,
	DDP_UNTAGGED_INV_MO	    = 0x04,
	DDP_UNTAGGED_INV_TOO_LONG   = 0x05,
	DDP_UNTAGGED_INV_DDP_VER    = 0x06,
};
129 
/* MPA-layer terminate error codes. */
enum irdma_term_mpa_errors {
	MPA_CLOSED  = 0x01,
	MPA_CRC     = 0x02,
	MPA_MARKER  = 0x03,
	MPA_REQ_RSP = 0x04,
};
136 
/* Indices into the gathered HW statistics buffer (struct irdma_dev_hw_stats).
 * Layout is HW-generation dependent: gen1 exposes indices 0..41, gen2 adds
 * the trailing congestion-notification counters. Do not reorder — indices
 * map directly to positions in the stats gather buffer.
 */
enum irdma_hw_stats_index {
	/* gen1 - 32-bit */
	IRDMA_HW_STAT_INDEX_IP4RXDISCARD	= 0,
	IRDMA_HW_STAT_INDEX_IP4RXTRUNC		= 1,
	IRDMA_HW_STAT_INDEX_IP4TXNOROUTE	= 2,
	IRDMA_HW_STAT_INDEX_IP6RXDISCARD	= 3,
	IRDMA_HW_STAT_INDEX_IP6RXTRUNC		= 4,
	IRDMA_HW_STAT_INDEX_IP6TXNOROUTE	= 5,
	IRDMA_HW_STAT_INDEX_TCPRTXSEG		= 6,
	IRDMA_HW_STAT_INDEX_TCPRXOPTERR		= 7,
	IRDMA_HW_STAT_INDEX_TCPRXPROTOERR	= 8,
	IRDMA_HW_STAT_INDEX_RXVLANERR		= 9,
	/* gen1 - 64-bit */
	IRDMA_HW_STAT_INDEX_IP4RXOCTS		= 10,
	IRDMA_HW_STAT_INDEX_IP4RXPKTS		= 11,
	IRDMA_HW_STAT_INDEX_IP4RXFRAGS		= 12,
	IRDMA_HW_STAT_INDEX_IP4RXMCPKTS		= 13,
	IRDMA_HW_STAT_INDEX_IP4TXOCTS		= 14,
	IRDMA_HW_STAT_INDEX_IP4TXPKTS		= 15,
	IRDMA_HW_STAT_INDEX_IP4TXFRAGS		= 16,
	IRDMA_HW_STAT_INDEX_IP4TXMCPKTS		= 17,
	IRDMA_HW_STAT_INDEX_IP6RXOCTS		= 18,
	IRDMA_HW_STAT_INDEX_IP6RXPKTS		= 19,
	IRDMA_HW_STAT_INDEX_IP6RXFRAGS		= 20,
	IRDMA_HW_STAT_INDEX_IP6RXMCPKTS		= 21,
	IRDMA_HW_STAT_INDEX_IP6TXOCTS		= 22,
	IRDMA_HW_STAT_INDEX_IP6TXPKTS		= 23,
	IRDMA_HW_STAT_INDEX_IP6TXFRAGS		= 24,
	IRDMA_HW_STAT_INDEX_IP6TXMCPKTS		= 25,
	IRDMA_HW_STAT_INDEX_TCPRXSEGS		= 26,
	IRDMA_HW_STAT_INDEX_TCPTXSEG		= 27,
	IRDMA_HW_STAT_INDEX_RDMARXRDS		= 28,
	IRDMA_HW_STAT_INDEX_RDMARXSNDS		= 29,
	IRDMA_HW_STAT_INDEX_RDMARXWRS		= 30,
	IRDMA_HW_STAT_INDEX_RDMATXRDS		= 31,
	IRDMA_HW_STAT_INDEX_RDMATXSNDS		= 32,
	IRDMA_HW_STAT_INDEX_RDMATXWRS		= 33,
	IRDMA_HW_STAT_INDEX_RDMAVBND		= 34,
	IRDMA_HW_STAT_INDEX_RDMAVINV		= 35,
	IRDMA_HW_STAT_INDEX_IP4RXMCOCTS		= 36,
	IRDMA_HW_STAT_INDEX_IP4TXMCOCTS		= 37,
	IRDMA_HW_STAT_INDEX_IP6RXMCOCTS		= 38,
	IRDMA_HW_STAT_INDEX_IP6TXMCOCTS		= 39,
	IRDMA_HW_STAT_INDEX_UDPRXPKTS		= 40,
	IRDMA_HW_STAT_INDEX_UDPTXPKTS		= 41,
	IRDMA_HW_STAT_INDEX_MAX_GEN_1		= 42, /* Must be same value as next entry */

	/* gen2 - 64-bit */
	IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS	= 42,

	/* gen2 - 32-bit */
	IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED	= 43,
	IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED	= 44,
	IRDMA_HW_STAT_INDEX_TXNPCNPSENT		= 45,
	IRDMA_HW_STAT_INDEX_MAX_GEN_2		= 46,
};
193 
194 #define IRDMA_MIN_FEATURES 2
195 
/* Indices into irdma_sc_dev.feature_info[] as reported by FW.
 * IRDMA_MAX_FEATURES is derived implicitly and must remain the last entry.
 */
enum irdma_feature_type {
	IRDMA_FEATURE_FW_INFO = 0,
	IRDMA_HW_VERSION_INFO = 1,
	IRDMA_QSETS_MAX       = 26,
	IRDMA_MAX_FEATURES, /* Must be last entry */
};
202 
/* Work-scheduler node priority type (see irdma_ws_node_info.prio_type). */
enum irdma_sched_prio_type {
	IRDMA_PRIO_WEIGHTED_RR     = 1,
	IRDMA_PRIO_STRICT	   = 2,
	IRDMA_PRIO_WEIGHTED_STRICT = 3,
};
208 
/* Ownership type of a VSI: virtual function, VM, or physical function. */
enum irdma_vm_vf_type {
	IRDMA_VF_TYPE = 0,
	IRDMA_VM_TYPE = 1,
	IRDMA_PF_TYPE = 2,
};
214 
/* HMC resource-partitioning profile selected at CQP init
 * (irdma_cqp_init_info.hmc_profile).
 */
enum irdma_cqp_hmc_profile {
	IRDMA_HMC_PROFILE_DEFAULT  = 1,
	IRDMA_HMC_PROFILE_FAVOR_VF = 2,
	IRDMA_HMC_PROFILE_EQUAL    = 3,
};
220 
/* Entry type for a quad-hash table entry (struct irdma_qhash_table_info). */
enum irdma_quad_entry_type {
	IRDMA_QHASH_TYPE_TCP_ESTABLISHED = 1,
	IRDMA_QHASH_TYPE_TCP_SYN	 = 2,
	IRDMA_QHASH_TYPE_UDP_UNICAST	 = 3,
	IRDMA_QHASH_TYPE_UDP_MCAST	 = 4,
	IRDMA_QHASH_TYPE_ROCE_MCAST	 = 5,
	IRDMA_QHASH_TYPE_ROCEV2_HW	 = 6,
};
229 
/* Operation to perform on a quad-hash table entry. */
enum irdma_quad_hash_manage_type {
	IRDMA_QHASH_MANAGE_TYPE_DELETE = 0,
	IRDMA_QHASH_MANAGE_TYPE_ADD    = 1,
	IRDMA_QHASH_MANAGE_TYPE_MODIFY = 2,
};
235 
/* Who handles TCP SYN/RST (HW vs FW, secure vs plain); encoded into
 * irdma_tcp_offload_info.syn_rst_handling.
 */
enum irdma_syn_rst_handling {
	IRDMA_SYN_RST_HANDLING_HW_TCP_SECURE = 0,
	IRDMA_SYN_RST_HANDLING_HW_TCP	     = 1,
	IRDMA_SYN_RST_HANDLING_FW_TCP_SECURE = 2,
	IRDMA_SYN_RST_HANDLING_FW_TCP	     = 3,
};
242 
/* Queue class used when flushing/identifying a queue. */
enum irdma_queue_type {
	IRDMA_QUEUE_TYPE_SQ_RQ = 0,
	IRDMA_QUEUE_TYPE_CQP   = 1,
};
247 
248 struct irdma_sc_dev;
249 struct irdma_vsi_pestat;
250 
/* DCQCN congestion-control tunables, passed to the CQP at init time
 * (copied into struct irdma_sc_cqp via irdma_cqp_init_info).
 * Field semantics follow the DCQCN algorithm parameters — confirm exact
 * units against the HW/FW spec; not visible here.
 */
struct irdma_dcqcn_cc_params {
	u8 cc_cfg_valid;
	u8 min_dec_factor;
	u8 min_rate;
	u8 dcqcn_f;
	u16 rai_factor;
	u16 hai_factor;
	u16 dcqcn_t;
	u32 dcqcn_b;
	u32 rreduce_mperiod;
};
262 
/* Parameters for irdma_sc_cqp_init(): CQP SQ memory, host context,
 * version info, and feature flags. Mirrors the corresponding fields of
 * struct irdma_sc_cqp.
 */
struct irdma_cqp_init_info {
	u64 cqp_compl_ctx;
	u64 host_ctx_pa;		/* physical address of host_ctx */
	u64 sq_pa;			/* physical address of the CQP SQ */
	struct irdma_sc_dev *dev;
	struct irdma_cqp_quanta *sq;	/* virtual address of the CQP SQ */
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;
	u32 sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 hmc_profile;			/* enum irdma_cqp_hmc_profile */
	u8 ena_vf_count;
	u8 ceqs_per_vf;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	bool en_rem_endpoint_trk:1;
	enum irdma_protocol_used protocol_used;
};
285 
/* Terminate message header: layer/error-type nibble pair, error code
 * (values from the irdma_term_* enums above) and hdrct flags
 * (enum irdma_hdrct_flags).
 */
struct irdma_terminate_hdr {
	u8 layer_etype;
	u8 error_code;
	u8 hdrct;
	u8 rsvd;
};
292 
/* Fixed-size little-endian element buffers; sizes come from constants
 * defined in the irdma headers included above.
 */

/* One CQP SQ work-queue element. */
struct irdma_cqp_sq_wqe {
	__le64 buf[IRDMA_CQP_WQE_SIZE];
};

/* One asynchronous event queue element. */
struct irdma_sc_aeqe {
	__le64 buf[IRDMA_AEQE_SIZE];
};

/* One completion event queue element. */
struct irdma_ceqe {
	__le64 buf[IRDMA_CEQE_SIZE];
};

/* CQP context image. */
struct irdma_cqp_ctx {
	__le64 buf[IRDMA_CQP_CTX_SIZE];
};

/* CQ shadow area (DMA'd by HW). */
struct irdma_cq_shadow_area {
	__le64 buf[IRDMA_SHADOW_AREA_SIZE];
};
312 
/* Accumulated device statistics, indexed by enum irdma_hw_stats_index. */
struct irdma_dev_hw_stats {
	u64 stats_val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
};

/* Raw snapshot of the HW statistics gather buffer. */
struct irdma_gather_stats {
	u64 val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
};

/* Location of one statistic inside the gather buffer: byte offset,
 * bit offset, and value mask.
 */
struct irdma_hw_stat_map {
	u16 byteoff;
	u8 bitoff;
	u64 bitmask;
};
326 
/* State for gathering HW statistics: DMA buffer plus pointers to the
 * current and previous snapshots (used to compute deltas).
 */
struct irdma_stats_gather_info {
	bool use_hmc_fcn_index:1;
	bool use_stats_inst:1;
	u16 hmc_fcn_index;
	u16 stats_inst_index;
	struct irdma_dma_mem stats_buff_mem;
	void *gather_stats_va;		/* latest snapshot */
	void *last_gather_stats_va;	/* previous snapshot */
};
336 
/* Per-VSI protocol-engine statistics context: accumulated stats, gather
 * state, and a periodic timer; concurrent access serialized by lock.
 */
struct irdma_vsi_pestat {
	struct irdma_hw *hw;
	struct irdma_dev_hw_stats hw_stats;
	struct irdma_stats_gather_info gather_info;
	struct OS_TIMER stats_timer;
	struct irdma_sc_vsi *vsi;
	spinlock_t lock; /* rdma stats lock */
};
345 
/* Low-level HW handle: mapped register BARs, the OS device cookie, and
 * the HMC (host memory cache) info.
 */
struct irdma_hw {
	u8 IOMEM *hw_addr;	/* primary register space */
	u8 IOMEM *priv_hw_addr;	/* privileged register space */
	void *dev_context;
	struct irdma_hmc_info hmc;
};
352 
/* Partial-FPDU reassembly state for the IEQ path of an iWARP QP,
 * plus counters for the various exception outcomes.
 */
struct irdma_pfpdu {
	struct list_head rxlist;	/* buffers awaiting reassembly */
	u32 rcv_nxt;
	u32 fps;
	u32 max_fpdu_data;
	u32 nextseqnum;
	u32 rcv_start_seq;
	bool mode:1;
	bool mpa_crc_err:1;
	u8  marker_len;
	u64 total_ieq_bufs;
	u64 fpdu_processed;
	u64 bad_seq_num;
	u64 crc_err;
	u64 no_tx_bufs;
	u64 tx_err;
	u64 out_of_order;
	u64 pmode_count;
	struct irdma_sc_ah *ah;
	struct irdma_puda_buf *ah_buf;
	spinlock_t lock; /* fpdu processing lock */
	struct irdma_puda_buf *lastrcv_buf;
};
376 
/* Protection domain; initialized by irdma_sc_pd_init(). */
struct irdma_sc_pd {
	struct irdma_sc_dev *dev;
	u32 pd_id;
	int abi_ver;	/* user-space ABI version negotiated at alloc */
};

/* One CQP SQ quantum (same element size as irdma_cqp_sq_wqe). */
struct irdma_cqp_quanta {
	__le64 elem[IRDMA_CQP_WQE_SIZE];
};
386 
/* Control QP: the admin queue through which all CQP commands are posted
 * to FW/HW. Created via irdma_sc_cqp_init()/irdma_sc_cqp_create().
 */
struct irdma_sc_cqp {
	u32 size;
	u64 sq_pa;		/* physical address of the CQP SQ */
	u64 host_ctx_pa;
	void *back_cqp;		/* OS-layer back-pointer */
	struct irdma_sc_dev *dev;
	int (*process_cqp_sds)(struct irdma_sc_dev *dev,
			       struct irdma_update_sds_info *info);
	struct irdma_dma_mem sdbuf;
	struct irdma_ring sq_ring;
	struct irdma_cqp_quanta *sq_base;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;	/* per-WQE caller cookies, indexed by SQ slot */
	u32 cqp_id;
	u32 sq_size;
	u32 hw_sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 polarity;		/* valid-bit polarity for ring wrap detection */
	u8 hmc_profile;
	u8 ena_vf_count;
	u8 timeout_count;
	u8 ceqs_per_vf;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	bool en_rem_endpoint_trk:1;
	enum irdma_protocol_used protocol_used;
};
418 
/* Asynchronous event queue; elements are consumed by
 * irdma_sc_get_next_aeqe() and reposted with irdma_sc_repost_aeq_entries().
 */
struct irdma_sc_aeq {
	u32 size;
	u64 aeq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_aeqe *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	struct irdma_ring aeq_ring;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
	u8 polarity;		/* valid-bit polarity for ring wrap detection */
	bool virtual_map:1;	/* queue memory described by a PBL */
};
433 
/* Completion event queue: fans CQ completion notifications out to the
 * CQs registered in reg_cq[].
 */
struct irdma_sc_ceq {
	u32 size;
	u64 ceq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_ceqe *ceqe_base;
	void *pbl_list;
	u32 ceq_id;
	u32 elem_cnt;
	struct irdma_ring ceq_ring;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	u8 polarity;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_cq **reg_cq;	/* CQs registered with this CEQ */
	u32 reg_cq_size;
	/* NOTE(review): named req_cq_lock but protects the reg_cq array —
	 * likely a historical typo for "reg_cq_lock"; renaming would touch
	 * callers, so left as-is.
	 */
	spinlock_t req_cq_lock; /* protect access to reg_cq array */
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
};
455 
/* Completion queue (privileged/control view wrapping the user-mode
 * irdma_cq_uk).
 */
struct irdma_sc_cq {
	struct irdma_cq_uk cq_uk;
	u64 cq_pa;
	u64 shadow_area_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	void *pbl_list;
	void *back_cq;		/* OS-layer back-pointer */
	u32 ceq_id;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u8 cq_type;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	bool ceqe_mask:1;
	bool virtual_map:1;
	bool check_overflow:1;
	bool ceq_id_valid:1;
	/* NOTE(review): not a :1 bitfield like the sibling flags — confirm
	 * whether this is intentional before changing (address-of use?).
	 */
	bool tph_en;
};
476 
/* Queue pair (privileged/control view wrapping the user-mode irdma_qp_uk).
 * Created/modified/destroyed through the irdma_sc_qp_* CQP operations.
 */
struct irdma_sc_qp {
	struct irdma_qp_uk qp_uk;
	u64 sq_pa;
	u64 rq_pa;
	u64 hw_host_ctx_pa;
	u64 shadow_area_pa;
	u64 q2_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_pd *pd;
	__le64 *hw_host_ctx;
	void *llp_stream_handle;
	struct irdma_pfpdu pfpdu;	/* IEQ partial-FPDU state (iWARP) */
	u32 ieq_qp;
	u8 *q2_buf;			/* Q2 exception buffer */
	u64 qp_compl_ctx;
	u32 push_idx;
	u16 qs_handle;
	u16 push_offset;
	u8 flush_wqes_count;
	u8 sq_tph_val;
	u8 rq_tph_val;
	u8 qp_state;
	u8 hw_sq_size;
	u8 hw_rq_size;
	u8 src_mac_addr_idx;
	bool on_qoslist:1;
	bool ieq_pass_thru:1;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
	bool flush_sq:1;
	bool flush_rq:1;
	bool sq_flush_code:1;
	bool rq_flush_code:1;
	enum irdma_flush_opcode flush_code;
	enum irdma_qp_event_type event_type;
	u8 term_flags;
	u8 user_pri;
	struct list_head list;		/* linkage on the per-QoS qplist */
};
520 
/* Arguments for allocating/freeing a HW statistics instance. */
struct irdma_stats_inst_info {
	bool use_hmc_fcn_index;
	u16 hmc_fn_id;
	u16 stats_idx;
};

/* User-priority (UP) mapping update pushed via CQP. */
struct irdma_up_info {
	u8 map[8];		/* UP-to-UP map, one entry per priority */
	u8 cnp_up_override;
	u16 hmc_fcn_idx;
	bool use_vlan:1;
	bool use_cnp_up_override:1;
};
534 
535 #define IRDMA_MAX_WS_NODES	0x3FF
536 #define IRDMA_WS_NODE_INVALID	0xFFFF
537 
/* Description of one work-scheduler tree node for the CQP WS-node
 * commands (bounded by IRDMA_MAX_WS_NODES above).
 */
struct irdma_ws_node_info {
	u16 id;
	u16 vsi;
	u16 parent_id;
	u16 qs_handle;
	bool type_leaf:1;	/* leaf vs intermediate node */
	bool enable:1;
	u8 prio_type;		/* enum irdma_sched_prio_type */
	u8 tc;
	u8 weight;
};
549 
/* Miscellaneous FPM (function private memory) limits parsed from the
 * FPM query buffer; consumed by the HMC configuration code.
 */
struct irdma_hmc_fpm_misc {
	u32 max_ceqs;
	u32 max_sds;
	u32 xf_block_size;
	u32 q1_block_size;
	u32 ht_multiplier;
	u32 timer_bucket;
	u32 rrf_block_size;
	u32 ooiscf_block_size;
};
560 
561 #define IRDMA_LEAF_DEFAULT_REL_BW		64
562 #define IRDMA_PARENT_DEFAULT_REL_BW		1
563 
/* Per-user-priority QoS state on a VSI: the QPs assigned to this level
 * and the scheduler attributes they map to.
 */
struct irdma_qos {
	struct list_head qplist;	/* QPs at this QoS level */
	struct mutex qos_mutex; /* protect QoS attributes per QoS level */
	u32 l2_sched_node_id;
	u16 qs_handle;
	u8 traffic_class;
	u8 rel_bw;
	u8 prio_type;
	bool valid;
};

/* Cached DCB configuration check result for one user priority. */
struct irdma_config_check {
	bool config_ok:1;
	bool lfc_set:1;		/* link-level flow control configured */
	bool pfc_set:1;		/* priority flow control configured */
	u8 traffic_class;
	u16 qs_handle;
};
582 
583 #define IRDMA_INVALID_STATS_IDX 0xff
/* Virtual station interface: per-port/VSI state including the ILQ/IEQ
 * PUDA resources, QoS levels, and statistics instance.
 */
struct irdma_sc_vsi {
	u16 vsi_idx;
	struct irdma_sc_dev *dev;
	void *back_vsi;		/* OS-layer back-pointer */
	u32 ilq_count;
	struct irdma_virt_mem ilq_mem;
	struct irdma_puda_rsrc *ilq;	/* incoming-listen queue */
	u32 ieq_count;
	struct irdma_virt_mem ieq_mem;
	struct irdma_puda_rsrc *ieq;	/* iWARP exception queue */
	u32 exception_lan_q;
	u16 mtu;
	enum irdma_vm_vf_type vm_vf_type;
	bool stats_inst_alloc:1;
	bool tc_change_pending:1;
	bool mtu_change_pending:1;
	struct irdma_vsi_pestat *pestat;
	ATOMIC qp_suspend_reqs;
	int (*register_qset)(struct irdma_sc_vsi *vsi,
			     struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
	struct irdma_config_check cfg_check[IRDMA_MAX_USER_PRIORITY];
	bool tc_print_warning[IRDMA_MAX_TRAFFIC_CLASS];
	u8 qos_rel_bw;
	u8 qos_prio_type;
	u16 stats_idx;		/* IRDMA_INVALID_STATS_IDX when unassigned — presumably; verify in callers */
	u8 dscp_map[IRDMA_DSCP_NUM_VAL];
	struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
	bool dscp_mode:1;
};
615 
/* Top-level device object: register/doorbell mappings, FPM buffers,
 * the CQP/AEQ/CEQ/CCQ control objects, HMC state, and the WS tree.
 */
struct irdma_sc_dev {
	struct list_head cqp_cmd_head; /* head of the CQP command list */
	spinlock_t cqp_lock; /* protect CQP list access */
	struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	u8 IOMEM *db_addr;
	u32 IOMEM *wqe_alloc_db;	/* doorbell registers */
	u32 IOMEM *cq_arm_db;
	u32 IOMEM *aeq_alloc_db;
	u32 IOMEM *cqp_db;
	u32 IOMEM *cq_ack_db;
	u32 IOMEM *ceq_itr_mask_db;
	u32 IOMEM *aeq_itr_mask_db;
	u32 IOMEM *hw_regs[IRDMA_MAX_REGS];
	u32 ceq_itr;   /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
	u64 hw_masks[IRDMA_MAX_MASKS];		/* per-generation field masks */
	u8 hw_shifts[IRDMA_MAX_SHIFTS];		/* per-generation field shifts */
	const struct irdma_hw_stat_map *hw_stats_map;
	u64 feature_info[IRDMA_MAX_FEATURES];	/* indexed by enum irdma_feature_type */
	u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];	/* per-opcode CQP command counters */
	struct irdma_hw_attrs hw_attrs;
	struct irdma_hmc_info *hmc_info;
	struct irdma_sc_cqp *cqp;
	struct irdma_sc_aeq *aeq;
	struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
	struct irdma_sc_cq *ccq;
	const struct irdma_irq_ops *irq_ops;
	struct irdma_hmc_fpm_misc hmc_fpm_misc;
	struct irdma_ws_node *ws_tree_root;
	struct mutex ws_mutex; /* ws tree mutex */
	u32 debug_mask;		/* enum irdma_debug_flag bits */
	u16 num_vfs;
	u16 hmc_fn_id;
	u8 vf_id;
	bool vchnl_up:1;
	bool ceq_valid:1;
	u8 pci_rev;
	int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_reset)(struct irdma_sc_vsi *vsi);
};
661 
/* Arguments for CQ modify/resize (see irdma_sc_cq_resize()). */
struct irdma_modify_cq_info {
	u64 cq_pa;
	struct irdma_cqe *cq_base;
	u32 cq_size;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	bool virtual_map:1;
	bool check_overflow;	/* NOTE(review): not :1 like siblings — confirm intent */
	bool cq_resize:1;
};
673 
/* Arguments for irdma_sc_qp_create(). */
struct irdma_create_qp_info {
	bool ord_valid:1;
	bool tcp_ctx_valid:1;
	bool cq_num_valid:1;
	bool arp_cache_idx_valid:1;
	bool mac_valid:1;
	bool force_lpb;		/* NOTE(review): not :1 like siblings — confirm intent */
	u8 next_iwarp_state;
};
683 
/* Arguments for irdma_sc_qp_modify(); the *_valid flags select which
 * context sections the CQP command updates.
 */
struct irdma_modify_qp_info {
	u64 rx_win0;
	u64 rx_win1;
	u16 new_mss;
	u8 next_iwarp_state;
	u8 curr_iwarp_state;
	u8 termlen;
	bool ord_valid:1;
	bool tcp_ctx_valid:1;
	bool udp_ctx_valid:1;
	bool cq_num_valid:1;
	bool arp_cache_idx_valid:1;
	bool reset_tcp_conn:1;
	bool remove_hash_idx:1;
	bool dont_send_term:1;
	bool dont_send_fin:1;
	bool cached_var_valid:1;
	bool mss_change:1;
	bool force_lpb:1;
	bool mac_valid:1;
};
705 
/* Decoded CCQ completion, returned by irdma_sc_ccq_get_cqe_info(). */
struct irdma_ccq_cqe_info {
	struct irdma_sc_cqp *cqp;
	u64 scratch;		/* caller cookie from the CQP WQE */
	u32 op_ret_val;
	u16 maj_err_code;
	u16 min_err_code;
	u8 op_code;
	bool error;		/* set when maj/min error codes are valid */
};

/* Per-traffic-class scheduler attributes from the LAN driver. */
struct irdma_qos_tc_info {
	u64 tc_ctx;
	u8 rel_bw;
	u8 prio_type;
	u8 egress_virt_up;
	u8 ingress_virt_up;
};
723 
/* L2/DCB parameters pushed down from the LAN driver (MTU, TC and
 * user-priority mappings); the *_changed flags indicate what to apply.
 */
struct irdma_l2params {
	struct irdma_qos_tc_info tc_info[IRDMA_MAX_USER_PRIORITY];
	u32 num_apps;
	u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
	u16 mtu;
	u8 up2tc[IRDMA_MAX_USER_PRIORITY];	/* user priority -> TC */
	u8 dscp_map[IRDMA_DSCP_NUM_VAL];
	u8 num_tc;
	u8 vsi_rel_bw;
	u8 vsi_prio_type;
	bool mtu_changed:1;
	bool tc_changed:1;
	bool dscp_mode:1;
};
738 
/* Arguments for initializing a struct irdma_sc_vsi. */
struct irdma_vsi_init_info {
	struct irdma_sc_dev *dev;
	void *back_vsi;
	struct irdma_l2params *params;
	u16 exception_lan_q;
	u16 pf_data_vsi_num;
	enum irdma_vm_vf_type vm_vf_type;
	int (*register_qset)(struct irdma_sc_vsi *vsi,
			     struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
};

/* Arguments for initializing a VSI's statistics context. */
struct irdma_vsi_stats_info {
	struct irdma_vsi_pestat *pestat;
	u8 fcn_id;
	bool alloc_stats_inst;
};
757 
/* Arguments for device-level init: FPM buffers, HW handle, BAR0
 * mapping, and the initial debug mask.
 */
struct irdma_device_init_info {
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	void IOMEM *bar0;
	u16 max_vfs;
	u16 hmc_fn_id;
	u32 debug_mask;		/* enum irdma_debug_flag bits */
};
769 
/* Arguments for irdma_sc_ceq_init(). */
struct irdma_ceq_init_info {
	u64 ceqe_pa;
	struct irdma_sc_dev *dev;
	/* NOTE(review): typed u64 * here but struct irdma_ceqe * in
	 * struct irdma_sc_ceq — confirm the cast site before changing.
	 */
	u64 *ceqe_base;
	void *pbl_list;
	u32 elem_cnt;
	u32 ceq_id;
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_cq **reg_cq;
	u32 reg_cq_idx;
};
787 
/* Arguments for irdma_sc_aeq_init(). */
struct irdma_aeq_init_info {
	u64 aeq_elem_pa;
	struct irdma_sc_dev *dev;
	/* NOTE(review): typed u32 * here but struct irdma_sc_aeqe * in
	 * struct irdma_sc_aeq — confirm the cast site before changing.
	 */
	u32 *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	bool virtual_map;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
};
799 
/* Arguments for irdma_sc_ccq_init() (the control CQ used for CQP
 * command completions).
 */
struct irdma_ccq_init_info {
	u64 cq_pa;
	u64 shadow_area_pa;
	struct irdma_sc_dev *dev;
	struct irdma_cqe *cq_base;
	__le64 *shadow_area;
	void *pbl_list;
	u32 num_elem;
	u32 ceq_id;
	u32 shadow_read_threshold;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool avoid_mem_cflct:1;
	bool virtual_map:1;
	bool tph_en:1;
	u8 tph_val;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
};
820 
/* UDP/RoCEv2 transport state pushed into the QP host context
 * (selected via the irdma_qp_host_ctx_info union).
 */
struct irdma_udp_offload_info {
	bool ipv4:1;
	bool insert_vlan_tag:1;
	u8 ttl;
	u8 tos;
	u16 src_port;
	u16 dst_port;
	u32 dest_ip_addr[4];	/* IPv4 uses one word; layout per HW context */
	u32 snd_mss;
	u16 vlan_tag;
	u16 arp_idx;
	u32 flow_label;
	u8 udp_state;
	u32 psn_nxt;
	u32 lsn;
	u32 epsn;
	u32 psn_max;
	u32 psn_una;
	u32 local_ipaddr[4];
	u32 cwnd;
	u8 rexmit_thresh;
	u8 rnr_nak_thresh;
};
844 
/* RoCE-specific QP context fields (selected via the
 * irdma_qp_host_ctx_info union; counterpart of irdma_iwarp_offload_info).
 */
struct irdma_roce_offload_info {
	u16 p_key;
	u32 err_rq_idx;
	u32 qkey;
	u32 dest_qp;
	u8 roce_tver;
	u8 ack_credits;
	u8 err_rq_idx_valid;
	u32 pd_id;
	u16 ord_size;
	u16 ird_size;
	bool is_qp1:1;
	bool udprivcq_en:1;
	bool dcqcn_en:1;
	bool rcv_no_icrc:1;
	bool wr_rdresp_en:1;
	bool bind_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool dctcp_en:1;
	bool fw_cc_enable:1;
	bool use_stats_inst:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETH_ALEN];
	u8 rtomin;
};
875 
/* iWARP-specific QP context fields (selected via the
 * irdma_qp_host_ctx_info union; counterpart of irdma_roce_offload_info).
 */
struct irdma_iwarp_offload_info {
	u16 rcv_mark_offset;
	u16 snd_mark_offset;
	u8 ddp_ver;
	u8 rdmap_ver;
	u8 iwarp_mode;
	u32 err_rq_idx;
	u32 pd_id;
	u16 ord_size;
	u16 ird_size;
	bool ib_rd_en:1;
	bool align_hdrs:1;
	bool rcv_no_mpa_crc:1;
	bool err_rq_idx_valid:1;
	bool snd_mark_en:1;
	bool rcv_mark_en:1;
	bool wr_rdresp_en:1;
	bool bind_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool use_stats_inst:1;
	bool ecn_en:1;
	bool dctcp_en:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETH_ALEN];
	u8 rtomin;
};
907 
908 struct irdma_tcp_offload_info {
909 	bool ipv4:1;
910 	bool no_nagle:1;
911 	bool insert_vlan_tag:1;
912 	bool time_stamp:1;
913 	bool drop_ooo_seg:1;
914 	bool avoid_stretch_ack:1;
915 	bool wscale:1;
916 	bool ignore_tcp_opt:1;
917 	bool ignore_tcp_uns_opt:1;
918 	u8 cwnd_inc_limit;
919 	u8 dup_ack_thresh;
920 	u8 ttl;
921 	u8 src_mac_addr_idx;
922 	u8 tos;
923 	u16 src_port;
924 	u16 dst_port;
925 	u32 dest_ip_addr[4];
926 	//u32 dest_ip_addr0;
927 	//u32 dest_ip_addr1;
928 	//u32 dest_ip_addr2;
929 	//u32 dest_ip_addr3;
930 	u32 snd_mss;
931 	u16 syn_rst_handling;
932 	u16 vlan_tag;
933 	u16 arp_idx;
934 	u32 flow_label;
935 	u8 tcp_state;
936 	u8 snd_wscale;
937 	u8 rcv_wscale;
938 	u32 time_stamp_recent;
939 	u32 time_stamp_age;
940 	u32 snd_nxt;
941 	u32 snd_wnd;
942 	u32 rcv_nxt;
943 	u32 rcv_wnd;
944 	u32 snd_max;
945 	u32 snd_una;
946 	u32 srtt;
947 	u32 rtt_var;
948 	u32 ss_thresh;
949 	u32 cwnd;
950 	u32 snd_wl1;
951 	u32 snd_wl2;
952 	u32 max_snd_window;
953 	u8 rexmit_thresh;
954 	u32 local_ipaddr[4];
955 };
956 
/* Aggregated QP host context for irdma_sc_qp_setctx()/_setctx_roce():
 * transport info is tcp_info for iWARP or udp_info for RoCEv2, and
 * protocol info is iwarp_info or roce_info, selected by the valid flags.
 */
struct irdma_qp_host_ctx_info {
	u64 qp_compl_ctx;
	union {
		struct irdma_tcp_offload_info *tcp_info;
		struct irdma_udp_offload_info *udp_info;
	};
	union {
		struct irdma_iwarp_offload_info *iwarp_info;
		struct irdma_roce_offload_info *roce_info;
	};
	u32 send_cq_num;
	u32 rcv_cq_num;
	u32 rem_endpoint_idx;
	u16 stats_idx;
	bool srq_valid:1;
	bool tcp_info_valid:1;
	bool iwarp_info_valid:1;
	bool stats_idx_valid:1;
	u8 user_pri;
};
977 
/* Decoded asynchronous event queue entry, produced by
 * irdma_sc_get_next_aeqe(). The qp/cq/sq/rq flags identify which object
 * and queue the event refers to.
 */
struct irdma_aeqe_info {
	u64 compl_ctx;
	u32 qp_cq_id;
	u32 wqe_idx;
	u16 ae_id;
	u8 tcp_state;
	u8 iwarp_state;
	bool qp:1;
	bool cq:1;
	bool sq:1;
	bool rq:1;
	bool in_rdrsp_wr:1;
	bool out_rdrsp:1;
	bool aeqe_overflow:1;
	/* This flag is used to determine if we should pass the rq tail
	 * in the QP context for FW/HW. It is set when ae_src is rq for GEN1/GEN2
	 * And additionally set for inbound atomic, read and write for GEN3
	 */
	bool err_rq_idx_valid:1;
	u8 q2_data_written;
	u8 ae_src;
};
1000 
/* Arguments for the CQP allocate-STag command (MR allocation). */
struct irdma_allocate_stag_info {
	u64 total_len;
	u64 first_pm_pbl_idx;
	u32 chunk_size;
	u32 stag_idx;
	u32 page_size;
	u32 pd_id;
	u16 access_rights;
	bool remote_access:1;
	bool use_hmc_fcn_index:1;
	bool use_pf_rid:1;
	bool all_memory:1;	/* STag covers all memory (no PBL) */
	u16 hmc_fcn_index;
};

/* Arguments for the CQP memory-window allocation command. */
struct irdma_mw_alloc_info {
	u32 mw_stag_index;
	u32 page_size;
	u32 pd_id;
	bool remote_access:1;
	bool mw_wide:1;		/* type-1 wide MW vs type-2 */
	bool mw1_bind_dont_vldt_key:1;
};
1024 
/* Arguments for registering a non-shared memory region (CQP MR
 * registration command).
 */
struct irdma_reg_ns_stag_info {
	u64 reg_addr_pa;
	u64 va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool use_hmc_fcn_index:1;
	u16 hmc_fcn_index;
	bool use_pf_rid:1;
	bool all_memory:1;
};
1042 
/* Arguments for a fast-register MR work request posted on a QP SQ. */
struct irdma_fast_reg_stag_info {
	u64 wr_id;		/* caller's work-request id for completion */
	u64 reg_addr_pa;
	u64 fbo;		/* first byte offset into the first page */
	void *va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool local_fence:1;
	bool read_fence:1;
	bool signaled:1;
	bool push_wqe:1;
	bool use_hmc_fcn_index:1;
	u16 hmc_fcn_index;
	bool use_pf_rid:1;
	bool defer_flag:1;
};
1066 
/* Arguments for the CQP deallocate-STag command. */
struct irdma_dealloc_stag_info {
	u32 stag_idx;
	u32 pd_id;
	bool mr:1;		/* STag belongs to an MR (vs MW) */
	bool dealloc_pbl:1;	/* also free the backing PBL */
};

/* Arguments for registering a shared MR that reuses a parent STag's PBL. */
struct irdma_register_shared_stag {
	u64 va;
	enum irdma_addressing_type addr_type;
	irdma_stag_index new_stag_idx;
	irdma_stag_index parent_stag_idx;
	u32 access_rights;
	u32 pd_id;
	u32 page_size;
	irdma_stag_key new_stag_key;
};
1084 
/* Arguments for irdma_sc_qp_init(): queue memory addresses plus TPH
 * settings; embeds the user-mode init info.
 */
struct irdma_qp_init_info {
	struct irdma_qp_uk_init_info qp_uk_init_info;
	struct irdma_sc_pd *pd;
	struct irdma_sc_vsi *vsi;
	__le64 *host_ctx;
	u8 *q2;			/* Q2 exception buffer (virtual) */
	u64 sq_pa;
	u64 rq_pa;
	u64 host_ctx_pa;
	u64 q2_pa;
	u64 shadow_area_pa;
	u8 sq_tph_val;
	u8 rq_tph_val;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
};
1104 
/* Arguments for irdma_sc_cq_init(); embeds the user-mode CQ init info. */
struct irdma_cq_init_info {
	struct irdma_sc_dev *dev;
	u64 cq_base_pa;
	u64 shadow_area_pa;
	u32 ceq_id;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	bool virtual_map:1;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool tph_en:1;
	u8 tph_val;
	u8 type;		/* CQ type (stored in irdma_sc_cq.cq_type) */
	struct irdma_cq_uk_init_info cq_uk_init_info;
	struct irdma_sc_vsi *vsi;
};
1122 
/* Arguments for the CQP upload-QP-context command. */
struct irdma_upload_context_info {
	u64 buf_pa;		/* DMA buffer receiving the context image */
	u32 qp_id;
	u8 qp_type;
	bool freeze_qp:1;
	bool raw_format:1;
};

/* Arguments for managing a local MAC table entry.
 * NOTE(review): mac_addr is sized with a literal 6 while sibling structs
 * use ETH_ALEN — same value, but consider unifying.
 */
struct irdma_local_mac_entry_info {
	u8 mac_addr[6];
	u16 entry_idx;
};

/* Arguments for adding an ARP cache entry via CQP. */
struct irdma_add_arp_cache_entry_info {
	u8 mac_addr[ETH_ALEN];
	u32 reach_max;
	u16 arp_index;
	bool permanent;
};

/* Arguments for adding/removing an accelerated port (APBVT) entry. */
struct irdma_apbvt_info {
	u16 port;
	bool add;	/* true = add entry, false = delete */
};
1147 
/* Arguments for the CQP manage-quad-hash command: identifies a
 * connection tuple (addresses/ports/VLAN) and the operation to perform.
 */
struct irdma_qhash_table_info {
	struct irdma_sc_vsi *vsi;
	enum irdma_quad_hash_manage_type manage;
	enum irdma_quad_entry_type entry_type;
	bool vlan_valid:1;
	bool ipv4_valid:1;
	u8 mac_addr[ETH_ALEN];
	u16 vlan_id;
	u8 user_pri;
	u32 qp_num;
	u32 dest_ip[4];
	u32 src_ip[4];
	u16 dest_port;
	u16 src_port;
};
1163 
/* Arguments for the CQP manage-push-page command. */
struct irdma_cqp_manage_push_page_info {
	u32 push_idx;
	u16 qs_handle;
	u8 free_page;		/* nonzero = release the push page */
	u8 push_page_type;
};

/* Arguments for irdma_sc_qp_flush_wqes(): per-queue flush codes and
 * optional async-event generation.
 */
struct irdma_qp_flush_info {
	u16 sq_minor_code;
	u16 sq_major_code;
	u16 rq_minor_code;
	u16 rq_major_code;
	u16 ae_code;
	u8 ae_src;
	bool sq:1;		/* flush the SQ */
	bool rq:1;		/* flush the RQ */
	bool userflushcode:1;	/* use caller-supplied codes above */
	bool generate_ae:1;
};

/* Arguments for the CQP generate-AE command. */
struct irdma_gen_ae_info {
	u16 ae_code;
	u8 ae_src;
};

/* Progress snapshot for CQP timeout detection
 * (see irdma_check_cqp_progress()).
 */
struct irdma_cqp_timeout {
	u64 compl_cqp_cmds;	/* completed-command count at last check */
	u32 count;		/* consecutive checks with no progress */
};
1193 
/* HW-generation-specific interrupt configuration callbacks
 * (installed in irdma_sc_dev.irq_ops).
 */
struct irdma_irq_ops {
	void (*irdma_cfg_aeq)(struct irdma_sc_dev *dev, u32 idx, bool enable);
	void (*irdma_cfg_ceq)(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
			      bool enable);
	void (*irdma_dis_irq)(struct irdma_sc_dev *dev, u32 idx);
	void (*irdma_en_irq)(struct irdma_sc_dev *dev, u32 idx);
};
1201 
/* CCQ (CQP completion queue) APIs */
void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq);
int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
			bool check_overflow, bool post_sq);
int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq);
int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
			      struct irdma_ccq_cqe_info *info);
int irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
		      struct irdma_ccq_init_info *info);

/* CEQ (completion event queue) APIs */
int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);

int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq);
int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
		      struct irdma_ceq_init_info *info);
void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq);

/* AEQ (asynchronous event queue) APIs */
int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
		      struct irdma_aeq_init_info *info);
int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
			   struct irdma_aeqe_info *info);
void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);

/* PD, CQP and QP control APIs */
void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
		      int abi_ver);
void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
			      struct irdma_sc_dev *dev);
int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp, bool free_hwcqp);
int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
		      struct irdma_cqp_init_info *info);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
				  struct irdma_ccq_cqe_info *cmpl_info);
int irdma_sc_qp_create(struct irdma_sc_qp *qp,
		       struct irdma_create_qp_info *info, u64 scratch,
		       bool post_sq);
int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
			bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq);
int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
			   struct irdma_qp_flush_info *info, u64 scratch,
			   bool post_sq);
int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info);
int irdma_sc_qp_modify(struct irdma_sc_qp *qp,
		       struct irdma_modify_qp_info *info, u64 scratch,
		       bool post_sq);
void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
			irdma_stag stag);
void irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size);
void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			struct irdma_qp_host_ctx_info *info);
void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			     struct irdma_qp_host_ctx_info *info);
int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq);
int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info);
void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch, bool post_sq);
int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
					u16 hmc_fn_id, bool post_sq,
					bool poll_registers);

void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
/* Per-command input arguments for a deferred CQP request.  Exactly one
 * union member is valid for a given command; the cqp_cmd field of the
 * enclosing struct cqp_cmds_info selects which.  Most members carry the
 * target object, a command-specific info struct, and the opaque scratch
 * value returned in the CQP completion.
 */
struct cqp_info {
	union {
		struct {
			struct irdma_sc_qp *qp;
			struct irdma_create_qp_info info;
			u64 scratch;
		} qp_create;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_modify_qp_info info;
			u64 scratch;
		} qp_modify;

		struct {
			struct irdma_sc_qp *qp;
			u64 scratch;
			bool remove_hash_idx;
			bool ignore_mw_bnd;
		} qp_destroy;

		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
			bool check_overflow;
		} cq_create;

		struct {
			struct irdma_sc_cq *cq;
			struct irdma_modify_cq_info info;
			u64 scratch;
		} cq_modify;

		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
		} cq_destroy;

		/* memory-region / memory-window commands */
		struct {
			struct irdma_sc_dev *dev;
			struct irdma_allocate_stag_info info;
			u64 scratch;
		} alloc_stag;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_mw_alloc_info info;
			u64 scratch;
		} mw_alloc;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_reg_ns_stag_info info;
			u64 scratch;
		} mr_reg_non_shared;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_dealloc_stag_info info;
			u64 scratch;
		} dealloc_stag;

		/* ARP cache and local MAC table maintenance */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_add_arp_cache_entry_info info;
			u64 scratch;
		} add_arp_cache_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u16 arp_index;
		} del_arp_cache_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_local_mac_entry_info info;
			u64 scratch;
		} add_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u8 entry_idx;
			u8 ignore_ref_count;
		} del_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
		} alloc_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_cqp_manage_push_page_info info;
			u64 scratch;
		} manage_push_page;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_upload_context_info info;
			u64 scratch;
		} qp_upload_context;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_hmc_fcn_info info;
			u64 scratch;
		} manage_hmc_pm;

		/* event-queue create/destroy */
		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_create;

		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_destroy;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_create;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_destroy;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_qp_flush_info info;
			u64 scratch;
		} qp_flush_wqes;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_gen_ae_info info;
			u64 scratch;
		} gen_ae;

		/* HMC function-private memory (FPM) query/commit */
		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u16 hmc_fn_id;
			u64 scratch;
		} query_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u16 hmc_fn_id;
			u64 scratch;
		} commit_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_apbvt_info info;
			u64 scratch;
		} manage_apbvt_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_qhash_table_info info;
			u64 scratch;
		} manage_qhash_table_entry;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_update_sds_info info;
			u64 scratch;
		} update_pe_sds;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_sc_qp *qp;
			u64 scratch;
		} suspend_resume;

		/* address handles and multicast groups */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_destroy;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_destroy;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_modify;

		/* statistics, work-scheduler and misc commands */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_stats_inst_info info;
			u64 scratch;
		} stats_manage;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_stats_gather_info info;
			u64 scratch;
		} stats_gather;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ws_node_info info;
			u64 scratch;
		} ws_node;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_up_info info;
			u64 scratch;
		} up_map;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_dma_mem query_buff_mem;
			u64 scratch;
		} query_rdma;
	} u;
};
1510 
/* A queued CQP request: list linkage, the command opcode selecting which
 * member of in.u is valid, and its input arguments.
 */
struct cqp_cmds_info {
	struct list_head cqp_cmd_entry; /* linkage on the pending-command list */
	u8 cqp_cmd;                     /* command opcode; selects in.u member */
	u8 post_sq;                     /* nonzero: ring the CQP SQ doorbell after posting */
	struct cqp_info in;             /* command-specific input arguments */
};
1517 
/* Get the next CQP SQ WQE and report its index via wqe_idx; NULL if the
 * ring is full (inferred from the wrapper below returning it unchecked —
 * confirm in the definition).
 */
__le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
					   u32 *wqe_idx);
1520 
1521 /**
1522  * irdma_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
1523  * @cqp: struct for cqp hw
1524  * @scratch: private data for CQP WQE
1525  */
1526 static inline __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch)
1527 {
1528 	u32 wqe_idx;
1529 
1530 	return irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
1531 }
1532 #endif /* IRDMA_TYPE_H */
1533