/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef __ECORE_HSI_COMMON__
#define __ECORE_HSI_COMMON__
/********************************/
/* Add include to common target */
/********************************/
#include "common_hsi.h"


/*
 * opcodes for the event ring
 */
enum common_event_opcode
{
	COMMON_EVENT_PF_START,
	COMMON_EVENT_PF_STOP,
	COMMON_EVENT_VF_START,
	COMMON_EVENT_VF_STOP,
	COMMON_EVENT_VF_PF_CHANNEL,
	COMMON_EVENT_VF_FLR,
	COMMON_EVENT_PF_UPDATE,
	COMMON_EVENT_MALICIOUS_VF,
	COMMON_EVENT_RL_UPDATE,
	COMMON_EVENT_EMPTY,
	MAX_COMMON_EVENT_OPCODE
};


/*
 * Common Ramrod Command IDs
 */
enum common_ramrod_cmd_id
{
	COMMON_RAMROD_UNUSED,
	COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
	COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
	COMMON_RAMROD_VF_START /* VF Function Start */,
	COMMON_RAMROD_VF_STOP /* VF Function Stop Ramrod */,
	COMMON_RAMROD_PF_UPDATE /* PF update Ramrod */,
	COMMON_RAMROD_RL_UPDATE /* QCN/DCQCN RL update Ramrod */,
	COMMON_RAMROD_EMPTY /* Empty Ramrod */,
	MAX_COMMON_RAMROD_CMD_ID
};


/*
 * How ll2 should deal with packet upon errors
 */
enum core_error_handle
{
	LL2_DROP_PACKET /* If error occurs drop packet */,
	LL2_DO_NOTHING /* If error occurs do nothing */,
	LL2_ASSERT /* If error occurs assert */,
	MAX_CORE_ERROR_HANDLE
};


/*
 * opcodes for the event ring
 */
enum core_event_opcode
{
	CORE_EVENT_TX_QUEUE_START,
	CORE_EVENT_TX_QUEUE_STOP,
	CORE_EVENT_RX_QUEUE_START,
	CORE_EVENT_RX_QUEUE_STOP,
	CORE_EVENT_RX_QUEUE_FLUSH,
	CORE_EVENT_TX_QUEUE_UPDATE,
	MAX_CORE_EVENT_OPCODE
};


/*
 * The L4 pseudo checksum mode for Core
 */
enum core_l4_pseudo_checksum_mode
{
	CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH /* Pseudo Checksum on packet is calculated with the correct packet length. */,
	CORE_L4_PSEUDO_CSUM_ZERO_LENGTH /* Pseudo Checksum on packet is calculated with zero length. */,
	MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
};


/*
 * Light-L2 per-port (GSI) statistics in Tstorm RAM
 */
struct core_ll2_port_stats
{
	struct regpair gsi_invalid_hdr;
	struct regpair gsi_invalid_pkt_length;
	struct regpair gsi_unsupported_pkt_typ;
	struct regpair gsi_crcchksm_error;
};


/*
 * Ethernet TX Per Queue Stats
 */
struct core_ll2_pstorm_per_queue_stat
{
	struct regpair sent_ucast_bytes /* number of total bytes sent without errors */;
	struct regpair sent_mcast_bytes /* number of total bytes sent without errors */;
	struct regpair sent_bcast_bytes /* number of total bytes sent without errors */;
	struct regpair sent_ucast_pkts /* number of total packets sent without errors */;
	struct regpair sent_mcast_pkts /* number of total packets sent without errors */;
	struct regpair sent_bcast_pkts /* number of total packets sent without errors */;
};


/*
 * Light-L2 RX Producers in Tstorm RAM
 */
struct core_ll2_rx_prod
{
	__le16 bd_prod /* BD Producer */;
	__le16 cqe_prod /* CQE Producer */;
	__le32 reserved;
};


struct core_ll2_tstorm_per_queue_stat
{
	struct regpair packet_too_big_discard /* Number of packets discarded because they are bigger than MTU */;
	struct regpair no_buff_discard /* Number of packets discarded due to lack of host buffers */;
};


struct core_ll2_ustorm_per_queue_stat
{
	struct regpair rcv_ucast_bytes;
	struct regpair rcv_mcast_bytes;
	struct regpair rcv_bcast_bytes;
	struct regpair rcv_ucast_pkts;
	struct regpair rcv_mcast_pkts;
	struct regpair rcv_bcast_pkts;
};


/*
 * Core Ramrod Command IDs (light L2)
 */
enum core_ramrod_cmd_id
{
	CORE_RAMROD_UNUSED,
	CORE_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
	CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
	CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
	CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
	CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
	CORE_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */,
	MAX_CORE_RAMROD_CMD_ID
};


/*
 * Core RoCE flavor type for Light L2
 */
enum core_roce_flavor_type
{
	CORE_ROCE,
	CORE_RROCE,
	MAX_CORE_ROCE_FLAVOR_TYPE
};


/*
 * Specifies how ll2 should deal with packet errors: packet_too_big and no_buff
 */
struct core_rx_action_on_error
{
	u8 error_type;
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK  0x3 /* ll2 how to handle error packet_too_big (use enum core_error_handle) */
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK         0x3 /* ll2 how to handle error with no_buff  (use enum core_error_handle) */
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT        2
#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK        0xF
#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT       4
};
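
/*
 * Illustrative sketch (not part of the HSI): the error_type bitfield above is
 * normally composed with the SET_FIELD() helper from common_hsi.h, using
 * enum core_error_handle values, e.g.:
 *
 *	struct core_rx_action_on_error action = { 0 };
 *
 *	SET_FIELD(action.error_type,
 *		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, LL2_DROP_PACKET);
 *	SET_FIELD(action.error_type,
 *		  CORE_RX_ACTION_ON_ERROR_NO_BUFF, LL2_DO_NOTHING);
 *
 * This is equivalent to shifting each value into place with the *_SHIFT
 * defines and masking it with the corresponding *_MASK.
 */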


/*
 * Core RX BD for Light L2
 */
struct core_rx_bd
{
	struct regpair addr;
	__le16 reserved[4];
};


/*
 * Core RX CM offload BD for Light L2
 */
struct core_rx_bd_with_buff_len
{
	struct regpair addr;
	__le16 buff_length;
	__le16 reserved[3];
};

/*
 * Core RX BD union for Light L2
 */
union core_rx_bd_union
{
	struct core_rx_bd rx_bd /* Core Rx Bd static buffer size */;
	struct core_rx_bd_with_buff_len rx_bd_with_len /* Core Rx Bd with dynamic buffer length */;
};



/*
 * Opaque Data for Light L2 RX CQE.
 */
struct core_rx_cqe_opaque_data
{
	__le32 data[2] /* Opaque CQE Data */;
};


/*
 * Core RX CQE Type for Light L2
 */
enum core_rx_cqe_type
{
	CORE_RX_CQE_ILLIGAL_TYPE /* Bad RX Cqe type */,
	CORE_RX_CQE_TYPE_REGULAR /* Regular Core RX CQE */,
	CORE_RX_CQE_TYPE_GSI_OFFLOAD /* Fp Gsi offload RX CQE */,
	CORE_RX_CQE_TYPE_SLOW_PATH /* Slow path Core RX CQE */,
	MAX_CORE_RX_CQE_TYPE
};


/*
 * Core RX CQE for Light L2.
 */
struct core_rx_fast_path_cqe
{
	u8 type /* CQE type (use enum core_rx_cqe_type) */;
	u8 placement_offset /* Offset (in bytes) of the packet from start of the buffer */;
	struct parsing_and_err_flags parse_flags /* Parsing and error flags from the parser */;
	__le16 packet_length /* Total packet length (from the parser) */;
	__le16 vlan /* 802.1q VLAN tag */;
	struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
	struct parsing_err_flags err_flags /* bitmap: each bit represents a specific error; error indications are provided by the cracker. See spec for detailed description */;
	__le16 reserved0;
	__le32 reserved1[3];
};

/*
 * Core Rx CM offload CQE.
 */
struct core_rx_gsi_offload_cqe
{
	u8 type /* CQE type (use enum core_rx_cqe_type) */;
	u8 data_length_error /* set if gsi data is bigger than buff */;
	struct parsing_and_err_flags parse_flags /* Parsing and error flags from the parser */;
	__le16 data_length /* Total packet length (from the parser) */;
	__le16 vlan /* 802.1q VLAN tag */;
	__le32 src_mac_addrhi /* hi 4 bytes source mac address */;
	__le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
	__le16 qp_id /* These are the lower 16 bit of QP id in RoCE BTH header */;
	__le32 src_qp /* Source QP from DETH header */;
	__le32 reserved[3];
};

/*
 * Core RX CQE for Light L2.
 */
struct core_rx_slow_path_cqe
{
	u8 type /* CQE type (use enum core_rx_cqe_type) */;
	u8 ramrod_cmd_id;
	__le16 echo;
	struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
	__le32 reserved1[5];
};

/*
 * Core RX CQE union for Light L2
 */
union core_rx_cqe_union
{
	struct core_rx_fast_path_cqe rx_cqe_fp /* Fast path CQE */;
	struct core_rx_gsi_offload_cqe rx_cqe_gsi /* GSI offload CQE */;
	struct core_rx_slow_path_cqe rx_cqe_sp /* Slow path CQE */;
};
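
/*
 * Illustrative sketch (not part of the HSI): an LL2 Rx completion handler
 * would typically dispatch on the leading type byte, which occupies the same
 * position in every member of the union above:
 *
 *	union core_rx_cqe_union *cqe = ...;
 *
 *	switch (cqe->rx_cqe_fp.type) {
 *	case CORE_RX_CQE_TYPE_REGULAR:
 *		// fast path: use packet_length, placement_offset, vlan
 *		break;
 *	case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
 *		// RoCE GSI offload completion
 *		break;
 *	case CORE_RX_CQE_TYPE_SLOW_PATH:
 *		// ramrod completion: use ramrod_cmd_id / echo
 *		break;
 *	}
 */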





/*
 * Ramrod data for rx queue start ramrod
 */
struct core_rx_start_ramrod_data
{
	struct regpair bd_base /* bd address of the first bd page */;
	struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
	__le16 mtu /* Maximum transmission unit */;
	__le16 sb_id /* Status block ID */;
	u8 sb_index /* Status block protocol index */;
	u8 complete_cqe_flg /* post completion to the CQE ring if set */;
	u8 complete_event_flg /* post completion to the event ring if set */;
	u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
	__le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
	u8 inner_vlan_stripping_en /* if set, 802.1q tags will be removed and copied to CQE */;
	u8 report_outer_vlan /* if set and inner vlan does not exist, the outer vlan will be copied to CQE as inner vlan. Should be used in MF_OVLAN mode only. */;
	u8 queue_id /* Light L2 RX Queue ID */;
	u8 main_func_queue /* Is this the main queue for the PF */;
	u8 mf_si_bcast_accept_all /* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if main_func_queue is set. */;
	u8 mf_si_mcast_accept_all /* Duplicate multicast packets to LL2 main queue in mf_si mode. Valid if main_func_queue is set. */;
	struct core_rx_action_on_error action_on_error /* Specifies how ll2 should deal with packet errors: packet_too_big and no_buff */;
	u8 gsi_offload_flag /* set when in GSI offload mode on ROCE connection */;
	u8 reserved[6];
};
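
/*
 * Illustrative sketch (not part of the HSI): the Rx-start ramrod data is
 * filled from host-order values and converted to little endian before the
 * ramrod is posted.  OSAL_CPU_TO_LE16/32 and U64_HI/U64_LO are assumed from
 * the ecore OSAL/common headers; the local variables are hypothetical.
 *
 *	struct core_rx_start_ramrod_data *p_ramrod = ...;
 *
 *	p_ramrod->bd_base.hi = OSAL_CPU_TO_LE32(U64_HI(bd_chain_phys));
 *	p_ramrod->bd_base.lo = OSAL_CPU_TO_LE32(U64_LO(bd_chain_phys));
 *	p_ramrod->cqe_pbl_addr.hi = OSAL_CPU_TO_LE32(U64_HI(cqe_pbl_phys));
 *	p_ramrod->cqe_pbl_addr.lo = OSAL_CPU_TO_LE32(U64_LO(cqe_pbl_phys));
 *	p_ramrod->mtu = OSAL_CPU_TO_LE16(mtu);
 *	p_ramrod->sb_id = OSAL_CPU_TO_LE16(sb_id);
 *	p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(num_pbl_pages);
 *	p_ramrod->drop_ttl0_flg = drop_ttl0 ? 1 : 0;
 *	p_ramrod->queue_id = queue_id;
 *	p_ramrod->main_func_queue = main_func ? 1 : 0;
 */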


/*
 * Ramrod data for rx queue stop ramrod
 */
struct core_rx_stop_ramrod_data
{
	u8 complete_cqe_flg /* post completion to the CQE ring if set */;
	u8 complete_event_flg /* post completion to the event ring if set */;
	u8 queue_id /* Light L2 RX Queue ID */;
	u8 reserved1;
	__le16 reserved2[2];
};


/*
 * Flags for Core TX BD
 */
struct core_tx_bd_data
{
	__le16 as_bitfield;
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK         0x1 /* Do not allow additional VLAN manipulations on this packet (DCB) */
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT        0
#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK          0x1 /* Insert VLAN into packet. Cannot be set for LB packets (tx_dst == CORE_TX_DEST_LB) */
#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT         1
#define CORE_TX_BD_DATA_START_BD_MASK                0x1 /* This is the first BD of the packet (for debug) */
#define CORE_TX_BD_DATA_START_BD_SHIFT               2
#define CORE_TX_BD_DATA_IP_CSUM_MASK                 0x1 /* Calculate the IP checksum for the packet */
#define CORE_TX_BD_DATA_IP_CSUM_SHIFT                3
#define CORE_TX_BD_DATA_L4_CSUM_MASK                 0x1 /* Calculate the L4 checksum for the packet */
#define CORE_TX_BD_DATA_L4_CSUM_SHIFT                4
#define CORE_TX_BD_DATA_IPV6_EXT_MASK                0x1 /* Packet is IPv6 with extensions */
#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT               5
#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK             0x1 /* If IPv6+ext, and if l4_csum is 1, then this field indicates L4 protocol: 0-TCP, 1-UDP */
#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT            6
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK     0x1 /* The pseudo checksum mode to place in the L4 checksum field. Required only when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode) */
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT    7
#define CORE_TX_BD_DATA_NBDS_MASK                    0xF /* Number of BDs that make up one packet - width wide enough to present CORE_LL2_TX_MAX_BDS_PER_PACKET */
#define CORE_TX_BD_DATA_NBDS_SHIFT                   8
#define CORE_TX_BD_DATA_ROCE_FLAV_MASK               0x1 /* Use roce_flavor enum - differentiating between RoCE flavors is valid when connType is ROCE (use enum core_roce_flavor_type) */
#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT              12
#define CORE_TX_BD_DATA_IP_LEN_MASK                  0x1 /* Calculate ip length */
#define CORE_TX_BD_DATA_IP_LEN_SHIFT                 13
#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK  0x1 /* disables the STAG insertion, relevant only in MF OVLAN mode. */
#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14
#define CORE_TX_BD_DATA_RESERVED0_MASK               0x1
#define CORE_TX_BD_DATA_RESERVED0_SHIFT              15
};

/*
 * Core TX BD for Light L2
 */
struct core_tx_bd
{
	struct regpair addr /* Buffer Address */;
	__le16 nbytes /* Number of Bytes in Buffer */;
	__le16 nw_vlan_or_lb_echo /* Network packets: VLAN to insert into packet (if insertion flag set). LoopBack packets: echo data to pass to Rx */;
	struct core_tx_bd_data bd_data /* BD flags */;
	__le16 bitfield1;
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK  0x3FFF /* L4 Header Offset from start of packet (in Words). This is needed if both l4_csum and ipv6_ext are set */
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
#define CORE_TX_BD_TX_DST_MASK           0x3 /* Packet destination - Network, Loopback or Drop (use enum core_tx_dest) */
#define CORE_TX_BD_TX_DST_SHIFT          14
};
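
/*
 * Illustrative sketch (not part of the HSI): filling a single-BD network
 * packet descriptor.  SET_FIELD() comes from common_hsi.h; OSAL_CPU_TO_LE16/32
 * and U64_HI/U64_LO are assumed from the ecore OSAL/common headers.
 *
 *	struct core_tx_bd *bd = ...;
 *	u16 bd_data = 0, bitfield1 = 0;
 *
 *	bd->addr.hi = OSAL_CPU_TO_LE32(U64_HI(frag_phys));
 *	bd->addr.lo = OSAL_CPU_TO_LE32(U64_LO(frag_phys));
 *	bd->nbytes = OSAL_CPU_TO_LE16(frag_len);
 *	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 1);
 *	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, 1);
 *	bd->bd_data.as_bitfield = OSAL_CPU_TO_LE16(bd_data);
 *	SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, CORE_TX_DEST_NW);
 *	bd->bitfield1 = OSAL_CPU_TO_LE16(bitfield1);
 */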



/*
 * Light L2 TX Destination
 */
enum core_tx_dest
{
	CORE_TX_DEST_NW /* TX Destination to the Network */,
	CORE_TX_DEST_LB /* TX Destination to the Loopback */,
	CORE_TX_DEST_RESERVED,
	CORE_TX_DEST_DROP /* TX Drop */,
	MAX_CORE_TX_DEST
};


/*
 * Ramrod data for tx queue start ramrod
 */
struct core_tx_start_ramrod_data
{
	struct regpair pbl_base_addr /* Address of the pbl page */;
	__le16 mtu /* Maximum transmission unit */;
	__le16 sb_id /* Status block ID */;
	u8 sb_index /* Status block protocol index */;
	u8 stats_en /* Statistics Enable */;
	u8 stats_id /* Statistics Counter ID */;
	u8 conn_type /* connection type that loaded ll2 (use enum protocol_type) */;
	__le16 pbl_size /* Number of BD pages pointed by PBL */;
	__le16 qm_pq_id /* QM PQ ID */;
	u8 gsi_offload_flag /* set when in GSI offload mode on ROCE connection */;
	u8 resrved[3];
};


/*
 * Ramrod data for tx queue stop ramrod
 */
struct core_tx_stop_ramrod_data
{
	__le32 reserved0[2];
};


/*
 * Ramrod data for tx queue update ramrod
 */
struct core_tx_update_ramrod_data
{
	u8 update_qm_pq_id_flg /* Flag to Update QM PQ ID */;
	u8 reserved0;
	__le16 qm_pq_id /* Updated QM PQ ID */;
	__le32 reserved1[1];
};


/*
 * Enum flag for what type of DCB data to update
 */
enum dcb_dscp_update_mode
{
	DONT_UPDATE_DCB_DSCP /* use when no change should be done to DCB data */,
	UPDATE_DCB /* use to update only L2 (vlan) priority */,
	UPDATE_DSCP /* use to update only IP DSCP */,
	UPDATE_DCB_DSCP /* update vlan pri and DSCP */,
	MAX_DCB_DSCP_UPDATE_MODE
};


/*
 * The core storm context for the Ystorm
 */
struct ystorm_core_conn_st_ctx
{
	__le32 reserved[4];
};

/*
 * The core storm context for the Pstorm
 */
struct pstorm_core_conn_st_ctx
{
	__le32 reserved[4];
};

/*
 * Core Slowpath Connection storm context of Xstorm
 */
struct xstorm_core_conn_st_ctx
{
	__le32 spq_base_lo /* SPQ Ring Base Address low dword */;
	__le32 spq_base_hi /* SPQ Ring Base Address high dword */;
	struct regpair consolid_base_addr /* Consolidation Ring Base Address */;
	__le16 spq_cons /* SPQ Ring Consumer */;
	__le16 consolid_cons /* Consolidation Ring Consumer */;
	__le32 reserved0[55] /* Pad to 15 cycles */;
};

struct e4_xstorm_core_conn_ag_ctx
{
	u8 reserved0 /* cdu_validation */;
	u8 state /* state */;
	u8 flags0;
#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK         0x1 /* exist_in_qm0 */
#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT        0
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK            0x1 /* exist_in_qm1 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT           1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK            0x1 /* exist_in_qm2 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT           2
#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK         0x1 /* exist_in_qm3 */
#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT        3
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK            0x1 /* bit4 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT           4
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK            0x1 /* cf_array_active */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT           5
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK            0x1 /* bit6 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT           6
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK            0x1 /* bit7 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT           7
	u8 flags1;
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK            0x1 /* bit8 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT           0
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK            0x1 /* bit9 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT           1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK            0x1 /* bit10 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT           2
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK                0x1 /* bit11 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT               3
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK                0x1 /* bit12 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT               4
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK                0x1 /* bit13 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT               5
#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK       0x1 /* bit14 */
#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT      6
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK         0x1 /* bit15 */
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT        7
	u8 flags2;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK                  0x3 /* timer0cf */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT                 0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK                  0x3 /* timer1cf */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT                 2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK                  0x3 /* timer2cf */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT                 4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK                  0x3 /* timer_stop_all */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT                 6
	u8 flags3;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK                  0x3 /* cf4 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT                 0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK                  0x3 /* cf5 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT                 2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK                  0x3 /* cf6 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT                 4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK                  0x3 /* cf7 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT                 6
	u8 flags4;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK                  0x3 /* cf8 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT                 0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK                  0x3 /* cf9 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT                 2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK                 0x3 /* cf10 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT                4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK                 0x3 /* cf11 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT                6
	u8 flags5;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK                 0x3 /* cf12 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT                0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK                 0x3 /* cf13 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT                2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK                 0x3 /* cf14 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT                4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK                 0x3 /* cf15 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT                6
	u8 flags6;
#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK     0x3 /* cf16 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT    0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK                 0x3 /* cf_array_cf */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT                2
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK                0x3 /* cf18 */
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT               4
#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK         0x3 /* cf19 */
#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT        6
	u8 flags7;
#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK             0x3 /* cf20 */
#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT            0
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK           0x3 /* cf21 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT          2
#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK            0x3 /* cf22 */
#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT           4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK                0x1 /* cf0en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT               6
#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK                0x1 /* cf1en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT               7
	u8 flags8;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK                0x1 /* cf2en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT               0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK                0x1 /* cf3en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT               1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK                0x1 /* cf4en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT               2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK                0x1 /* cf5en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT               3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK                0x1 /* cf6en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT               4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK                0x1 /* cf7en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT               5
#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK                0x1 /* cf8en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT               6
#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK                0x1 /* cf9en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT               7
	u8 flags9;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK               0x1 /* cf10en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT              0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK               0x1 /* cf11en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT              1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK               0x1 /* cf12en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT              2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK               0x1 /* cf13en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT              3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK               0x1 /* cf14en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT              4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK               0x1 /* cf15en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT              5
#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK  0x1 /* cf16en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK               0x1 /* cf_array_cf_en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT              7
	u8 flags10;
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK             0x1 /* cf18en */
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT            0
#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK      0x1 /* cf19en */
#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT     1
#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK          0x1 /* cf20en */
#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT         2
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK           0x1 /* cf21en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT          3
#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK         0x1 /* cf22en */
#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT        4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK               0x1 /* cf23en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT              5
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK           0x1 /* rule0en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT          6
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK           0x1 /* rule1en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT          7
	u8 flags11;
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK           0x1 /* rule2en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT          0
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK           0x1 /* rule3en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT          1
#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK       0x1 /* rule4en */
#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT      2
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK              0x1 /* rule5en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT             3
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK              0x1 /* rule6en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT             4
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK              0x1 /* rule7en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT             5
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK         0x1 /* rule8en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT        6
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK              0x1 /* rule9en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT             7
	u8 flags12;
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK             0x1 /* rule10en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT            0
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK             0x1 /* rule11en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT            1
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK         0x1 /* rule12en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT        2
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK         0x1 /* rule13en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT        3
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK             0x1 /* rule14en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT            4
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK             0x1 /* rule15en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT            5
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK             0x1 /* rule16en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT            6
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK             0x1 /* rule17en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT            7
	u8 flags13;
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK             0x1 /* rule18en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT            0
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK             0x1 /* rule19en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT            1
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK         0x1 /* rule20en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT        2
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK         0x1 /* rule21en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT        3
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK         0x1 /* rule22en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT        4
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK         0x1 /* rule23en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT        5
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK         0x1 /* rule24en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT        6
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK         0x1 /* rule25en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT        7
	u8 flags14;
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK                0x1 /* bit16 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT               0
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK                0x1 /* bit17 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT               1
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK                0x1 /* bit18 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT               2
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK                0x1 /* bit19 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT               3
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK                0x1 /* bit20 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT               4
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK                0x1 /* bit21 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT               5
#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK                 0x3 /* cf23 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT                6
	u8 byte2 /* byte2 */;
	__le16 physical_q0 /* physical_q0 */;
	__le16 consolid_prod /* physical_q1 */;
	__le16 reserved16 /* physical_q2 */;
	__le16 tx_bd_cons /* word3 */;
	__le16 tx_bd_or_spq_prod /* word4 */;
	__le16 word5 /* word5 */;
	__le16 conn_dpi /* conn_dpi */;
	u8 byte3 /* byte3 */;
	u8 byte4 /* byte4 */;
	u8 byte5 /* byte5 */;
	u8 byte6 /* byte6 */;
	__le32 reg0 /* reg0 */;
	__le32 reg1 /* reg1 */;
	__le32 reg2 /* reg2 */;
	__le32 reg3 /* reg3 */;
	__le32 reg4 /* reg4 */;
	__le32 reg5 /* cf_array0 */;
	__le32 reg6 /* cf_array1 */;
	__le16 word7 /* word7 */;
	__le16 word8 /* word8 */;
	__le16 word9 /* word9 */;
	__le16 word10 /* word10 */;
	__le32 reg7 /* reg7 */;
	__le32 reg8 /* reg8 */;
	__le32 reg9 /* reg9 */;
	u8 byte7 /* byte7 */;
	u8 byte8 /* byte8 */;
	u8 byte9 /* byte9 */;
	u8 byte10 /* byte10 */;
	u8 byte11 /* byte11 */;
	u8 byte12 /* byte12 */;
	u8 byte13 /* byte13 */;
	u8 byte14 /* byte14 */;
	u8 byte15 /* byte15 */;
	u8 e5_reserved /* e5_reserved */;
	__le16 word11 /* word11 */;
	__le32 reg10 /* reg10 */;
	__le32 reg11 /* reg11 */;
	__le32 reg12 /* reg12 */;
	__le32 reg13 /* reg13 */;
	__le32 reg14 /* reg14 */;
	__le32 reg15 /* reg15 */;
	__le32 reg16 /* reg16 */;
	__le32 reg17 /* reg17 */;
	__le32 reg18 /* reg18 */;
	__le32 reg19 /* reg19 */;
	__le16 word12 /* word12 */;
	__le16 word13 /* word13 */;
	__le16 word14 /* word14 */;
	__le16 word15 /* word15 */;
};

struct e4_tstorm_core_conn_ag_ctx
{
	u8 byte0 /* cdu_validation */;
	u8 byte1 /* state */;
	u8 flags0;
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK     0x1 /* bit2 */
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT    2
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK     0x1 /* bit3 */
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT    3
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK     0x1 /* bit4 */
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT    4
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK     0x1 /* bit5 */
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT    5
#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* timer0cf */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     6
	u8 flags1;
#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* timer1cf */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     0
#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* timer2cf */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     2
#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3 /* timer_stop_all */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT     4
#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3 /* cf4 */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT     6
	u8 flags2;
#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3 /* cf5 */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT     0
#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3 /* cf6 */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT     2
#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK      0x3 /* cf7 */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT     4
#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK      0x3 /* cf8 */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT     6
	u8 flags3;
#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK      0x3 /* cf9 */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT     0
#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK     0x3 /* cf10 */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT    2
#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   4
#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   5
#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   6
#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1 /* cf3en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   7
	u8 flags4;
#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1 /* cf4en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   0
#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1 /* cf5en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   1
#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1 /* cf6en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   2
#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK    0x1 /* cf7en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT   3
#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK    0x1 /* cf8en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT   4
#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK    0x1 /* cf9en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT   5
#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK   0x1 /* cf10en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT  6
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
	u8 flags5;
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1 /* rule5en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1 /* rule6en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1 /* rule7en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1 /* rule8en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
	__le32 reg0 /* reg0 */;
	__le32 reg1 /* reg1 */;
	__le32 reg2 /* reg2 */;
	__le32 reg3 /* reg3 */;
	__le32 reg4 /* reg4 */;
	__le32 reg5 /* reg5 */;
	__le32 reg6 /* reg6 */;
	__le32 reg7 /* reg7 */;
	__le32 reg8 /* reg8 */;
	u8 byte2 /* byte2 */;
	u8 byte3 /* byte3 */;
	__le16 word0 /* word0 */;
	u8 byte4 /* byte4 */;
	u8 byte5 /* byte5 */;
	__le16 word1 /* word1 */;
	__le16 word2 /* conn_dpi */;
	__le16 word3 /* word3 */;
	__le32 reg9 /* reg9 */;
	__le32 reg10 /* reg10 */;
};

struct e4_ustorm_core_conn_ag_ctx
{
	u8 reserved /* cdu_validation */;
	u8 byte1 /* state */;
	u8 flags0;
#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* timer0cf */
#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* timer1cf */
#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* timer2cf */
#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
	u8 flags1;
#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3 /* timer_stop_all */
#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT     0
#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3 /* cf4 */
#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT     2
#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3 /* cf5 */
#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT     4
#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3 /* cf6 */
#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT     6
	u8 flags2;
#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1 /* cf3en */
#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   3
#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1 /* cf4en */
#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   4
#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1 /* cf5en */
#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   5
#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1 /* cf6en */
#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   6
#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
	u8 flags3;
#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1 /* rule5en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1 /* rule6en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1 /* rule7en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1 /* rule8en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
	u8 byte2 /* byte2 */;
	u8 byte3 /* byte3 */;
	__le16 word0 /* conn_dpi */;
	__le16 word1 /* word1 */;
	__le32 rx_producers /* reg0 */;
	__le32 reg1 /* reg1 */;
	__le32 reg2 /* reg2 */;
	__le32 reg3 /* reg3 */;
	__le16 word2 /* word2 */;
	__le16 word3 /* word3 */;
};

/*
 * The core storm context for the Mstorm
 */
struct mstorm_core_conn_st_ctx
{
	__le32 reserved[24];
};

/*
 * The core storm context for the Ustorm
 */
struct ustorm_core_conn_st_ctx
{
	__le32 reserved[4];
};

/*
 * core connection context
 */
struct e4_core_conn_context
{
	struct ystorm_core_conn_st_ctx ystorm_st_context /* ystorm storm context */;
	struct regpair ystorm_st_padding[2] /* padding */;
	struct pstorm_core_conn_st_ctx pstorm_st_context /* pstorm storm context */;
	struct regpair pstorm_st_padding[2] /* padding */;
	struct xstorm_core_conn_st_ctx xstorm_st_context /* xstorm storm context */;
	struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context /* xstorm aggregative context */;
	struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
	struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
	struct mstorm_core_conn_st_ctx mstorm_st_context /* mstorm storm context */;
	struct ustorm_core_conn_st_ctx ustorm_st_context /* ustorm storm context */;
	struct regpair ustorm_st_padding[2] /* padding */;
};


struct e5_xstorm_core_conn_ag_ctx
{
	u8 reserved0 /* cdu_validation */;
	u8 state_and_core_id /* state_and_core_id */;
	u8 flags0;
#define E5_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK         0x1 /* exist_in_qm0 */
#define E5_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT        0
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK            0x1 /* exist_in_qm1 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT           1
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK            0x1 /* exist_in_qm2 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT           2
#define E5_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK         0x1 /* exist_in_qm3 */
#define E5_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT        3
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK            0x1 /* bit4 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT           4
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK            0x1 /* cf_array_active */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT           5
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK            0x1 /* bit6 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT           6
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK            0x1 /* bit7 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT           7
	u8 flags1;
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK            0x1 /* bit8 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT           0
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK            0x1 /* bit9 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT           1
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK            0x1 /* bit10 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT           2
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK                0x1 /* bit11 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT               3
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK                0x1 /* bit12 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT               4
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK                0x1 /* bit13 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT               5
#define E5_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK       0x1 /* bit14 */
#define E5_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT      6
#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK         0x1 /* bit15 */
#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT        7
	u8 flags2;
#define E5_XSTORM_CORE_CONN_AG_CTX_CF0_MASK                  0x3 /* timer0cf */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT                 0
#define E5_XSTORM_CORE_CONN_AG_CTX_CF1_MASK                  0x3 /* timer1cf */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT                 2
#define E5_XSTORM_CORE_CONN_AG_CTX_CF2_MASK                  0x3 /* timer2cf */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT                 4
#define E5_XSTORM_CORE_CONN_AG_CTX_CF3_MASK                  0x3 /* timer_stop_all */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT                 6
	u8 flags3;
#define E5_XSTORM_CORE_CONN_AG_CTX_CF4_MASK                  0x3 /* cf4 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT                 0
#define E5_XSTORM_CORE_CONN_AG_CTX_CF5_MASK                  0x3 /* cf5 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT                 2
#define E5_XSTORM_CORE_CONN_AG_CTX_CF6_MASK                  0x3 /* cf6 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT                 4
#define E5_XSTORM_CORE_CONN_AG_CTX_CF7_MASK                  0x3 /* cf7 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT                 6
	u8 flags4;
#define E5_XSTORM_CORE_CONN_AG_CTX_CF8_MASK                  0x3 /* cf8 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT                 0
#define E5_XSTORM_CORE_CONN_AG_CTX_CF9_MASK                  0x3 /* cf9 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT                 2
#define E5_XSTORM_CORE_CONN_AG_CTX_CF10_MASK                 0x3 /* cf10 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT                4
#define E5_XSTORM_CORE_CONN_AG_CTX_CF11_MASK                 0x3 /* cf11 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT                6
	u8 flags5;
#define E5_XSTORM_CORE_CONN_AG_CTX_CF12_MASK                 0x3 /* cf12 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT                0
#define E5_XSTORM_CORE_CONN_AG_CTX_CF13_MASK                 0x3 /* cf13 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT                2
#define E5_XSTORM_CORE_CONN_AG_CTX_CF14_MASK                 0x3 /* cf14 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT                4
#define E5_XSTORM_CORE_CONN_AG_CTX_CF15_MASK                 0x3 /* cf15 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT                6
	u8 flags6;
#define E5_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK     0x3 /* cf16 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT    0
#define E5_XSTORM_CORE_CONN_AG_CTX_CF17_MASK                 0x3 /* cf_array_cf */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT                2
#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK                0x3 /* cf18 */
#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT               4
#define E5_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK         0x3 /* cf19 */
#define E5_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT        6
	u8 flags7;
#define E5_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK             0x3 /* cf20 */
#define E5_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT            0
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK           0x3 /* cf21 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT          2
#define E5_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK            0x3 /* cf22 */
#define E5_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT           4
#define E5_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK                0x1 /* cf0en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT               6
#define E5_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK                0x1 /* cf1en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT               7
	u8 flags8;
#define E5_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK                0x1 /* cf2en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT               0
#define E5_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK                0x1 /* cf3en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT               1
#define E5_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK                0x1 /* cf4en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT               2
#define E5_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK                0x1 /* cf5en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT               3
#define E5_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK                0x1 /* cf6en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT               4
#define E5_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK                0x1 /* cf7en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT               5
#define E5_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK                0x1 /* cf8en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT               6
#define E5_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK                0x1 /* cf9en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT               7
	u8 flags9;
#define E5_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK               0x1 /* cf10en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT              0
#define E5_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK               0x1 /* cf11en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT              1
#define E5_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK               0x1 /* cf12en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT              2
#define E5_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK               0x1 /* cf13en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT              3
#define E5_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK               0x1 /* cf14en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT              4
#define E5_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK               0x1 /* cf15en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT              5
#define E5_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK  0x1 /* cf16en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
#define E5_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK               0x1 /* cf_array_cf_en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT              7
	u8 flags10;
#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK             0x1 /* cf18en */
#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT            0
#define E5_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK      0x1 /* cf19en */
#define E5_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT     1
#define E5_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK          0x1 /* cf20en */
#define E5_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT         2
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK           0x1 /* cf21en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT          3
#define E5_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK         0x1 /* cf22en */
#define E5_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT        4
#define E5_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK               0x1 /* cf23en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT              5
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK           0x1 /* rule0en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT          6
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK           0x1 /* rule1en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT          7
	u8 flags11;
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK           0x1 /* rule2en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT          0
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK           0x1 /* rule3en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT          1
#define E5_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK       0x1 /* rule4en */
#define E5_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT      2
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK              0x1 /* rule5en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT             3
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK              0x1 /* rule6en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT             4
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK              0x1 /* rule7en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT             5
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK         0x1 /* rule8en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT        6
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK              0x1 /* rule9en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT             7
	u8 flags12;
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK             0x1 /* rule10en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT            0
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK             0x1 /* rule11en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT            1
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK         0x1 /* rule12en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT        2
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK         0x1 /* rule13en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT        3
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK             0x1 /* rule14en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT            4
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK             0x1 /* rule15en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT            5
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK             0x1 /* rule16en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT            6
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK             0x1 /* rule17en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT            7
	u8 flags13;
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK             0x1 /* rule18en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT            0
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK             0x1 /* rule19en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT            1
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK         0x1 /* rule20en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT        2
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK         0x1 /* rule21en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT        3
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK         0x1 /* rule22en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT        4
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK         0x1 /* rule23en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT        5
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK         0x1 /* rule24en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT        6
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK         0x1 /* rule25en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT        7
	u8 flags14;
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK                0x1 /* bit16 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT               0
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK                0x1 /* bit17 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT               1
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK                0x1 /* bit18 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT               2
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK                0x1 /* bit19 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT               3
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK                0x1 /* bit20 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT               4
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK                0x1 /* bit21 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT               5
#define E5_XSTORM_CORE_CONN_AG_CTX_CF23_MASK                 0x3 /* cf23 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT                6
	u8 byte2 /* byte2 */;
	__le16 physical_q0 /* physical_q0 */;
	__le16 consolid_prod /* physical_q1 */;
	__le16 reserved16 /* physical_q2 */;
	__le16 tx_bd_cons /* word3 */;
	__le16 tx_bd_or_spq_prod /* word4 */;
	__le16 word5 /* word5 */;
	__le16 conn_dpi /* conn_dpi */;
	u8 byte3 /* byte3 */;
	u8 byte4 /* byte4 */;
	u8 byte5 /* byte5 */;
	u8 byte6 /* byte6 */;
	__le32 reg0 /* reg0 */;
	__le32 reg1 /* reg1 */;
	__le32 reg2 /* reg2 */;
	__le32 reg3 /* reg3 */;
	__le32 reg4 /* reg4 */;
	__le32 reg5 /* cf_array0 */;
	__le32 reg6 /* cf_array1 */;
	u8 flags15;
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED1_MASK         0x1 /* bit22 */
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED1_SHIFT        0
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED2_MASK         0x1 /* bit23 */
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED2_SHIFT        1
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED3_MASK         0x1 /* bit24 */
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED3_SHIFT        2
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED4_MASK         0x3 /* cf24 */
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED4_SHIFT        3
1221 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED5_MASK         0x1 /* cf24en */
1222 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED5_SHIFT        5
1223 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED6_MASK         0x1 /* rule26en */
1224 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED6_SHIFT        6
1225 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED7_MASK         0x1 /* rule27en */
1226 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED7_SHIFT        7
1227 	u8 byte7 /* byte7 */;
1228 	__le16 word7 /* word7 */;
1229 	__le16 word8 /* word8 */;
1230 	__le16 word9 /* word9 */;
1231 	__le16 word10 /* word10 */;
1232 	__le16 word11 /* word11 */;
1233 	__le32 reg7 /* reg7 */;
1234 	__le32 reg8 /* reg8 */;
1235 	__le32 reg9 /* reg9 */;
1236 	u8 byte8 /* byte8 */;
1237 	u8 byte9 /* byte9 */;
1238 	u8 byte10 /* byte10 */;
1239 	u8 byte11 /* byte11 */;
1240 	u8 byte12 /* byte12 */;
1241 	u8 byte13 /* byte13 */;
1242 	u8 byte14 /* byte14 */;
1243 	u8 byte15 /* byte15 */;
1244 	__le32 reg10 /* reg10 */;
1245 	__le32 reg11 /* reg11 */;
1246 	__le32 reg12 /* reg12 */;
1247 	__le32 reg13 /* reg13 */;
1248 	__le32 reg14 /* reg14 */;
1249 	__le32 reg15 /* reg15 */;
1250 	__le32 reg16 /* reg16 */;
1251 	__le32 reg17 /* reg17 */;
1252 	__le32 reg18 /* reg18 */;
1253 	__le32 reg19 /* reg19 */;
1254 	__le16 word12 /* word12 */;
1255 	__le16 word13 /* word13 */;
1256 	__le16 word14 /* word14 */;
1257 	__le16 word15 /* word15 */;
1258 };
1259 
1260 struct e5_tstorm_core_conn_ag_ctx
1261 {
1262 	u8 byte0 /* cdu_validation */;
1263 	u8 byte1 /* state_and_core_id */;
1264 	u8 flags0;
1265 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK          0x1 /* exist_in_qm0 */
1266 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT         0
1267 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK          0x1 /* exist_in_qm1 */
1268 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT         1
1269 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK          0x1 /* bit2 */
1270 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT         2
1271 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK          0x1 /* bit3 */
1272 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT         3
1273 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK          0x1 /* bit4 */
1274 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT         4
1275 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK          0x1 /* bit5 */
1276 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT         5
1277 #define E5_TSTORM_CORE_CONN_AG_CTX_CF0_MASK           0x3 /* timer0cf */
1278 #define E5_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT          6
1279 	u8 flags1;
1280 #define E5_TSTORM_CORE_CONN_AG_CTX_CF1_MASK           0x3 /* timer1cf */
1281 #define E5_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT          0
1282 #define E5_TSTORM_CORE_CONN_AG_CTX_CF2_MASK           0x3 /* timer2cf */
1283 #define E5_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT          2
1284 #define E5_TSTORM_CORE_CONN_AG_CTX_CF3_MASK           0x3 /* timer_stop_all */
1285 #define E5_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT          4
1286 #define E5_TSTORM_CORE_CONN_AG_CTX_CF4_MASK           0x3 /* cf4 */
1287 #define E5_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT          6
1288 	u8 flags2;
1289 #define E5_TSTORM_CORE_CONN_AG_CTX_CF5_MASK           0x3 /* cf5 */
1290 #define E5_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT          0
1291 #define E5_TSTORM_CORE_CONN_AG_CTX_CF6_MASK           0x3 /* cf6 */
1292 #define E5_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT          2
1293 #define E5_TSTORM_CORE_CONN_AG_CTX_CF7_MASK           0x3 /* cf7 */
1294 #define E5_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT          4
1295 #define E5_TSTORM_CORE_CONN_AG_CTX_CF8_MASK           0x3 /* cf8 */
1296 #define E5_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT          6
1297 	u8 flags3;
1298 #define E5_TSTORM_CORE_CONN_AG_CTX_CF9_MASK           0x3 /* cf9 */
1299 #define E5_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT          0
1300 #define E5_TSTORM_CORE_CONN_AG_CTX_CF10_MASK          0x3 /* cf10 */
1301 #define E5_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT         2
1302 #define E5_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK         0x1 /* cf0en */
1303 #define E5_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT        4
1304 #define E5_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK         0x1 /* cf1en */
1305 #define E5_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT        5
1306 #define E5_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK         0x1 /* cf2en */
1307 #define E5_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT        6
1308 #define E5_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK         0x1 /* cf3en */
1309 #define E5_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT        7
1310 	u8 flags4;
1311 #define E5_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK         0x1 /* cf4en */
1312 #define E5_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT        0
1313 #define E5_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK         0x1 /* cf5en */
1314 #define E5_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT        1
1315 #define E5_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK         0x1 /* cf6en */
1316 #define E5_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT        2
1317 #define E5_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK         0x1 /* cf7en */
1318 #define E5_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT        3
1319 #define E5_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK         0x1 /* cf8en */
1320 #define E5_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT        4
1321 #define E5_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK         0x1 /* cf9en */
1322 #define E5_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT        5
1323 #define E5_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK        0x1 /* cf10en */
1324 #define E5_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT       6
1325 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK       0x1 /* rule0en */
1326 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT      7
1327 	u8 flags5;
1328 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK       0x1 /* rule1en */
1329 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT      0
1330 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK       0x1 /* rule2en */
1331 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT      1
1332 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK       0x1 /* rule3en */
1333 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT      2
1334 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK       0x1 /* rule4en */
1335 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT      3
1336 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK       0x1 /* rule5en */
1337 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT      4
1338 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK       0x1 /* rule6en */
1339 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT      5
1340 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK       0x1 /* rule7en */
1341 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT      6
1342 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK       0x1 /* rule8en */
1343 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT      7
1344 	u8 flags6;
1345 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED1_MASK  0x1 /* bit6 */
1346 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
1347 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED2_MASK  0x1 /* bit7 */
1348 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
1349 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED3_MASK  0x1 /* bit8 */
1350 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
1351 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED4_MASK  0x3 /* cf11 */
1352 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED4_SHIFT 3
1353 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED5_MASK  0x1 /* cf11en */
1354 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED5_SHIFT 5
1355 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED6_MASK  0x1 /* rule9en */
1356 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED6_SHIFT 6
1357 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED7_MASK  0x1 /* rule10en */
1358 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED7_SHIFT 7
1359 	u8 byte2 /* byte2 */;
1360 	__le16 word0 /* word0 */;
1361 	__le32 reg0 /* reg0 */;
1362 	__le32 reg1 /* reg1 */;
1363 	__le32 reg2 /* reg2 */;
1364 	__le32 reg3 /* reg3 */;
1365 	__le32 reg4 /* reg4 */;
1366 	__le32 reg5 /* reg5 */;
1367 	__le32 reg6 /* reg6 */;
1368 	__le32 reg7 /* reg7 */;
1369 	__le32 reg8 /* reg8 */;
1370 	u8 byte3 /* byte3 */;
1371 	u8 byte4 /* byte4 */;
1372 	u8 byte5 /* byte5 */;
1373 	u8 e4_reserved8 /* byte6 */;
1374 	__le16 word1 /* word1 */;
1375 	__le16 word2 /* conn_dpi */;
1376 	__le32 reg9 /* reg9 */;
1377 	__le16 word3 /* word3 */;
1378 	__le16 e4_reserved9 /* word4 */;
1379 };
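
/*
 * Illustrative only (not part of the HSI): the *_MASK/*_SHIFT pairs above
 * describe sub-fields packed into the flags bytes.  A field is read by
 * shifting and masking, and written by clearing its bits first, e.g. for
 * CF0 in flags0 of e5_tstorm_core_conn_ag_ctx:
 *
 *   cf0 = (ag_ctx->flags0 >> E5_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT) &
 *         E5_TSTORM_CORE_CONN_AG_CTX_CF0_MASK;
 *
 *   ag_ctx->flags0 &= ~(E5_TSTORM_CORE_CONN_AG_CTX_CF0_MASK <<
 *                       E5_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT);
 *   ag_ctx->flags0 |= (val & E5_TSTORM_CORE_CONN_AG_CTX_CF0_MASK) <<
 *                     E5_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT;
 *
 * The driver normally wraps this pattern in GET_FIELD()/SET_FIELD() style
 * helpers; ag_ctx and val here are placeholder names.
 */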
1380 
1381 struct e5_ustorm_core_conn_ag_ctx
1382 {
1383 	u8 reserved /* cdu_validation */;
1384 	u8 byte1 /* state_and_core_id */;
1385 	u8 flags0;
1386 #define E5_USTORM_CORE_CONN_AG_CTX_BIT0_MASK          0x1 /* exist_in_qm0 */
1387 #define E5_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT         0
1388 #define E5_USTORM_CORE_CONN_AG_CTX_BIT1_MASK          0x1 /* exist_in_qm1 */
1389 #define E5_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT         1
1390 #define E5_USTORM_CORE_CONN_AG_CTX_CF0_MASK           0x3 /* timer0cf */
1391 #define E5_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT          2
1392 #define E5_USTORM_CORE_CONN_AG_CTX_CF1_MASK           0x3 /* timer1cf */
1393 #define E5_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT          4
1394 #define E5_USTORM_CORE_CONN_AG_CTX_CF2_MASK           0x3 /* timer2cf */
1395 #define E5_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT          6
1396 	u8 flags1;
1397 #define E5_USTORM_CORE_CONN_AG_CTX_CF3_MASK           0x3 /* timer_stop_all */
1398 #define E5_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT          0
1399 #define E5_USTORM_CORE_CONN_AG_CTX_CF4_MASK           0x3 /* cf4 */
1400 #define E5_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT          2
1401 #define E5_USTORM_CORE_CONN_AG_CTX_CF5_MASK           0x3 /* cf5 */
1402 #define E5_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT          4
1403 #define E5_USTORM_CORE_CONN_AG_CTX_CF6_MASK           0x3 /* cf6 */
1404 #define E5_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT          6
1405 	u8 flags2;
1406 #define E5_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK         0x1 /* cf0en */
1407 #define E5_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT        0
1408 #define E5_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK         0x1 /* cf1en */
1409 #define E5_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT        1
1410 #define E5_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK         0x1 /* cf2en */
1411 #define E5_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT        2
1412 #define E5_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK         0x1 /* cf3en */
1413 #define E5_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT        3
1414 #define E5_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK         0x1 /* cf4en */
1415 #define E5_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT        4
1416 #define E5_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK         0x1 /* cf5en */
1417 #define E5_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT        5
1418 #define E5_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK         0x1 /* cf6en */
1419 #define E5_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT        6
1420 #define E5_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK       0x1 /* rule0en */
1421 #define E5_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT      7
1422 	u8 flags3;
1423 #define E5_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK       0x1 /* rule1en */
1424 #define E5_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT      0
1425 #define E5_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK       0x1 /* rule2en */
1426 #define E5_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT      1
1427 #define E5_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK       0x1 /* rule3en */
1428 #define E5_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT      2
1429 #define E5_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK       0x1 /* rule4en */
1430 #define E5_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT      3
1431 #define E5_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK       0x1 /* rule5en */
1432 #define E5_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT      4
1433 #define E5_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK       0x1 /* rule6en */
1434 #define E5_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT      5
1435 #define E5_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK       0x1 /* rule7en */
1436 #define E5_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT      6
1437 #define E5_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK       0x1 /* rule8en */
1438 #define E5_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT      7
1439 	u8 flags4;
1440 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED1_MASK  0x1 /* bit2 */
1441 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
1442 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED2_MASK  0x1 /* bit3 */
1443 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
1444 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED3_MASK  0x3 /* cf7 */
1445 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
1446 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED4_MASK  0x3 /* cf8 */
1447 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED4_SHIFT 4
1448 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED5_MASK  0x1 /* cf7en */
1449 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED5_SHIFT 6
1450 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED6_MASK  0x1 /* cf8en */
1451 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED6_SHIFT 7
1452 	u8 byte2 /* byte2 */;
1453 	__le16 word0 /* conn_dpi */;
1454 	__le16 word1 /* word1 */;
1455 	__le32 rx_producers /* reg0 */;
1456 	__le32 reg1 /* reg1 */;
1457 	__le32 reg2 /* reg2 */;
1458 	__le32 reg3 /* reg3 */;
1459 	__le16 word2 /* word2 */;
1460 	__le16 word3 /* word3 */;
1461 };
1462 
1463 /*
1464  * core connection context
1465  */
1466 struct e5_core_conn_context
1467 {
1468 	struct ystorm_core_conn_st_ctx ystorm_st_context /* ystorm storm context */;
1469 	struct regpair ystorm_st_padding[2] /* padding */;
1470 	struct pstorm_core_conn_st_ctx pstorm_st_context /* pstorm storm context */;
1471 	struct regpair pstorm_st_padding[2] /* padding */;
1472 	struct xstorm_core_conn_st_ctx xstorm_st_context /* xstorm storm context */;
1473 	struct regpair xstorm_st_padding[2] /* padding */;
1474 	struct e5_xstorm_core_conn_ag_ctx xstorm_ag_context /* xstorm aggregative context */;
1475 	struct e5_tstorm_core_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
1476 	struct e5_ustorm_core_conn_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
1477 	struct mstorm_core_conn_st_ctx mstorm_st_context /* mstorm storm context */;
1478 	struct ustorm_core_conn_st_ctx ustorm_st_context /* ustorm storm context */;
1479 	struct regpair ustorm_st_padding[2] /* padding */;
1480 };
1481 
1482 
1483 struct eth_mstorm_per_pf_stat
1484 {
1485 	struct regpair gre_discard_pkts /* Dropped GRE RX packets */;
1486 	struct regpair vxlan_discard_pkts /* Dropped VXLAN RX packets */;
1487 	struct regpair geneve_discard_pkts /* Dropped GENEVE RX packets */;
1488 	struct regpair lb_discard_pkts /* Dropped Tx switched packets */;
1489 };
1490 
1491 
1492 struct eth_mstorm_per_queue_stat
1493 {
1494 	struct regpair ttl0_discard /* Number of packets discarded because TTL=0 (in IPv4) or hopLimit=0 (in IPv6) */;
1495 	struct regpair packet_too_big_discard /* Number of packets discarded because they are bigger than MTU */;
1496 	struct regpair no_buff_discard /* Number of packets discarded due to lack of host buffers (BDs/SGEs/CQEs) */;
1497 	struct regpair not_active_discard /* Number of packets discarded because of no active Rx connection */;
1498 	struct regpair tpa_coalesced_pkts /* number of coalesced packets in all TPA aggregations */;
1499 	struct regpair tpa_coalesced_events /* total number of TPA aggregations */;
1500 	struct regpair tpa_aborts_num /* number of aggregations that ended abnormally */;
1501 	struct regpair tpa_coalesced_bytes /* total TCP payload length in all TPA aggregations */;
1502 };
1503 
1504 
1505 /*
1506  * Ethernet TX Per PF
1507  */
1508 struct eth_pstorm_per_pf_stat
1509 {
1510 	struct regpair sent_lb_ucast_bytes /* number of total ucast bytes sent on loopback port without errors */;
1511 	struct regpair sent_lb_mcast_bytes /* number of total mcast bytes sent on loopback port without errors */;
1512 	struct regpair sent_lb_bcast_bytes /* number of total bcast bytes sent on loopback port without errors */;
1513 	struct regpair sent_lb_ucast_pkts /* number of total ucast packets sent on loopback port without errors */;
1514 	struct regpair sent_lb_mcast_pkts /* number of total mcast packets sent on loopback port without errors */;
1515 	struct regpair sent_lb_bcast_pkts /* number of total bcast packets sent on loopback port without errors */;
1516 	struct regpair sent_gre_bytes /* Sent GRE bytes */;
1517 	struct regpair sent_vxlan_bytes /* Sent VXLAN bytes */;
1518 	struct regpair sent_geneve_bytes /* Sent GENEVE bytes */;
1519 	struct regpair sent_gre_pkts /* Sent GRE packets */;
1520 	struct regpair sent_vxlan_pkts /* Sent VXLAN packets */;
1521 	struct regpair sent_geneve_pkts /* Sent GENEVE packets */;
1522 	struct regpair gre_drop_pkts /* Dropped GRE TX packets */;
1523 	struct regpair vxlan_drop_pkts /* Dropped VXLAN TX packets */;
1524 	struct regpair geneve_drop_pkts /* Dropped GENEVE TX packets */;
1525 };
1526 
1527 
1528 /*
1529  * Ethernet TX Per Queue Stats
1530  */
1531 struct eth_pstorm_per_queue_stat
1532 {
1533 	struct regpair sent_ucast_bytes /* number of total unicast bytes sent without errors */;
1534 	struct regpair sent_mcast_bytes /* number of total multicast bytes sent without errors */;
1535 	struct regpair sent_bcast_bytes /* number of total broadcast bytes sent without errors */;
1536 	struct regpair sent_ucast_pkts /* number of total unicast packets sent without errors */;
1537 	struct regpair sent_mcast_pkts /* number of total multicast packets sent without errors */;
1538 	struct regpair sent_bcast_pkts /* number of total broadcast packets sent without errors */;
1539 	struct regpair error_drop_pkts /* number of total packets dropped due to errors */;
1540 };
1541 
1542 
1543 /*
1544  * ETH Rx producers data
1545  */
1546 struct eth_rx_rate_limit
1547 {
1548 	__le16 mult /* Rate Limit Multiplier - (Storm Clock (MHz) * 8 / Desired Bandwidth (MB/s)) */;
1549 	__le16 cnst /* Constant term to add to (or subtract from) the number of cycles */;
1550 	u8 add_sub_cnst /* Add (1) or subtract (0) constant term */;
1551 	u8 reserved0;
1552 	__le16 reserved1;
1553 };
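
/*
 * Worked example (illustrative, assuming a 1000 MHz storm clock): limiting
 * a queue to 2000 MB/s would use
 *
 *   mult = storm_clk_mhz * 8 / bw_mb_s = 1000 * 8 / 2000 = 4
 *
 * cnst then adds (add_sub_cnst = 1) or subtracts (add_sub_cnst = 0) a fixed
 * number of cycles on top of the rate term.
 */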
1554 
1555 
1556 struct eth_ustorm_per_pf_stat
1557 {
1558 	struct regpair rcv_lb_ucast_bytes /* number of total ucast bytes received on loopback port without errors */;
1559 	struct regpair rcv_lb_mcast_bytes /* number of total mcast bytes received on loopback port without errors */;
1560 	struct regpair rcv_lb_bcast_bytes /* number of total bcast bytes received on loopback port without errors */;
1561 	struct regpair rcv_lb_ucast_pkts /* number of total ucast packets received on loopback port without errors */;
1562 	struct regpair rcv_lb_mcast_pkts /* number of total mcast packets received on loopback port without errors */;
1563 	struct regpair rcv_lb_bcast_pkts /* number of total bcast packets received on loopback port without errors */;
1564 	struct regpair rcv_gre_bytes /* Received GRE bytes */;
1565 	struct regpair rcv_vxlan_bytes /* Received VXLAN bytes */;
1566 	struct regpair rcv_geneve_bytes /* Received GENEVE bytes */;
1567 	struct regpair rcv_gre_pkts /* Received GRE packets */;
1568 	struct regpair rcv_vxlan_pkts /* Received VXLAN packets */;
1569 	struct regpair rcv_geneve_pkts /* Received GENEVE packets */;
1570 };
1571 
1572 
1573 struct eth_ustorm_per_queue_stat
1574 {
1575 	struct regpair rcv_ucast_bytes;
1576 	struct regpair rcv_mcast_bytes;
1577 	struct regpair rcv_bcast_bytes;
1578 	struct regpair rcv_ucast_pkts;
1579 	struct regpair rcv_mcast_pkts;
1580 	struct regpair rcv_bcast_pkts;
1581 };
1582 
1583 
1584 /*
1585  * Event Ring VF-PF Channel data
1586  */
1587 struct vf_pf_channel_eqe_data
1588 {
1589 	struct regpair msg_addr /* VF-PF message address */;
1590 };
1591 
1592 /*
1593  * Event Ring malicious VF data
1594  */
1595 struct malicious_vf_eqe_data
1596 {
1597 	u8 vf_id /* Malicious VF ID */;
1598 	u8 err_id /* Malicious VF error (use enum malicious_vf_error_id) */;
1599 	__le16 reserved[3];
1600 };
1601 
1602 /*
1603  * Event Ring initial cleanup data
1604  */
1605 struct initial_cleanup_eqe_data
1606 {
1607 	u8 vf_id /* VF ID */;
1608 	u8 reserved[7];
1609 };
1610 
1611 /*
1612  * Event Data Union
1613  */
1614 union event_ring_data
1615 {
1616 	u8 bytes[8] /* Byte Array */;
1617 	struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
1618 	struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
1619 	struct iscsi_connect_done_results iscsi_conn_done_info /* Dedicated fields to iscsi connect done results */;
1620 	union rdma_eqe_data rdma_data /* Dedicated field for RDMA data */;
1621 	struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
1622 	struct initial_cleanup_eqe_data vf_init_cleanup /* VF Initial Cleanup data */;
1623 };
1624 
1625 
1626 /*
1627  * Event Ring Entry
1628  */
1629 struct event_ring_entry
1630 {
1631 	u8 protocol_id /* Event Protocol ID (use enum protocol_type) */;
1632 	u8 opcode /* Event Opcode */;
1633 	__le16 reserved0 /* Reserved */;
1634 	__le16 echo /* Echo value from ramrod data on the host */;
1635 	u8 fw_return_code /* FW return code for SP ramrods */;
1636 	u8 flags;
1637 #define EVENT_RING_ENTRY_ASYNC_MASK      0x1 /* 0: synchronous EQE - a completion of SP message. 1: asynchronous EQE */
1638 #define EVENT_RING_ENTRY_ASYNC_SHIFT     0
1639 #define EVENT_RING_ENTRY_RESERVED1_MASK  0x7F
1640 #define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
1641 	union event_ring_data data;
1642 };
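
/*
 * Illustrative only: the flags sub-fields are accessed with the mask/shift
 * pairs above, e.g. to tell an asynchronous EQE from a slowpath completion
 * (eqe is a placeholder pointer to a struct event_ring_entry):
 *
 *   u8 is_async = (eqe->flags >> EVENT_RING_ENTRY_ASYNC_SHIFT) &
 *                 EVENT_RING_ENTRY_ASYNC_MASK;
 *
 * Synchronous entries complete a slowpath message identified by echo;
 * asynchronous entries are interpreted per protocol_id and opcode.
 */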
1643 
1644 /*
1645  * Event Ring Next Page Address
1646  */
1647 struct event_ring_next_addr
1648 {
1649 	struct regpair addr /* Next Page Address */;
1650 	__le32 reserved[2] /* Reserved */;
1651 };
1652 
1653 /*
1654  * Event Ring Element
1655  */
1656 union event_ring_element
1657 {
1658 	struct event_ring_entry entry /* Event Ring Entry */;
1659 	struct event_ring_next_addr next_addr /* Event Ring Next Page Address */;
1660 };
1661 
1662 
1663 
1664 
1665 /*
1666  * Ports mode
1667  */
1668 enum fw_flow_ctrl_mode
1669 {
1670 	flow_ctrl_pause,
1671 	flow_ctrl_pfc,
1672 	MAX_FW_FLOW_CTRL_MODE
1673 };
1674 
1675 
1676 /*
1677  * GFT profile type.
1678  */
1679 enum gft_profile_type
1680 {
1681 	GFT_PROFILE_TYPE_4_TUPLE /* tunnel type, inner 4 tuple, IP type and L4 type match. */,
1682 	GFT_PROFILE_TYPE_L4_DST_PORT /* tunnel type, inner L4 destination port, IP type and L4 type match. */,
1683 	GFT_PROFILE_TYPE_IP_DST_ADDR /* tunnel type, inner IP destination address and IP type match. */,
1684 	GFT_PROFILE_TYPE_IP_SRC_ADDR /* tunnel type, inner IP source address and IP type match. */,
1685 	GFT_PROFILE_TYPE_TUNNEL_TYPE /* tunnel type and outer IP type match. */,
1686 	MAX_GFT_PROFILE_TYPE
1687 };
1688 
1689 
1690 /*
1691  * Major and Minor hsi Versions
1692  */
1693 struct hsi_fp_ver_struct
1694 {
1695 	u8 minor_ver_arr[2] /* Minor Version of hsi loading pf */;
1696 	u8 major_ver_arr[2] /* Major Version of driver loading pf */;
1697 };
1698 
1699 
1700 
1701 /*
1702  * Integration Phase
1703  */
1704 enum integ_phase
1705 {
1706 	INTEG_PHASE_BB_A0_LATEST=3 /* BB A0 latest integration phase */,
1707 	INTEG_PHASE_BB_B0_NO_MCP=10 /* BB B0 without MCP */,
1708 	INTEG_PHASE_BB_B0_WITH_MCP=11 /* BB B0 with MCP */,
1709 	MAX_INTEG_PHASE
1710 };
1711 
1712 
1713 /*
1714  * Ports mode
1715  */
1716 enum iwarp_ll2_tx_queues
1717 {
1718 	IWARP_LL2_IN_ORDER_TX_QUEUE=1 /* LL2 queue for OOO packets sent in-order by the driver */,
1719 	IWARP_LL2_ALIGNED_TX_QUEUE /* LL2 queue for unaligned packets sent aligned by the driver */,
1720 	IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE /* LL2 queue for unaligned packets sent aligned and right-trimmed by the driver */,
1721 	IWARP_LL2_ERROR /* Error indication */,
1722 	MAX_IWARP_LL2_TX_QUEUES
1723 };
1724 
1725 
1726 
1727 /*
1728  * Malicious VF error ID
1729  */
1730 enum malicious_vf_error_id
1731 {
1732 	MALICIOUS_VF_NO_ERROR /* Zero placeholder value */,
1733 	VF_PF_CHANNEL_NOT_READY /* Writing to VF/PF channel when it is not ready */,
1734 	VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */,
1735 	VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */,
1736 	ETH_PACKET_TOO_SMALL /* TX packet is shorter than reported on the BDs or than the minimal size */,
1737 	ETH_ILLEGAL_VLAN_MODE /* TX packet marked for VLAN insertion when that is illegal */,
1738 	ETH_MTU_VIOLATION /* TX packet is greater than the MTU */,
1739 	ETH_ILLEGAL_INBAND_TAGS /* TX packet has illegal inband tags marked */,
1740 	ETH_VLAN_INSERT_AND_INBAND_VLAN /* VLAN cannot be added to an inband tag */,
1741 	ETH_ILLEGAL_NBDS /* indicated number of BDs for the packet is illegal */,
1742 	ETH_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
1743 	ETH_INSUFFICIENT_BDS /* There are not enough BDs for transmission of even one packet */,
1744 	ETH_ILLEGAL_LSO_HDR_NBDS /* Header NBDs value is illegal */,
1745 	ETH_ILLEGAL_LSO_MSS /* LSO MSS value is more than allowed */,
1746 	ETH_ZERO_SIZE_BD /* empty BD (which does not contain control flags) is illegal */,
1747 	ETH_ILLEGAL_LSO_HDR_LEN /* LSO header size is above the limit  */,
1748 	ETH_INSUFFICIENT_PAYLOAD /* In LSO it is expected that the local BD ring will hold at least MSS bytes of data */,
1749 	ETH_EDPM_OUT_OF_SYNC /* Valid BDs on local ring after EDPM L2 sync */,
1750 	ETH_TUNN_IPV6_EXT_NBD_ERR /* Tunneled packet with IPv6+Ext without a proper number of BDs */,
1751 	ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
1752 	ETH_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
1753 	ETH_PACKET_SIZE_TOO_LARGE /* packet scanned is too large (can be 9700 at most) */,
1754 	MAX_MALICIOUS_VF_ERROR_ID
1755 };
1756 
1757 
1758 
1759 /*
1760  * Mstorm non-triggering VF zone
1761  */
1762 struct mstorm_non_trigger_vf_zone
1763 {
1764 	struct eth_mstorm_per_queue_stat eth_queue_stat /* VF statistic bucket */;
1765 	struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD] /* VF RX queues producers */;
1766 };
1767 
1768 
1769 /*
1770  * Mstorm VF zone
1771  */
1772 struct mstorm_vf_zone
1773 {
1774 	struct mstorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
1775 };
1776 
1777 
1778 /*
1779  * vlan header including TPID and TCI fields
1780  */
1781 struct vlan_header
1782 {
1783 	__le16 tpid /* Tag Protocol Identifier */;
1784 	__le16 tci /* Tag Control Information */;
1785 };
1786 
1787 /*
1788  * outer tag configurations
1789  */
1790 struct outer_tag_config_struct
1791 {
1792 	u8 enable_stag_pri_change /* Enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette Davis, UFP with Host Control mode, and UFP with DCB over base interface; otherwise 0. */;
1793 	u8 pri_map_valid /* If inner_to_outer_pri_map is initialized, set pri_map_valid */;
1794 	u8 reserved[2];
1795 	struct vlan_header outer_tag /* In case mf_mode is MF_OVLAN, this field specifies the outer tag protocol identifier and outer tag control information */;
1796 	u8 inner_to_outer_pri_map[8] /* Map from inner to outer priority. Set pri_map_valid when the map is initialized */;
1797 };
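
/*
 * Illustrative only: a PF in MF_OVLAN mode could describe an 802.1ad S-tag
 * (TPID 0x88a8) with VLAN ID 100 and an identity priority map roughly as
 * follows (cfg and pri are placeholder names, and the __le16 fields must be
 * stored little-endian, e.g. via the driver's cpu-to-LE helpers):
 *
 *   struct outer_tag_config_struct cfg = { 0 };
 *   u8 pri;
 *
 *   cfg.outer_tag.tpid = 0x88a8;
 *   cfg.outer_tag.tci  = 100;
 *   cfg.pri_map_valid  = 1;
 *   for (pri = 0; pri < 8; pri++)
 *           cfg.inner_to_outer_pri_map[pri] = pri;
 */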
1798 
1799 
1800 /*
1801  * personality per PF
1802  */
1803 enum personality_type
1804 {
1805 	BAD_PERSONALITY_TYP,
1806 	PERSONALITY_ISCSI /* iSCSI and LL2 */,
1807 	PERSONALITY_FCOE /* Fcoe and LL2 */,
1808 	PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp, Eth and LL2 */,
1809 	PERSONALITY_RDMA /* Roce and LL2 */,
1810 	PERSONALITY_CORE /* CORE(LL2) */,
1811 	PERSONALITY_ETH /* Ethernet */,
1812 	PERSONALITY_TOE /* Toe and LL2 */,
1813 	MAX_PERSONALITY_TYPE
1814 };
1815 
1816 
1817 /*
1818  * tunnel configuration
1819  */
1820 struct pf_start_tunnel_config
1821 {
1822 	u8 set_vxlan_udp_port_flg /* Set VXLAN tunnel UDP destination port to vxlan_udp_port. If not set - FW will use a default port */;
1823 	u8 set_geneve_udp_port_flg /* Set GENEVE tunnel UDP destination port to geneve_udp_port. If not set - FW will use a default port */;
1824 	u8 tunnel_clss_vxlan /* Rx classification scheme for VXLAN tunnel. (use enum tunnel_clss) */;
1825 	u8 tunnel_clss_l2geneve /* Rx classification scheme for l2 GENEVE tunnel. (use enum tunnel_clss) */;
1826 	u8 tunnel_clss_ipgeneve /* Rx classification scheme for ip GENEVE tunnel. (use enum tunnel_clss) */;
1827 	u8 tunnel_clss_l2gre /* Rx classification scheme for l2 GRE tunnel. (use enum tunnel_clss) */;
1828 	u8 tunnel_clss_ipgre /* Rx classification scheme for ip GRE tunnel. (use enum tunnel_clss) */;
1829 	u8 reserved;
1830 	__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */;
1831 	__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */;
1832 };
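
/*
 * Illustrative only: requesting the IANA-assigned VXLAN port (4789) while
 * leaving the GENEVE port at the firmware default could look like the
 * following (ramrod_data is a placeholder pointer and the __le16 port must
 * be stored little-endian via the driver's usual helper):
 *
 *   struct pf_start_tunnel_config *tun = &ramrod_data->tunnel_config;
 *
 *   tun->set_vxlan_udp_port_flg  = 1;
 *   tun->vxlan_udp_port          = 4789;
 *   tun->set_geneve_udp_port_flg = 0;
 *   tun->tunnel_clss_vxlan       = TUNNEL_CLSS_MAC_VNI;
 */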
1833 
1834 /*
1835  * Ramrod data for PF start ramrod
1836  */
1837 struct pf_start_ramrod_data
1838 {
1839 	struct regpair event_ring_pbl_addr /* Address of event ring PBL */;
1840 	struct regpair consolid_q_pbl_addr /* PBL address of consolidation queue */;
1841 	struct pf_start_tunnel_config tunnel_config /* tunnel configuration. */;
1842 	__le16 event_ring_sb_id /* Status block ID */;
1843 	u8 base_vf_id /* All VF IDs owned by the PF range from base_vf_id to base_vf_id + num_vfs */;
1844 	u8 num_vfs /* Amount of vfs owned by PF */;
1845 	u8 event_ring_num_pages /* Number of PBL pages in event ring */;
1846 	u8 event_ring_sb_index /* Status block index */;
1847 	u8 path_id /* HW path ID (engine ID) */;
1848 	u8 warning_as_error /* In FW asserts, treat warning as error */;
1849 	u8 dont_log_ramrods /* If not set - throw a warning for each ramrod (for debug) */;
1850 	u8 personality /* define what type of personality is new PF (use enum personality_type) */;
1851 	__le16 log_type_mask /* Log type mask. Each set bit enables logging of the corresponding event type. Event types are defined as ASSERT_LOG_TYPE_xxx */;
1852 	u8 mf_mode /* Multi function mode (use enum mf_mode) */;
1853 	u8 integ_phase /* Integration phase (use enum integ_phase) */;
1854 	u8 allow_npar_tx_switching /* If set, inter-pf tx switching is allowed in Switch Independent function mode */;
1855 	u8 reserved0;
1856 	struct hsi_fp_ver_struct hsi_fp_ver /* FP HSI version to be used by FW */;
1857 	struct outer_tag_config_struct outer_tag_config /* Outer tag configurations */;
1858 };
1859 
1860 
1861 
1862 /*
1863  * Per protocol DCB data
1864  */
1865 struct protocol_dcb_data
1866 {
1867 	u8 dcb_enable_flag /* Enable DCB */;
1868 	u8 dscp_enable_flag /* Enable updating DSCP value */;
1869 	u8 dcb_priority /* DCB priority */;
1870 	u8 dcb_tc /* DCB TC */;
1871 	u8 dscp_val /* DSCP value to write if dscp_enable_flag is set */;
1872 	u8 dcb_dont_add_vlan0 /* When DCB is enabled - if this flag is set, don't add a VLAN 0 tag to untagged frames */;
1873 };
1874 
1875 /*
1876  * Update tunnel configuration
1877  */
1878 struct pf_update_tunnel_config
1879 {
1880 	u8 update_rx_pf_clss /* Update RX per PF tunnel classification scheme. */;
1881 	u8 update_rx_def_ucast_clss /* Update per PORT default tunnel RX classification scheme for traffic with unknown unicast outer MAC in NPAR mode. */;
1882 	u8 update_rx_def_non_ucast_clss /* Update per PORT default tunnel RX classification scheme for traffic with non unicast outer MAC in NPAR mode. */;
1883 	u8 set_vxlan_udp_port_flg /* Update VXLAN tunnel UDP destination port. */;
1884 	u8 set_geneve_udp_port_flg /* Update GENEVE tunnel UDP destination port. */;
1885 	u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. (use enum tunnel_clss) */;
1886 	u8 tunnel_clss_l2geneve /* Classification scheme for l2 GENEVE tunnel. (use enum tunnel_clss) */;
1887 	u8 tunnel_clss_ipgeneve /* Classification scheme for ip GENEVE tunnel. (use enum tunnel_clss) */;
1888 	u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. (use enum tunnel_clss) */;
1889 	u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. (use enum tunnel_clss) */;
1890 	__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
1891 	__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
1892 	__le16 reserved;
1893 };
1894 
1895 /*
1896  * Data for port update ramrod
1897  */
1898 struct pf_update_ramrod_data
1899 {
1900 	u8 update_eth_dcb_data_mode /* Update Eth DCB data indication (use enum dcb_dscp_update_mode) */;
1901 	u8 update_fcoe_dcb_data_mode /* Update FCOE DCB data indication (use enum dcb_dscp_update_mode) */;
1902 	u8 update_iscsi_dcb_data_mode /* Update iSCSI DCB data indication (use enum dcb_dscp_update_mode) */;
1903 	u8 update_roce_dcb_data_mode /* Update ROCE DCB data indication (use enum dcb_dscp_update_mode) */;
1904 	u8 update_rroce_dcb_data_mode /* Update RROCE (RoceV2) DCB data indication (use enum dcb_dscp_update_mode) */;
1905 	u8 update_iwarp_dcb_data_mode /* Update IWARP DCB data indication (use enum dcb_dscp_update_mode) */;
1906 	u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
1907 	u8 update_enable_stag_pri_change /* Update Enable STAG Priority Change indication */;
1908 	struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
1909 	struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
1910 	struct protocol_dcb_data iscsi_dcb_data /* core iscsi related fields */;
1911 	struct protocol_dcb_data roce_dcb_data /* core roce related fields */;
1912 	struct protocol_dcb_data rroce_dcb_data /* core roce related fields */;
1913 	struct protocol_dcb_data iwarp_dcb_data /* core iwarp related fields */;
1914 	__le16 mf_vlan /* new outer vlan id value */;
1915 	u8 enable_stag_pri_change /* Enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette Davis, UFP with Host Control mode, and UFP with DCB over base interface; otherwise 0. */;
1916 	u8 reserved;
1917 	struct pf_update_tunnel_config tunnel_config /* tunnel configuration. */;
1918 };
1919 
1920 
1921 
1922 /*
1923  * Ports mode
1924  */
1925 enum ports_mode
1926 {
1927 	ENGX2_PORTX1 /* 2 engines x 1 port */,
1928 	ENGX2_PORTX2 /* 2 engines x 2 ports */,
1929 	ENGX1_PORTX1 /* 1 engine  x 1 port */,
1930 	ENGX1_PORTX2 /* 1 engine  x 2 ports */,
1931 	ENGX1_PORTX4 /* 1 engine  x 4 ports */,
1932 	MAX_PORTS_MODE
1933 };
1934 
1935 
1936 
1937 /*
1938  * use to index in hsi_fp_[major|minor]_ver_arr per protocol
1939  */
1940 enum protocol_version_array_key
1941 {
1942 	ETH_VER_KEY=0,
1943 	ROCE_VER_KEY,
1944 	MAX_PROTOCOL_VERSION_ARRAY_KEY
1945 };
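
/*
 * Illustrative only: these keys index the per-protocol entries of
 * struct hsi_fp_ver_struct, e.g. (the *_HSI_VER_* constants are
 * placeholders for the driver's per-protocol HSI version numbers):
 *
 *   struct hsi_fp_ver_struct ver = { { 0 }, { 0 } };
 *
 *   ver.major_ver_arr[ETH_VER_KEY]  = ETH_HSI_VER_MAJOR;
 *   ver.minor_ver_arr[ETH_VER_KEY]  = ETH_HSI_VER_MINOR;
 *   ver.major_ver_arr[ROCE_VER_KEY] = ROCE_HSI_VER_MAJOR;
 *   ver.minor_ver_arr[ROCE_VER_KEY] = ROCE_HSI_VER_MINOR;
 */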
1946 
1947 
1948 
1949 /*
1950  * RDMA TX Stats
1951  */
1952 struct rdma_sent_stats
1953 {
1954 	struct regpair sent_bytes /* number of total RDMA bytes sent */;
1955 	struct regpair sent_pkts /* number of total RDMA packets sent */;
1956 };
1957 
1958 /*
1959  * Pstorm non-triggering VF zone
1960  */
1961 struct pstorm_non_trigger_vf_zone
1962 {
1963 	struct eth_pstorm_per_queue_stat eth_queue_stat /* VF statistic bucket */;
1964 	struct rdma_sent_stats rdma_stats /* RoCE sent statistics */;
1965 };
1966 
1967 
1968 /*
1969  * Pstorm VF zone
1970  */
1971 struct pstorm_vf_zone
1972 {
1973 	struct pstorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
1974 	struct regpair reserved[7] /* vf_zone size must be a power of 2 */;
1975 };
1976 
1977 
1978 /*
1979  * Ramrod Header of SPQE
1980  */
1981 struct ramrod_header
1982 {
1983 	__le32 cid /* Slowpath Connection CID */;
1984 	u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
1985 	u8 protocol_id /* Ramrod Protocol ID (use enum protocol_type) */;
1986 	__le16 echo /* Ramrod echo */;
1987 };
1988 
1989 
1990 /*
1991  * RDMA RX Stats
1992  */
1993 struct rdma_rcv_stats
1994 {
1995 	struct regpair rcv_bytes /* number of total RDMA bytes received */;
1996 	struct regpair rcv_pkts /* number of total RDMA packets received */;
1997 };
1998 
1999 
2000 
2001 /*
2002  * Data for update QCN/DCQCN RL ramrod
2003  */
2004 struct rl_update_ramrod_data
2005 {
2006 	u8 qcn_update_param_flg /* Update QCN global params: timeout. */;
2007 	u8 dcqcn_update_param_flg /* Update DCQCN global params: timeout, g, k. */;
2008 	u8 rl_init_flg /* Initialize RL parameters, when RL is disabled. */;
2009 	u8 rl_start_flg /* Start RL in IDLE state. Set rate to maximum. */;
2010 	u8 rl_stop_flg /* Stop RL. */;
2011 	u8 rl_id_first /* ID of the first or single RL that will be updated. */;
2012 	u8 rl_id_last /* ID of the last RL that will be updated. If clear, a single RL will be updated. */;
2013 	u8 rl_dc_qcn_flg /* If set, the RL will be used for DCQCN. */;
2014 	__le32 rl_bc_rate /* Byte Counter Limit. */;
2015 	__le16 rl_max_rate /* Maximum rate in 1.6 Mbps resolution. */;
2016 	__le16 rl_r_ai /* Active increase rate. */;
2017 	__le16 rl_r_hai /* Hyper active increase rate. */;
2018 	__le16 dcqcn_g /* DCQCN Alpha update gain in 1/64K resolution. */;
2019 	__le32 dcqcn_k_us /* DCQCN Alpha update interval. */;
2020 	__le32 dcqcn_timeuot_us /* DCQCN timeout. */;
2021 	__le32 qcn_timeuot_us /* QCN timeout. */;
2022 	__le32 reserved[2];
2023 };
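
/*
 * Worked example (illustrative): rl_max_rate is expressed in units of
 * 1.6 Mbps, so a 25 Gbps ceiling is programmed as 25000 / 1.6 = 15625 and a
 * 10 Gbps ceiling as 10000 / 1.6 = 6250.
 */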
2024 
2025 
2026 /*
2027  * Slowpath Element (SPQE)
2028  */
2029 struct slow_path_element
2030 {
2031 	struct ramrod_header hdr /* Ramrod Header */;
2032 	struct regpair data_ptr /* Pointer to the Ramrod Data on the Host */;
2033 };
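
/*
 * Illustrative only: the driver posts a ramrod by filling one SPQE with the
 * connection CID, the command and protocol IDs and the host address of the
 * ramrod data buffer, roughly as below (elem, cid, cmd_id, protocol_id,
 * echo, data_phys_hi/lo and the little-endian conversions are all assumed
 * driver context, not part of this HSI definition):
 *
 *   struct slow_path_element *elem = next_free_spq_entry();
 *
 *   elem->hdr.cid         = cid;
 *   elem->hdr.cmd_id      = cmd_id;
 *   elem->hdr.protocol_id = protocol_id;
 *   elem->hdr.echo        = echo;
 *   elem->data_ptr.hi     = data_phys_hi;
 *   elem->data_ptr.lo     = data_phys_lo;
 */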
2034 
2035 
2036 /*
2037  * Tstorm non-triggering VF zone
2038  */
2039 struct tstorm_non_trigger_vf_zone
2040 {
2041 	struct rdma_rcv_stats rdma_stats /* RoCE received statistics */;
2042 };
2043 
2044 
2045 struct tstorm_per_port_stat
2046 {
2047 	struct regpair trunc_error_discard /* packet is dropped because it was truncated in NIG */;
2048 	struct regpair mac_error_discard /* packet is dropped because of Ethernet FCS error */;
2049 	struct regpair mftag_filter_discard /* packet is dropped because classification was unsuccessful */;
2050 	struct regpair eth_mac_filter_discard /* packet was passed to Ethernet and dropped because of no mac filter match */;
2051 	struct regpair ll2_mac_filter_discard /* packet passed to Light L2 and dropped because Light L2 is not configured for this PF */;
2052 	struct regpair ll2_conn_disabled_discard /* packet passed to Light L2 and dropped because the Light L2 connection is disabled */;
2053 	struct regpair iscsi_irregular_pkt /* packet is an ISCSI irregular packet */;
2054 	struct regpair fcoe_irregular_pkt /* packet is an FCOE irregular packet */;
2055 	struct regpair roce_irregular_pkt /* packet is an ROCE irregular packet */;
2056 	struct regpair iwarp_irregular_pkt /* packet is an IWARP irregular packet */;
2057 	struct regpair eth_irregular_pkt /* packet is an ETH irregular packet */;
2058 	struct regpair toe_irregular_pkt /* packet is a TOE irregular packet */;
2059 	struct regpair preroce_irregular_pkt /* packet is a PREROCE irregular packet */;
2060 	struct regpair eth_gre_tunn_filter_discard /* GRE dropped packets */;
2061 	struct regpair eth_vxlan_tunn_filter_discard /* VXLAN dropped packets */;
2062 	struct regpair eth_geneve_tunn_filter_discard /* GENEVE dropped packets */;
2063 	struct regpair eth_gft_drop_pkt /* GFT dropped packets */;
2064 };
2065 
2066 
2067 /*
2068  * Tstorm VF zone
2069  */
2070 struct tstorm_vf_zone
2071 {
2072 	struct tstorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
2073 };
2074 
2075 
2076 /*
2077  * Tunnel classification scheme
2078  */
2079 enum tunnel_clss
2080 {
2081 	TUNNEL_CLSS_MAC_VLAN=0 /* Use MAC and VLAN from first L2 header for vport classification. */,
2082 	TUNNEL_CLSS_MAC_VNI /* Use MAC from first L2 header and VNI from tunnel header for vport classification */,
2083 	TUNNEL_CLSS_INNER_MAC_VLAN /* Use MAC and VLAN from last L2 header for vport classification */,
2084 	TUNNEL_CLSS_INNER_MAC_VNI /* Use MAC from last L2 header and VNI from tunnel header for vport classification */,
2085 	TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE /* Use MAC and VLAN from last L2 header for vport classification. If no exact match, use MAC and VLAN from first L2 header for classification. */,
2086 	MAX_TUNNEL_CLSS
2087 };
2088 
2089 
2090 
2091 /*
2092  * Ustorm non-triggering VF zone
2093  */
2094 struct ustorm_non_trigger_vf_zone
2095 {
2096 	struct eth_ustorm_per_queue_stat eth_queue_stat /* VF statistic bucket */;
2097 	struct regpair vf_pf_msg_addr /* VF-PF message address */;
2098 };
2099 
2100 
2101 /*
2102  * Ustorm triggering VF zone
2103  */
2104 struct ustorm_trigger_vf_zone
2105 {
2106 	u8 vf_pf_msg_valid /* VF-PF message valid flag */;
2107 	u8 reserved[7];
2108 };
2109 
2110 
2111 /*
2112  * Ustorm VF zone
2113  */
2114 struct ustorm_vf_zone
2115 {
2116 	struct ustorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
2117 	struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */;
2118 };
2119 
2120 
2121 /*
2122  * VF-PF channel data
2123  */
2124 struct vf_pf_channel_data
2125 {
2126 	__le32 ready /* 0: VF-PF Channel NOT ready. Waiting for ack from PF driver. 1: VF-PF Channel is ready for a new transaction. */;
2127 	u8 valid /* 0: VF-PF Channel is invalid because of malicious VF. 1: VF-PF Channel is valid. */;
2128 	u8 reserved0;
2129 	__le16 reserved1;
2130 };
2131 
2132 
2133 
2134 /*
2135  * Ramrod data for VF start ramrod
2136  */
2137 struct vf_start_ramrod_data
2138 {
2139 	u8 vf_id /* VF ID */;
2140 	u8 enable_flr_ack /* If set, initial cleanup ack will be sent to parent PF SP event queue */;
2141 	__le16 opaque_fid /* VF opaque FID */;
2142 	u8 personality /* define what type of personality is new VF (use enum personality_type) */;
2143 	u8 reserved[7];
2144 	struct hsi_fp_ver_struct hsi_fp_ver /* FP HSI version to be used by FW */;
2145 };
2146 
2147 
2148 /*
2149  * Ramrod data for VF stop ramrod
2150  */
2151 struct vf_stop_ramrod_data
2152 {
2153 	u8 vf_id /* VF ID */;
2154 	u8 reserved0;
2155 	__le16 reserved1;
2156 	__le32 reserved2;
2157 };
2158 
2159 
2160 /*
2161  * VF zone size mode.
2162  */
2163 enum vf_zone_size_mode
2164 {
2165 	VF_ZONE_SIZE_MODE_DEFAULT /* Default VF zone size. Up to 192 VFs supported. */,
2166 	VF_ZONE_SIZE_MODE_DOUBLE /* Doubled VF zone size. Up to 96 VFs supported. */,
2167 	VF_ZONE_SIZE_MODE_QUAD /* Quad VF zone size. Up to 48 VFs supported. */,
2168 	MAX_VF_ZONE_SIZE_MODE
2169 };
2170 
2171 
2172 
2173 
2174 
2175 /*
2176  * Attentions status block
2177  */
2178 struct atten_status_block
2179 {
2180 	__le32 atten_bits;
2181 	__le32 atten_ack;
2182 	__le16 reserved0;
2183 	__le16 sb_index /* status block running index */;
2184 	__le32 reserved1;
2185 };
2186 
2187 
2188 /*
2189  * DMAE command
2190  */
2191 struct dmae_cmd
2192 {
2193 	__le32 opcode;
2194 #define DMAE_CMD_SRC_MASK              0x1 /* DMA Source. 0 - PCIe, 1 - GRC (use enum dmae_cmd_src_enum) */
2195 #define DMAE_CMD_SRC_SHIFT             0
2196 #define DMAE_CMD_DST_MASK              0x3 /* DMA destination. 0 - None, 1 - PCIe, 2 - GRC, 3 - None (use enum dmae_cmd_dst_enum) */
2197 #define DMAE_CMD_DST_SHIFT             1
2198 #define DMAE_CMD_C_DST_MASK            0x1 /* Completion destination. 0 - PCIe, 1 - GRC (use enum dmae_cmd_c_dst_enum) */
2199 #define DMAE_CMD_C_DST_SHIFT           3
2200 #define DMAE_CMD_CRC_RESET_MASK        0x1 /* Reset the CRC result (do not use the previous result as the seed) */
2201 #define DMAE_CMD_CRC_RESET_SHIFT       4
2202 #define DMAE_CMD_SRC_ADDR_RESET_MASK   0x1 /* Reset the source address in the next go to the same source address of the previous go */
2203 #define DMAE_CMD_SRC_ADDR_RESET_SHIFT  5
2204 #define DMAE_CMD_DST_ADDR_RESET_MASK   0x1 /* Reset the destination address in the next go to the same destination address of the previous go */
2205 #define DMAE_CMD_DST_ADDR_RESET_SHIFT  6
2206 #define DMAE_CMD_COMP_FUNC_MASK        0x1 /* 0 - completion function is the same as src function, 1 - completion function is the same as dst function (use enum dmae_cmd_comp_func_enum) */
2207 #define DMAE_CMD_COMP_FUNC_SHIFT       7
2208 #define DMAE_CMD_COMP_WORD_EN_MASK     0x1 /* 0 - Do not write a completion word, 1 - Write a completion word (use enum dmae_cmd_comp_word_en_enum) */
2209 #define DMAE_CMD_COMP_WORD_EN_SHIFT    8
2210 #define DMAE_CMD_COMP_CRC_EN_MASK      0x1 /* 0 - Do not write a CRC word, 1 - Write a CRC word (use enum dmae_cmd_comp_crc_en_enum) */
2211 #define DMAE_CMD_COMP_CRC_EN_SHIFT     9
2212 #define DMAE_CMD_COMP_CRC_OFFSET_MASK  0x7 /* The CRC word should be taken from the DMAE address space from address 9+X, where X is the value in these bits. */
2213 #define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
2214 #define DMAE_CMD_RESERVED1_MASK        0x1
2215 #define DMAE_CMD_RESERVED1_SHIFT       13
2216 #define DMAE_CMD_ENDIANITY_MODE_MASK   0x3
2217 #define DMAE_CMD_ENDIANITY_MODE_SHIFT  14
2218 #define DMAE_CMD_ERR_HANDLING_MASK     0x3 /* The field specifies how the completion word is affected by PCIe read error. 0 - Send a regular completion, 1 - Send a completion with an error indication, 2 - Do not send a completion (use enum dmae_cmd_error_handling_enum) */
2219 #define DMAE_CMD_ERR_HANDLING_SHIFT    16
2220 #define DMAE_CMD_PORT_ID_MASK          0x3 /* The port ID to be placed on the RF FID field of the GRC bus. This field is used both when GRC is the destination and when it is the source of the DMAE transaction. */
2221 #define DMAE_CMD_PORT_ID_SHIFT         18
2222 #define DMAE_CMD_SRC_PF_ID_MASK        0xF /* Source PCI function number [3:0] */
2223 #define DMAE_CMD_SRC_PF_ID_SHIFT       20
2224 #define DMAE_CMD_DST_PF_ID_MASK        0xF /* Destination PCI function number [3:0] */
2225 #define DMAE_CMD_DST_PF_ID_SHIFT       24
2226 #define DMAE_CMD_SRC_VF_ID_VALID_MASK  0x1 /* Source VFID valid */
2227 #define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
2228 #define DMAE_CMD_DST_VF_ID_VALID_MASK  0x1 /* Destination VFID valid */
2229 #define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
2230 #define DMAE_CMD_RESERVED2_MASK        0x3
2231 #define DMAE_CMD_RESERVED2_SHIFT       30
2232 	__le32 src_addr_lo /* PCIe source address low in bytes or GRC source address in DW */;
2233 	__le32 src_addr_hi /* PCIe source address high in bytes or reserved (if source is GRC) */;
2234 	__le32 dst_addr_lo /* PCIe destination address low in bytes or GRC destination address in DW */;
2235 	__le32 dst_addr_hi /* PCIe destination address high in bytes or reserved (if destination is GRC) */;
2236 	__le16 length_dw /* Length in DW */;
2237 	__le16 opcode_b;
2238 #define DMAE_CMD_SRC_VF_ID_MASK        0xFF /* Source VF id */
2239 #define DMAE_CMD_SRC_VF_ID_SHIFT       0
2240 #define DMAE_CMD_DST_VF_ID_MASK        0xFF /* Destination VF id */
2241 #define DMAE_CMD_DST_VF_ID_SHIFT       8
2242 	__le32 comp_addr_lo /* PCIe completion address low in bytes or GRC completion address in DW */;
2243 	__le32 comp_addr_hi /* PCIe completion address high in bytes or reserved (if completion address is GRC) */;
2244 	__le32 comp_val /* Value to write to completion address */;
2245 	__le32 crc32 /* crc32 result */;
2246 	__le32 crc_32_c /* crc32_c result */;
2247 	__le16 crc16 /* crc16 result */;
2248 	__le16 crc16_c /* crc16_c result */;
2249 	__le16 crc10 /* crc_t10 result */;
2250 	__le16 reserved;
2251 	__le16 xsum16 /* checksum16 result  */;
2252 	__le16 xsum8 /* checksum8 result  */;
2253 };
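
/*
 * Illustrative only: an opcode for a PCIe -> GRC copy that writes a
 * completion word (but no CRC) back to the source function can be composed
 * from the mask/shift pairs above (opcode, port_id and cmd are placeholder
 * names; the result must be stored little-endian):
 *
 *   u32 opcode = 0;
 *
 *   opcode |= (dmae_cmd_src_pcie & DMAE_CMD_SRC_MASK) << DMAE_CMD_SRC_SHIFT;
 *   opcode |= (dmae_cmd_dst_grc & DMAE_CMD_DST_MASK) << DMAE_CMD_DST_SHIFT;
 *   opcode |= (dmae_cmd_comp_word_enabled & DMAE_CMD_COMP_WORD_EN_MASK) <<
 *             DMAE_CMD_COMP_WORD_EN_SHIFT;
 *   opcode |= (port_id & DMAE_CMD_PORT_ID_MASK) << DMAE_CMD_PORT_ID_SHIFT;
 *   cmd->opcode = opcode;
 */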
2254 
2255 
2256 enum dmae_cmd_comp_crc_en_enum
2257 {
2258 	dmae_cmd_comp_crc_disabled /* Do not write a CRC word */,
2259 	dmae_cmd_comp_crc_enabled /* Write a CRC word */,
2260 	MAX_DMAE_CMD_COMP_CRC_EN_ENUM
2261 };
2262 
2263 
2264 enum dmae_cmd_comp_func_enum
2265 {
2266 	dmae_cmd_comp_func_to_src /* completion word and/or CRC will be sent to SRC-PCI function/SRC VFID */,
2267 	dmae_cmd_comp_func_to_dst /* completion word and/or CRC will be sent to DST-PCI function/DST VFID */,
2268 	MAX_DMAE_CMD_COMP_FUNC_ENUM
2269 };
2270 
2271 
2272 enum dmae_cmd_comp_word_en_enum
2273 {
2274 	dmae_cmd_comp_word_disabled /* Do not write a completion word */,
2275 	dmae_cmd_comp_word_enabled /* Write the completion word */,
2276 	MAX_DMAE_CMD_COMP_WORD_EN_ENUM
2277 };
2278 
2279 
2280 enum dmae_cmd_c_dst_enum
2281 {
2282 	dmae_cmd_c_dst_pcie,
2283 	dmae_cmd_c_dst_grc,
2284 	MAX_DMAE_CMD_C_DST_ENUM
2285 };
2286 
2287 
2288 enum dmae_cmd_dst_enum
2289 {
2290 	dmae_cmd_dst_none_0,
2291 	dmae_cmd_dst_pcie,
2292 	dmae_cmd_dst_grc,
2293 	dmae_cmd_dst_none_3,
2294 	MAX_DMAE_CMD_DST_ENUM
2295 };
2296 
2297 
2298 enum dmae_cmd_error_handling_enum
2299 {
2300 	dmae_cmd_error_handling_send_regular_comp /* Send a regular completion (with no error indication) */,
2301 	dmae_cmd_error_handling_send_comp_with_err /* Send a completion with an error indication (i.e. set bit 31 of the completion word) */,
2302 	dmae_cmd_error_handling_dont_send_comp /* Do not send a completion */,
2303 	MAX_DMAE_CMD_ERROR_HANDLING_ENUM
2304 };
2305 
2306 
2307 enum dmae_cmd_src_enum
2308 {
2309 	dmae_cmd_src_pcie /* The source is the PCIe */,
2310 	dmae_cmd_src_grc /* The source is the GRC */,
2311 	MAX_DMAE_CMD_SRC_ENUM
2312 };
2313 
2314 
2315 struct e4_mstorm_core_conn_ag_ctx
2316 {
2317 	u8 byte0 /* cdu_validation */;
2318 	u8 byte1 /* state */;
2319 	u8 flags0;
2320 #define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
2321 #define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
2322 #define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
2323 #define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
2324 #define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
2325 #define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
2326 #define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
2327 #define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
2328 #define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
2329 #define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
2330 	u8 flags1;
2331 #define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
2332 #define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
2333 #define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
2334 #define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
2335 #define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
2336 #define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
2337 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
2338 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
2339 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
2340 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
2341 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
2342 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
2343 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
2344 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
2345 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
2346 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
2347 	__le16 word0 /* word0 */;
2348 	__le16 word1 /* word1 */;
2349 	__le32 reg0 /* reg0 */;
2350 	__le32 reg1 /* reg1 */;
2351 };
2352 
2353 
2354 
2355 
2356 
2357 struct e4_ystorm_core_conn_ag_ctx
2358 {
2359 	u8 byte0 /* cdu_validation */;
2360 	u8 byte1 /* state */;
2361 	u8 flags0;
2362 #define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
2363 #define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
2364 #define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
2365 #define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
2366 #define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
2367 #define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
2368 #define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
2369 #define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
2370 #define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
2371 #define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
2372 	u8 flags1;
2373 #define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
2374 #define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
2375 #define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
2376 #define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
2377 #define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
2378 #define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
2379 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
2380 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
2381 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
2382 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
2383 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
2384 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
2385 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
2386 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
2387 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
2388 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
2389 	u8 byte2 /* byte2 */;
2390 	u8 byte3 /* byte3 */;
2391 	__le16 word0 /* word0 */;
2392 	__le32 reg0 /* reg0 */;
2393 	__le32 reg1 /* reg1 */;
2394 	__le16 word1 /* word1 */;
2395 	__le16 word2 /* word2 */;
2396 	__le16 word3 /* word3 */;
2397 	__le16 word4 /* word4 */;
2398 	__le32 reg2 /* reg2 */;
2399 	__le32 reg3 /* reg3 */;
2400 };
2401 
2402 
2403 struct e5_mstorm_core_conn_ag_ctx
2404 {
2405 	u8 byte0 /* cdu_validation */;
2406 	u8 byte1 /* state_and_core_id */;
2407 	u8 flags0;
2408 #define E5_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
2409 #define E5_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
2410 #define E5_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
2411 #define E5_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
2412 #define E5_MSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
2413 #define E5_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
2414 #define E5_MSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
2415 #define E5_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
2416 #define E5_MSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
2417 #define E5_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
2418 	u8 flags1;
2419 #define E5_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
2420 #define E5_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
2421 #define E5_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
2422 #define E5_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
2423 #define E5_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
2424 #define E5_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
2425 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
2426 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
2427 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
2428 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
2429 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
2430 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
2431 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
2432 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
2433 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
2434 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
2435 	__le16 word0 /* word0 */;
2436 	__le16 word1 /* word1 */;
2437 	__le32 reg0 /* reg0 */;
2438 	__le32 reg1 /* reg1 */;
2439 };
2440 
2441 
2442 
2443 
2444 
2445 struct e5_ystorm_core_conn_ag_ctx
2446 {
2447 	u8 byte0 /* cdu_validation */;
2448 	u8 byte1 /* state_and_core_id */;
2449 	u8 flags0;
2450 #define E5_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
2451 #define E5_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
2452 #define E5_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
2453 #define E5_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
2454 #define E5_YSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
2455 #define E5_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
2456 #define E5_YSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
2457 #define E5_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
2458 #define E5_YSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
2459 #define E5_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
2460 	u8 flags1;
2461 #define E5_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
2462 #define E5_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
2463 #define E5_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
2464 #define E5_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
2465 #define E5_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
2466 #define E5_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
2467 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
2468 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
2469 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
2470 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
2471 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
2472 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
2473 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
2474 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
2475 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
2476 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
2477 	u8 byte2 /* byte2 */;
2478 	u8 byte3 /* byte3 */;
2479 	__le16 word0 /* word0 */;
2480 	__le32 reg0 /* reg0 */;
2481 	__le32 reg1 /* reg1 */;
2482 	__le16 word1 /* word1 */;
2483 	__le16 word2 /* word2 */;
2484 	__le16 word3 /* word3 */;
2485 	__le16 word4 /* word4 */;
2486 	__le32 reg2 /* reg2 */;
2487 	__le32 reg3 /* reg3 */;
2488 };
2489 
2490 
2491 struct fw_asserts_ram_section
2492 {
2493 	__le16 section_ram_line_offset /* The offset of the section in RAM, in RAM lines (64-bit units) */;
2494 	__le16 section_ram_line_size /* The size of the section in RAM lines (64-bit units) */;
2495 	u8 list_dword_offset /* The offset of the asserts list within the section in dwords */;
2496 	u8 list_element_dword_size /* The size of an assert list element in dwords */;
2497 	u8 list_num_elements /* The number of elements in the asserts list */;
2498 	u8 list_next_index_dword_offset /* The offset of the next list index field within the section in dwords */;
2499 };
2500 
2501 
2502 struct fw_ver_num
2503 {
2504 	u8 major /* Firmware major version number */;
2505 	u8 minor /* Firmware minor version number */;
2506 	u8 rev /* Firmware revision version number */;
2507 	u8 eng /* Firmware engineering version number (for bootleg versions) */;
2508 };
2509 
2510 struct fw_ver_info
2511 {
2512 	__le16 tools_ver /* Tools version number */;
2513 	u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
2514 	u8 reserved1;
2515 	struct fw_ver_num num /* FW version number */;
2516 	__le32 timestamp /* FW timestamp in Unix time (seconds since 1970) */;
2517 	__le32 reserved2;
2518 };
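/*
 * fw_ver_num packs the firmware version into four u8 components.  A
 * minimal sketch of checking a reported version against a required
 * minimum (example helper, kept under #if 0; not part of the driver API):
 */
#if 0 /* example only */
static inline int
example_fw_ver_at_least(const struct fw_ver_num *ver, u8 req_major,
    u8 req_minor)
{
	/* Major version decides first; minor only matters on a tie. */
	if (ver->major != req_major)
		return (ver->major > req_major);
	return (ver->minor >= req_minor);
}
#endif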
2519 
2520 struct fw_info
2521 {
2522 	struct fw_ver_info ver /* FW version information */;
2523 	struct fw_asserts_ram_section fw_asserts_section /* Info regarding the FW asserts section in the Storm RAM */;
2524 };
2525 
2526 
2527 struct fw_info_location
2528 {
2529 	__le32 grc_addr /* GRC address where the fw_info struct is located. */;
2530 	__le32 size /* Size of the fw_info structure (that is located at grc_addr). */;
2531 };
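/*
 * fw_info_location points at the fw_info structure in GRC address space.
 * A rough sketch of copying that structure out with a caller-supplied
 * dword reader (grc_read32 is a hypothetical callback; the real driver
 * uses its own register-access routines and byte-order handling).  Kept
 * under #if 0 as an example only:
 */
#if 0 /* example only */
static inline void
example_copy_fw_info(u32 grc_addr, u32 size, u32 *dst,
    u32 (*grc_read32)(u32 addr))
{
	u32 i;

	/* grc_addr/size are the CPU-order values taken from fw_info_location. */
	for (i = 0; i < size / sizeof(u32); i++)
		dst[i] = grc_read32(grc_addr + i * sizeof(u32));
}
#endif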
2532 
2533 
2534 
2535 
2536 /*
2537  * IGU cleanup command
2538  */
2539 struct igu_cleanup
2540 {
2541 	__le32 sb_id_and_flags;
2542 #define IGU_CLEANUP_RESERVED0_MASK     0x7FFFFFF
2543 #define IGU_CLEANUP_RESERVED0_SHIFT    0
2544 #define IGU_CLEANUP_CLEANUP_SET_MASK   0x1 /* cleanup bit: 0 - clear, 1 - set */
2545 #define IGU_CLEANUP_CLEANUP_SET_SHIFT  27
2546 #define IGU_CLEANUP_CLEANUP_TYPE_MASK  0x7
2547 #define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
2548 #define IGU_CLEANUP_COMMAND_TYPE_MASK  0x1 /* must always be set (use enum command_type_bit) */
2549 #define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
2550 	__le32 reserved1;
2551 };
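/*
 * Sketch of composing the sb_id_and_flags dword of an IGU cleanup command
 * from the MASK/SHIFT pairs above.  The result is a CPU-order value and
 * must still be converted to little-endian before being handed to the
 * device (example helper, kept under #if 0; not the driver API):
 */
#if 0 /* example only */
static inline u32
example_igu_cleanup_flags(u32 cleanup_set, u32 cleanup_type)
{
	u32 val = 0;

	val |= (cleanup_set & IGU_CLEANUP_CLEANUP_SET_MASK) <<
	    IGU_CLEANUP_CLEANUP_SET_SHIFT;
	val |= (cleanup_type & IGU_CLEANUP_CLEANUP_TYPE_MASK) <<
	    IGU_CLEANUP_CLEANUP_TYPE_SHIFT;
	/* The command type bit must always be set for cleanup commands. */
	val |= (u32)IGU_CLEANUP_COMMAND_TYPE_MASK <<
	    IGU_CLEANUP_COMMAND_TYPE_SHIFT;
	return (val);
}
#endif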
2552 
2553 
2554 /*
2555  * IGU firmware driver command
2556  */
2557 union igu_command
2558 {
2559 	struct igu_prod_cons_update prod_cons_update;
2560 	struct igu_cleanup cleanup;
2561 };
2562 
2563 
2564 /*
2565  * IGU firmware driver command
2566  */
2567 struct igu_command_reg_ctrl
2568 {
2569 	__le16 opaque_fid;
2570 	__le16 igu_command_reg_ctrl_fields;
2571 #define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK  0xFFF
2572 #define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
2573 #define IGU_COMMAND_REG_CTRL_RESERVED_MASK      0x7
2574 #define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT     12
2575 #define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK  0x1 /* command type: 0 - read, 1 - write */
2576 #define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
2577 };
2578 
2579 
2580 /*
2581  * IGU mapping line structure
2582  */
2583 struct igu_mapping_line
2584 {
2585 	__le32 igu_mapping_line_fields;
2586 #define IGU_MAPPING_LINE_VALID_MASK            0x1
2587 #define IGU_MAPPING_LINE_VALID_SHIFT           0
2588 #define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK    0xFF
2589 #define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT   1
2590 #define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK  0xFF /* In BB: VF-0-120, PF-0-7; In K2: VF-0-191, PF-0-15 */
2591 #define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
2592 #define IGU_MAPPING_LINE_PF_VALID_MASK         0x1 /* PF-1, VF-0 */
2593 #define IGU_MAPPING_LINE_PF_VALID_SHIFT        17
2594 #define IGU_MAPPING_LINE_IPS_GROUP_MASK        0x3F
2595 #define IGU_MAPPING_LINE_IPS_GROUP_SHIFT       18
2596 #define IGU_MAPPING_LINE_RESERVED_MASK         0xFF
2597 #define IGU_MAPPING_LINE_RESERVED_SHIFT        24
2598 };
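/*
 * Sketch of decoding a single IGU mapping line (a CPU-order copy of
 * igu_mapping_line_fields) into its component fields using the MASK/SHIFT
 * pairs above (example helper, kept under #if 0; not the driver API):
 */
#if 0 /* example only */
static inline void
example_decode_igu_mapping(u32 fields, u8 *valid, u8 *vector, u8 *function,
    u8 *is_pf)
{
	*valid = (fields >> IGU_MAPPING_LINE_VALID_SHIFT) &
	    IGU_MAPPING_LINE_VALID_MASK;
	*vector = (fields >> IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT) &
	    IGU_MAPPING_LINE_VECTOR_NUMBER_MASK;
	*function = (fields >> IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT) &
	    IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK;
	*is_pf = (fields >> IGU_MAPPING_LINE_PF_VALID_SHIFT) &
	    IGU_MAPPING_LINE_PF_VALID_MASK;
}
#endif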
2599 
2600 
2601 /*
2602  * IGU MSIX line structure
2603  */
2604 struct igu_msix_vector
2605 {
2606 	struct regpair address;
2607 	__le32 data;
2608 	__le32 msix_vector_fields;
2609 #define IGU_MSIX_VECTOR_MASK_BIT_MASK      0x1
2610 #define IGU_MSIX_VECTOR_MASK_BIT_SHIFT     0
2611 #define IGU_MSIX_VECTOR_RESERVED0_MASK     0x7FFF
2612 #define IGU_MSIX_VECTOR_RESERVED0_SHIFT    1
2613 #define IGU_MSIX_VECTOR_STEERING_TAG_MASK  0xFF
2614 #define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
2615 #define IGU_MSIX_VECTOR_RESERVED1_MASK     0xFF
2616 #define IGU_MSIX_VECTOR_RESERVED1_SHIFT    24
2617 };
2618 
2619 
2620 /*
2621  * per encapsulation type enabling flags
2622  */
2623 struct prs_reg_encapsulation_type_en
2624 {
2625 	u8 flags;
2626 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK     0x1 /* Enable bit for Ethernet-over-GRE (L2 GRE) encapsulation. */
2627 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT    0
2628 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK      0x1 /* Enable bit for IP-over-GRE (IP GRE) encapsulation. */
2629 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT     1
2630 #define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK            0x1 /* Enable bit for VXLAN encapsulation. */
2631 #define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT           2
2632 #define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK            0x1 /* Enable bit for T-Tag encapsulation. */
2633 #define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT           3
2634 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK  0x1 /* Enable bit for Ethernet-over-GENEVE (L2 GENEVE) encapsulation. */
2635 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
2636 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK   0x1 /* Enable bit for IP-over-GENEVE (IP GENEVE) encapsulation. */
2637 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT  5
2638 #define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK                0x3
2639 #define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT               6
2640 };
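/*
 * Sketch of building the parser encapsulation-enable flags byte, here
 * turning on VXLAN and Ethernet-over-GENEVE recognition (example helper,
 * kept under #if 0; not the driver API):
 */
#if 0 /* example only */
static inline u8
example_prs_encap_vxlan_geneve(void)
{
	u8 flags = 0;

	flags |= PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK <<
	    PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
	flags |= PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK <<
	    PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
	return (flags);
}
#endif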
2641 
2642 
2643 enum pxp_tph_st_hint
2644 {
2645 	TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
2646 	TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
2647 	TPH_ST_HINT_TARGET /* Device Write and Host Read, or Host Write and Device Read */,
2648 	TPH_ST_HINT_TARGET_PRIO /* Device Write and Host Read, or Host Write and Device Read - with temporal reuse */,
2649 	MAX_PXP_TPH_ST_HINT
2650 };
2651 
2652 
2653 /*
2654  * QM hardware structure of enable bypass credit mask
2655  */
2656 struct qm_rf_bypass_mask
2657 {
2658 	u8 flags;
2659 #define QM_RF_BYPASS_MASK_LINEVOQ_MASK    0x1
2660 #define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT   0
2661 #define QM_RF_BYPASS_MASK_RESERVED0_MASK  0x1
2662 #define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
2663 #define QM_RF_BYPASS_MASK_PFWFQ_MASK      0x1
2664 #define QM_RF_BYPASS_MASK_PFWFQ_SHIFT     2
2665 #define QM_RF_BYPASS_MASK_VPWFQ_MASK      0x1
2666 #define QM_RF_BYPASS_MASK_VPWFQ_SHIFT     3
2667 #define QM_RF_BYPASS_MASK_PFRL_MASK       0x1
2668 #define QM_RF_BYPASS_MASK_PFRL_SHIFT      4
2669 #define QM_RF_BYPASS_MASK_VPQCNRL_MASK    0x1
2670 #define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT   5
2671 #define QM_RF_BYPASS_MASK_FWPAUSE_MASK    0x1
2672 #define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT   6
2673 #define QM_RF_BYPASS_MASK_RESERVED1_MASK  0x1
2674 #define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
2675 };
2676 
2677 
2678 /*
2679  * QM hardware structure of opportunistic credit mask
2680  */
2681 struct qm_rf_opportunistic_mask
2682 {
2683 	__le16 flags;
2684 #define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK     0x1
2685 #define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT    0
2686 #define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK     0x1
2687 #define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT    1
2688 #define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK       0x1
2689 #define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT      2
2690 #define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK       0x1
2691 #define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT      3
2692 #define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK        0x1
2693 #define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT       4
2694 #define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK     0x1
2695 #define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT    5
2696 #define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK     0x1
2697 #define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT    6
2698 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK   0x1
2699 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT  7
2700 #define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK  0x1
2701 #define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
2702 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK   0x7F
2703 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT  9
2704 };
2705 
2706 
2707 /*
2708  * E4 QM hardware structure of QM map memory
2709  */
2710 struct qm_rf_pq_map_e4
2711 {
2712 	__le32 reg;
2713 #define QM_RF_PQ_MAP_E4_PQ_VALID_MASK          0x1 /* PQ active */
2714 #define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT         0
2715 #define QM_RF_PQ_MAP_E4_RL_ID_MASK             0xFF /* RL ID */
2716 #define QM_RF_PQ_MAP_E4_RL_ID_SHIFT            1
2717 #define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK          0x1FF /* the first PQ associated with the VPORT and VOQ of this PQ */
2718 #define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT         9
2719 #define QM_RF_PQ_MAP_E4_VOQ_MASK               0x1F /* VOQ */
2720 #define QM_RF_PQ_MAP_E4_VOQ_SHIFT              18
2721 #define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK  0x3 /* WRR weight */
2722 #define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23
2723 #define QM_RF_PQ_MAP_E4_RL_VALID_MASK          0x1 /* RL active */
2724 #define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT         25
2725 #define QM_RF_PQ_MAP_E4_RESERVED_MASK          0x3F
2726 #define QM_RF_PQ_MAP_E4_RESERVED_SHIFT         26
2727 };
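/*
 * Sketch of composing an E4 QM PQ map register value from the fields above;
 * the E5 variant is built the same way with the QM_RF_PQ_MAP_E5_* fields.
 * The result is CPU-order and must still be converted to little-endian
 * before being written (example helper, kept under #if 0; not the driver API):
 */
#if 0 /* example only */
static inline u32
example_qm_pq_map_e4(u32 vp_pq_id, u32 rl_id, u32 rl_valid, u32 voq,
    u32 wrr_group)
{
	u32 reg = 0;

	reg |= QM_RF_PQ_MAP_E4_PQ_VALID_MASK << QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT;
	reg |= (rl_id & QM_RF_PQ_MAP_E4_RL_ID_MASK) <<
	    QM_RF_PQ_MAP_E4_RL_ID_SHIFT;
	reg |= (vp_pq_id & QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK) <<
	    QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT;
	reg |= (voq & QM_RF_PQ_MAP_E4_VOQ_MASK) << QM_RF_PQ_MAP_E4_VOQ_SHIFT;
	reg |= (wrr_group & QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK) <<
	    QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT;
	reg |= (rl_valid & QM_RF_PQ_MAP_E4_RL_VALID_MASK) <<
	    QM_RF_PQ_MAP_E4_RL_VALID_SHIFT;
	return (reg);
}
#endif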
2728 
2729 
2730 /*
2731  * E5 QM hardware structure of QM map memory
2732  */
2733 struct qm_rf_pq_map_e5
2734 {
2735 	__le32 reg;
2736 #define QM_RF_PQ_MAP_E5_PQ_VALID_MASK          0x1 /* PQ active */
2737 #define QM_RF_PQ_MAP_E5_PQ_VALID_SHIFT         0
2738 #define QM_RF_PQ_MAP_E5_RL_ID_MASK             0xFF /* RL ID */
2739 #define QM_RF_PQ_MAP_E5_RL_ID_SHIFT            1
2740 #define QM_RF_PQ_MAP_E5_VP_PQ_ID_MASK          0x1FF /* the first PQ associated with the VPORT and VOQ of this PQ */
2741 #define QM_RF_PQ_MAP_E5_VP_PQ_ID_SHIFT         9
2742 #define QM_RF_PQ_MAP_E5_VOQ_MASK               0x3F /* VOQ */
2743 #define QM_RF_PQ_MAP_E5_VOQ_SHIFT              18
2744 #define QM_RF_PQ_MAP_E5_WRR_WEIGHT_GROUP_MASK  0x3 /* WRR weight */
2745 #define QM_RF_PQ_MAP_E5_WRR_WEIGHT_GROUP_SHIFT 24
2746 #define QM_RF_PQ_MAP_E5_RL_VALID_MASK          0x1 /* RL active */
2747 #define QM_RF_PQ_MAP_E5_RL_VALID_SHIFT         26
2748 #define QM_RF_PQ_MAP_E5_RESERVED_MASK          0x1F
2749 #define QM_RF_PQ_MAP_E5_RESERVED_SHIFT         27
2750 };
2751 
2752 
2753 /*
2754  * Completion params for aggregated interrupt completion
2755  */
2756 struct sdm_agg_int_comp_params
2757 {
2758 	__le16 params;
2759 #define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK      0x3F /* index of the aggregated interrupt, 0-31 */
2760 #define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT     0
2761 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK  0x1 /* 1 - set a bit in the aggregated vector, 0 - do not set */
2762 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
2763 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK     0x1FF /* bit number in the aggregated vector, 0-279 (TBD) */
2764 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT    7
2765 };
2766 
2767 
2768 /*
2769  * SDM operation gen command (generate aggregative interrupt)
2770  */
2771 struct sdm_op_gen
2772 {
2773 	__le32 command;
2774 #define SDM_OP_GEN_COMP_PARAM_MASK  0xFFFF /* completion parameters 0-15 */
2775 #define SDM_OP_GEN_COMP_PARAM_SHIFT 0
2776 #define SDM_OP_GEN_COMP_TYPE_MASK   0xF /* completion type 16-19 */
2777 #define SDM_OP_GEN_COMP_TYPE_SHIFT  16
2778 #define SDM_OP_GEN_RESERVED_MASK    0xFFF /* reserved 20-31 */
2779 #define SDM_OP_GEN_RESERVED_SHIFT   20
2780 };
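/*
 * Sketch of composing an SDM operation-gen command dword: the 16-bit
 * aggregative completion parameters are built first and then placed in the
 * COMP_PARAM field together with a completion type.  The value is CPU-order
 * until written out (example helper, kept under #if 0; not the driver API):
 */
#if 0 /* example only */
static inline u32
example_sdm_op_gen(u32 agg_int_index, u32 vector_bit, u32 comp_type)
{
	u32 params = 0;
	u32 command = 0;

	/* Aggregated interrupt index and vector bit, with vector enable set. */
	params |= (agg_int_index & SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK)
	    << SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
	params |= SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK <<
	    SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
	params |= (vector_bit & SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK) <<
	    SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;

	command |= (params & SDM_OP_GEN_COMP_PARAM_MASK) <<
	    SDM_OP_GEN_COMP_PARAM_SHIFT;
	command |= (comp_type & SDM_OP_GEN_COMP_TYPE_MASK) <<
	    SDM_OP_GEN_COMP_TYPE_SHIFT;
	return (command);
}
#endif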
2781 
2782 #endif /* __ECORE_HSI_COMMON__ */
2783