xref: /freebsd/sys/dev/qlnx/qlnxe/ecore_hsi_common.h (revision 0e6acb26)
1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  *
27  * $FreeBSD$
28  *
29  */
30 
31 
32 #ifndef __ECORE_HSI_COMMON__
33 #define __ECORE_HSI_COMMON__
34 /********************************/
35 /* Add include to common target */
36 /********************************/
37 #include "common_hsi.h"
38 
39 
40 /*
41  * opcodes for the event ring
42  */
43 enum common_event_opcode
44 {
45 	COMMON_EVENT_PF_START,
46 	COMMON_EVENT_PF_STOP,
47 	COMMON_EVENT_VF_START,
48 	COMMON_EVENT_VF_STOP,
49 	COMMON_EVENT_VF_PF_CHANNEL,
50 	COMMON_EVENT_VF_FLR,
51 	COMMON_EVENT_PF_UPDATE,
52 	COMMON_EVENT_MALICIOUS_VF,
53 	COMMON_EVENT_RL_UPDATE,
54 	COMMON_EVENT_EMPTY,
55 	MAX_COMMON_EVENT_OPCODE
56 };
57 
58 
59 /*
60  * Common Ramrod Command IDs
61  */
62 enum common_ramrod_cmd_id
63 {
64 	COMMON_RAMROD_UNUSED,
65 	COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
66 	COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
67 	COMMON_RAMROD_VF_START /* VF Function Start */,
68 	COMMON_RAMROD_VF_STOP /* VF Function Stop Ramrod */,
69 	COMMON_RAMROD_PF_UPDATE /* PF update Ramrod */,
70 	COMMON_RAMROD_RL_UPDATE /* QCN/DCQCN RL update Ramrod */,
71 	COMMON_RAMROD_EMPTY /* Empty Ramrod */,
72 	MAX_COMMON_RAMROD_CMD_ID
73 };
74 
75 
76 /*
77  * The core storm context for the Ystorm
78  */
79 struct ystorm_core_conn_st_ctx
80 {
81 	__le32 reserved[4];
82 };
83 
84 /*
85  * The core storm context for the Pstorm
86  */
87 struct pstorm_core_conn_st_ctx
88 {
89 	__le32 reserved[4];
90 };
91 
92 /*
93  * Core Slowpath Connection storm context of Xstorm
94  */
95 struct xstorm_core_conn_st_ctx
96 {
97 	__le32 spq_base_lo /* SPQ Ring Base Address low dword */;
98 	__le32 spq_base_hi /* SPQ Ring Base Address high dword */;
99 	struct regpair consolid_base_addr /* Consolidation Ring Base Address */;
100 	__le16 spq_cons /* SPQ Ring Consumer */;
101 	__le16 consolid_cons /* Consolidation Ring Consumer */;
102 	__le32 reserved0[55] /* Pad to 15 cycles */;
103 };
104 
105 struct e4_xstorm_core_conn_ag_ctx
106 {
107 	u8 reserved0 /* cdu_validation */;
108 	u8 core_state /* state */;
109 	u8 flags0;
110 #define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK         0x1 /* exist_in_qm0 */
111 #define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT        0
112 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK            0x1 /* exist_in_qm1 */
113 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT           1
114 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK            0x1 /* exist_in_qm2 */
115 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT           2
116 #define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK         0x1 /* exist_in_qm3 */
117 #define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT        3
118 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK            0x1 /* bit4 */
119 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT           4
120 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK            0x1 /* cf_array_active */
121 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT           5
122 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK            0x1 /* bit6 */
123 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT           6
124 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK            0x1 /* bit7 */
125 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT           7
126 	u8 flags1;
127 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK            0x1 /* bit8 */
128 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT           0
129 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK            0x1 /* bit9 */
130 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT           1
131 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK            0x1 /* bit10 */
132 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT           2
133 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK                0x1 /* bit11 */
134 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT               3
135 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK                0x1 /* bit12 */
136 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT               4
137 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK                0x1 /* bit13 */
138 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT               5
139 #define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK       0x1 /* bit14 */
140 #define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT      6
141 #define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK         0x1 /* bit15 */
142 #define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT        7
143 	u8 flags2;
144 #define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK                  0x3 /* timer0cf */
145 #define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT                 0
146 #define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK                  0x3 /* timer1cf */
147 #define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT                 2
148 #define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK                  0x3 /* timer2cf */
149 #define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT                 4
150 #define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK                  0x3 /* timer_stop_all */
151 #define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT                 6
152 	u8 flags3;
153 #define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK                  0x3 /* cf4 */
154 #define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT                 0
155 #define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK                  0x3 /* cf5 */
156 #define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT                 2
157 #define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK                  0x3 /* cf6 */
158 #define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT                 4
159 #define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK                  0x3 /* cf7 */
160 #define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT                 6
161 	u8 flags4;
162 #define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK                  0x3 /* cf8 */
163 #define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT                 0
164 #define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK                  0x3 /* cf9 */
165 #define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT                 2
166 #define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK                 0x3 /* cf10 */
167 #define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT                4
168 #define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK                 0x3 /* cf11 */
169 #define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT                6
170 	u8 flags5;
171 #define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK                 0x3 /* cf12 */
172 #define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT                0
173 #define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK                 0x3 /* cf13 */
174 #define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT                2
175 #define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK                 0x3 /* cf14 */
176 #define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT                4
177 #define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK                 0x3 /* cf15 */
178 #define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT                6
179 	u8 flags6;
180 #define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK     0x3 /* cf16 */
181 #define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT    0
182 #define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK                 0x3 /* cf_array_cf */
183 #define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT                2
184 #define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK                0x3 /* cf18 */
185 #define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT               4
186 #define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK         0x3 /* cf19 */
187 #define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT        6
188 	u8 flags7;
189 #define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK             0x3 /* cf20 */
190 #define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT            0
191 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK           0x3 /* cf21 */
192 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT          2
193 #define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK            0x3 /* cf22 */
194 #define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT           4
195 #define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK                0x1 /* cf0en */
196 #define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT               6
197 #define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK                0x1 /* cf1en */
198 #define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT               7
199 	u8 flags8;
200 #define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK                0x1 /* cf2en */
201 #define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT               0
202 #define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK                0x1 /* cf3en */
203 #define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT               1
204 #define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK                0x1 /* cf4en */
205 #define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT               2
206 #define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK                0x1 /* cf5en */
207 #define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT               3
208 #define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK                0x1 /* cf6en */
209 #define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT               4
210 #define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK                0x1 /* cf7en */
211 #define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT               5
212 #define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK                0x1 /* cf8en */
213 #define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT               6
214 #define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK                0x1 /* cf9en */
215 #define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT               7
216 	u8 flags9;
217 #define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK               0x1 /* cf10en */
218 #define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT              0
219 #define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK               0x1 /* cf11en */
220 #define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT              1
221 #define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK               0x1 /* cf12en */
222 #define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT              2
223 #define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK               0x1 /* cf13en */
224 #define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT              3
225 #define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK               0x1 /* cf14en */
226 #define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT              4
227 #define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK               0x1 /* cf15en */
228 #define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT              5
229 #define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK  0x1 /* cf16en */
230 #define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
231 #define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK               0x1 /* cf_array_cf_en */
232 #define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT              7
233 	u8 flags10;
234 #define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK             0x1 /* cf18en */
235 #define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT            0
236 #define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK      0x1 /* cf19en */
237 #define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT     1
238 #define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK          0x1 /* cf20en */
239 #define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT         2
240 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK           0x1 /* cf21en */
241 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT          3
242 #define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK         0x1 /* cf22en */
243 #define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT        4
244 #define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK               0x1 /* cf23en */
245 #define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT              5
246 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK           0x1 /* rule0en */
247 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT          6
248 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK           0x1 /* rule1en */
249 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT          7
250 	u8 flags11;
251 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK           0x1 /* rule2en */
252 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT          0
253 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK           0x1 /* rule3en */
254 #define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT          1
255 #define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK       0x1 /* rule4en */
256 #define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT      2
257 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK              0x1 /* rule5en */
258 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT             3
259 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK              0x1 /* rule6en */
260 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT             4
261 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK              0x1 /* rule7en */
262 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT             5
263 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK         0x1 /* rule8en */
264 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT        6
265 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK              0x1 /* rule9en */
266 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT             7
267 	u8 flags12;
268 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK             0x1 /* rule10en */
269 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT            0
270 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK             0x1 /* rule11en */
271 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT            1
272 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK         0x1 /* rule12en */
273 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT        2
274 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK         0x1 /* rule13en */
275 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT        3
276 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK             0x1 /* rule14en */
277 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT            4
278 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK             0x1 /* rule15en */
279 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT            5
280 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK             0x1 /* rule16en */
281 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT            6
282 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK             0x1 /* rule17en */
283 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT            7
284 	u8 flags13;
285 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK             0x1 /* rule18en */
286 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT            0
287 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK             0x1 /* rule19en */
288 #define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT            1
289 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK         0x1 /* rule20en */
290 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT        2
291 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK         0x1 /* rule21en */
292 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT        3
293 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK         0x1 /* rule22en */
294 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT        4
295 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK         0x1 /* rule23en */
296 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT        5
297 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK         0x1 /* rule24en */
298 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT        6
299 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK         0x1 /* rule25en */
300 #define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT        7
301 	u8 flags14;
302 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK                0x1 /* bit16 */
303 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT               0
304 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK                0x1 /* bit17 */
305 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT               1
306 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK                0x1 /* bit18 */
307 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT               2
308 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK                0x1 /* bit19 */
309 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT               3
310 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK                0x1 /* bit20 */
311 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT               4
312 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK                0x1 /* bit21 */
313 #define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT               5
314 #define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK                 0x3 /* cf23 */
315 #define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT                6
316 	u8 byte2 /* byte2 */;
317 	__le16 physical_q0 /* physical_q0 */;
318 	__le16 consolid_prod /* physical_q1 */;
319 	__le16 reserved16 /* physical_q2 */;
320 	__le16 tx_bd_cons /* word3 */;
321 	__le16 tx_bd_or_spq_prod /* word4 */;
322 	__le16 word5 /* word5 */;
323 	__le16 conn_dpi /* conn_dpi */;
324 	u8 byte3 /* byte3 */;
325 	u8 byte4 /* byte4 */;
326 	u8 byte5 /* byte5 */;
327 	u8 byte6 /* byte6 */;
328 	__le32 reg0 /* reg0 */;
329 	__le32 reg1 /* reg1 */;
330 	__le32 reg2 /* reg2 */;
331 	__le32 reg3 /* reg3 */;
332 	__le32 reg4 /* reg4 */;
333 	__le32 reg5 /* cf_array0 */;
334 	__le32 reg6 /* cf_array1 */;
335 	__le16 word7 /* word7 */;
336 	__le16 word8 /* word8 */;
337 	__le16 word9 /* word9 */;
338 	__le16 word10 /* word10 */;
339 	__le32 reg7 /* reg7 */;
340 	__le32 reg8 /* reg8 */;
341 	__le32 reg9 /* reg9 */;
342 	u8 byte7 /* byte7 */;
343 	u8 byte8 /* byte8 */;
344 	u8 byte9 /* byte9 */;
345 	u8 byte10 /* byte10 */;
346 	u8 byte11 /* byte11 */;
347 	u8 byte12 /* byte12 */;
348 	u8 byte13 /* byte13 */;
349 	u8 byte14 /* byte14 */;
350 	u8 byte15 /* byte15 */;
351 	u8 e5_reserved /* e5_reserved */;
352 	__le16 word11 /* word11 */;
353 	__le32 reg10 /* reg10 */;
354 	__le32 reg11 /* reg11 */;
355 	__le32 reg12 /* reg12 */;
356 	__le32 reg13 /* reg13 */;
357 	__le32 reg14 /* reg14 */;
358 	__le32 reg15 /* reg15 */;
359 	__le32 reg16 /* reg16 */;
360 	__le32 reg17 /* reg17 */;
361 	__le32 reg18 /* reg18 */;
362 	__le32 reg19 /* reg19 */;
363 	__le16 word12 /* word12 */;
364 	__le16 word13 /* word13 */;
365 	__le16 word14 /* word14 */;
366 	__le16 word15 /* word15 */;
367 };
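
/*
 * Illustrative sketch, not part of the generated HSI: each *_MASK/*_SHIFT
 * pair above describes a sub-field packed into one of the flagsN bytes, and
 * is read or written with plain shift-and-mask arithmetic.  For example, to
 * enable the consolidation-producer CF in flags9:
 *
 *	ctx->flags9 &= ~(E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK <<
 *	    E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT);
 *	ctx->flags9 |= (1 & E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK) <<
 *	    E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT;
 *
 * The ecore code typically wraps this pattern in a SET_FIELD()-style helper;
 * the open-coded form is shown here only to document the macro convention.
 */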
368 
369 struct e4_tstorm_core_conn_ag_ctx
370 {
371 	u8 byte0 /* cdu_validation */;
372 	u8 byte1 /* state */;
373 	u8 flags0;
374 #define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
375 #define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
376 #define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
377 #define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
378 #define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK     0x1 /* bit2 */
379 #define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT    2
380 #define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK     0x1 /* bit3 */
381 #define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT    3
382 #define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK     0x1 /* bit4 */
383 #define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT    4
384 #define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK     0x1 /* bit5 */
385 #define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT    5
386 #define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* timer0cf */
387 #define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     6
388 	u8 flags1;
389 #define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* timer1cf */
390 #define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     0
391 #define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* timer2cf */
392 #define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     2
393 #define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3 /* timer_stop_all */
394 #define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT     4
395 #define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3 /* cf4 */
396 #define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT     6
397 	u8 flags2;
398 #define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3 /* cf5 */
399 #define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT     0
400 #define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3 /* cf6 */
401 #define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT     2
402 #define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK      0x3 /* cf7 */
403 #define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT     4
404 #define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK      0x3 /* cf8 */
405 #define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT     6
406 	u8 flags3;
407 #define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK      0x3 /* cf9 */
408 #define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT     0
409 #define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK     0x3 /* cf10 */
410 #define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT    2
411 #define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
412 #define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   4
413 #define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
414 #define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   5
415 #define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
416 #define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   6
417 #define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1 /* cf3en */
418 #define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   7
419 	u8 flags4;
420 #define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1 /* cf4en */
421 #define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   0
422 #define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1 /* cf5en */
423 #define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   1
424 #define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1 /* cf6en */
425 #define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   2
426 #define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK    0x1 /* cf7en */
427 #define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT   3
428 #define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK    0x1 /* cf8en */
429 #define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT   4
430 #define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK    0x1 /* cf9en */
431 #define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT   5
432 #define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK   0x1 /* cf10en */
433 #define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT  6
434 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
435 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
436 	u8 flags5;
437 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
438 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
439 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
440 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
441 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
442 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
443 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
444 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
445 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1 /* rule5en */
446 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
447 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1 /* rule6en */
448 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
449 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1 /* rule7en */
450 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
451 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1 /* rule8en */
452 #define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
453 	__le32 reg0 /* reg0 */;
454 	__le32 reg1 /* reg1 */;
455 	__le32 reg2 /* reg2 */;
456 	__le32 reg3 /* reg3 */;
457 	__le32 reg4 /* reg4 */;
458 	__le32 reg5 /* reg5 */;
459 	__le32 reg6 /* reg6 */;
460 	__le32 reg7 /* reg7 */;
461 	__le32 reg8 /* reg8 */;
462 	u8 byte2 /* byte2 */;
463 	u8 byte3 /* byte3 */;
464 	__le16 word0 /* word0 */;
465 	u8 byte4 /* byte4 */;
466 	u8 byte5 /* byte5 */;
467 	__le16 word1 /* word1 */;
468 	__le16 word2 /* conn_dpi */;
469 	__le16 word3 /* word3 */;
470 	__le32 reg9 /* reg9 */;
471 	__le32 reg10 /* reg10 */;
472 };
473 
474 struct e4_ustorm_core_conn_ag_ctx
475 {
476 	u8 reserved /* cdu_validation */;
477 	u8 byte1 /* state */;
478 	u8 flags0;
479 #define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
480 #define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
481 #define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
482 #define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
483 #define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* timer0cf */
484 #define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
485 #define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* timer1cf */
486 #define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
487 #define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* timer2cf */
488 #define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
489 	u8 flags1;
490 #define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3 /* timer_stop_all */
491 #define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT     0
492 #define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3 /* cf4 */
493 #define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT     2
494 #define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3 /* cf5 */
495 #define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT     4
496 #define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3 /* cf6 */
497 #define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT     6
498 	u8 flags2;
499 #define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
500 #define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
501 #define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
502 #define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
503 #define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
504 #define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
505 #define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1 /* cf3en */
506 #define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   3
507 #define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1 /* cf4en */
508 #define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   4
509 #define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1 /* cf5en */
510 #define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   5
511 #define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1 /* cf6en */
512 #define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   6
513 #define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
514 #define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
515 	u8 flags3;
516 #define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
517 #define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
518 #define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
519 #define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
520 #define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
521 #define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
522 #define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
523 #define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
524 #define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1 /* rule5en */
525 #define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
526 #define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1 /* rule6en */
527 #define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
528 #define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1 /* rule7en */
529 #define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
530 #define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1 /* rule8en */
531 #define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
532 	u8 byte2 /* byte2 */;
533 	u8 byte3 /* byte3 */;
534 	__le16 word0 /* conn_dpi */;
535 	__le16 word1 /* word1 */;
536 	__le32 rx_producers /* reg0 */;
537 	__le32 reg1 /* reg1 */;
538 	__le32 reg2 /* reg2 */;
539 	__le32 reg3 /* reg3 */;
540 	__le16 word2 /* word2 */;
541 	__le16 word3 /* word3 */;
542 };
543 
544 /*
545  * The core storm context for the Mstorm
546  */
547 struct mstorm_core_conn_st_ctx
548 {
549 	__le32 reserved[24];
550 };
551 
552 /*
553  * The core storm context for the Ustorm
554  */
555 struct ustorm_core_conn_st_ctx
556 {
557 	__le32 reserved[4];
558 };
559 
560 /*
561  * core connection context
562  */
563 struct core_conn_context
564 {
565 	struct ystorm_core_conn_st_ctx ystorm_st_context /* ystorm storm context */;
566 	struct regpair ystorm_st_padding[2] /* padding */;
567 	struct pstorm_core_conn_st_ctx pstorm_st_context /* pstorm storm context */;
568 	struct regpair pstorm_st_padding[2] /* padding */;
569 	struct xstorm_core_conn_st_ctx xstorm_st_context /* xstorm storm context */;
570 	struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context /* xstorm aggregative context */;
571 	struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
572 	struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
573 	struct mstorm_core_conn_st_ctx mstorm_st_context /* mstorm storm context */;
574 	struct ustorm_core_conn_st_ctx ustorm_st_context /* ustorm storm context */;
575 	struct regpair ustorm_st_padding[2] /* padding */;
576 };
577 
578 
579 /*
580  * How ll2 should deal with a packet upon error
581  */
582 enum core_error_handle
583 {
584 	LL2_DROP_PACKET /* If error occurs drop packet */,
585 	LL2_DO_NOTHING /* If error occurs do nothing */,
586 	LL2_ASSERT /* If error occurs assert */,
587 	MAX_CORE_ERROR_HANDLE
588 };
589 
590 
591 /*
592  * opcodes for the event ring
593  */
594 enum core_event_opcode
595 {
596 	CORE_EVENT_TX_QUEUE_START,
597 	CORE_EVENT_TX_QUEUE_STOP,
598 	CORE_EVENT_RX_QUEUE_START,
599 	CORE_EVENT_RX_QUEUE_STOP,
600 	CORE_EVENT_RX_QUEUE_FLUSH,
601 	MAX_CORE_EVENT_OPCODE
602 };
603 
604 
605 /*
606  * The L4 pseudo checksum mode for Core
607  */
608 enum core_l4_pseudo_checksum_mode
609 {
610 	CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH /* Pseudo Checksum on packet is calculated with the correct packet length. */,
611 	CORE_L4_PSEUDO_CSUM_ZERO_LENGTH /* Pseudo Checksum on packet is calculated with zero length. */,
612 	MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
613 };
614 
615 
616 /*
617  * Light-L2 (LL2) port statistics (GSI error counters)
618  */
619 struct core_ll2_port_stats
620 {
621 	struct regpair gsi_invalid_hdr;
622 	struct regpair gsi_invalid_pkt_length;
623 	struct regpair gsi_unsupported_pkt_typ;
624 	struct regpair gsi_crcchksm_error;
625 };
626 
627 
628 /*
629  * Ethernet TX Per Queue Stats
630  */
631 struct core_ll2_pstorm_per_queue_stat
632 {
633 	struct regpair sent_ucast_bytes /* number of total bytes sent without errors */;
634 	struct regpair sent_mcast_bytes /* number of total bytes sent without errors */;
635 	struct regpair sent_bcast_bytes /* number of total bytes sent without errors */;
636 	struct regpair sent_ucast_pkts /* number of total packets sent without errors */;
637 	struct regpair sent_mcast_pkts /* number of total packets sent without errors */;
638 	struct regpair sent_bcast_pkts /* number of total packets sent without errors */;
639 };
640 
641 
642 /*
643  * Light-L2 RX Producers in Tstorm RAM
644  */
645 struct core_ll2_rx_prod
646 {
647 	__le16 bd_prod /* BD Producer */;
648 	__le16 cqe_prod /* CQE Producer */;
649 	__le32 reserved;
650 };
651 
652 
653 struct core_ll2_tstorm_per_queue_stat
654 {
655 	struct regpair packet_too_big_discard /* Number of packets discarded because they are bigger than MTU */;
656 	struct regpair no_buff_discard /* Number of packets discarded due to lack of host buffers */;
657 };
658 
659 
660 struct core_ll2_ustorm_per_queue_stat
661 {
662 	struct regpair rcv_ucast_bytes;
663 	struct regpair rcv_mcast_bytes;
664 	struct regpair rcv_bcast_bytes;
665 	struct regpair rcv_ucast_pkts;
666 	struct regpair rcv_mcast_pkts;
667 	struct regpair rcv_bcast_pkts;
668 };
669 
670 
671 /*
672  * Core Ramrod Command IDs (light L2)
673  */
674 enum core_ramrod_cmd_id
675 {
676 	CORE_RAMROD_UNUSED,
677 	CORE_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
678 	CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
679 	CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
680 	CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
681 	CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
682 	MAX_CORE_RAMROD_CMD_ID
683 };
684 
685 
686 /*
687  * Core RoCE flavor type for Light L2
688  */
689 enum core_roce_flavor_type
690 {
691 	CORE_ROCE,
692 	CORE_RROCE,
693 	MAX_CORE_ROCE_FLAVOR_TYPE
694 };
695 
696 
697 /*
698  * Specifies how ll2 should deal with packet errors: packet_too_big and no_buff
699  */
700 struct core_rx_action_on_error
701 {
702 	u8 error_type;
703 #define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK  0x3 /* ll2 how to handle error packet_too_big (use enum core_error_handle) */
704 #define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
705 #define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK         0x3 /* ll2 how to handle error with no_buff  (use enum core_error_handle) */
706 #define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT        2
707 #define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK        0xF
708 #define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT       4
709 };
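
/*
 * Illustrative sketch, not part of the generated HSI: error_type packs two
 * 2-bit fields, each holding an enum core_error_handle value.  For example,
 * to drop oversized packets but assert on buffer exhaustion:
 *
 *	struct core_rx_action_on_error act = { 0 };
 *
 *	act.error_type |= (LL2_DROP_PACKET &
 *	    CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK) <<
 *	    CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT;
 *	act.error_type |= (LL2_ASSERT & CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK) <<
 *	    CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT;
 */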
710 
711 
712 /*
713  * Core RX BD for Light L2
714  */
715 struct core_rx_bd
716 {
717 	struct regpair addr;
718 	__le16 reserved[4];
719 };
720 
721 
722 /*
723  * Core RX CM offload BD for Light L2
724  */
725 struct core_rx_bd_with_buff_len
726 {
727 	struct regpair addr;
728 	__le16 buff_length;
729 	__le16 reserved[3];
730 };
731 
732 /*
733  * Core RX BD union for Light L2
734  */
735 union core_rx_bd_union
736 {
737 	struct core_rx_bd rx_bd /* Core Rx Bd static buffer size */;
738 	struct core_rx_bd_with_buff_len rx_bd_with_len /* Core Rx Bd with dynamic buffer length */;
739 };
740 
741 
742 
743 /*
744  * Opaque Data for Light L2 RX CQE .
745  */
746 struct core_rx_cqe_opaque_data
747 {
748 	__le32 data[2] /* Opaque CQE Data */;
749 };
750 
751 
752 /*
753  * Core RX CQE Type for Light L2
754  */
755 enum core_rx_cqe_type
756 {
757 	CORE_RX_CQE_ILLIGAL_TYPE /* Bad RX Cqe type */,
758 	CORE_RX_CQE_TYPE_REGULAR /* Regular Core RX CQE */,
759 	CORE_RX_CQE_TYPE_GSI_OFFLOAD /* Fp Gsi offload RX CQE */,
760 	CORE_RX_CQE_TYPE_SLOW_PATH /* Slow path Core RX CQE */,
761 	MAX_CORE_RX_CQE_TYPE
762 };
763 
764 
765 /*
766  * Core RX CQE for Light L2 .
767  */
768 struct core_rx_fast_path_cqe
769 {
770 	u8 type /* CQE type */;
771 	u8 placement_offset /* Offset (in bytes) of the packet from start of the buffer */;
772 	struct parsing_and_err_flags parse_flags /* Parsing and error flags from the parser */;
773 	__le16 packet_length /* Total packet length (from the parser) */;
774 	__le16 vlan /* 802.1q VLAN tag */;
775 	struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
776 	struct parsing_err_flags err_flags /* bit-map: each bit represents a specific error. Error indications are provided by the cracker; see spec for detailed description */;
777 	__le16 reserved0;
778 	__le32 reserved1[3];
779 };
780 
781 /*
782  * Core Rx CM offload CQE .
783  */
784 struct core_rx_gsi_offload_cqe
785 {
786 	u8 type /* CQE type */;
787 	u8 data_length_error /* set if gsi data is bigger than buff */;
788 	struct parsing_and_err_flags parse_flags /* Parsing and error flags from the parser */;
789 	__le16 data_length /* Total packet length (from the parser) */;
790 	__le16 vlan /* 802.1q VLAN tag */;
791 	__le32 src_mac_addrhi /* hi 4 bytes source mac address */;
792 	__le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
793 	__le16 qp_id /* These are the lower 16 bit of QP id in RoCE BTH header */;
794 	__le32 gid_dst[4] /* Gid destination address */;
795 };
796 
797 /*
798  * Core RX CQE for Light L2 .
799  */
800 struct core_rx_slow_path_cqe
801 {
802 	u8 type /* CQE type */;
803 	u8 ramrod_cmd_id;
804 	__le16 echo;
805 	struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
806 	__le32 reserved1[5];
807 };
808 
809 /*
810  * Core RX CQE union for Light L2
811  */
812 union core_rx_cqe_union
813 {
814 	struct core_rx_fast_path_cqe rx_cqe_fp /* Fast path CQE */;
815 	struct core_rx_gsi_offload_cqe rx_cqe_gsi /* GSI offload CQE */;
816 	struct core_rx_slow_path_cqe rx_cqe_sp /* Slow path CQE */;
817 };
818 
819 
820 
821 
822 
823 /*
824  * Ramrod data for rx queue start ramrod
825  */
826 struct core_rx_start_ramrod_data
827 {
828 	struct regpair bd_base /* bd address of the first bd page */;
829 	struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
830 	__le16 mtu /* Maximum transmission unit */;
831 	__le16 sb_id /* Status block ID */;
832 	u8 sb_index /* Status block protocol index */;
833 	u8 complete_cqe_flg /* post completion to the CQE ring if set */;
834 	u8 complete_event_flg /* post completion to the event ring if set */;
835 	u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
836 	__le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
837 	u8 inner_vlan_removal_en /* if set, 802.1q tags will be removed and copied to CQE */;
838 	u8 queue_id /* Light L2 RX Queue ID */;
839 	u8 main_func_queue /* Is this the main queue for the PF */;
840 	u8 mf_si_bcast_accept_all /* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if main_func_queue is set. */;
841 	u8 mf_si_mcast_accept_all /* Duplicate multicast packets to LL2 main queue in mf_si mode. Valid if main_func_queue is set. */;
842 	struct core_rx_action_on_error action_on_error /* Specifies how ll2 should deal with packet errors: packet_too_big and no_buff */;
843 	u8 gsi_offload_flag /* set when in GSI offload mode on ROCE connection */;
844 	u8 reserved[7];
845 };
846 
847 
848 /*
849  * Ramrod data for rx queue stop ramrod
850  */
851 struct core_rx_stop_ramrod_data
852 {
853 	u8 complete_cqe_flg /* post completion to the CQE ring if set */;
854 	u8 complete_event_flg /* post completion to the event ring if set */;
855 	u8 queue_id /* Light L2 RX Queue ID */;
856 	u8 reserved1;
857 	__le16 reserved2[2];
858 };
859 
860 
861 /*
862  * Flags for Core TX BD
863  */
864 struct core_tx_bd_data
865 {
866 	__le16 as_bitfield;
867 #define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK      0x1 /* Do not allow additional VLAN manipulations on this packet (DCB) */
868 #define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT     0
869 #define CORE_TX_BD_DATA_VLAN_INSERTION_MASK       0x1 /* Insert VLAN into packet */
870 #define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT      1
871 #define CORE_TX_BD_DATA_START_BD_MASK             0x1 /* This is the first BD of the packet (for debug) */
872 #define CORE_TX_BD_DATA_START_BD_SHIFT            2
873 #define CORE_TX_BD_DATA_IP_CSUM_MASK              0x1 /* Calculate the IP checksum for the packet */
874 #define CORE_TX_BD_DATA_IP_CSUM_SHIFT             3
875 #define CORE_TX_BD_DATA_L4_CSUM_MASK              0x1 /* Calculate the L4 checksum for the packet */
876 #define CORE_TX_BD_DATA_L4_CSUM_SHIFT             4
877 #define CORE_TX_BD_DATA_IPV6_EXT_MASK             0x1 /* Packet is IPv6 with extensions */
878 #define CORE_TX_BD_DATA_IPV6_EXT_SHIFT            5
879 #define CORE_TX_BD_DATA_L4_PROTOCOL_MASK          0x1 /* If IPv6+ext, and if l4_csum is 1, then this field indicates L4 protocol: 0-TCP, 1-UDP */
880 #define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT         6
881 #define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK  0x1 /* The pseudo checksum mode to place in the L4 checksum field. Required only when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode) */
882 #define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
883 #define CORE_TX_BD_DATA_NBDS_MASK                 0xF /* Number of BDs that make up one packet - width wide enough to represent CORE_LL2_TX_MAX_BDS_PER_PACKET */
884 #define CORE_TX_BD_DATA_NBDS_SHIFT                8
885 #define CORE_TX_BD_DATA_ROCE_FLAV_MASK            0x1 /* Use roce_flavor enum - differentiating between RoCE flavors is valid only when connType is ROCE (use enum core_roce_flavor_type) */
886 #define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT           12
887 #define CORE_TX_BD_DATA_IP_LEN_MASK               0x1 /* Calculate ip length */
888 #define CORE_TX_BD_DATA_IP_LEN_SHIFT              13
889 #define CORE_TX_BD_DATA_RESERVED0_MASK            0x3
890 #define CORE_TX_BD_DATA_RESERVED0_SHIFT           14
891 };
892 
893 /*
894  * Core TX BD for Light L2
895  */
896 struct core_tx_bd
897 {
898 	struct regpair addr /* Buffer Address */;
899 	__le16 nbytes /* Number of Bytes in Buffer */;
900 	__le16 nw_vlan_or_lb_echo /* Network packets: VLAN to insert into the packet (if the insertion flag is set); LoopBack packets: echo data to pass to Rx */;
901 	struct core_tx_bd_data bd_data /* BD Flags */;
902 	__le16 bitfield1;
903 #define CORE_TX_BD_L4_HDR_OFFSET_W_MASK  0x3FFF /* L4 Header Offset from start of packet (in Words). This is needed if both l4_csum and ipv6_ext are set */
904 #define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
905 #define CORE_TX_BD_TX_DST_MASK           0x3 /* Packet destination - Network, Loopback or Drop (use enum core_tx_dest) */
906 #define CORE_TX_BD_TX_DST_SHIFT          14
907 };
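
/*
 * Illustrative sketch, not part of the generated HSI: bitfield1 carries the
 * L4 header offset (in 16-bit words) in its low 14 bits and the TX
 * destination (enum core_tx_dest) in its top 2 bits.  Because the field is
 * little-endian, a host-to-LE conversion is needed when filling it, e.g.:
 *
 *	u16 val;
 *
 *	val = (l4_hdr_offset_w & CORE_TX_BD_L4_HDR_OFFSET_W_MASK) <<
 *	    CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT;
 *	val |= (CORE_TX_DEST_NW & CORE_TX_BD_TX_DST_MASK) <<
 *	    CORE_TX_BD_TX_DST_SHIFT;
 *	bd->bitfield1 = OSAL_CPU_TO_LE16(val);
 *
 * OSAL_CPU_TO_LE16() and l4_hdr_offset_w are assumed/hypothetical names here;
 * any host-to-little-endian conversion serves the same purpose.
 */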
908 
909 
910 
911 /*
912  * Light L2 TX Destination
913  */
914 enum core_tx_dest
915 {
916 	CORE_TX_DEST_NW /* TX Destination to the Network */,
917 	CORE_TX_DEST_LB /* TX Destination to the Loopback */,
918 	CORE_TX_DEST_RESERVED,
919 	CORE_TX_DEST_DROP /* TX Drop */,
920 	MAX_CORE_TX_DEST
921 };
922 
923 
924 /*
925  * Ramrod data for tx queue start ramrod
926  */
927 struct core_tx_start_ramrod_data
928 {
929 	struct regpair pbl_base_addr /* Address of the pbl page */;
930 	__le16 mtu /* Maximum transmission unit */;
931 	__le16 sb_id /* Status block ID */;
932 	u8 sb_index /* Status block protocol index */;
933 	u8 stats_en /* Statistics Enable */;
934 	u8 stats_id /* Statistics Counter ID */;
935 	u8 conn_type /* connection type that loaded ll2 */;
936 	__le16 pbl_size /* Number of BD pages pointed by PBL */;
937 	__le16 qm_pq_id /* QM PQ ID */;
938 	u8 gsi_offload_flag /* set when in GSI offload mode on ROCE connection */;
939 	u8 resrved[3];
940 };
941 
942 
943 /*
944  * Ramrod data for tx queue stop ramrod
945  */
946 struct core_tx_stop_ramrod_data
947 {
948 	__le32 reserved0[2];
949 };
950 
951 
952 /*
953  * Enum flag for what type of dcb data to update
954  */
955 enum dcb_dscp_update_mode
956 {
957 	DONT_UPDATE_DCB_DSCP /* use when no change should be done to dcb data */,
958 	UPDATE_DCB /* use to update only l2 (vlan) priority */,
959 	UPDATE_DSCP /* use to update only l3 dscp */,
960 	UPDATE_DCB_DSCP /* update vlan pri and dscp */,
961 	MAX_DCB_DSCP_UPDATE_MODE
962 };
963 
964 
965 struct eth_mstorm_per_pf_stat
966 {
967 	struct regpair gre_discard_pkts /* Dropped GRE RX packets */;
968 	struct regpair vxlan_discard_pkts /* Dropped VXLAN RX packets */;
969 	struct regpair geneve_discard_pkts /* Dropped GENEVE RX packets */;
970 	struct regpair lb_discard_pkts /* Dropped Tx switched packets */;
971 };
972 
973 
974 struct eth_mstorm_per_queue_stat
975 {
976 	struct regpair ttl0_discard /* Number of packets discarded because TTL=0 (in IPv4) or hopLimit=0 (in IPv6) */;
977 	struct regpair packet_too_big_discard /* Number of packets discarded because they are bigger than MTU */;
978 	struct regpair no_buff_discard /* Number of packets discarded due to lack of host buffers (BDs/SGEs/CQEs) */;
979 	struct regpair not_active_discard /* Number of packets discarded because of no active Rx connection */;
980 	struct regpair tpa_coalesced_pkts /* number of coalesced packets in all TPA aggregations */;
981 	struct regpair tpa_coalesced_events /* total number of TPA aggregations */;
982 	struct regpair tpa_aborts_num /* number of aggregations, which abnormally ended */;
983 	struct regpair tpa_coalesced_bytes /* total TCP payload length in all TPA aggregations */;
984 };
985 
986 
987 /*
988  * Ethernet TX Per PF
989  */
990 struct eth_pstorm_per_pf_stat
991 {
992 	struct regpair sent_lb_ucast_bytes /* number of total ucast bytes sent on loopback port without errors */;
993 	struct regpair sent_lb_mcast_bytes /* number of total mcast bytes sent on loopback port without errors */;
994 	struct regpair sent_lb_bcast_bytes /* number of total bcast bytes sent on loopback port without errors */;
995 	struct regpair sent_lb_ucast_pkts /* number of total ucast packets sent on loopback port without errors */;
996 	struct regpair sent_lb_mcast_pkts /* number of total mcast packets sent on loopback port without errors */;
997 	struct regpair sent_lb_bcast_pkts /* number of total bcast packets sent on loopback port without errors */;
998 	struct regpair sent_gre_bytes /* Sent GRE bytes */;
999 	struct regpair sent_vxlan_bytes /* Sent VXLAN bytes */;
1000 	struct regpair sent_geneve_bytes /* Sent GENEVE bytes */;
1001 	struct regpair sent_gre_pkts /* Sent GRE packets */;
1002 	struct regpair sent_vxlan_pkts /* Sent VXLAN packets */;
1003 	struct regpair sent_geneve_pkts /* Sent GENEVE packets */;
1004 	struct regpair gre_drop_pkts /* Dropped GRE TX packets */;
1005 	struct regpair vxlan_drop_pkts /* Dropped VXLAN TX packets */;
1006 	struct regpair geneve_drop_pkts /* Dropped GENEVE TX packets */;
1007 };
1008 
1009 
1010 /*
1011  * Ethernet TX Per Queue Stats
1012  */
1013 struct eth_pstorm_per_queue_stat
1014 {
1015 	struct regpair sent_ucast_bytes /* number of total bytes sent without errors */;
1016 	struct regpair sent_mcast_bytes /* number of total bytes sent without errors */;
1017 	struct regpair sent_bcast_bytes /* number of total bytes sent without errors */;
1018 	struct regpair sent_ucast_pkts /* number of total packets sent without errors */;
1019 	struct regpair sent_mcast_pkts /* number of total packets sent without errors */;
1020 	struct regpair sent_bcast_pkts /* number of total packets sent without errors */;
1021 	struct regpair error_drop_pkts /* number of total packets dropped due to errors */;
1022 };
1023 
1024 
1025 /*
1026  * ETH RX rate limit data
1027  */
1028 struct eth_rx_rate_limit
1029 {
1030 	__le16 mult /* Rate Limit Multiplier - (Storm Clock (MHz) * 8 / Desired Bandwidth (MB/s)) */;
1031 	__le16 cnst /* Constant term to add to (or subtract from) the number of cycles */;
1032 	u8 add_sub_cnst /* Add (1) or subtract (0) constant term */;
1033 	u8 reserved0;
1034 	__le16 reserved1;
1035 };
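
/*
 * Illustrative sketch, not part of the generated HSI: using the mult formula
 * above with a hypothetical 1000 MHz storm clock and a desired bandwidth of
 * 200 MB/s gives mult = 1000 * 8 / 200 = 40.  add_sub_cnst then selects
 * whether the cnst cycles are added to or subtracted from the cycle count.
 */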
1036 
1037 
1038 struct eth_ustorm_per_pf_stat
1039 {
1040 	struct regpair rcv_lb_ucast_bytes /* number of total ucast bytes received on loopback port without errors */;
1041 	struct regpair rcv_lb_mcast_bytes /* number of total mcast bytes received on loopback port without errors */;
1042 	struct regpair rcv_lb_bcast_bytes /* number of total bcast bytes received on loopback port without errors */;
1043 	struct regpair rcv_lb_ucast_pkts /* number of total ucast packets received on loopback port without errors */;
1044 	struct regpair rcv_lb_mcast_pkts /* number of total mcast packets received on loopback port without errors */;
1045 	struct regpair rcv_lb_bcast_pkts /* number of total bcast packets received on loopback port without errors */;
1046 	struct regpair rcv_gre_bytes /* Received GRE bytes */;
1047 	struct regpair rcv_vxlan_bytes /* Received VXLAN bytes */;
1048 	struct regpair rcv_geneve_bytes /* Received GENEVE bytes */;
1049 	struct regpair rcv_gre_pkts /* Received GRE packets */;
1050 	struct regpair rcv_vxlan_pkts /* Received VXLAN packets */;
1051 	struct regpair rcv_geneve_pkts /* Received GENEVE packets */;
1052 };
1053 
1054 
1055 struct eth_ustorm_per_queue_stat
1056 {
1057 	struct regpair rcv_ucast_bytes;
1058 	struct regpair rcv_mcast_bytes;
1059 	struct regpair rcv_bcast_bytes;
1060 	struct regpair rcv_ucast_pkts;
1061 	struct regpair rcv_mcast_pkts;
1062 	struct regpair rcv_bcast_pkts;
1063 };
1064 
1065 
1066 /*
1067  * Event Ring Next Page Address
1068  */
1069 struct event_ring_next_addr
1070 {
1071 	struct regpair addr /* Next Page Address */;
1072 	__le32 reserved[2] /* Reserved */;
1073 };
1074 
1075 /*
1076  * Event Ring Element
1077  */
1078 union event_ring_element
1079 {
1080 	struct event_ring_entry entry /* Event Ring Entry */;
1081 	struct event_ring_next_addr next_addr /* Event Ring Next Page Address */;
1082 };
1083 
1084 
1085 
1086 /*
1087  * FW flow control mode
1088  */
1089 enum fw_flow_ctrl_mode
1090 {
1091 	flow_ctrl_pause,
1092 	flow_ctrl_pfc,
1093 	MAX_FW_FLOW_CTRL_MODE
1094 };
1095 
1096 
1097 /*
1098  * Major and Minor hsi Versions
1099  */
1100 struct hsi_fp_ver_struct
1101 {
1102 	u8 minor_ver_arr[2] /* Minor Version of hsi loading pf */;
1103 	u8 major_ver_arr[2] /* Major Version of driver loading pf */;
1104 };
1105 
1106 
1107 /*
1108  * Integration Phase
1109  */
1110 enum integ_phase
1111 {
1112 	INTEG_PHASE_BB_A0_LATEST=3 /* BB A0 latest integration phase */,
1113 	INTEG_PHASE_BB_B0_NO_MCP=10 /* BB B0 without MCP */,
1114 	INTEG_PHASE_BB_B0_WITH_MCP=11 /* BB B0 with MCP */,
1115 	MAX_INTEG_PHASE
1116 };
1117 
1118 
1119 /*
1120  * iWARP LL2 TX queue types
1121  */
1122 enum iwarp_ll2_tx_queues
1123 {
1124 	IWARP_LL2_IN_ORDER_TX_QUEUE=1 /* LL2 queue for OOO packets sent in-order by the driver */,
1125 	IWARP_LL2_ALIGNED_TX_QUEUE /* LL2 queue for unaligned packets sent aligned by the driver */,
1126 	IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE /* LL2 queue for unaligned packets sent aligned and right-trimmed by the driver */,
1127 	IWARP_LL2_ERROR /* Error indication */,
1128 	MAX_IWARP_LL2_TX_QUEUES
1129 };
1130 
1131 
1132 /*
1133  * Malicious VF error ID
1134  */
1135 enum malicious_vf_error_id
1136 {
1137 	MALICIOUS_VF_NO_ERROR /* Zero placeholder value */,
1138 	VF_PF_CHANNEL_NOT_READY /* Writing to VF/PF channel when it is not ready */,
1139 	VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */,
1140 	VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */,
1141 	ETH_PACKET_TOO_SMALL /* TX packet is shorter than reported on BDs or than the minimal size */,
1142 	ETH_ILLEGAL_VLAN_MODE /* TX packet marked for VLAN insertion when it is illegal */,
1143 	ETH_MTU_VIOLATION /* TX packet is greater than the MTU */,
1144 	ETH_ILLEGAL_INBAND_TAGS /* TX packet has illegal inband tags marked */,
1145 	ETH_VLAN_INSERT_AND_INBAND_VLAN /* VLAN cannot be added to a packet that already carries an inband tag */,
1146 	ETH_ILLEGAL_NBDS /* indicated number of BDs for the packet is illegal */,
1147 	ETH_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
1148 	ETH_INSUFFICIENT_BDS /* There are not enough BDs for transmission of even one packet */,
1149 	ETH_ILLEGAL_LSO_HDR_NBDS /* Header NBDs value is illegal */,
1150 	ETH_ILLEGAL_LSO_MSS /* LSO MSS value is more than allowed */,
1151 	ETH_ZERO_SIZE_BD /* empty BD (which does not contain control flags) is illegal */,
1152 	ETH_ILLEGAL_LSO_HDR_LEN /* LSO header size is above the limit  */,
1153 	ETH_INSUFFICIENT_PAYLOAD /* In LSO it is expected that the local BD ring holds at least MSS bytes of data */,
1154 	ETH_EDPM_OUT_OF_SYNC /* Valid BDs on local ring after EDPM L2 sync */,
1155 	ETH_TUNN_IPV6_EXT_NBD_ERR /* Tunneled packet with IPv6+Ext without a proper number of BDs */,
1156 	ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
1157 	ETH_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
1158 	MAX_MALICIOUS_VF_ERROR_ID
1159 };
1160 
1161 
1162 
1163 /*
1164  * Mstorm non-triggering VF zone
1165  */
1166 struct mstorm_non_trigger_vf_zone
1167 {
1168 	struct eth_mstorm_per_queue_stat eth_queue_stat /* VF statistic bucket */;
1169 	struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD] /* VF RX queues producers */;
1170 };
1171 
1172 
1173 /*
1174  * Mstorm VF zone
1175  */
1176 struct mstorm_vf_zone
1177 {
1178 	struct mstorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
1179 };
1180 
1181 
1182 /*
1183  * personality per PF
1184  */
1185 enum personality_type
1186 {
1187 	BAD_PERSONALITY_TYP,
1188 	PERSONALITY_ISCSI /* iSCSI and LL2 */,
1189 	PERSONALITY_FCOE /* Fcoe and LL2 */,
1190 	PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp, Eth and LL2 */,
1191 	PERSONALITY_RDMA /* Roce and LL2 */,
1192 	PERSONALITY_CORE /* CORE(LL2) */,
1193 	PERSONALITY_ETH /* Ethernet */,
1194 	PERSONALITY_TOE /* Toe and LL2 */,
1195 	MAX_PERSONALITY_TYPE
1196 };
1197 
1198 
1199 /*
1200  * tunnel configuration
1201  */
1202 struct pf_start_tunnel_config
1203 {
1204 	u8 set_vxlan_udp_port_flg /* Set VXLAN tunnel UDP destination port to vxlan_udp_port. If not set - FW will use a default port */;
1205 	u8 set_geneve_udp_port_flg /* Set GENEVE tunnel UDP destination port to geneve_udp_port. If not set - FW will use a default port */;
1206 	u8 tunnel_clss_vxlan /* Rx classification scheme for VXLAN tunnel. */;
1207 	u8 tunnel_clss_l2geneve /* Rx classification scheme for l2 GENEVE tunnel. */;
1208 	u8 tunnel_clss_ipgeneve /* Rx classification scheme for ip GENEVE tunnel. */;
1209 	u8 tunnel_clss_l2gre /* Rx classification scheme for l2 GRE tunnel. */;
1210 	u8 tunnel_clss_ipgre /* Rx classification scheme for ip GRE tunnel. */;
1211 	u8 reserved;
1212 	__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */;
1213 	__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */;
1214 };
1215 
1216 /*
1217  * Ramrod data for PF start ramrod
1218  */
1219 struct pf_start_ramrod_data
1220 {
1221 	struct regpair event_ring_pbl_addr /* Address of event ring PBL */;
1222 	struct regpair consolid_q_pbl_addr /* PBL address of consolidation queue */;
1223 	struct pf_start_tunnel_config tunnel_config /* tunnel configuration. */;
1224 	__le32 reserved;
1225 	__le16 event_ring_sb_id /* Status block ID */;
1226 	u8 base_vf_id /* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */;
1227 	u8 num_vfs /* Amount of vfs owned by PF */;
1228 	u8 event_ring_num_pages /* Number of PBL pages in event ring */;
1229 	u8 event_ring_sb_index /* Status block index */;
1230 	u8 path_id /* HW path ID (engine ID) */;
1231 	u8 warning_as_error /* In FW asserts, treat warning as error */;
1232 	u8 dont_log_ramrods /* If not set - throw a warning for each ramrod (for debug) */;
1233 	u8 personality /* defines the personality type of the new PF */;
1234 	__le16 log_type_mask /* Log type mask. Each bit set enables a corresponding event type logging. Event types are defined as ASSERT_LOG_TYPE_xxx */;
1235 	u8 mf_mode /* Multi function mode */;
1236 	u8 integ_phase /* Integration phase */;
1237 	u8 allow_npar_tx_switching /* If set, inter-pf tx switching is allowed in Switch Independent function mode */;
1238 	u8 inner_to_outer_pri_map[8] /* Map from inner to outer priority. Set pri_map_valid when init map */;
1239 	u8 pri_map_valid /* Set pri_map_valid if inner_to_outer_pri_map is initialized */;
1240 	__le32 outer_tag /* In case mf_mode is MF_OVLAN, this field specifies the outer vlan (lower 16 bits) and ethType to use (higher 16 bits) */;
1241 	struct hsi_fp_ver_struct hsi_fp_ver /* FP HSI version to be used by FW */;
1242 };
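
/*
 * Illustrative sketch, not part of the generated HSI: per the outer_tag
 * comment above, in MF_OVLAN mode the lower 16 bits carry the outer VLAN ID
 * and the upper 16 bits carry the EtherType, e.g.:
 *
 *	p_data->outer_tag = OSAL_CPU_TO_LE32(((u32)eth_type << 16) | vlan_id);
 *
 * OSAL_CPU_TO_LE32(), eth_type and vlan_id are assumed/hypothetical names
 * used only for this example.
 */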
1243 
1244 
1245 
1246 /*
1247  * Per-protocol DCB data (carried in the PF update ramrod)
1248  */
1249 struct protocol_dcb_data
1250 {
1251 	u8 dcb_enable_flag /* dcbEnable flag value */;
1252 	u8 dscp_enable_flag /* If set use dscp value */;
1253 	u8 dcb_priority /* dcbPri flag value */;
1254 	u8 dcb_tc /* dcb TC value */;
1255 	u8 dscp_val /* dscp value to write if dscp_enable_flag is set */;
1256 	u8 reserved0;
1257 };
1258 
1259 /*
1260  * Update tunnel configuration
1261  */
1262 struct pf_update_tunnel_config
1263 {
1264 	u8 update_rx_pf_clss /* Update RX per PF tunnel classification scheme. */;
1265 	u8 update_rx_def_ucast_clss /* Update per PORT default tunnel RX classification scheme for traffic with unknown unicast outer MAC in NPAR mode. */;
1266 	u8 update_rx_def_non_ucast_clss /* Update per PORT default tunnel RX classification scheme for traffic with non unicast outer MAC in NPAR mode. */;
1267 	u8 set_vxlan_udp_port_flg /* Update VXLAN tunnel UDP destination port. */;
1268 	u8 set_geneve_udp_port_flg /* Update GENEVE tunnel UDP destination port. */;
1269 	u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
1270 	u8 tunnel_clss_l2geneve /* Classification scheme for l2 GENEVE tunnel. */;
1271 	u8 tunnel_clss_ipgeneve /* Classification scheme for ip GENEVE tunnel. */;
1272 	u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
1273 	u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
1274 	__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
1275 	__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
1276 	__le16 reserved;
1277 };
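
/*
 * Illustrative sketch only, not part of the HSI definitions: how a driver
 * might request a VXLAN UDP destination port change through
 * pf_update_tunnel_config. The example_ helper name is hypothetical, and a
 * little-endian host is assumed so the __le16 port can be assigned directly.
 */
static inline void example_tunnel_cfg_set_vxlan_port(struct pf_update_tunnel_config *cfg,
						     __le16 udp_port)
{
	cfg->set_vxlan_udp_port_flg = 1; /* vxlan_udp_port below is only applied when this flag is set */
	cfg->vxlan_udp_port = udp_port;  /* new VXLAN tunnel UDP destination port */
}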
1278 
1279 /*
1280  * Data for port update ramrod
1281  */
1282 struct pf_update_ramrod_data
1283 {
1284 	u8 pf_id;
1285 	u8 update_eth_dcb_data_mode /* Update Eth DCB data indication */;
1286 	u8 update_fcoe_dcb_data_mode /* Update FCOE DCB data indication */;
1287 	u8 update_iscsi_dcb_data_mode /* Update iSCSI DCB data indication */;
1288 	u8 update_roce_dcb_data_mode /* Update ROCE DCB data indication */;
1289 	u8 update_rroce_dcb_data_mode /* Update RROCE (RoCEv2) DCB data indication */;
1290 	u8 update_iwarp_dcb_data_mode /* Update IWARP DCB data indication */;
1291 	u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
1292 	struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
1293 	struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
1294 	struct protocol_dcb_data iscsi_dcb_data /* core iscsi related fields */;
1295 	struct protocol_dcb_data roce_dcb_data /* core roce related fields */;
1296 	struct protocol_dcb_data rroce_dcb_data /* core rroce (RoCE v2) related fields */;
1297 	struct protocol_dcb_data iwarp_dcb_data /* core iwarp related fields */;
1298 	__le16 mf_vlan /* new outer vlan id value */;
1299 	__le16 reserved;
1300 	struct pf_update_tunnel_config tunnel_config /* tunnel configuration. */;
1301 };
1302 
1303 
1304 
1305 /*
1306  * Ports mode
1307  */
1308 enum ports_mode
1309 {
1310 	ENGX2_PORTX1 /* 2 engines x 1 port */,
1311 	ENGX2_PORTX2 /* 2 engines x 2 ports */,
1312 	ENGX1_PORTX1 /* 1 engine  x 1 port */,
1313 	ENGX1_PORTX2 /* 1 engine  x 2 ports */,
1314 	ENGX1_PORTX4 /* 1 engine  x 4 ports */,
1315 	MAX_PORTS_MODE
1316 };
1317 
1318 
1319 
1320 /*
1321  * used to index into hsi_fp_[major|minor]_ver_arr per protocol
1322  */
1323 enum protocol_version_array_key
1324 {
1325 	ETH_VER_KEY=0,
1326 	ROCE_VER_KEY,
1327 	MAX_PROTOCOL_VERSION_ARRAY_KEY
1328 };
1329 
1330 
1331 
1332 /*
1333  * RDMA TX Stats
1334  */
1335 struct rdma_sent_stats
1336 {
1337 	struct regpair sent_bytes /* number of total RDMA bytes sent */;
1338 	struct regpair sent_pkts /* number of total RDMA packets sent */;
1339 };
1340 
1341 /*
1342  * Pstorm non-triggering VF zone
1343  */
1344 struct pstorm_non_trigger_vf_zone
1345 {
1346 	struct eth_pstorm_per_queue_stat eth_queue_stat /* VF statistic bucket */;
1347 	struct rdma_sent_stats rdma_stats /* RoCE sent statistics */;
1348 };
1349 
1350 
1351 /*
1352  * Pstorm VF zone
1353  */
1354 struct pstorm_vf_zone
1355 {
1356 	struct pstorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
1357 	struct regpair reserved[7] /* vf_zone size must be a power of 2 */;
1358 };
1359 
1360 
1361 /*
1362  * Ramrod Header of SPQE
1363  */
1364 struct ramrod_header
1365 {
1366 	__le32 cid /* Slowpath Connection CID */;
1367 	u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
1368 	u8 protocol_id /* Ramrod Protocol ID */;
1369 	__le16 echo /* Ramrod echo */;
1370 };
1371 
1372 
1373 /*
1374  * RDMA RX Stats
1375  */
1376 struct rdma_rcv_stats
1377 {
1378 	struct regpair rcv_bytes /* number of total RDMA bytes received */;
1379 	struct regpair rcv_pkts /* number of total RDMA packets received */;
1380 };
1381 
1382 
1383 
1384 /*
1385  * Data for update QCN/DCQCN RL ramrod
1386  */
1387 struct rl_update_ramrod_data
1388 {
1389 	u8 qcn_update_param_flg /* Update QCN global params: timeout. */;
1390 	u8 dcqcn_update_param_flg /* Update DCQCN global params: timeout, g, k. */;
1391 	u8 rl_init_flg /* Init RL parameters, when RL disabled. */;
1392 	u8 rl_start_flg /* Start RL in IDLE state. Set rate to maximum. */;
1393 	u8 rl_stop_flg /* Stop RL. */;
1394 	u8 rl_id_first /* ID of the first (or single) RL that will be updated. */;
1395 	u8 rl_id_last /* ID of the last RL that will be updated. If clear, a single RL will be updated. */;
1396 	u8 rl_dc_qcn_flg /* If set, the RL will be used for DCQCN. */;
1397 	__le32 rl_bc_rate /* Byte Counter Limit. */;
1398 	__le16 rl_max_rate /* Maximum rate in 1.6 Mbps resolution. */;
1399 	__le16 rl_r_ai /* Active increase rate. */;
1400 	__le16 rl_r_hai /* Hyper active increase rate. */;
1401 	__le16 dcqcn_g /* DCQCN Alpha update gain in 1/64K resolution. */;
1402 	__le32 dcqcn_k_us /* DCQCN Alpha update interval. */;
1403 	__le32 dcqcn_timeuot_us /* DCQCN timeout. */;
1404 	__le32 qcn_timeuot_us /* QCN timeout. */;
1405 	__le32 reserved[2];
1406 };
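
/*
 * Illustrative sketch only, not part of the HSI definitions: updating a
 * single rate limiter. Per the field comments above, when rl_id_last is left
 * clear only the RL named by rl_id_first is updated. The example_ helper
 * name is hypothetical; a real update would also set the relevant *_flg
 * fields for the parameters being changed.
 */
static inline void example_rl_update_single(struct rl_update_ramrod_data *rl,
					    u8 rl_id, __le16 max_rate)
{
	rl->rl_id_first = rl_id;    /* single RL to update */
	rl->rl_id_last = 0;         /* clear: update a single RL only */
	rl->rl_max_rate = max_rate; /* maximum rate, 1.6 Mbps resolution */
}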
1407 
1408 
1409 /*
1410  * Slowpath Element (SPQE)
1411  */
1412 struct slow_path_element
1413 {
1414 	struct ramrod_header hdr /* Ramrod Header */;
1415 	struct regpair data_ptr /* Pointer to the Ramrod Data on the Host */;
1416 };
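
/*
 * Illustrative sketch only, not part of the HSI definitions: composing a
 * Slowpath Element for a common PF update ramrod. The example_ helper name
 * is hypothetical; cid, protocol_id, echo and the host address of the ramrod
 * data buffer are caller-provided (protocol_id would typically be the common
 * protocol ID from common_hsi.h).
 */
static inline void example_fill_spqe(struct slow_path_element *spqe,
				     __le32 cid, u8 protocol_id, __le16 echo,
				     struct regpair data_phys_addr)
{
	spqe->hdr.cid = cid;                        /* slowpath connection CID */
	spqe->hdr.cmd_id = COMMON_RAMROD_PF_UPDATE; /* ramrod command for the common protocol */
	spqe->hdr.protocol_id = protocol_id;        /* ramrod protocol ID */
	spqe->hdr.echo = echo;                      /* echoed back in the ramrod completion */
	spqe->data_ptr = data_phys_addr;            /* host address of the pf_update_ramrod_data buffer */
}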
1417 
1418 
1419 /*
1420  * Tstorm non-triggering VF zone
1421  */
1422 struct tstorm_non_trigger_vf_zone
1423 {
1424 	struct rdma_rcv_stats rdma_stats /* RoCE received statistics */;
1425 };
1426 
1427 
1428 struct tstorm_per_port_stat
1429 {
1430 	struct regpair trunc_error_discard /* packet is dropped because it was truncated in NIG */;
1431 	struct regpair mac_error_discard /* packet is dropped because of Ethernet FCS error */;
1432 	struct regpair mftag_filter_discard /* packet is dropped because classification was unsuccessful */;
1433 	struct regpair eth_mac_filter_discard /* packet was passed to Ethernet and dropped because of no mac filter match */;
1434 	struct regpair ll2_mac_filter_discard /* packet passed to Light L2 and dropped because Light L2 is not configured for this PF */;
1435 	struct regpair ll2_conn_disabled_discard /* packet passed to Light L2 and dropped because Light L2 is not configured for this PF */;
1436 	struct regpair iscsi_irregular_pkt /* packet is an ISCSI irregular packet */;
1437 	struct regpair fcoe_irregular_pkt /* packet is an FCOE irregular packet */;
1438 	struct regpair roce_irregular_pkt /* packet is an ROCE irregular packet */;
1439 	struct regpair iwarp_irregular_pkt /* packet is an IWARP irregular packet */;
1440 	struct regpair eth_irregular_pkt /* packet is an ETH irregular packet */;
1441 	struct regpair toe_irregular_pkt /* packet is a TOE irregular packet */;
1442 	struct regpair preroce_irregular_pkt /* packet is a PREROCE irregular packet */;
1443 	struct regpair eth_gre_tunn_filter_discard /* GRE dropped packets */;
1444 	struct regpair eth_vxlan_tunn_filter_discard /* VXLAN dropped packets */;
1445 	struct regpair eth_geneve_tunn_filter_discard /* GENEVE dropped packets */;
1446 };
1447 
1448 
1449 /*
1450  * Tstorm VF zone
1451  */
1452 struct tstorm_vf_zone
1453 {
1454 	struct tstorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
1455 };
1456 
1457 
1458 /*
1459  * Tunnel classification scheme
1460  */
1461 enum tunnel_clss
1462 {
1463 	TUNNEL_CLSS_MAC_VLAN=0 /* Use MAC and VLAN from first L2 header for vport classification. */,
1464 	TUNNEL_CLSS_MAC_VNI /* Use MAC from first L2 header and VNI from tunnel header for vport classification */,
1465 	TUNNEL_CLSS_INNER_MAC_VLAN /* Use MAC and VLAN from last L2 header for vport classification */,
1466 	TUNNEL_CLSS_INNER_MAC_VNI /* Use MAC from last L2 header and VNI from tunnel header for vport classification */,
1467 	TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE /* Use MAC and VLAN from last L2 header for vport classification. If no exact match, use MAC and VLAN from first L2 header for classification. */,
1468 	MAX_TUNNEL_CLSS
1469 };
1470 
1471 
1472 
1473 /*
1474  * Ustorm non-triggering VF zone
1475  */
1476 struct ustorm_non_trigger_vf_zone
1477 {
1478 	struct eth_ustorm_per_queue_stat eth_queue_stat /* VF statistic bucket */;
1479 	struct regpair vf_pf_msg_addr /* VF-PF message address */;
1480 };
1481 
1482 
1483 /*
1484  * Ustorm triggering VF zone
1485  */
1486 struct ustorm_trigger_vf_zone
1487 {
1488 	u8 vf_pf_msg_valid /* VF-PF message valid flag */;
1489 	u8 reserved[7];
1490 };
1491 
1492 
1493 /*
1494  * Ustorm VF zone
1495  */
1496 struct ustorm_vf_zone
1497 {
1498 	struct ustorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
1499 	struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */;
1500 };
1501 
1502 
1503 /*
1504  * VF-PF channel data
1505  */
1506 struct vf_pf_channel_data
1507 {
1508 	__le32 ready /* 0: VF-PF Channel NOT ready. Waiting for ack from PF driver. 1: VF-PF Channel is ready for a new transaction. */;
1509 	u8 valid /* 0: VF-PF Channel is invalid because of malicious VF. 1: VF-PF Channel is valid. */;
1510 	u8 reserved0;
1511 	__le16 reserved1;
1512 };
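
/*
 * Illustrative sketch only, not part of the HSI definitions: how the
 * ready/valid pair in vf_pf_channel_data is interpreted before starting a
 * new VF->PF transaction. The example_ helper name is hypothetical.
 */
static inline int example_vf_pf_channel_usable(const struct vf_pf_channel_data *ch)
{
	/* channel must be valid (VF not marked malicious) and ready
	 * (previous request already acked by the PF driver) */
	return ch->valid && ch->ready;
}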
1513 
1514 
1515 /*
1516  * Ramrod data for VF start ramrod
1517  */
1518 struct vf_start_ramrod_data
1519 {
1520 	u8 vf_id /* VF ID */;
1521 	u8 enable_flr_ack /* If set, initial cleanup ack will be sent to parent PF SP event queue */;
1522 	__le16 opaque_fid /* VF opaque FID */;
1523 	u8 personality /* defines the personality type of the new VF */;
1524 	u8 reserved[7];
1525 	struct hsi_fp_ver_struct hsi_fp_ver /* FP HSI version to be used by FW */;
1526 };
1527 
1528 
1529 /*
1530  * Ramrod data for VF stop ramrod
1531  */
1532 struct vf_stop_ramrod_data
1533 {
1534 	u8 vf_id /* VF ID */;
1535 	u8 reserved0;
1536 	__le16 reserved1;
1537 	__le32 reserved2;
1538 };
1539 
1540 
1541 /*
1542  * VF zone size mode.
1543  */
1544 enum vf_zone_size_mode
1545 {
1546 	VF_ZONE_SIZE_MODE_DEFAULT /* Default VF zone size. Up to 192 VFs supported. */,
1547 	VF_ZONE_SIZE_MODE_DOUBLE /* Doubled VF zone size. Up to 96 VFs supported. */,
1548 	VF_ZONE_SIZE_MODE_QUAD /* Quad VF zone size. Up to 48 VFs supported. */,
1549 	MAX_VF_ZONE_SIZE_MODE
1550 };
1551 
1552 
1553 
1554 
1555 /*
1556  * Attentions status block
1557  */
1558 struct atten_status_block
1559 {
1560 	__le32 atten_bits;
1561 	__le32 atten_ack;
1562 	__le16 reserved0;
1563 	__le16 sb_index /* status block running index */;
1564 	__le32 reserved1;
1565 };
1566 
1567 
1568 /*
1569  * IGU cleanup bit values, to distinguish between a cleanup command and a producer/consumer update.
1570  */
1571 enum command_type_bit
1572 {
1573 	IGU_COMMAND_TYPE_NOP=0,
1574 	IGU_COMMAND_TYPE_SET=1,
1575 	MAX_COMMAND_TYPE_BIT
1576 };
1577 
1578 
1579 /*
1580  * DMAE command
1581  */
1582 struct dmae_cmd
1583 {
1584 	__le32 opcode;
1585 #define DMAE_CMD_SRC_MASK              0x1 /* DMA Source. 0 - PCIe, 1 - GRC (use enum dmae_cmd_src_enum) */
1586 #define DMAE_CMD_SRC_SHIFT             0
1587 #define DMAE_CMD_DST_MASK              0x3 /* DMA destination. 0 - None, 1 - PCIe, 2 - GRC, 3 - None (use enum dmae_cmd_dst_enum) */
1588 #define DMAE_CMD_DST_SHIFT             1
1589 #define DMAE_CMD_C_DST_MASK            0x1 /* Completion destination. 0 - PCIe, 1 - GRC (use enum dmae_cmd_c_dst_enum) */
1590 #define DMAE_CMD_C_DST_SHIFT           3
1591 #define DMAE_CMD_CRC_RESET_MASK        0x1 /* Reset the CRC result (do not use the previous result as the seed) */
1592 #define DMAE_CMD_CRC_RESET_SHIFT       4
1593 #define DMAE_CMD_SRC_ADDR_RESET_MASK   0x1 /* Reset the source address in the next go to the same source address of the previous go */
1594 #define DMAE_CMD_SRC_ADDR_RESET_SHIFT  5
1595 #define DMAE_CMD_DST_ADDR_RESET_MASK   0x1 /* Reset the destination address in the next go to the same destination address of the previous go */
1596 #define DMAE_CMD_DST_ADDR_RESET_SHIFT  6
1597 #define DMAE_CMD_COMP_FUNC_MASK        0x1 /* 0 - completion function is the same as src function, 1 - completion function is the same as dst function (use enum dmae_cmd_comp_func_enum) */
1598 #define DMAE_CMD_COMP_FUNC_SHIFT       7
1599 #define DMAE_CMD_COMP_WORD_EN_MASK     0x1 /* 0 - Do not write a completion word, 1 - Write a completion word (use enum dmae_cmd_comp_word_en_enum) */
1600 #define DMAE_CMD_COMP_WORD_EN_SHIFT    8
1601 #define DMAE_CMD_COMP_CRC_EN_MASK      0x1 /* 0 - Do not write a CRC word, 1 - Write a CRC word (use enum dmae_cmd_comp_crc_en_enum) */
1602 #define DMAE_CMD_COMP_CRC_EN_SHIFT     9
1603 #define DMAE_CMD_COMP_CRC_OFFSET_MASK  0x7 /* The CRC word should be taken from the DMAE address space from address 9+X, where X is the value in these bits. */
1604 #define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
1605 #define DMAE_CMD_RESERVED1_MASK        0x1
1606 #define DMAE_CMD_RESERVED1_SHIFT       13
1607 #define DMAE_CMD_ENDIANITY_MODE_MASK   0x3
1608 #define DMAE_CMD_ENDIANITY_MODE_SHIFT  14
1609 #define DMAE_CMD_ERR_HANDLING_MASK     0x3 /* The field specifies how the completion word is affected by PCIe read error. 0 - Send a regular completion, 1 - Send a completion with an error indication, 2 - Do not send a completion (use enum dmae_cmd_error_handling_enum) */
1610 #define DMAE_CMD_ERR_HANDLING_SHIFT    16
1611 #define DMAE_CMD_PORT_ID_MASK          0x3 /* The port ID to be placed on the RF FID field of the GRC bus. This field is used both when GRC is the destination and when it is the source of the DMAE transaction. */
1612 #define DMAE_CMD_PORT_ID_SHIFT         18
1613 #define DMAE_CMD_SRC_PF_ID_MASK        0xF /* Source PCI function number [3:0] */
1614 #define DMAE_CMD_SRC_PF_ID_SHIFT       20
1615 #define DMAE_CMD_DST_PF_ID_MASK        0xF /* Destination PCI function number [3:0] */
1616 #define DMAE_CMD_DST_PF_ID_SHIFT       24
1617 #define DMAE_CMD_SRC_VF_ID_VALID_MASK  0x1 /* Source VFID valid */
1618 #define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
1619 #define DMAE_CMD_DST_VF_ID_VALID_MASK  0x1 /* Destination VFID valid */
1620 #define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
1621 #define DMAE_CMD_RESERVED2_MASK        0x3
1622 #define DMAE_CMD_RESERVED2_SHIFT       30
1623 	__le32 src_addr_lo /* PCIe source address low in bytes or GRC source address in DW */;
1624 	__le32 src_addr_hi /* PCIe source address high in bytes or reserved (if source is GRC) */;
1625 	__le32 dst_addr_lo /* PCIe destination address low in bytes or GRC destination address in DW */;
1626 	__le32 dst_addr_hi /* PCIe destination address high in bytes or reserved (if destination is GRC) */;
1627 	__le16 length_dw /* Length in DW */;
1628 	__le16 opcode_b;
1629 #define DMAE_CMD_SRC_VF_ID_MASK        0xFF /* Source VF id */
1630 #define DMAE_CMD_SRC_VF_ID_SHIFT       0
1631 #define DMAE_CMD_DST_VF_ID_MASK        0xFF /* Destination VF id */
1632 #define DMAE_CMD_DST_VF_ID_SHIFT       8
1633 	__le32 comp_addr_lo /* PCIe completion address low in bytes or GRC completion address in DW */;
1634 	__le32 comp_addr_hi /* PCIe completion address high in bytes or reserved (if completion address is GRC) */;
1635 	__le32 comp_val /* Value to write to completion address */;
1636 	__le32 crc32 /* crc32 result */;
1637 	__le32 crc_32_c /* crc32_c result */;
1638 	__le16 crc16 /* crc16 result */;
1639 	__le16 crc16_c /* crc16_c result */;
1640 	__le16 crc10 /* crc_t10 result */;
1641 	__le16 reserved;
1642 	__le16 xsum16 /* checksum16 result  */;
1643 	__le16 xsum8 /* checksum8 result  */;
1644 };
1645 
1646 
1647 enum dmae_cmd_comp_crc_en_enum
1648 {
1649 	dmae_cmd_comp_crc_disabled /* Do not write a CRC word */,
1650 	dmae_cmd_comp_crc_enabled /* Write a CRC word */,
1651 	MAX_DMAE_CMD_COMP_CRC_EN_ENUM
1652 };
1653 
1654 
1655 enum dmae_cmd_comp_func_enum
1656 {
1657 	dmae_cmd_comp_func_to_src /* completion word and/or CRC will be sent to SRC-PCI function/SRC VFID */,
1658 	dmae_cmd_comp_func_to_dst /* completion word and/or CRC will be sent to DST-PCI function/DST VFID */,
1659 	MAX_DMAE_CMD_COMP_FUNC_ENUM
1660 };
1661 
1662 
1663 enum dmae_cmd_comp_word_en_enum
1664 {
1665 	dmae_cmd_comp_word_disabled /* Do not write a completion word */,
1666 	dmae_cmd_comp_word_enabled /* Write the completion word */,
1667 	MAX_DMAE_CMD_COMP_WORD_EN_ENUM
1668 };
1669 
1670 
1671 enum dmae_cmd_c_dst_enum
1672 {
1673 	dmae_cmd_c_dst_pcie,
1674 	dmae_cmd_c_dst_grc,
1675 	MAX_DMAE_CMD_C_DST_ENUM
1676 };
1677 
1678 
1679 enum dmae_cmd_dst_enum
1680 {
1681 	dmae_cmd_dst_none_0,
1682 	dmae_cmd_dst_pcie,
1683 	dmae_cmd_dst_grc,
1684 	dmae_cmd_dst_none_3,
1685 	MAX_DMAE_CMD_DST_ENUM
1686 };
1687 
1688 
1689 enum dmae_cmd_error_handling_enum
1690 {
1691 	dmae_cmd_error_handling_send_regular_comp /* Send a regular completion (with no error indication) */,
1692 	dmae_cmd_error_handling_send_comp_with_err /* Send a completion with an error indication (i.e. set bit 31 of the completion word) */,
1693 	dmae_cmd_error_handling_dont_send_comp /* Do not send a completion */,
1694 	MAX_DMAE_CMD_ERROR_HANDLING_ENUM
1695 };
1696 
1697 
1698 enum dmae_cmd_src_enum
1699 {
1700 	dmae_cmd_src_pcie /* The source is the PCIe */,
1701 	dmae_cmd_src_grc /* The source is the GRC */,
1702 	MAX_DMAE_CMD_SRC_ENUM
1703 };
1704 
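/*
 * Illustrative sketch only, not part of the HSI definitions: composing the
 * dmae_cmd opcode word with the DMAE_CMD_* mask/shift pairs and the enums
 * above, here for a PCIe -> GRC copy that writes a completion word. The
 * example_ helper name is hypothetical, and a little-endian host is assumed
 * so the __le32 opcode is built without byte-swapping.
 */
static inline __le32 example_dmae_opcode_pcie_to_grc(u8 port_id, u8 pf_id)
{
	__le32 opcode = 0;

	opcode |= dmae_cmd_src_pcie << DMAE_CMD_SRC_SHIFT;                   /* source: PCIe */
	opcode |= dmae_cmd_dst_grc << DMAE_CMD_DST_SHIFT;                    /* destination: GRC */
	opcode |= dmae_cmd_comp_word_enabled << DMAE_CMD_COMP_WORD_EN_SHIFT; /* write a completion word */
	opcode |= (port_id & DMAE_CMD_PORT_ID_MASK) << DMAE_CMD_PORT_ID_SHIFT;
	opcode |= (pf_id & DMAE_CMD_SRC_PF_ID_MASK) << DMAE_CMD_SRC_PF_ID_SHIFT;
	opcode |= (pf_id & DMAE_CMD_DST_PF_ID_MASK) << DMAE_CMD_DST_PF_ID_SHIFT;
	return opcode;
}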
1705 
1706 struct e4_mstorm_core_conn_ag_ctx
1707 {
1708 	u8 byte0 /* cdu_validation */;
1709 	u8 byte1 /* state */;
1710 	u8 flags0;
1711 #define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
1712 #define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
1713 #define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
1714 #define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
1715 #define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
1716 #define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
1717 #define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
1718 #define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
1719 #define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
1720 #define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
1721 	u8 flags1;
1722 #define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
1723 #define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
1724 #define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
1725 #define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
1726 #define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
1727 #define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
1728 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
1729 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
1730 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
1731 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
1732 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
1733 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
1734 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
1735 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
1736 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
1737 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
1738 	__le16 word0 /* word0 */;
1739 	__le16 word1 /* word1 */;
1740 	__le32 reg0 /* reg0 */;
1741 	__le32 reg1 /* reg1 */;
1742 };
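
/*
 * Illustrative sketch only, not part of the HSI definitions: the *_MASK /
 * *_SHIFT pairs throughout this file are meant to be used as shown here to
 * read (or, mirrored, to write) a sub-field of a flags byte; driver code
 * usually wraps this pattern in GET_FIELD()/SET_FIELD()-style macros. The
 * example_ helper name is hypothetical.
 */
static inline u8 example_get_mstorm_cf0(const struct e4_mstorm_core_conn_ag_ctx *ctx)
{
	return (ctx->flags0 >> E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT) &
	       E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK;
}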
1743 
1744 
1745 
1746 
1747 
1748 struct e4_ystorm_core_conn_ag_ctx
1749 {
1750 	u8 byte0 /* cdu_validation */;
1751 	u8 byte1 /* state */;
1752 	u8 flags0;
1753 #define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
1754 #define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
1755 #define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
1756 #define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
1757 #define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
1758 #define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
1759 #define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
1760 #define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
1761 #define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
1762 #define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
1763 	u8 flags1;
1764 #define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
1765 #define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
1766 #define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
1767 #define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
1768 #define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
1769 #define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
1770 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
1771 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
1772 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
1773 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
1774 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
1775 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
1776 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
1777 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
1778 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
1779 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
1780 	u8 byte2 /* byte2 */;
1781 	u8 byte3 /* byte3 */;
1782 	__le16 word0 /* word0 */;
1783 	__le32 reg0 /* reg0 */;
1784 	__le32 reg1 /* reg1 */;
1785 	__le16 word1 /* word1 */;
1786 	__le16 word2 /* word2 */;
1787 	__le16 word3 /* word3 */;
1788 	__le16 word4 /* word4 */;
1789 	__le32 reg2 /* reg2 */;
1790 	__le32 reg3 /* reg3 */;
1791 };
1792 
1793 
1794 struct e5_mstorm_core_conn_ag_ctx
1795 {
1796 	u8 byte0 /* cdu_validation */;
1797 	u8 byte1 /* state_and_core_id */;
1798 	u8 flags0;
1799 #define E5_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
1800 #define E5_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
1801 #define E5_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
1802 #define E5_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
1803 #define E5_MSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
1804 #define E5_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
1805 #define E5_MSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
1806 #define E5_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
1807 #define E5_MSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
1808 #define E5_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
1809 	u8 flags1;
1810 #define E5_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
1811 #define E5_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
1812 #define E5_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
1813 #define E5_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
1814 #define E5_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
1815 #define E5_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
1816 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
1817 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
1818 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
1819 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
1820 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
1821 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
1822 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
1823 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
1824 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
1825 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
1826 	__le16 word0 /* word0 */;
1827 	__le16 word1 /* word1 */;
1828 	__le32 reg0 /* reg0 */;
1829 	__le32 reg1 /* reg1 */;
1830 };
1831 
1832 
1833 struct e5_tstorm_core_conn_ag_ctx
1834 {
1835 	u8 byte0 /* cdu_validation */;
1836 	u8 byte1 /* state_and_core_id */;
1837 	u8 flags0;
1838 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK          0x1 /* exist_in_qm0 */
1839 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT         0
1840 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK          0x1 /* exist_in_qm1 */
1841 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT         1
1842 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK          0x1 /* bit2 */
1843 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT         2
1844 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK          0x1 /* bit3 */
1845 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT         3
1846 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK          0x1 /* bit4 */
1847 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT         4
1848 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK          0x1 /* bit5 */
1849 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT         5
1850 #define E5_TSTORM_CORE_CONN_AG_CTX_CF0_MASK           0x3 /* timer0cf */
1851 #define E5_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT          6
1852 	u8 flags1;
1853 #define E5_TSTORM_CORE_CONN_AG_CTX_CF1_MASK           0x3 /* timer1cf */
1854 #define E5_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT          0
1855 #define E5_TSTORM_CORE_CONN_AG_CTX_CF2_MASK           0x3 /* timer2cf */
1856 #define E5_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT          2
1857 #define E5_TSTORM_CORE_CONN_AG_CTX_CF3_MASK           0x3 /* timer_stop_all */
1858 #define E5_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT          4
1859 #define E5_TSTORM_CORE_CONN_AG_CTX_CF4_MASK           0x3 /* cf4 */
1860 #define E5_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT          6
1861 	u8 flags2;
1862 #define E5_TSTORM_CORE_CONN_AG_CTX_CF5_MASK           0x3 /* cf5 */
1863 #define E5_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT          0
1864 #define E5_TSTORM_CORE_CONN_AG_CTX_CF6_MASK           0x3 /* cf6 */
1865 #define E5_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT          2
1866 #define E5_TSTORM_CORE_CONN_AG_CTX_CF7_MASK           0x3 /* cf7 */
1867 #define E5_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT          4
1868 #define E5_TSTORM_CORE_CONN_AG_CTX_CF8_MASK           0x3 /* cf8 */
1869 #define E5_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT          6
1870 	u8 flags3;
1871 #define E5_TSTORM_CORE_CONN_AG_CTX_CF9_MASK           0x3 /* cf9 */
1872 #define E5_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT          0
1873 #define E5_TSTORM_CORE_CONN_AG_CTX_CF10_MASK          0x3 /* cf10 */
1874 #define E5_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT         2
1875 #define E5_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK         0x1 /* cf0en */
1876 #define E5_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT        4
1877 #define E5_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK         0x1 /* cf1en */
1878 #define E5_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT        5
1879 #define E5_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK         0x1 /* cf2en */
1880 #define E5_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT        6
1881 #define E5_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK         0x1 /* cf3en */
1882 #define E5_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT        7
1883 	u8 flags4;
1884 #define E5_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK         0x1 /* cf4en */
1885 #define E5_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT        0
1886 #define E5_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK         0x1 /* cf5en */
1887 #define E5_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT        1
1888 #define E5_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK         0x1 /* cf6en */
1889 #define E5_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT        2
1890 #define E5_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK         0x1 /* cf7en */
1891 #define E5_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT        3
1892 #define E5_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK         0x1 /* cf8en */
1893 #define E5_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT        4
1894 #define E5_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK         0x1 /* cf9en */
1895 #define E5_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT        5
1896 #define E5_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK        0x1 /* cf10en */
1897 #define E5_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT       6
1898 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK       0x1 /* rule0en */
1899 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT      7
1900 	u8 flags5;
1901 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK       0x1 /* rule1en */
1902 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT      0
1903 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK       0x1 /* rule2en */
1904 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT      1
1905 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK       0x1 /* rule3en */
1906 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT      2
1907 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK       0x1 /* rule4en */
1908 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT      3
1909 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK       0x1 /* rule5en */
1910 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT      4
1911 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK       0x1 /* rule6en */
1912 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT      5
1913 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK       0x1 /* rule7en */
1914 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT      6
1915 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK       0x1 /* rule8en */
1916 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT      7
1917 	u8 flags6;
1918 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED1_MASK  0x1 /* bit6 */
1919 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
1920 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED2_MASK  0x1 /* bit7 */
1921 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
1922 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED3_MASK  0x1 /* bit8 */
1923 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
1924 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED4_MASK  0x3 /* cf11 */
1925 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED4_SHIFT 3
1926 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED5_MASK  0x1 /* cf11en */
1927 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED5_SHIFT 5
1928 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED6_MASK  0x1 /* rule9en */
1929 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED6_SHIFT 6
1930 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED7_MASK  0x1 /* rule10en */
1931 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED7_SHIFT 7
1932 	u8 byte2 /* byte2 */;
1933 	__le16 word0 /* word0 */;
1934 	__le32 reg0 /* reg0 */;
1935 	__le32 reg1 /* reg1 */;
1936 	__le32 reg2 /* reg2 */;
1937 	__le32 reg3 /* reg3 */;
1938 	__le32 reg4 /* reg4 */;
1939 	__le32 reg5 /* reg5 */;
1940 	__le32 reg6 /* reg6 */;
1941 	__le32 reg7 /* reg7 */;
1942 	__le32 reg8 /* reg8 */;
1943 	u8 byte3 /* byte3 */;
1944 	u8 byte4 /* byte4 */;
1945 	u8 byte5 /* byte5 */;
1946 	u8 e4_reserved8 /* byte6 */;
1947 	__le16 word1 /* word1 */;
1948 	__le16 word2 /* conn_dpi */;
1949 	__le32 reg9 /* reg9 */;
1950 	__le16 word3 /* word3 */;
1951 	__le16 e4_reserved9 /* word4 */;
1952 };
1953 
1954 
1955 struct e5_ustorm_core_conn_ag_ctx
1956 {
1957 	u8 reserved /* cdu_validation */;
1958 	u8 byte1 /* state_and_core_id */;
1959 	u8 flags0;
1960 #define E5_USTORM_CORE_CONN_AG_CTX_BIT0_MASK          0x1 /* exist_in_qm0 */
1961 #define E5_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT         0
1962 #define E5_USTORM_CORE_CONN_AG_CTX_BIT1_MASK          0x1 /* exist_in_qm1 */
1963 #define E5_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT         1
1964 #define E5_USTORM_CORE_CONN_AG_CTX_CF0_MASK           0x3 /* timer0cf */
1965 #define E5_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT          2
1966 #define E5_USTORM_CORE_CONN_AG_CTX_CF1_MASK           0x3 /* timer1cf */
1967 #define E5_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT          4
1968 #define E5_USTORM_CORE_CONN_AG_CTX_CF2_MASK           0x3 /* timer2cf */
1969 #define E5_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT          6
1970 	u8 flags1;
1971 #define E5_USTORM_CORE_CONN_AG_CTX_CF3_MASK           0x3 /* timer_stop_all */
1972 #define E5_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT          0
1973 #define E5_USTORM_CORE_CONN_AG_CTX_CF4_MASK           0x3 /* cf4 */
1974 #define E5_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT          2
1975 #define E5_USTORM_CORE_CONN_AG_CTX_CF5_MASK           0x3 /* cf5 */
1976 #define E5_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT          4
1977 #define E5_USTORM_CORE_CONN_AG_CTX_CF6_MASK           0x3 /* cf6 */
1978 #define E5_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT          6
1979 	u8 flags2;
1980 #define E5_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK         0x1 /* cf0en */
1981 #define E5_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT        0
1982 #define E5_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK         0x1 /* cf1en */
1983 #define E5_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT        1
1984 #define E5_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK         0x1 /* cf2en */
1985 #define E5_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT        2
1986 #define E5_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK         0x1 /* cf3en */
1987 #define E5_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT        3
1988 #define E5_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK         0x1 /* cf4en */
1989 #define E5_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT        4
1990 #define E5_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK         0x1 /* cf5en */
1991 #define E5_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT        5
1992 #define E5_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK         0x1 /* cf6en */
1993 #define E5_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT        6
1994 #define E5_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK       0x1 /* rule0en */
1995 #define E5_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT      7
1996 	u8 flags3;
1997 #define E5_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK       0x1 /* rule1en */
1998 #define E5_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT      0
1999 #define E5_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK       0x1 /* rule2en */
2000 #define E5_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT      1
2001 #define E5_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK       0x1 /* rule3en */
2002 #define E5_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT      2
2003 #define E5_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK       0x1 /* rule4en */
2004 #define E5_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT      3
2005 #define E5_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK       0x1 /* rule5en */
2006 #define E5_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT      4
2007 #define E5_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK       0x1 /* rule6en */
2008 #define E5_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT      5
2009 #define E5_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK       0x1 /* rule7en */
2010 #define E5_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT      6
2011 #define E5_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK       0x1 /* rule8en */
2012 #define E5_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT      7
2013 	u8 flags4;
2014 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED1_MASK  0x1 /* bit2 */
2015 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
2016 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED2_MASK  0x1 /* bit3 */
2017 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
2018 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED3_MASK  0x3 /* cf7 */
2019 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
2020 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED4_MASK  0x3 /* cf8 */
2021 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED4_SHIFT 4
2022 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED5_MASK  0x1 /* cf7en */
2023 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED5_SHIFT 6
2024 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED6_MASK  0x1 /* cf8en */
2025 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED6_SHIFT 7
2026 	u8 byte2 /* byte2 */;
2027 	__le16 word0 /* conn_dpi */;
2028 	__le16 word1 /* word1 */;
2029 	__le32 rx_producers /* reg0 */;
2030 	__le32 reg1 /* reg1 */;
2031 	__le32 reg2 /* reg2 */;
2032 	__le32 reg3 /* reg3 */;
2033 	__le16 word2 /* word2 */;
2034 	__le16 word3 /* word3 */;
2035 };
2036 
2037 
2038 struct e5_xstorm_core_conn_ag_ctx
2039 {
2040 	u8 reserved0 /* cdu_validation */;
2041 	u8 state_and_core_id /* state_and_core_id */;
2042 	u8 flags0;
2043 #define E5_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK         0x1 /* exist_in_qm0 */
2044 #define E5_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT        0
2045 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK            0x1 /* exist_in_qm1 */
2046 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT           1
2047 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK            0x1 /* exist_in_qm2 */
2048 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT           2
2049 #define E5_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK         0x1 /* exist_in_qm3 */
2050 #define E5_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT        3
2051 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK            0x1 /* bit4 */
2052 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT           4
2053 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK            0x1 /* cf_array_active */
2054 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT           5
2055 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK            0x1 /* bit6 */
2056 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT           6
2057 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK            0x1 /* bit7 */
2058 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT           7
2059 	u8 flags1;
2060 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK            0x1 /* bit8 */
2061 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT           0
2062 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK            0x1 /* bit9 */
2063 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT           1
2064 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK            0x1 /* bit10 */
2065 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT           2
2066 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK                0x1 /* bit11 */
2067 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT               3
2068 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK                0x1 /* bit12 */
2069 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT               4
2070 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK                0x1 /* bit13 */
2071 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT               5
2072 #define E5_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK       0x1 /* bit14 */
2073 #define E5_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT      6
2074 #define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK         0x1 /* bit15 */
2075 #define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT        7
2076 	u8 flags2;
2077 #define E5_XSTORM_CORE_CONN_AG_CTX_CF0_MASK                  0x3 /* timer0cf */
2078 #define E5_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT                 0
2079 #define E5_XSTORM_CORE_CONN_AG_CTX_CF1_MASK                  0x3 /* timer1cf */
2080 #define E5_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT                 2
2081 #define E5_XSTORM_CORE_CONN_AG_CTX_CF2_MASK                  0x3 /* timer2cf */
2082 #define E5_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT                 4
2083 #define E5_XSTORM_CORE_CONN_AG_CTX_CF3_MASK                  0x3 /* timer_stop_all */
2084 #define E5_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT                 6
2085 	u8 flags3;
2086 #define E5_XSTORM_CORE_CONN_AG_CTX_CF4_MASK                  0x3 /* cf4 */
2087 #define E5_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT                 0
2088 #define E5_XSTORM_CORE_CONN_AG_CTX_CF5_MASK                  0x3 /* cf5 */
2089 #define E5_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT                 2
2090 #define E5_XSTORM_CORE_CONN_AG_CTX_CF6_MASK                  0x3 /* cf6 */
2091 #define E5_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT                 4
2092 #define E5_XSTORM_CORE_CONN_AG_CTX_CF7_MASK                  0x3 /* cf7 */
2093 #define E5_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT                 6
2094 	u8 flags4;
2095 #define E5_XSTORM_CORE_CONN_AG_CTX_CF8_MASK                  0x3 /* cf8 */
2096 #define E5_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT                 0
2097 #define E5_XSTORM_CORE_CONN_AG_CTX_CF9_MASK                  0x3 /* cf9 */
2098 #define E5_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT                 2
2099 #define E5_XSTORM_CORE_CONN_AG_CTX_CF10_MASK                 0x3 /* cf10 */
2100 #define E5_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT                4
2101 #define E5_XSTORM_CORE_CONN_AG_CTX_CF11_MASK                 0x3 /* cf11 */
2102 #define E5_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT                6
2103 	u8 flags5;
2104 #define E5_XSTORM_CORE_CONN_AG_CTX_CF12_MASK                 0x3 /* cf12 */
2105 #define E5_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT                0
2106 #define E5_XSTORM_CORE_CONN_AG_CTX_CF13_MASK                 0x3 /* cf13 */
2107 #define E5_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT                2
2108 #define E5_XSTORM_CORE_CONN_AG_CTX_CF14_MASK                 0x3 /* cf14 */
2109 #define E5_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT                4
2110 #define E5_XSTORM_CORE_CONN_AG_CTX_CF15_MASK                 0x3 /* cf15 */
2111 #define E5_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT                6
2112 	u8 flags6;
2113 #define E5_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK     0x3 /* cf16 */
2114 #define E5_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT    0
2115 #define E5_XSTORM_CORE_CONN_AG_CTX_CF17_MASK                 0x3 /* cf_array_cf */
2116 #define E5_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT                2
2117 #define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK                0x3 /* cf18 */
2118 #define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT               4
2119 #define E5_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK         0x3 /* cf19 */
2120 #define E5_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT        6
2121 	u8 flags7;
2122 #define E5_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK             0x3 /* cf20 */
2123 #define E5_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT            0
2124 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK           0x3 /* cf21 */
2125 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT          2
2126 #define E5_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK            0x3 /* cf22 */
2127 #define E5_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT           4
2128 #define E5_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK                0x1 /* cf0en */
2129 #define E5_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT               6
2130 #define E5_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK                0x1 /* cf1en */
2131 #define E5_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT               7
2132 	u8 flags8;
2133 #define E5_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK                0x1 /* cf2en */
2134 #define E5_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT               0
2135 #define E5_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK                0x1 /* cf3en */
2136 #define E5_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT               1
2137 #define E5_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK                0x1 /* cf4en */
2138 #define E5_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT               2
2139 #define E5_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK                0x1 /* cf5en */
2140 #define E5_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT               3
2141 #define E5_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK                0x1 /* cf6en */
2142 #define E5_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT               4
2143 #define E5_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK                0x1 /* cf7en */
2144 #define E5_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT               5
2145 #define E5_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK                0x1 /* cf8en */
2146 #define E5_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT               6
2147 #define E5_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK                0x1 /* cf9en */
2148 #define E5_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT               7
2149 	u8 flags9;
2150 #define E5_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK               0x1 /* cf10en */
2151 #define E5_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT              0
2152 #define E5_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK               0x1 /* cf11en */
2153 #define E5_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT              1
2154 #define E5_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK               0x1 /* cf12en */
2155 #define E5_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT              2
2156 #define E5_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK               0x1 /* cf13en */
2157 #define E5_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT              3
2158 #define E5_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK               0x1 /* cf14en */
2159 #define E5_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT              4
2160 #define E5_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK               0x1 /* cf15en */
2161 #define E5_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT              5
2162 #define E5_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK  0x1 /* cf16en */
2163 #define E5_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
2164 #define E5_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK               0x1 /* cf_array_cf_en */
2165 #define E5_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT              7
2166 	u8 flags10;
2167 #define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK             0x1 /* cf18en */
2168 #define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT            0
2169 #define E5_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK      0x1 /* cf19en */
2170 #define E5_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT     1
2171 #define E5_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK          0x1 /* cf20en */
2172 #define E5_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT         2
2173 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK           0x1 /* cf21en */
2174 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT          3
2175 #define E5_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK         0x1 /* cf22en */
2176 #define E5_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT        4
2177 #define E5_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK               0x1 /* cf23en */
2178 #define E5_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT              5
2179 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK           0x1 /* rule0en */
2180 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT          6
2181 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK           0x1 /* rule1en */
2182 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT          7
2183 	u8 flags11;
2184 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK           0x1 /* rule2en */
2185 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT          0
2186 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK           0x1 /* rule3en */
2187 #define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT          1
2188 #define E5_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK       0x1 /* rule4en */
2189 #define E5_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT      2
2190 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK              0x1 /* rule5en */
2191 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT             3
2192 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK              0x1 /* rule6en */
2193 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT             4
2194 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK              0x1 /* rule7en */
2195 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT             5
2196 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK         0x1 /* rule8en */
2197 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT        6
2198 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK              0x1 /* rule9en */
2199 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT             7
2200 	u8 flags12;
2201 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK             0x1 /* rule10en */
2202 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT            0
2203 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK             0x1 /* rule11en */
2204 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT            1
2205 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK         0x1 /* rule12en */
2206 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT        2
2207 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK         0x1 /* rule13en */
2208 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT        3
2209 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK             0x1 /* rule14en */
2210 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT            4
2211 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK             0x1 /* rule15en */
2212 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT            5
2213 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK             0x1 /* rule16en */
2214 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT            6
2215 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK             0x1 /* rule17en */
2216 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT            7
2217 	u8 flags13;
2218 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK             0x1 /* rule18en */
2219 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT            0
2220 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK             0x1 /* rule19en */
2221 #define E5_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT            1
2222 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK         0x1 /* rule20en */
2223 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT        2
2224 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK         0x1 /* rule21en */
2225 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT        3
2226 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK         0x1 /* rule22en */
2227 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT        4
2228 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK         0x1 /* rule23en */
2229 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT        5
2230 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK         0x1 /* rule24en */
2231 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT        6
2232 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK         0x1 /* rule25en */
2233 #define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT        7
2234 	u8 flags14;
2235 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK                0x1 /* bit16 */
2236 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT               0
2237 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK                0x1 /* bit17 */
2238 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT               1
2239 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK                0x1 /* bit18 */
2240 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT               2
2241 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK                0x1 /* bit19 */
2242 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT               3
2243 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK                0x1 /* bit20 */
2244 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT               4
2245 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK                0x1 /* bit21 */
2246 #define E5_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT               5
2247 #define E5_XSTORM_CORE_CONN_AG_CTX_CF23_MASK                 0x3 /* cf23 */
2248 #define E5_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT                6
2249 	u8 byte2 /* byte2 */;
2250 	__le16 physical_q0 /* physical_q0 */;
2251 	__le16 consolid_prod /* physical_q1 */;
2252 	__le16 reserved16 /* physical_q2 */;
2253 	__le16 tx_bd_cons /* word3 */;
2254 	__le16 tx_bd_or_spq_prod /* word4 */;
2255 	__le16 word5 /* word5 */;
2256 	__le16 conn_dpi /* conn_dpi */;
2257 	u8 byte3 /* byte3 */;
2258 	u8 byte4 /* byte4 */;
2259 	u8 byte5 /* byte5 */;
2260 	u8 byte6 /* byte6 */;
2261 	__le32 reg0 /* reg0 */;
2262 	__le32 reg1 /* reg1 */;
2263 	__le32 reg2 /* reg2 */;
2264 	__le32 reg3 /* reg3 */;
2265 	__le32 reg4 /* reg4 */;
2266 	__le32 reg5 /* cf_array0 */;
2267 	__le32 reg6 /* cf_array1 */;
2268 	u8 flags15;
2269 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED1_MASK         0x1 /* bit22 */
2270 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED1_SHIFT        0
2271 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED2_MASK         0x1 /* bit23 */
2272 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED2_SHIFT        1
2273 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED3_MASK         0x1 /* bit24 */
2274 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED3_SHIFT        2
2275 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED4_MASK         0x3 /* cf24 */
2276 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED4_SHIFT        3
2277 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED5_MASK         0x1 /* cf24en */
2278 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED5_SHIFT        5
2279 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED6_MASK         0x1 /* rule26en */
2280 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED6_SHIFT        6
2281 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED7_MASK         0x1 /* rule27en */
2282 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED7_SHIFT        7
2283 	u8 byte7 /* byte7 */;
2284 	__le16 word7 /* word7 */;
2285 	__le16 word8 /* word8 */;
2286 	__le16 word9 /* word9 */;
2287 	__le16 word10 /* word10 */;
2288 	__le16 word11 /* word11 */;
2289 	__le32 reg7 /* reg7 */;
2290 	__le32 reg8 /* reg8 */;
2291 	__le32 reg9 /* reg9 */;
2292 	u8 byte8 /* byte8 */;
2293 	u8 byte9 /* byte9 */;
2294 	u8 byte10 /* byte10 */;
2295 	u8 byte11 /* byte11 */;
2296 	u8 byte12 /* byte12 */;
2297 	u8 byte13 /* byte13 */;
2298 	u8 byte14 /* byte14 */;
2299 	u8 byte15 /* byte15 */;
2300 	__le32 reg10 /* reg10 */;
2301 	__le32 reg11 /* reg11 */;
2302 	__le32 reg12 /* reg12 */;
2303 	__le32 reg13 /* reg13 */;
2304 	__le32 reg14 /* reg14 */;
2305 	__le32 reg15 /* reg15 */;
2306 	__le32 reg16 /* reg16 */;
2307 	__le32 reg17 /* reg17 */;
2308 	__le32 reg18 /* reg18 */;
2309 	__le32 reg19 /* reg19 */;
2310 	__le16 word12 /* word12 */;
2311 	__le16 word13 /* word13 */;
2312 	__le16 word14 /* word14 */;
2313 	__le16 word15 /* word15 */;
2314 };
2315 
2316 
2317 struct e5_ystorm_core_conn_ag_ctx
2318 {
2319 	u8 byte0 /* cdu_validation */;
2320 	u8 byte1 /* state_and_core_id */;
2321 	u8 flags0;
2322 #define E5_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
2323 #define E5_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
2324 #define E5_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
2325 #define E5_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
2326 #define E5_YSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
2327 #define E5_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
2328 #define E5_YSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
2329 #define E5_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
2330 #define E5_YSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
2331 #define E5_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
2332 	u8 flags1;
2333 #define E5_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
2334 #define E5_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
2335 #define E5_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
2336 #define E5_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
2337 #define E5_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
2338 #define E5_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
2339 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
2340 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
2341 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
2342 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
2343 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
2344 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
2345 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
2346 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
2347 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
2348 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
2349 	u8 byte2 /* byte2 */;
2350 	u8 byte3 /* byte3 */;
2351 	__le16 word0 /* word0 */;
2352 	__le32 reg0 /* reg0 */;
2353 	__le32 reg1 /* reg1 */;
2354 	__le16 word1 /* word1 */;
2355 	__le16 word2 /* word2 */;
2356 	__le16 word3 /* word3 */;
2357 	__le16 word4 /* word4 */;
2358 	__le32 reg2 /* reg2 */;
2359 	__le32 reg3 /* reg3 */;
2360 };
2361 
2362 
2363 /*
2364  * IGU cleanup command
2365  */
2366 struct igu_cleanup
2367 {
2368 	__le32 sb_id_and_flags;
2369 #define IGU_CLEANUP_RESERVED0_MASK     0x7FFFFFF
2370 #define IGU_CLEANUP_RESERVED0_SHIFT    0
2371 #define IGU_CLEANUP_CLEANUP_SET_MASK   0x1 /* cleanup clear - 0, set - 1 */
2372 #define IGU_CLEANUP_CLEANUP_SET_SHIFT  27
2373 #define IGU_CLEANUP_CLEANUP_TYPE_MASK  0x7
2374 #define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
2375 #define IGU_CLEANUP_COMMAND_TYPE_MASK  0x1 /* must always be set (use enum command_type_bit) */
2376 #define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
2377 	__le32 reserved1;
2378 };
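
/*
 * Illustrative sketch only, not part of the HSI definitions: composing the
 * sb_id_and_flags word of an IGU cleanup command. The COMMAND_TYPE bit must
 * always be set for a cleanup (see enum command_type_bit above). The
 * example_ helper name is hypothetical and a little-endian host is assumed.
 */
static inline __le32 example_igu_cleanup_word(u8 cleanup_set, u8 cleanup_type)
{
	__le32 word = 0;

	word |= (__le32)(cleanup_set & IGU_CLEANUP_CLEANUP_SET_MASK) << IGU_CLEANUP_CLEANUP_SET_SHIFT;
	word |= (__le32)(cleanup_type & IGU_CLEANUP_CLEANUP_TYPE_MASK) << IGU_CLEANUP_CLEANUP_TYPE_SHIFT;
	word |= (__le32)IGU_COMMAND_TYPE_SET << IGU_CLEANUP_COMMAND_TYPE_SHIFT; /* must always be set */
	return word;
}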
2379 
2380 
2381 /*
2382  * IGU firmware driver command
2383  */
2384 union igu_command
2385 {
2386 	struct igu_prod_cons_update prod_cons_update;
2387 	struct igu_cleanup cleanup;
2388 };
2389 
2390 
2391 /*
2392  * IGU firmware driver command
2393  */
2394 struct igu_command_reg_ctrl
2395 {
2396 	__le16 opaque_fid;
2397 	__le16 igu_command_reg_ctrl_fields;
2398 #define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK  0xFFF
2399 #define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
2400 #define IGU_COMMAND_REG_CTRL_RESERVED_MASK      0x7
2401 #define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT     12
2402 #define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK  0x1 /* command type: 0 - read, 1 - write */
2403 #define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
2404 };
2405 
2406 
2407 /*
2408  * IGU mapping line structure
2409  */
2410 struct igu_mapping_line
2411 {
2412 	__le32 igu_mapping_line_fields;
2413 #define IGU_MAPPING_LINE_VALID_MASK            0x1
2414 #define IGU_MAPPING_LINE_VALID_SHIFT           0
2415 #define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK    0xFF
2416 #define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT   1
2417 #define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK  0xFF /* In BB: VF-0-120, PF-0-7; In K2: VF-0-191, PF-0-15 */
2418 #define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
2419 #define IGU_MAPPING_LINE_PF_VALID_MASK         0x1 /* PF-1, VF-0 */
2420 #define IGU_MAPPING_LINE_PF_VALID_SHIFT        17
2421 #define IGU_MAPPING_LINE_IPS_GROUP_MASK        0x3F
2422 #define IGU_MAPPING_LINE_IPS_GROUP_SHIFT       18
2423 #define IGU_MAPPING_LINE_RESERVED_MASK         0xFF
2424 #define IGU_MAPPING_LINE_RESERVED_SHIFT        24
2425 };
2426 
2427 
2428 /*
2429  * IGU MSIX line structure
2430  */
2431 struct igu_msix_vector
2432 {
2433 	struct regpair address;
2434 	__le32 data;
2435 	__le32 msix_vector_fields;
2436 #define IGU_MSIX_VECTOR_MASK_BIT_MASK      0x1
2437 #define IGU_MSIX_VECTOR_MASK_BIT_SHIFT     0
2438 #define IGU_MSIX_VECTOR_RESERVED0_MASK     0x7FFF
2439 #define IGU_MSIX_VECTOR_RESERVED0_SHIFT    1
2440 #define IGU_MSIX_VECTOR_STEERING_TAG_MASK  0xFF
2441 #define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
2442 #define IGU_MSIX_VECTOR_RESERVED1_MASK     0xFF
2443 #define IGU_MSIX_VECTOR_RESERVED1_SHIFT    24
2444 };
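
/*
 * Illustrative sketch (not part of the original header): composing the
 * msix_vector_fields word of an MSI-X table entry.  Leaving MASK_BIT at zero
 * keeps the vector unmasked; "stag" is a hypothetical steering tag.  The
 * address regpair and the data dword are taken from the PCI MSI-X message;
 * all fields are stored little-endian.
 *
 *	u32 fields = 0;
 *	fields |= ((u32)stag & IGU_MSIX_VECTOR_STEERING_TAG_MASK) <<
 *	    IGU_MSIX_VECTOR_STEERING_TAG_SHIFT;
 */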
2445 
2446 
2447 /*
2448  * per encapsulation type enabling flags
2449  */
2450 struct prs_reg_encapsulation_type_en
2451 {
2452 	u8 flags;
2453 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK     0x1 /* Enable bit for Ethernet-over-GRE (L2 GRE) encapsulation. */
2454 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT    0
2455 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK      0x1 /* Enable bit for IP-over-GRE (IP GRE) encapsulation. */
2456 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT     1
2457 #define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK            0x1 /* Enable bit for VXLAN encapsulation. */
2458 #define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT           2
2459 #define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK            0x1 /* Enable bit for T-Tag encapsulation. */
2460 #define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT           3
2461 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK  0x1 /* Enable bit for Ethernet-over-GENEVE (L2 GENEVE) encapsulation. */
2462 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
2463 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK   0x1 /* Enable bit for IP-over-GENEVE (IP GENEVE) encapsulation. */
2464 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT  5
2465 #define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK                0x3
2466 #define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT               6
2467 };
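
/*
 * Illustrative sketch (not part of the original header): a parser
 * encapsulation-enable value with, for example, VXLAN and
 * Ethernet-over-GENEVE recognition turned on and the remaining tunnel types
 * left disabled.
 *
 *	u8 encap_en = 0;
 *	encap_en |= (1 & PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK) <<
 *	    PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
 *	encap_en |= (1 & PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK) <<
 *	    PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
 */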
2468 
2469 
2470 enum pxp_tph_st_hint
2471 {
2472 	TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
2473 	TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
2474 	TPH_ST_HINT_TARGET /* Device Write and Host Read, or Host Write and Device Read */,
2475 	TPH_ST_HINT_TARGET_PRIO /* Device Write and Host Read, or Host Write and Device Read - with temporal reuse */,
2476 	MAX_PXP_TPH_ST_HINT
2477 };
2478 
2479 
2480 /*
2481  * QM hardware structure of enable bypass credit mask
2482  */
2483 struct qm_rf_bypass_mask
2484 {
2485 	u8 flags;
2486 #define QM_RF_BYPASS_MASK_LINEVOQ_MASK    0x1
2487 #define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT   0
2488 #define QM_RF_BYPASS_MASK_RESERVED0_MASK  0x1
2489 #define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
2490 #define QM_RF_BYPASS_MASK_PFWFQ_MASK      0x1
2491 #define QM_RF_BYPASS_MASK_PFWFQ_SHIFT     2
2492 #define QM_RF_BYPASS_MASK_VPWFQ_MASK      0x1
2493 #define QM_RF_BYPASS_MASK_VPWFQ_SHIFT     3
2494 #define QM_RF_BYPASS_MASK_PFRL_MASK       0x1
2495 #define QM_RF_BYPASS_MASK_PFRL_SHIFT      4
2496 #define QM_RF_BYPASS_MASK_VPQCNRL_MASK    0x1
2497 #define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT   5
2498 #define QM_RF_BYPASS_MASK_FWPAUSE_MASK    0x1
2499 #define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT   6
2500 #define QM_RF_BYPASS_MASK_RESERVED1_MASK  0x1
2501 #define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
2502 };
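
/*
 * Illustrative sketch (not part of the original header): a bypass mask that,
 * per the structure name, marks the PF WFQ and PF rate-limiter credits for
 * bypass while every other mechanism remains in force.  The same MASK/SHIFT
 * pattern applies to qm_rf_opportunistic_mask below.
 *
 *	u8 bypass = 0;
 *	bypass |= (1 & QM_RF_BYPASS_MASK_PFWFQ_MASK) <<
 *	    QM_RF_BYPASS_MASK_PFWFQ_SHIFT;
 *	bypass |= (1 & QM_RF_BYPASS_MASK_PFRL_MASK) <<
 *	    QM_RF_BYPASS_MASK_PFRL_SHIFT;
 */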
2503 
2504 
2505 /*
2506  * QM hardware structure of opportunistic credit mask
2507  */
2508 struct qm_rf_opportunistic_mask
2509 {
2510 	__le16 flags;
2511 #define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK     0x1
2512 #define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT    0
2513 #define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK     0x1
2514 #define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT    1
2515 #define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK       0x1
2516 #define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT      2
2517 #define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK       0x1
2518 #define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT      3
2519 #define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK        0x1
2520 #define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT       4
2521 #define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK     0x1
2522 #define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT    5
2523 #define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK     0x1
2524 #define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT    6
2525 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK   0x1
2526 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT  7
2527 #define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK  0x1
2528 #define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
2529 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK   0x7F
2530 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT  9
2531 };
2532 
2533 
2534 /*
2535  * QM hardware structure of QM map memory
2536  */
2537 struct qm_rf_pq_map
2538 {
2539 	__le32 reg;
2540 #define QM_RF_PQ_MAP_PQ_VALID_MASK          0x1 /* PQ active */
2541 #define QM_RF_PQ_MAP_PQ_VALID_SHIFT         0
2542 #define QM_RF_PQ_MAP_RL_ID_MASK             0xFF /* RL ID */
2543 #define QM_RF_PQ_MAP_RL_ID_SHIFT            1
2544 #define QM_RF_PQ_MAP_VP_PQ_ID_MASK          0x1FF /* ID of the first PQ associated with this PQ's VPORT and VOQ */
2545 #define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT         9
2546 #define QM_RF_PQ_MAP_VOQ_MASK               0x1F /* VOQ */
2547 #define QM_RF_PQ_MAP_VOQ_SHIFT              18
2548 #define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK  0x3 /* WRR weight */
2549 #define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
2550 #define QM_RF_PQ_MAP_RL_VALID_MASK          0x1 /* RL active */
2551 #define QM_RF_PQ_MAP_RL_VALID_SHIFT         25
2552 #define QM_RF_PQ_MAP_RESERVED_MASK          0x3F
2553 #define QM_RF_PQ_MAP_RESERVED_SHIFT         26
2554 };
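
/*
 * Illustrative sketch (not part of the original header): composing a PQ map
 * entry for an active, rate-limited PQ.  rl_id, vport_pq_id, voq and
 * wrr_group are hypothetical inputs; the result is stored little-endian in
 * the reg word.
 *
 *	u32 map = 0;
 *	map |= (1U & QM_RF_PQ_MAP_PQ_VALID_MASK) << QM_RF_PQ_MAP_PQ_VALID_SHIFT;
 *	map |= ((u32)rl_id & QM_RF_PQ_MAP_RL_ID_MASK) << QM_RF_PQ_MAP_RL_ID_SHIFT;
 *	map |= ((u32)vport_pq_id & QM_RF_PQ_MAP_VP_PQ_ID_MASK) <<
 *	    QM_RF_PQ_MAP_VP_PQ_ID_SHIFT;
 *	map |= ((u32)voq & QM_RF_PQ_MAP_VOQ_MASK) << QM_RF_PQ_MAP_VOQ_SHIFT;
 *	map |= ((u32)wrr_group & QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK) <<
 *	    QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT;
 *	map |= (1U & QM_RF_PQ_MAP_RL_VALID_MASK) << QM_RF_PQ_MAP_RL_VALID_SHIFT;
 */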
2555 
2556 
2557 /*
2558  * Completion params for aggregated interrupt completion
2559  */
2560 struct sdm_agg_int_comp_params
2561 {
2562 	__le16 params;
2563 #define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK      0x3F /* index of the aggregated interrupt, 0-31 */
2564 #define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT     0
2565 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK  0x1 /* 1 - set a bit in the aggregated vector, 0 - don't set */
2566 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
2567 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK     0x1FF /* index of the bit in the aggregated vector, 0-279 (TBD) */
2568 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT    7
2569 };
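
/*
 * Illustrative sketch (not part of the original header): completion
 * parameters that ask the SDM to set bit "vector_bit" of aggregated
 * interrupt "agg_int_index" on completion (both hypothetical inputs).
 *
 *	u16 params = 0;
 *	params |= (agg_int_index & SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK) <<
 *	    SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
 *	params |= (1 & SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK) <<
 *	    SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
 *	params |= (vector_bit & SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK) <<
 *	    SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
 */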
2570 
2571 
2572 /*
2573  * SDM operation gen command (generate aggregative interrupt)
2574  */
2575 struct sdm_op_gen
2576 {
2577 	__le32 command;
2578 #define SDM_OP_GEN_COMP_PARAM_MASK  0xFFFF /* completion parameters (bits 0-15) */
2579 #define SDM_OP_GEN_COMP_PARAM_SHIFT 0
2580 #define SDM_OP_GEN_COMP_TYPE_MASK   0xF /* completion type (bits 16-19) */
2581 #define SDM_OP_GEN_COMP_TYPE_SHIFT  16
2582 #define SDM_OP_GEN_RESERVED_MASK    0xFFF /* reserved (bits 20-31) */
2583 #define SDM_OP_GEN_RESERVED_SHIFT   20
2584 };
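
/*
 * Illustrative sketch (not part of the original header): composing the SDM
 * operation-gen command word from the 16-bit completion parameters built in
 * the previous sketch and a hypothetical completion type "comp_type".
 *
 *	u32 cmd = 0;
 *	cmd |= ((u32)params & SDM_OP_GEN_COMP_PARAM_MASK) <<
 *	    SDM_OP_GEN_COMP_PARAM_SHIFT;
 *	cmd |= ((u32)comp_type & SDM_OP_GEN_COMP_TYPE_MASK) <<
 *	    SDM_OP_GEN_COMP_TYPE_SHIFT;
 */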
2585 
2586 #endif /* __ECORE_HSI_COMMON__ */
2587