1 /*******************************************************************************
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 *
21 * Copyright 2014 QLogic Corporation
22 * The contents of this file are subject to the terms of the
23 * QLogic End User License (the "License").
24 * You may not use this file except in compliance with the License.
25 *
26 * You can obtain a copy of the License at
27 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
28 * QLogic_End_User_Software_License.txt
29 * See the License for the specific language governing permissions
30 * and limitations under the License.
31 *
32 *
33 * Module Description:
34 *
35 *
36 * History:
37 * 10/10/01 Hav Khauv Inception.
38 ******************************************************************************/
39
40 #ifndef _LM5710_H
41 #define _LM5710_H
42
43 //migrated from 5706_reg.h
/* Normalize endianness macros: exactly one of LITTLE_ENDIAN / BIG_ENDIAN
 * ends up defined, derived from the toolchain's __BIG_ENDIAN. */
#ifndef __BIG_ENDIAN
#ifndef LITTLE_ENDIAN
#define LITTLE_ENDIAN
#endif
#else
#undef LITTLE_ENDIAN
#ifndef BIG_ENDIAN
#define BIG_ENDIAN
#endif
#ifndef BIG_ENDIAN_HOST
#define BIG_ENDIAN_HOST
#endif
#endif

/* INLINE expands to nothing in debug builds (keeps every function
 * step-able in a debugger); otherwise it is the compiler's __inline. */
#ifndef INLINE
#if DBG
#define INLINE
#else
#define INLINE __inline
#endif
#endif

/* Sanity check: the normalization above must have produced a result. */
#if !defined(LITTLE_ENDIAN) && !defined(BIG_ENDIAN)
#error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition."
#endif
69
70 #define ECORE_NIV
71
72 #ifdef __LINUX
73 #include <linux/types.h>
74 #endif
75 #include "bcmtype.h"
76 #include "debug.h"
77 #include "igu_def.h"
78 #include "microcode_constants.h"
79 #include "fcoe_constants.h"
80 #include "toe_constants.h"
81 #include "tcp_constants.h"
82 #include "eth_constants.h"
83 //this is the included HSI
84 #include "5710_hsi.h"
85 #include "lm5710_hsi.h"
86 #include "pcics_reg_driver.h"
87 #include "bigmac_addresses.h"
88 #include "misc_bits.h"
89 #include "emac_reg_driver.h"
90 #include "dmae_clients.h"
91 #include "prs_flags.h"
92 #include "57712_reg.h"
93 #include "grc_addr.h"
94 #include "bd_chain_st.h"
95 #include "lm_sp_req_mgr.h"
96 #include "license.h"
97 #include "mcp_shmem.h"
98 #include "lm_dcbx_mp.h"
99
100 #ifndef elink_dev
101 #define elink_dev _lm_device_t
102 #endif
103 #include "clc.h"
104 //#include "status_code.h"
// TODO - we will add our own shmem
106 //#include "shmem.h"
107 //
/* Device function type: physical or virtual function. */
#define DEVICE_TYPE_PF        0
#define DEVICE_TYPE_VF        1

/* Virtualization types (vt) */
#define VT_NONE               0
#define VT_BASIC_VF           1
#define VT_CHANNEL_VF         2
#define VT_ASSIGNED_TO_VM_PF  3

/* Channel flavor used when virtualization_type == VT_CHANNEL_VF. */
#define VT_HW_CHANNEL_TYPE    0
#define VT_SW_CHANNEL_TYPE    1


/* TRUE for a VF that communicates with its PF over a channel. */
#define IS_CHANNEL_VFDEV(pdev) (((pdev)->params.device_type == DEVICE_TYPE_VF) && ((pdev)->params.virtualization_type == VT_CHANNEL_VF))

/* PF-side views of the virtualization mode. */
#define IS_BASIC_VIRT_MODE_MASTER_PFDEV(pdev) (((pdev)->params.device_type == DEVICE_TYPE_PF) && ((pdev)->params.virtualization_type == VT_BASIC_VF))
#define IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev) (((pdev)->params.device_type == DEVICE_TYPE_PF) && ((pdev)->params.virtualization_type == VT_CHANNEL_VF))
#define IS_ASSIGNED_TO_VM_PFDEV(pdev) (((pdev)->params.device_type == DEVICE_TYPE_PF) && ((pdev)->params.virtualization_type == VT_ASSIGNED_TO_VM_PF))
#define DBG_DMP_IS_ONLINE(pdev) IS_ASSIGNED_TO_VM_PFDEV(pdev)

/* Channel-mode flavor checks (HW channel vs. SW channel). */
#define IS_HW_CHANNEL_VIRT_MODE(pdev) (((pdev)->params.virtualization_type == VT_CHANNEL_VF) && ((pdev)->params.channel_type == VT_HW_CHANNEL_TYPE))
#define IS_SW_CHANNEL_VIRT_MODE(pdev) (((pdev)->params.virtualization_type == VT_CHANNEL_VF) && ((pdev)->params.channel_type == VT_SW_CHANNEL_TYPE))

/* A PF has no parent pf_dev; a VF either has a parent pf_dev or is
 * explicitly typed as a VF. */
#define IS_PFDEV(pdev) (((pdev)->pf_dev == NULL) && ((pdev)->params.device_type == DEVICE_TYPE_PF))
#define IS_VFDEV(pdev) (((pdev)->pf_dev != NULL) || ((pdev)->params.device_type == DEVICE_TYPE_VF))
#define PFDEV(pdev) (pdev)
134
135
136
/* SR-IOV VF connection-ID (CID) layout helpers.
 * A PF-view VF CID is built from bit-fields: the low LM_VF_CID_WND_SIZE
 * bits select the queue within the VF, the next LM_VF_MAX_RVFID_SIZE
 * bits select the VF itself. */
#define LM_VF_MAX_RVFID_SIZE    6

#define LM_MAX_VF_CID_WND_SIZE  4
#define LM_MAX_VF_CHAINS_PER_PF (1 << LM_MAX_VF_CID_WND_SIZE)

/* Window size / chain count come from the HW sriov info when available,
 * otherwise fall back to the compile-time maximum. */
#define LM_VF_CID_WND_SIZE(_pdev)  (((_pdev)->hw_info.sriov_info.max_chains_per_vf) ? (_pdev)->hw_info.sriov_info.vf_cid_wnd_size : LM_MAX_VF_CID_WND_SIZE)
#define LM_VF_CHAINS_PER_PF(_pdev) (((_pdev)->hw_info.sriov_info.max_chains_per_vf) ? (_pdev)->hw_info.sriov_info.max_chains_per_vf : LM_MAX_VF_CHAINS_PER_PF)

#define LM_VF_NUM_CIDS_MASK(_pdev) ((1 << LM_VF_CID_WND_SIZE(_pdev)) - 1)

#define LM_VF_CID_BASE(_pdev)      (1 << (LM_VF_MAX_RVFID_SIZE + LM_VF_CID_WND_SIZE(_pdev)))

#define LM_VF_MAX_RVFID_MASK       ((1 << LM_VF_MAX_RVFID_SIZE) - 1)


/* Identity mappings today; kept as macros so a real translation can be
 * introduced later without touching call sites. */
#define VF_TO_PF_CID(pdev,cid) (cid)
#define PF_TO_VF_CID(pdev,cid) (cid)

/* NOTE(review): the two macros below expand a bare 'pdev' token - they
 * only work inside functions that have a local variable named 'pdev'. */
#define GET_VF_Q_ID_FROM_PF_CID(cid)   (cid & LM_VF_NUM_CIDS_MASK(pdev))
#define GET_ABS_VF_ID_FROM_PF_CID(cid) ((cid >> LM_VF_CID_WND_SIZE(pdev)) & LM_VF_MAX_RVFID_MASK)
157
/* VF BAR0 address map - offset of each region inside the VF's BAR0. */
#define VF_BAR0_IGU_OFFSET          0x0000  /*0x0000-0x3000: (12KB)*/
#define VF_BAR0_USDM_QUEUES_OFFSET  0x3000  /*-0x4100: (ZoneA) (4352B)*/
#define VF_BAR0_CSDM_QUEUES_OFFSET  0x4100  /*-0x5200: (ZoneA) (4352B)*/
#define VF_BAR0_XSDM_QUEUES_OFFSET  0x5200  /*-0x6300: (ZoneA) (4352B)*/
#define VF_BAR0_TSDM_QUEUES_OFFSET  0x6300  /*-0x7400: (ZoneA) (4352B)*/
#define VF_BAR0_USDM_GLOBAL_OFFSET  0x7400  /*-0x7600: (ZoneB) (512B)*/
#define VF_BAR0_CSDM_GLOBAL_OFFSET  0x7600  /*-0x7800: (ZoneB) (512B)*/
#define VF_BAR0_XSDM_GLOBAL_OFFSET  0x7800  /*-0x7A00: (ZoneB) (512B)*/
#define VF_BAR0_TSDM_GLOBAL_OFFSET  0x7A00  /*-0x7C00: (ZoneB) (512B)*/
#define VF_BAR0_DB_OFFSET           0x7C00  /*-0x7E00: (512B)*/
#define VF_BAR0_DB_SIZE             512
#define VF_BAR0_GRC_OFFSET          0x7E00  /*-0x8000:(512B) */
170
/* multi function mode is supported on (5711+5711E FPGA+EMUL) and on (5711E ASIC) and on 5712E and 5713E */
/* (all E3 chips are treated as MF-capable). */
#define IS_MF_MODE_CAPABLE(pdev) ((CHIP_NUM(pdev) == CHIP_NUM_5711E) || \
                                  (CHIP_NUM(pdev) == CHIP_NUM_5712E) || \
                                  (CHIP_IS_E3(pdev)))
175
/* Macro for triggering PCIE analyzer: write a magic value to GRC offset
 * 0x2000.
 * Fix: wrapped in do { } while (0) so the macro expands to a single
 * statement - the original bare-brace form broke constructs such as
 * "if (x) LM_TRIGGER_PCIE(p); else ..." (the trailing ';' ended the if).
 */
#define LM_TRIGGER_PCIE(_pdev)             \
    do {                                   \
        u32_t kuku = 0xcafecafe;           \
        REG_WR((_pdev), 0x2000, kuku);     \
    } while (0)

// Send an attention on this Function: writes 1 to the per-function
// MISC_REG_AEU_GENERAL_ATTN_12 register (registers are 4 bytes apart).
#define LM_GENERAL_ATTN_INTERRUPT_SET(_pdev,_func) REG_WR((_pdev),MISC_REG_AEU_GENERAL_ATTN_12 + 4*(_func),0x1)
/*******************************************************************************
 * Constants.
 ******************************************************************************/
#define MAX_PATH_NUM       2
#define E2_MAX_NUM_OF_VFS  64
#define E1H_FUNC_MAX       8
#define E2_FUNC_MAX        4    /* per path */
#define MAX_VNIC_NUM       4
#define MAX_FUNC_NUM       8    /* Common to all chips */
#define MAX_NDSB           HC_SB_MAX_SB_E2
#define MAX_RSS_CHAINS     (16) /* a constant for _HW_ limit */
#define MAX_HW_CHAINS      (64) /* real E2/E3 HW limit of IGU blocks configured for function*/
197
198
/* Logical client indices, used to index per-client resources (MTU,
 * descriptor counts, ...). Ordering is significant: the iSCSI and FCoE
 * entries must stay after the ndis (+rdma, currently disabled) entries. */
typedef enum
{
    LM_CLI_IDX_NDIS  = 0,
    /* LM_CLI_IDX_RDMA would be 1 - currently disabled */
    LM_CLI_IDX_ISCSI = 1, /* iSCSI idx must be after ndis+rdma */
    LM_CLI_IDX_FCOE  = 2, /* FCOE idx must be after ndis+rdma */
    LM_CLI_IDX_FWD   = 3,
    LM_CLI_IDX_OOO   = 4,
    LM_CLI_IDX_MAX   = 5
} lm_cli_idx_t;

/* Resource indices - alias the client indices one-to-one, plus an extra
 * COMMON entry (== LM_CLI_IDX_MAX) for resources shared by all clients. */
typedef enum
{
    LM_RESOURCE_NDIS   = LM_CLI_IDX_NDIS,
    /* LM_RESOURCE_RDMA would alias LM_CLI_IDX_RDMA - currently disabled */
    LM_RESOURCE_ISCSI  = LM_CLI_IDX_ISCSI, /* iSCSI idx must be after ndis+rdma */
    LM_RESOURCE_FCOE   = LM_CLI_IDX_FCOE,  /* FCOE idx must be after ndis+rdma */
    LM_RESOURCE_FWD    = LM_CLI_IDX_FWD,
    LM_RESOURCE_OOO    = LM_CLI_IDX_OOO,
    LM_RESOURCE_COMMON = LM_CLI_IDX_MAX,
} lm_resource_idx_t;
220
/* A slow-path command queued for later posting (e.g. while the slow-path
 * queue is full or blocked). */
struct sq_pending_command
{
    d_list_entry_t list;  /* linkage in the pending-commands list */
    u32_t cid;            /* connection ID the command applies to */
    u16_t type;           /* slow-path element type field */
    u8_t cmd;             /* ramrod command opcode */
    u8_t flags;           /* SQ_PEND_* bits below */
#define SQ_PEND_RELEASE_MEM 0x1  /* presumably: free this entry after completion - verify at use sites */
#define SQ_PEND_COMP_CALLED 0x2  /* completion callback already invoked */

    struct slow_path_element command; /* the element to post to the SQ */
};
233
234 #include "lm_desc.h"
235 #include "listq.h"
236 #include "lm.h"
237 #include "mm.h"
238 #include "ecore_sp_verbs.h"
239 #ifdef VF_INVOLVED
240 #include "lm_vf.h"
241 #endif
242 #include "lm_stats.h"
243 #include "lm_dmae.h"
244 #if !defined(_B10KD_EXT)
245 #include "bcm_utils.h"
246 #endif
247
#define EVEREST 1

/* non rss chains - ISCSI, FCOE, FWD, ISCSI OOO */
#define MAX_NON_RSS_CHAINS (4)

/* which of the non-rss chains need fw clients - ISCSI, FCOE*/
#define MAX_NON_RSS_FW_CLIENTS (4)

/* Ethernet connection / chain count limits (regular + tx-only). */
#define MAX_ETH_REG_CONS (MAX_RSS_CHAINS + MAX_NON_RSS_CHAINS)
#define MAX_ETH_REG_CHAINS (MAX_HW_CHAINS + MAX_NON_RSS_CHAINS)

#define MAX_ETH_CONS (MAX_ETH_REG_CONS + MAX_ETH_TX_ONLY_CONS)
#define MAX_ETH_CHAINS (MAX_ETH_REG_CHAINS + MAX_ETH_TX_ONLY_CONS)

#ifndef VF_INVOLVED
#define MAX_VF_ETH_CONS 0
#endif

#if defined(_VBD_) || defined (_VBD_CMD_)
/* VBD builds: chain counts scale with the number of status blocks. */
#define MAX_TX_CHAIN(_pdev) (3U*LM_SB_CNT(_pdev) + MAX_NON_RSS_CHAINS)
#define MAX_RX_CHAIN(_pdev) (1U*LM_SB_CNT(_pdev) + MAX_NON_RSS_CHAINS)
#else
#define MAX_TX_CHAIN(_pdev) (MAX_ETH_CONS)
#define MAX_RX_CHAIN(_pdev) (MAX_ETH_REG_CONS)
#endif


/* ILT (internal lookup table) sizing. */
#define ILT_NUM_PAGE_ENTRIES 3072
#define ILT_NUM_PAGE_ENTRIES_PER_FUNC 384

/* According to the PCI-E Init document */
#define SEARCHER_TOTAL_MEM_REQUIRED_PER_CON 64
#define TIMERS_TOTAL_MEM_REQUIRED_PER_CON 8
#define QM_TOTAL_MEM_REQUIRED_PER_CON (32*4)


/* Number of bits must be 10 to 25. */
#ifndef LM_PAGE_BITS
#define LM_PAGE_BITS 12 /* 4K page. */
#endif

#define LM_PAGE_SIZE (1 << LM_PAGE_BITS)
#define LM_PAGE_MASK (LM_PAGE_SIZE - 1)


/* DQ CID cell sizes - PF and VF variants. */
/* Number of bits must be 10 to 25. */
#define LM_DQ_CID_BITS 7 /* 128 Byte page. */

#define LM_DQ_CID_SIZE (1 << LM_DQ_CID_BITS)
#define LM_DQ_CID_MASK (LM_DQ_CID_SIZE - 1)

#define LM_VF_DQ_CID_BITS 3 /* 8 Byte page. */

#define LM_VF_DQ_CID_SIZE (1 << LM_VF_DQ_CID_BITS)
#define LM_VF_DQ_CID_MASK (LM_VF_DQ_CID_SIZE - 1)
303
#define LM_ILT_ALIGNMENT 0x1000 /* ILT assumes pages aligned to 4K NOTE: E1 has a bug,
                                 * in which page needs to be aligned to page-size
                                 */

#define LM_ILT_ALIGNMENT_MASK (LM_ILT_ALIGNMENT - 1)

/* Timer / scan periods (units per line comment). */
#define LM_TIMERS_SCAN_POLL 20000 /* 20 sec */
#define LM_TIMERS_SCAN_TIME 1000 /*1m*/
#define LM_UNLOAD_TIME 100000 /*100m in micros */
#if !defined(_VBD_CMD_)
#define LM_CID_RETURN_TIME 2000 /*2 sec*/
#define LM_CID_RETURN_TIME_EMUL 10000 /*10 sec on emulation*/

#else
/* VBD_CMD builds: no CID-return delay. */
#define LM_CID_RETURN_TIME 0
#define LM_CID_RETURN_TIME_EMUL 0
#endif

// TODO add for ASIC
#define LM_FREE_CID_DELAY_TIME(pdev) ((pdev)->params.l4_free_cid_delay_time)
/*
#define LM_FREE_CID_DELAY_TIME(pdev) (CHIP_REV(pdev) == CHIP_REV_FPGA || CHIP_REV(pdev) == CHIP_REV_EMUL) ? LM_CID_RETURN_TIME_EMUL : LM_CID_RETURN_TIME;
*/

/* Slow-down factors applied on emulation / FPGA platforms. */
#define LM_EMUL_FACTOR 2000
#define LM_FPGA_FACTOR 200

#ifndef CACHE_LINE_SIZE_MASK
#define CACHE_LINE_SIZE_MASK 0x3f
#define CACHE_LINE_SIZE (CACHE_LINE_SIZE_MASK + 1) /* 64 bytes */
#endif

/*need to know from where can I take these values */
#define NVRAM_1MB_SIZE 0x20000 // 1M bit in bytes
#define NVRAM_PAGE_SIZE 256
339
/* Number of packets per indication in calls to mm_indicate_rx/tx. */
#ifndef MAX_PACKETS_PER_INDICATION
#define MAX_PACKETS_PER_INDICATION 50
#endif

// TODO - adjust to our needs - the limitation of the PBF
#ifndef MAX_FRAG_CNT
#define MAX_FRAG_CNT 33
#endif
#ifndef MAX_FRAG_CNT_PER_TB
/* MichalS TODO - do we want to leave it like this or calculate it according to connection params. */
#define MAX_FRAG_CNT_PER_TB 33 /* arbitrary(?) */
#endif

/* The maximum is actually 0xffff which can be described by a BD. */
// TODO - adjust to our needs
#define MAX_FRAGMENT_SIZE 0xf000

/* Maximum Packet Size: max jumbo frame: 9600 + ethernet-header+llc-snap+vlan+crc32 */
#define MAXIMUM_PACKET_SIZE 9632

// TODO - adjust to our needs
/* Buffer size of the statistics block, rounded up to a whole cache line. */
#define CHIP_STATS_BUFFER_SIZE ((sizeof(statistics_block_t) + \
                                 CACHE_LINE_SIZE_MASK) & \
                                ~CACHE_LINE_SIZE_MASK)

// Status blocks type per storm - used for initialization
#define STATUS_BLOCK_INVALID_TYPE 0
#define STATUS_BLOCK_SP_SL_TYPE 1
#define STATUS_BLOCK_NORMAL_TYPE 2
#define STATUS_BLOCK_NORMAL_SL_TYPE 3
372
/* Slow-path / status-block event bits: low 16 bits describe the default
 * status block, high 16 bits the non-default one. */
#define LM_DEF_NO_EVENT_ACTIVE 0x00000000
#define LM_DEF_ATTN_ACTIVE (1L<<0)
/* NOTE: uses the per-storm bits defined just below; macros expand at use
 * time, so the ordering is harmless. */
#define LM_SP_ACTIVE (LM_DEF_USTORM_ACTIVE | LM_DEF_CSTORM_ACTIVE | LM_DEF_XSTORM_ACTIVE | LM_DEF_TSTORM_ACTIVE)

#define LM_DEF_USTORM_ACTIVE (1L<<1)
#define LM_DEF_CSTORM_ACTIVE (1L<<2)
#define LM_DEF_XSTORM_ACTIVE (1L<<3)
#define LM_DEF_TSTORM_ACTIVE (1L<<4)

#define LM_DEF_EVENT_MASK 0xffff

#define LM_NON_DEF_USTORM_ACTIVE (1L<<16)
#define LM_NON_DEF_CSTORM_ACTIVE (1L<<17)
#define LM_NON_DEF_EVENT_MASK 0xffff0000

/* Hard-wired attention lines - function 0 view. */
#define ATTN_NIG_FOR_FUNC (1L << 8)
#define ATTN_SW_TIMER_4_FUNC (1L << 9)
#define GPIO_2_FUNC (1L << 10)
#define GPIO_3_FUNC (1L << 11)
#define GPIO_4_FUNC (1L << 12)
#define ATTN_GENERAL_ATTN_1 (1L << 13)
#define ATTN_GENERAL_ATTN_2 (1L << 14)
#define ATTN_GENERAL_ATTN_3 (1L << 15)

/* Hard-wired attention lines - function 1 view (same bit positions). */
#define ATTN_NIG_FOR_FUNC1 (1L << 8)
#define ATTN_SW_TIMER_4_FUNC1 (1L << 9)
#define GPIO_2_FUNC1 (1L << 10)
#define GPIO_3_FUNC1 (1L << 11)
#define GPIO_4_FUNC1 (1L << 12)
#define ATTN_GENERAL_ATTN_4 (1L << 13)
#define ATTN_GENERAL_ATTN_5 (1L << 14)
#define ATTN_GENERAL_ATTN_6 (1L << 15)

#define ATTN_HARD_WIRED_MASK 0xff00

/* HC segment access codes. */
#define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/
#define HC_SEG_ACCESS_ATTN 4

#define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/
412
/* Buffer sizes of the status blocks, each rounded up to a whole cache
 * line. This is the same for host_def_status_block - they are the same size. */
//TODO: check the cache line issue! do we need it as in Teton?
#define E2_STATUS_BLOCK_BUFFER_SIZE ((sizeof(struct host_hc_status_block_e2) + \
                                      CACHE_LINE_SIZE_MASK) & \
                                     ~CACHE_LINE_SIZE_MASK)

#define E1X_STATUS_BLOCK_BUFFER_SIZE ((sizeof(struct host_hc_status_block_e1x) + \
                                       CACHE_LINE_SIZE_MASK) & \
                                      ~CACHE_LINE_SIZE_MASK)

#define DEF_STATUS_BLOCK_BUFFER_SIZE ((sizeof(struct host_sp_status_block) + \
                                       CACHE_LINE_SIZE_MASK) & \
                                      ~CACHE_LINE_SIZE_MASK)

/* This is the def and non-def status block ID format according to spec --> used for debugging purpose only */
#define DBG_SB_ID(port,stormID,cpuID) (((port) << 7) | ((stormID) << 5) | (cpuID))
#define DBG_DEF_SB_ID(port,stormID,vnicID) (((port) << 7) | ((stormID) << 5) | (0x10+vnicID)) /* the ID is for debugging purposes, it's not looked at by hw/fw*/

/* Last-acknowledged HC indices (RX uses the USTORM array, TX the CSTORM
 * array). */
#define SB_RX_INDEX(pdev, index) ((pdev)->vars.u_hc_ack[index])
#define SB_TX_INDEX(pdev, index) ((pdev)->vars.c_hc_ack[index])

#define SB_INDEX_OF_USTORM(pdev, index) ((pdev)->vars.u_hc_ack[index])
//#define SB_INDEX_OF_CSTORM(pdev, index) ((pdev)->vars.c_hc_ack[index])

#define DEF_SB_INDEX(pdev) ((pdev)->vars.hc_def_ack)
#define DEF_SB_INDEX_OF_ATTN(pdev) ((pdev)->vars.attn_def_ack)

//_________________________________________________________________________________________________--

#define NUM_OF_ELT_PAGES 16 // this is the size of the elt in the hw
#define DEF_STATUS_BLOCK_IGU_INDEX 16 //MAX_NDSB //this is where the default status block lies (that is VBD's static index of default status block)
#define DEF_STATUS_BLOCK_INDEX HC_SP_SB_ID //this is where the default status block lies (that is VBD's static index of default status block)
#define MAX_DYNAMIC_ATTN_GRPS 8 //this is the 8 non hard-wired groups configured by the driver (exc. PXP,NIG)
#define MAX_NUM_BAR 3 // number of bars supported by the hw 1 bar in first phase emulation
#define MAX_NUM_VF_BAR 3

#define BAR_0 0 //index for BAR0
#define BAR_1 1 //index for BAR1
#define BAR_2 2 //index for BAR2

/* HW RSS configuration */
#define RSS_INDIRECTION_TABLE_SIZE 0x80 /* Maximum indirection table. */
#define RSS_HASH_KEY_SIZE 0x28 /* Maximum key size. */

/* RX BD to RX CQE size ratio */
#define LM_RX_BD_CQ_SIZE_RATIO (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
459
460 /*******************************************************************************
461 * Macros.
462 ******************************************************************************/
/* Classic offsetof idiom built on a null-based member address and the
 * project's PTR_SUB helper; guarded so a platform-provided OFFSETOF wins. */
#ifndef OFFSETOF
#define OFFSETOF(_s, _m) ((u32_t) PTR_SUB(&((_s *) 0)->_m, (u8_t *) 0))
#endif
/* Offset of _m rounded down to 4-byte alignment. */
#define WORD_ALIGNED_OFFSETOF(_s, _m) (OFFSETOF(_s, _m) & ~0x03)
467
/* Integer division rounded up.
 * Fix: arguments are now fully parenthesized - the original expanded the
 * raw tokens, so CEIL_DIV(x + 1, y) mis-associated with '/' and '%'.
 * warning: still NOT side-effect safe - don't use this as CEIL_DIV(a++, b). */
#define CEIL_DIV( a, b ) (((a) / (b)) + (((a) % (b)) ? 1 : 0))
470
/**
 * @description
 *    Should be moved to a common place.
 *    Find the next power of 2 that is larger than "num".
 * @param num - The value for which to find a larger power of 2.
 * @param num_bits_supported - The largest number of bits supported
 *        for the result.
 *
 * @return u32_t - The next power of 2 that is larger than
 *        "num".
 */
u32_t upper_align_power_of_2(IN const u16_t num, IN const u8_t num_bits_supported);
484
485
486 /*
487 The attention lines works with the state machine below for parallel computation:
488
489 cols: 0 1 2 3 4 5 6 7
490 _________________________
491 Attn_bits 0 0 1 1 0 0 1 1
492 Attn_ack 0 1 0 1 0 1 0 1
493 State 0 0 0 0 1 1 1 1
494
495 cols: 0,1,6,7 - NOP
496 cols: 3,4 - ASSERT
497 cols: 2 - Assertion procedure
498 cols: 5 - Deassertion procedure
499 */
/* Classify the attention-line change into asserted / de-asserted group
 * masks according to the state machine documented above (cols 2 and 5).
 * The DbgBreakIf catches the illegal ASSERT states (cols 3,4).
 * Fix: wrapped in do { } while (0) so the macro expands to a single
 * statement and is safe inside un-braced if/else bodies; the expansion
 * is otherwise unchanged. */
#define GET_ATTN_CHNG_GROUPS(_pdev, _attn_bits, _attn_ack, _asserted_grps_ptr, _deasserted_grps_ptr) \
do {                                                                         \
    u16_t _state = (_pdev)->vars.attn_state;                                 \
                                                                             \
    DbgBreakIf(~(_attn_bits ^ _attn_ack) & (_attn_bits ^ _state));           \
                                                                             \
    *(_asserted_grps_ptr)   = _attn_bits  & ~_attn_ack & ~_state;            \
    *(_deasserted_grps_ptr) = ~_attn_bits &  _attn_ack &  _state;            \
} while (0)
509
510 /* Finds out whether a specific unicore interrupt has caused the NIG attn to get asserted.
511 * If this is the case, need to adjust the portion of bits of the NIG config status interrupt register
512 * to the value read from the unicore interrupt register.
513 * We use here a "bit overwrite" instead of just a "bit flip" since the value read from the
514 * unicore interrupt register might be spread over more than a single bit!
515 */
/* See the comment block above for the semantics (overwrite, not flip,
 * of the unicore bits inside the NIG status word).
 * Fix: wrapped in do { } while (0) so the macro expands to a single
 * statement and is safe inside un-braced if/else bodies; the expansion
 * is otherwise unchanged. */
#define HANDLE_UNICORE_INT_ASSERTED(_pdev, _nig_reg_name, _unicore_intr_val_ptr, _unicore_intr_name, _nig_status_port_ptr, _is_unicore_assrtd_ptr, _unicore_intr_size) \
do {                                                                                                  \
    *(_unicore_intr_val_ptr) = REG_RD(_pdev, _nig_reg_name);                                          \
    *(_is_unicore_assrtd_ptr) = ( ( *(_unicore_intr_val_ptr) << _unicore_intr_size) ^ (*(_nig_status_port_ptr) & _unicore_intr_name)); \
                                                                                                      \
    if (*(_is_unicore_assrtd_ptr))                                                                    \
    {                                                                                                 \
        DbgMessage(_pdev, WARN, "lm_handle_assertion_processing(): " #_unicore_intr_name " asserted!\n"); \
        *(_nig_status_port_ptr) = (*(_nig_status_port_ptr) & ~(_unicore_intr_name)) | (*(_unicore_intr_val_ptr) << _unicore_intr_size); \
    }                                                                                                 \
} while (0)
// *(_nig_status_port_ptr) ^= ( 0x1 << _unicore_intr_size);
528
529
530 /*******************************************************************************
531 * Statistics.
532 ******************************************************************************/
/* Driver-kept RX statistics. */
typedef struct _lm_rx_statistics_t
{
    u32_t aborted; /* count of aborted receives */
} lm_rx_stats_t;
537
538 /*******************************************************************************
539 * Packet descriptor.
540 ******************************************************************************/
541
/* Pre-allocated contiguous buffer used to coalesce a multi-fragment TX
 * packet into a single fragment. */
typedef struct _lm_coalesce_buffer_t
{
    s_list_entry_t link;  /* free-list linkage */

    u8_t *mem_virt;       /* virtual address of the buffer */
    u32_t buf_size;       /* buffer size in bytes */
    lm_frag_list_t frags; /* coalesce buf is a frag list with 1 frag */
} lm_coalesce_buffer_t;
550
/* Per-client (L2 connection) configuration parameters. */
typedef struct _lm_client_con_params_t
{
    u32_t mtu;          /* client MTU */
    u32_t lah_size;     /* lookahead header size - TODO confirm units at use sites */
    u32_t num_rx_desc;  /* number of RX descriptors */
    u32_t num_tx_desc;  /* number of TX descriptors */
    u8_t attributes;    /* bitwise OR of LM_CLIENT_ATTRIBUTES_* below */
#define LM_CLIENT_ATTRIBUTES_RX      (0x1) /* client has an RX side */
#define LM_CLIENT_ATTRIBUTES_TPA     (0x2) /* TPA (aggregation) enabled */
#define LM_CLIENT_ATTRIBUTES_TX      (0x4) /* client has a TX side */
#define LM_CLIENT_ATTRIBUTES_REG_CLI (0x8) /* presumably: regular FW client - verify */
} lm_client_con_params_t;
563
/* Driver-level packet descriptor shared by the TX and RX paths; the
 * per-direction data lives in the u1 union. */
typedef struct _lm_packet_t
{
    /* Must be the first entry in this structure. */
    s_list_entry_t link;

    lm_status_t status; /* completion status of the packet */
    u32_t size;         /* packet size in bytes */

    union _lm_pkt_info_t
    {
        struct _lm_tx_pkt_info_t
        {
            lm_coalesce_buffer_t *coalesce_buf; /* set when frags were coalesced into one buffer */
            u16_t next_bd_idx;  /* BD ring index where this packet starts */

            u16_t bd_used;      /* number of BDs consumed by this packet */
            u8_t span_pages;
            u8_t _pad1;
            u8_t hdr_nbds;      /* BDs holding the header - presumably for LSO; verify */

            u16_t reserve;

            // TODO - Do we want this stuff ????
#if DBG
            struct eth_tx_bd *dbg_start_bd; /* debug: first BD of the packet */
            u16_t dbg_start_bd_idx;
            u16_t dbg_frag_cnt;
#endif
        } tx;

        struct _lm_rx_pkt_info_t
        {
            u16_t next_bd_idx;
            u8_t qidx; // VBD mapping to RSS queue.
#define LM_MAX_SGES_FOR_PACKET 1 // TODO_QG rename to LM_MAX_FW_SGES_FOR_PACKET
            lm_address_t mem_phys[1+LM_MAX_SGES_FOR_PACKET]; // arrays content:
            // bd ring address[0] + sge addresses[1] (optional)
            // (currently one)
            u32_t* hash_val_ptr; // presumably where the RSS hash value is stored - verify

#if DBG
            struct eth_rx_sge *dbg_sge;
            struct eth_rx_bd *dbg_bd;
#endif
            union eth_sgl_or_raw_data sgl_or_raw_data; // currently used by OOO_CID. upper layer should handle endianity!
        } rx;
    } u1; // _lm_pkt_info_t

    lm_pkt_tx_info_t* l2pkt_tx_info; /* associated L2 TX packet info */
    lm_pkt_rx_info_t* l2pkt_rx_info; /* associated L2 RX packet info */

} lm_packet_t;
616
617 DECLARE_FRAG_LIST_BUFFER_TYPE(lm_packet_frag_list_t, MAX_FRAG_CNT);
618
619 /*******************************************************************************
620 * Configurable parameters for the hardware dependent module.
621 ******************************************************************************/
622
623 // I only want this enum for LLFC_TRAFFIC_TYPE_MAX value (should be HSI and fixed by FW)
/* Driver-side traffic classes used for LLFC priority mapping; the MAX
 * entry sizes traffic_type_priority[] (kept aligned with the FW HSI).
 * Note: the historical 'traafic' spelling in the typedef name is kept -
 * it is referenced elsewhere. */
typedef enum _driver_traafic_type_t
{
    LLFC_DRIVER_TRAFFIC_TYPE_NW    = 0, /* networking */
    LLFC_DRIVER_TRAFFIC_TYPE_FCOE  = 1,
    LLFC_DRIVER_TRAFFIC_TYPE_ISCSI = 2,
    LLFC_DRIVER_TRAFFIC_TYPE_MAX   = 3
} driver_traafic_type_t;
/* DCBX application-priority parameters. */
typedef struct _app_params_t
{
    u32_t enabled;
    u32_t traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX]; /* priority per traffic type */
}app_params_t;
//Cos DCBX params
#define DCBX_COS_MAX_NUM_E2E3A0 (ELINK_DCBX_E2E3_MAX_NUM_COS)
// This define is different than CLC, because CLC currently supports the Max number of COS
#define DCBX_COS_MAX_NUM_E3B0 (min(3,ELINK_DCBX_E3B0_MAX_NUM_COS))
#define DCBX_COS_MAX_NUM 3 //(max(DCBX_COS_MAX_NUM_E2,DCBX_COS_MAX_NUM_E3B0))
641
642
/* Per-COS (class of service) DCBX parameters. */
typedef struct _dcbx_cos_params_t
{
    u32_t bw_tbl;      /* bandwidth allocation for this COS - TODO confirm units */
    u32_t pri_bitmask; /* bitmask of priorities mapped to this COS */
    u8_t s_pri;
    /**
     * valid values are 0 - 5. 0 is highest strict priority.
     * There can't be two COS's with the same pri. *
     */
#define DCBX_S_PRI_INVALID (DCBX_COS_MAX_NUM)
#define DCBX_S_PRI_COS_HIGHEST (0)
#define DCBX_S_PRI_COS_NEXT_LOWER_PRI(_sp) ((_sp) + 1)
    u8_t pauseable; // This value is obsolete in CHIP_IS_E3B0
    // (pdev) and is only for debugging CHIP_IS_E2E3(pdev)
}dcbx_cos_params_t;
658
659 typedef struct _pg_params_t
660 {
661 u32_t enabled;
662 #define LM_DCBX_ETS_IS_ENABLED(_pdev) ((TRUE == IS_DCB_ENABLED(pdev)) && \
663 (TRUE == ((_pdev)->params.dcbx_port_params.ets.enabled)))
664 u8_t num_of_cos; //valid COS entries
665 dcbx_cos_params_t cos_params[DCBX_COS_MAX_NUM];
666 }pg_params_t;
667
668 typedef struct _pfc_params_t
669 {
670 u32_t enabled;
671 u32_t priority_non_pauseable_mask;
672 #define LM_DCBX_PFC_PRI_NON_PAUSE_MASK(_pdev) (_pdev->params.dcbx_port_params.pfc.priority_non_pauseable_mask)
673 #define LM_DCBX_PFC_PRI_PAUSE_MASK(_pdev) ((u8_t)(~LM_DCBX_PFC_PRI_NON_PAUSE_MASK(_pdev)))
674 #define LM_DCBX_PFC_PRI_MASK (0xFF)
675 #define LM_DCBX_PFC_PRI_GET_PAUSE(_pdev,_pg_pri) (_pg_pri & LM_DCBX_PFC_PRI_PAUSE_MASK(_pdev))
676 #define LM_DCBX_PFC_PRI_GET_NON_PAUSE(_pdev,_pg_pri) (LM_DCBX_PFC_PRI_NON_PAUSE_MASK(_pdev) & _pg_pri)
677 #define LM_DCBX_IS_PFC_PRI_SOME_PAUSE(_pdev,_pg_pri) (0 != LM_DCBX_PFC_PRI_GET_PAUSE(_pdev,_pg_pri))
678 #define LM_DCBX_IS_PFC_PRI_ONLY_PAUSE(_pdev,_pg_pri) (_pg_pri == LM_DCBX_PFC_PRI_GET_PAUSE(_pdev,_pg_pri))
679 #define LM_DCBX_IS_PFC_PRI_ONLY_NON_PAUSE(_pdev,_pg_pri) (_pg_pri == LM_DCBX_PFC_PRI_GET_NON_PAUSE(_pdev,_pg_pri))
680 #define LM_DCBX_IS_PFC_PRI_MIX_PAUSE(_pdev,_pg_pri) (!(LM_DCBX_IS_PFC_PRI_ONLY_NON_PAUSE(_pdev,_pg_pri) || \
681 LM_DCBX_IS_PFC_PRI_ONLY_PAUSE(_pdev,_pg_pri)))
682 }pfc_params_t;
683
/* Aggregated per-port DCBX parameters. */
typedef struct _dcbx_port_params_t
{
    u32_t dcbx_enabled;
    pfc_params_t pfc; /* priority flow control */
    pg_params_t ets;  /* priority groups / ETS */
    app_params_t app; /* application priorities */
}dcbx_port_params_t;
691
692
/* Selects which DCBX MIB image to read. */
typedef enum
{
    DCBX_READ_LOCAL_MIB  = 0, /* locally derived MIB */
    DCBX_READ_REMOTE_MIB = 1  /* MIB advertised by the peer */
}dcbx_read_mib_type;
698
/* Life-cycle states of the deferred DCBX-update task. */
typedef enum
{
    DCBX_UPDATE_TASK_STATE_FREE     = 0, /* no update pending */
    DCBX_UPDATE_TASK_STATE_SCHEDULE = 1, /* update queued */
    DCBX_UPDATE_TASK_STATE_HANDLED  = 2  /* update processed */
} dcbx_update_task_state;
705
/* FW non-default status block state-machine / IGU configuration. */
typedef enum
{
    LM_SINGLE_SM = 0, /* default */
    LM_DOUBLE_SM_SINGLE_IGU = 1,
    LM_DOUBLE_SM_DOUBLE_IGU = 2
} fw_ndsb_type;
712
/* Class-of-service operating mode (3 vs. 6 COS queues). */
typedef enum
{
    LM_COS_MODE_COS3 = 0,
    LM_COS_MODE_COS6 = 1
} lm_cos_modes ;
718
/* How network COS assignment is determined. */
typedef enum
{
    LM_COS_MODE_OVERRIDE = 0,
    LM_COS_MODE_STATIC = 1
} lm_network_cos_modes ;
724
725
/* Auto-GrEEEn (energy-efficient ethernet) policy. */
typedef enum
{
    LM_AUTOGREEEN_DISABLED = 0,
    LM_AUTOGREEEN_ENABLED = 1,
    LM_AUTOGREEEN_NVRAM = 2  /* take the setting from NVRAM */
} lm_autogreeen_t ;
732
733 /*** This i2c section should be in common .h file with EMC... ***/
734
/* Sizes/offsets of the cached I2C data - presumably the transceiver
 * (SFP) A0/A2 EEPROM pages; confirm against the EMC-shared definitions. */
#define I2C_BINARY_SIZE 256

#define I2C_A2_DYNAMIC_OFFSET 0   /* dynamic half of the A2 page */
#define I2C_A2_DYNAMIC_SIZE 128

#define I2C_A2_STATIC_OFFSET 128  /* static half of the A2 page */
#define I2C_A2_STATIC_SIZE 128
741
/* Indices of the cached I2C sections (addresses A0h / A2h). */
typedef enum
{
    I2C_SECTION_A0  = 0,
    I2C_SECTION_A2  = 1,
    I2C_SECTION_MAX = 2
} i2c_section_t;
748
/* Cached I2C section data plus bookkeeping for the last query. */
typedef struct _i2c_binary_info_t
{
    u32_t last_query_status[I2C_SECTION_MAX]; /* per-section status of the last query */
    u64_t last_query_ts;                      /* timestamp of the last query */
    u32_t reserved[10];
    u8_t ax_data[I2C_SECTION_MAX][I2C_BINARY_SIZE]; /* raw A0/A2 section bytes */
} i2c_binary_info_t;
756
757 /*** This i2c section should be in common .h file with EMC... ***/
758
759 typedef struct _lm_params_t
760 {
761 /* This value is used by the upper module to inform the protocol
762 * of the maximum transmit/receive packet size. Packet size
763 * ranges from 1500-9600 bytes. This value does not include ETH_PACKET_LEN, LLC-SNAP, VLAN tag, CRC32
764 */
765 u32_t mtu[LM_CLI_IDX_MAX];
766 #define LM_MTU_INVALID_VALUE (0xFFFFFFFF)
767 u32_t mtu_max;
768
769 #define MAX_CLI_PACKET_SIZE(pdev, chain_idx) ((u16_t)(pdev)->params.l2_cli_con_params[(chain_idx)].mtu + (pdev)->params.rcv_buffer_offset + ETHERNET_PACKET_HEADER_SIZE+ ETHERNET_VLAN_TAG_SIZE + ETHERNET_LLC_SNAP_SIZE + CACHE_LINE_SIZE)
770 #define CLI_MTU_WITH_ETH_HDR_SIZE(pdev, chain_idx) ((u16_t)(pdev)->params.l2_cli_con_params[(chain_idx)].mtu + ETHERNET_PACKET_HEADER_SIZE)
771 #define MAX_L2_CLI_BUFFER_SIZE(pdev, chain_idx) ((MAX_CLI_PACKET_SIZE(pdev, chain_idx) + CACHE_LINE_SIZE_MASK) & \
772 ~CACHE_LINE_SIZE_MASK)
773
774 #define LM_MTU_NDIS_DEFAULT (1500)
775 #define LM_MTU_ISCSI_DEFAULT (1500)
776 #define LM_MTU_FCOE_DEFAULT (2500)
777 #define LM_MTU_FWD_DEFAULT (LM_MTU_NDIS_DEFAULT)
778
779 #define LM_MTU_FLOW_CTRL_TX_THR (5000)
780 #define LM_MTU_MAX_DEFAULT (1500)
781 #define LM_MTU_MAX (9600)
782 /* Current node address. The MAC address is initially set to the
783 * hardware address. This entry can be modified to allow the driver
784 * to override the default MAC address. The new MAC address takes
785 * effect after a driver reset. */
786 u8_t mac_addr[8];
787
/* parameters for tx/rx chains.
1 for all rss chains, and 1 more for each non-rss chain */
790 u32_t l2_rx_desc_cnt[1+MAX_NON_RSS_CHAINS];
791 u32_t l2_tx_bd_page_cnt[1+MAX_NON_RSS_CHAINS];
792 u32_t l2_tx_coal_buf_cnt[1+MAX_NON_RSS_CHAINS];
793 lm_client_con_params_t l2_cli_con_params[3*MAX_HW_CHAINS + MAX_NON_RSS_CHAINS];
794
795 /* All the L2 receive buffers start at a cache line size aligned
796 * address. This value determines the location of the L2 frame header
797 * from the beginning of the receive buffer. */
798 u32_t rcv_buffer_offset;
799
/* network type for definition of max cwnd */
801 u32_t network_type;
802 #define LM_NETOWRK_TYPE_LAN 0
803 #define LM_NETOWRK_TYPE_WAN 1
804 #define LM_NETOWRK_TYPE_AUTO 2 /* Linux only */
805 u32_t max_cwnd_wan;
806 u32_t max_cwnd_lan;
807
808 u32_t cid_allocation_mode;
809 #define LM_CID_ALLOC_REGULAR 1
810 #define LM_CID_ALLOC_DELAY 2 /* delay cid allocation when there are no free cids but there are
811 * cids pending allocation */
812 #define LM_CID_ALLOC_NUM_MODES 2
813
814
815 u32_t ndsb_type;
816
817 u32_t int_coalesing_mode;
818 #define LM_INT_COAL_NONE 0
819 #define LM_INT_COAL_PERIODIC_SYNC 1 /* default */
820 #define LM_INT_COAL_NUM_MODES 2
821 u32_t int_per_sec_rx_override;
822 u32_t int_per_sec_rx[HC_USTORM_SB_NUM_INDICES];
823 u32_t int_per_sec_tx_override;
824 u32_t int_per_sec_tx[HC_CSTORM_SB_NUM_INDICES];
825
826 /* VF interrupt moderation (Low, Medium, High) parameters */
827 u32_t vf_int_per_sec_rx[3];
828 u32_t vf_int_per_sec_tx[3];
829 #define LM_VF_INT_LOW_IDX 0
830 #define LM_VF_INT_MEDIUM_IDX 1
831 #define LM_VF_INT_HIGH_IDX 2
832 /* all protocols dynamic coalescing params */
833 u32_t enable_dynamic_hc[HC_DHC_SB_NUM_INDICES];
834 u32_t hc_timeout0[2][HC_DHC_SB_NUM_INDICES];
835 u32_t hc_timeout1[2][HC_DHC_SB_NUM_INDICES];
836 u32_t hc_timeout2[2][HC_DHC_SB_NUM_INDICES];
837 u32_t hc_timeout3[2][HC_DHC_SB_NUM_INDICES];
838 u32_t hc_threshold0[2];
839 u32_t hc_threshold1[2];
840 u32_t hc_threshold2[2];
841 u32_t l2_dynamic_hc_min_bytes_per_packet;
842 u32_t l4_hc_scaling_factor;
843
844 u32_t l4_hc_ustorm_thresh;
845 u32_t l4_scq_page_cnt;
846 u32_t l4_rcq_page_cnt;
847 u32_t l4_grq_page_cnt;
848 u32_t l4_preallocate_cnt;
849 u32_t l4_preallocate_blk_size;
850 u32_t l4_preallocate_retry_cnt;
851
852 #if defined(_VBD_) || defined(_VBD_CMD_)
853 #define NUM_BUFS_FOR_GRQS(pdev) \
854 (pdev)->params.l4_grq_page_cnt*512*(LM_TOE_RSS_CHAIN_CNT(pdev))
855 #else
856 #define NUM_BUFS_FOR_GRQS(pdev) \
857 (pdev)->params.l4_grq_page_cnt*512*1
858 #endif
859 // #define NUM_BUFS_FOR_GRQS(pdev)
860 // (pdev)->params.l4_grq_page_cnt*512*(LM_TOE_RSS_CHAIN_CNT(pdev))
861
862 u32_t l4_tx_chain_page_cnt;
863 u32_t l4_rx_chain_page_cnt;
864 u32_t l4_gen_buf_size; /* minimum size of generic buffer */
865 u32_t l4_history_cqe_cnt; /* how much history to save */
866
867 /* DCA Related params */
868 u32_t l4_ignore_grq_push_enabled; /* Configuration passed to fw whether to ignore push on grq or not */
869
870 u32_t l4cli_flags; /* such as LLC_SNAP*/
871 u32_t l4cli_ticks_per_second;
872 u32_t l4cli_ack_frequency;
873 u32_t l4cli_delayed_ack_ticks;
874 u32_t l4cli_max_retx;
875 u32_t l4cli_doubt_reachability_retx;
876 u32_t l4cli_sws_prevention_ticks;
877 u32_t l4cli_dup_ack_threshold;
878 u32_t l4cli_push_ticks;
879 u32_t l4cli_nce_stale_ticks;
880 u32_t l4cli_starting_ip_id;
881
882 /* Various test/debug modes. Any validation failure will cause the
883 * driver to write to misc.swap_diag0 with the corresponding flag.
884 * The intention is to trigger the bus analyzer. */
885 // TODO - adjust to our needs
886 u32_t test_mode;
887 #define TEST_MODE_DISABLED 0x00
888 #define TEST_MODE_OBSOLETE_0 0x01 /* was TEST_MODE_IKOS */
889 #define TEST_MODE_OBSOLETE_1 0x02 /* was TEST_MODE_FPGA */
890 #define TEST_MODE_VERIFY_RX_CRC 0x10
891 #define TEST_MODE_RX_BD_TAGGING 0x20
892 #define TEST_MODE_TX_BD_TAGGING 0x40
893 #define TEST_MODE_LOG_REG_ACCESS 0x80
894 #define TEST_MODE_SAVE_DUMMY_DMA_DATA 0x0100
895 #define TEST_MODE_INIT_GEN_BUF_DATA 0x0200
896 #define TEST_MODE_DRIVER_PULSE_ALWAYS_ALIVE 0x0400
897 #define TEST_MODE_IGNORE_SHMEM_SIGNATURE 0x0800
898 #define TEST_MODE_NO_MCP 0x1000
899
900 lm_offload_t ofld_cap;
901 lm_offload_t ofld_cap_to_ndis;
902
903 lm_wake_up_mode_t wol_cap;
904
905 lm_flow_control_t flow_ctrl_cap;
906 lm_eee_policy_t eee_policy;
907 lm_medium_t req_medium;
908
909 u32_t selective_autoneg;
910 #define SELECTIVE_AUTONEG_OFF 0
911 #define SELECTIVE_AUTONEG_SINGLE_SPEED 1
912 #define SELECTIVE_AUTONEG_ENABLE_SLOWER_SPEEDS 2
913
914 u32_t wire_speed; /* Not valid on SERDES. */
915
916 /* Ways for the MAC to determine a link change. */
917 u32_t phy_int_mode;
918 #define PHY_INT_MODE_AUTO 0
919 #define PHY_INT_MODE_MI_INTERRUPT 1
920 #define PHY_INT_MODE_LINK_READY 2
921 #define PHY_INT_MODE_AUTO_POLLING 3
922
923 /* Ways for the driver to get the link change event. */
924 u32_t link_chng_mode;
925 #define LINK_CHNG_MODE_AUTO 0
926 #define LINK_CHNG_MODE_USE_STATUS_REG 1
927 #define LINK_CHNG_MODE_USE_STATUS_BLOCK 2
928
929 /* Ways for the driver to determine which phy to prefer in case of dual media. */
930 u32_t phy_priority_mode;
931 #define PHY_PRIORITY_MODE_HW_DEF 0
932 #define PHY_PRIORITY_MODE_10GBASET 1
933 #define PHY_PRIORITY_MODE_SERDES 2
934 #define PHY_PRIORITY_MODE_HW_PIN 3
935
936 u32_t interrupt_mode; /* initialized by um to state whether we are using MSI-X or not, determined after we receive resources from OS */
937 #define LM_INT_MODE_INTA 0
938 #define LM_INT_MODE_SIMD 1 /* Single ISR / Multiple DPC */
#define LM_INT_MODE_MIMD 2 /* Multiple ISR / Multiple DPC */
940
941 /* Relevant only for E2, and defines how the igu will be worked with (via GRC / BAR). Default will be set to BAR,
942 * the defines for this are INTR_BLK_ACCESS_GRC, INTR_BLK_ACCESS_IGUMEM */
943 u32_t igu_access_mode;
944
945 u32_t sw_config;
946 #define LM_SWCFG_1G 0
947 #define LM_SWCFG_10G 1
948 #define LM_SWCFG_AD 2
949 #define LM_SWCFG_OT_AD 3
950 #define LM_SWCFG_HW_DEF 4
951
952 u8_t mf_mode; //use enum mf_mode
953 u8_t sd_mode;
954 u8_t pad[2];
955
956 #define IS_MF_AFEX(_pdev) IS_MF_AFEX_MODE(_pdev)
957 #define IS_MF_AFEX_MODE(_pdev) (IS_MULTI_VNIC(_pdev) && ((_pdev)->params.mf_mode == MULTI_FUNCTION_AFEX))
958 #define IS_MF_SI_MODE(_pdev) (IS_MULTI_VNIC(_pdev) && ((_pdev)->params.mf_mode == MULTI_FUNCTION_SI))
959 #define IS_MF_SD_MODE(_pdev) (IS_MULTI_VNIC(_pdev) && ((_pdev)->params.mf_mode == MULTI_FUNCTION_SD))
960 #define IS_SD_REGULAR_MODE(_pdev) (IS_MF_SD_MODE(_pdev) && ((_pdev)->params.sd_mode == SD_REGULAR_MODE))
961 #define IS_SD_UFP_MODE(_pdev) (IS_MF_SD_MODE(_pdev) && ((_pdev)->params.sd_mode == SD_UFP_MODE))
962 #define IS_SD_BD_MODE(_pdev) (IS_MF_SD_MODE(_pdev) && ((_pdev)->params.sd_mode == SD_BD_MODE))
963
964 lm_autogreeen_t autogreeen; // autogrEEEn support
965
966 u32_t tmr_reload_value1;
967
968 u32_t max_func_connections; // Number of connection supported by this function.
969 /* TODO: init max_toe/max_rdma from somewhere else should come from licence info */
970 u32_t max_supported_toe_cons;
971 u32_t max_func_toe_cons; // Number of TOE connections supported
972 u32_t max_func_rdma_cons; // Number of RDMA connections supported
973 u32_t max_func_iscsi_cons; // Number of iSCSI connections supported
974 u32_t max_func_fcoe_cons; // Number of FCoE connections supported
975 u32_t max_fcoe_task; // Number of FCoE max_fcoe_exchanges
976 u32_t max_eth_including_vfs_conns;
977 u32_t context_line_size; //Size of the context as configured in the CDU.
978 u32_t context_waste_size; // Waste size as configured in the CDU.
979 u32_t num_context_in_page;
980 u32_t client_page_size; // Client memory page size.
981 u32_t elt_page_size; // ELT page size.
982 u32_t ilt_client_page_size; // ILT clients page size. We will give all client same page size. All ports as well.
983 u32_t cfc_last_lcid; // number of supported connections in CFC - 1
984 u32_t bandwidth_min; //The last value of min CMNG bandwidth configured by BACS
985 u32_t bandwidth_max; //The last value of max CMNG bandwidth configured by BACS
986
987 /* vnic parameters */
988 /* Relative Function Number */
989 u8_t pfunc_rel;
990 #define PORT_ID_PARAM_FUNC_REL(_pfunc_rel) ((_pfunc_rel) & 1) //0 or 1
991 #define PORT_ID_PARAM_FUNC_ABS(_chip_num, _port_mode, _pfunc_abs) (lm_get_port_id_from_func_abs(_chip_num, _port_mode, _pfunc_abs)) //0 or 1
992 #define PORT_ID(pdev) (PORT_ID_PARAM_FUNC_REL(PFDEV(pdev)->params.pfunc_rel)) //0 or 1
993 #define FUNC_ID(pdev) (PFDEV(pdev)->params.pfunc_rel) //0-7
994 #define VNIC_ID_PARAM_FUNC_REL(_pfunc_rel) ((_pfunc_rel) >> 1) //0, 1, 2 or 3
995 #define VNIC_ID(pdev) (VNIC_ID_PARAM_FUNC_REL(PFDEV(pdev)->params.pfunc_rel)) //0, 1, 2 or 3
996 #define LM_FOREACH_FUNC_IN_PORT(pdev, func) \
997 for ((func) = PORT_ID(pdev); (func) < E1H_FUNC_MAX; (func)+=2)
998
999 #define LM_PFS_PER_PORT(pdev) \
1000 ((LM_CHIP_PORT_MODE_4 == CHIP_PORT_MODE(pdev)) ? 2 : 4 )
1001
1002 #define LM_FIRST_ABS_FUNC_IN_PORT(pdev) \
1003 ((LM_CHIP_PORT_MODE_NONE == CHIP_PORT_MODE(pdev))? PORT_ID(pdev) : (PATH_ID(pdev)+2*PORT_ID(pdev)))
1004
1005 #define LM_FOREACH_ABS_FUNC_IN_PORT(pdev, func) \
1006 for ( (func) = LM_FIRST_ABS_FUNC_IN_PORT(pdev) ; (func) < MAX_FUNC_NUM; (func) += (MAX_FUNC_NUM/LM_PFS_PER_PORT(pdev)) )
1007
1008
1009 #define FUNC_MAILBOX_ID_PARAM(_port,_vnic,_chip_num, _port_mode) ((_port) + (_vnic) * ((CHIP_IS_E1x_PARAM(_chip_num) || (_port_mode == LM_CHIP_PORT_MODE_4))? 2 : 1))
1010 #define FUNC_MAILBOX_ID(pdev) (FUNC_MAILBOX_ID_PARAM(PORT_ID(pdev) ,VNIC_ID(pdev),CHIP_NUM(pdev), CHIP_PORT_MODE(pdev)))
1011 /* Absolute Function Number */
1012 u8_t pfunc_abs;
1013 #define ABS_FUNC_ID(pdev) (PFDEV(pdev)->params.pfunc_abs)
1014 #define LM_FOREACH_FUNC_MAILBOX_IN_PORT(pdev, func) \
1015 for ((func) = PORT_ID(pdev); (func) < (CHIP_IS_E1x(pdev) ? E1H_FUNC_MAX : E2_FUNC_MAX); (func)+= (CHIP_IS_E1x(pdev) ? 2 : 1))
1016 u8_t path_id;
1017 #define PATH_ID(pdev) (PFDEV(pdev)->params.path_id)
1018
1019 #define SHMEM_BASE(pdev) (pdev->hw_info.shmem_base)
1020
1021 u8_t vnics_per_port; //1, 2 or 4
1022 u8_t multi_vnics_mode; //flag for multi function mode (can be set even if vnics_per_port==1)
1023 u8_t path_has_ovlan; // The multi function mode in the path (can be different than the mutli-function-mode of the function (4-port MF / SF mode E3 only)
1024 u8_t pfunc_mb_id; // this is for shmem mail box id and currently doesn't support flows which are not mcp send/recv command
1025 u8_t _pad;
1026
1027 #define IS_MULTI_VNIC(pdev) (PFDEV(pdev)->params.multi_vnics_mode)
1028 #define VNICS_PER_PORT(pdev) (PFDEV(pdev)->params.vnics_per_port)
1029 #define VNICS_PER_PATH(pdev) (PFDEV(pdev)->params.vnics_per_port * ((LM_CHIP_PORT_MODE_4 == CHIP_PORT_MODE(pdev))? 2 : 1 ))
1030
1031 u16_t ovlan; //vnic outer vlan
1032 u16_t sd_vlan_eth_type;
1033
1034 /** 32-bit aligned **/
1035 // min max bw
1036 u8_t min_bw[MAX_VNIC_NUM];
1037 u8_t max_bw[MAX_VNIC_NUM];
1038
1039 /* 32 bit aligned. */
1040
1041 /* Status-Block-Related. Status blocks */
1042 u8_t sb_cnt; //number of vnic's non-default status blocks (16, 8 or 4)
1043 #define LM_SB_CNT(pdev) ((pdev)->params.sb_cnt)
1044 #ifdef _VBD_
1045 #define LM_NON_RSS_SB(pdev) (LM_SB_CNT(pdev) - 1)
1046 #else
1047 #define LM_NON_RSS_SB(pdev) (LM_MAX_RSS_CHAINS(pdev) - 1)
1048 #endif
1049 #define LM_NON_RSS_CHAIN(pdev) (LM_MAX_RSS_CHAINS(pdev) - 1)
1050 #define LM_OOO_SB(pdev) (LM_NON_RSS_SB(pdev))
1051
1052 #define LM_SB_ID_VALID(pdev, sb_id) ((sb_id) < LM_SB_CNT(pdev))
1053 #define LM_FOREACH_SB_ID(pdev, sb_id) \
1054 for ((sb_id) = 0; (sb_id) < LM_SB_CNT(pdev); (sb_id)++)
1055 /*
1056 #define LM_REST_OF_SB_ID(pdev, sb_id) \
1057 for ((sb_id) = LM_SB_CNT(pdev); (sb_id) < MAX_RSS_CHAINS / pdev->params.vnics_per_port; (sb_id)++)
1058 */
1059 u8_t max_pf_sb_cnt;
1060 u8_t fw_sb_cnt;
1061
1062 u8_t fw_base_qzone_cnt;
1063 u8_t fw_qzone_id[PXP_REG_HST_ZONE_PERMISSION_TABLE_SIZE]; /* Which qzone-id in the qzone-table is used for updating producers + dhc counters
1064 * relevant from E2. For qzone_id from base area offset in permission table is guaranted */
1065 u8_t fw_aux_qzone_cnt;
1066 u8_t aux_fw_qzone_id; /* Which qzone-id in the qzone-table is used for updating producers + dhc counters
1067 * relevant from E2*/
1068
1069 u8_t max_pf_fw_client_cnt;
1070 u8_t fw_client_cnt;
1071 u8_t base_fw_client_id;
1072 u8_t base_fw_ndsb;
1073
1074 u8_t base_fw_stats_id; /* Where to collect statistics to */
1075
1076 u8_t base_cam_offset; /* Relevant only for VFs (FIXME: revisit... ) */
1077
1078 u8_t vf_num_in_pf;
1079 u8_t vf_num_in_path;
1080 u8_t _cnt_pad[2];
1081 #define REL_VFID(_pdev) ((_pdev)->params.vf_num_in_pf)
1082 #define ABS_VFID(_pdev) ((_pdev)->params.vf_num_in_path)
1083 #define FW_VFID(_pdev) (8 + ABS_VFID((_pdev)))
1084 /* 32 bit aligned. */
1085 u32_t debug_me_register;
1086
1087 /* cam/mac parameters (see lm_init_cam_params) */
1088 u16_t base_offset_in_cam_table;
1089 #define BASE_OFFSET_IN_CAM_TABLE(_pdev) (_pdev)->params.base_offset_in_cam_table
1090
1091 u16_t cam_size;
1092 #define LM_CAM_SIZE(pdev) ((pdev)->params.cam_size)
1093
1094 u16_t mc_table_size[LM_CLI_IDX_MAX];
1095 #define LM_MC_TABLE_SIZE(pdev,lm_client_idx) ((pdev)->params.mc_table_size[lm_client_idx])
1096
1097 u16_t uc_table_size[LM_CLI_IDX_MAX];
1098 #define LM_UC_TABLE_SIZE(pdev,lm_client_idx) ((pdev)->params.uc_table_size[lm_client_idx])
1099
1100 #define LM_MC_NDIS_TABLE_SIZE (64)
1101 #define LM_MC_FCOE_TABLE_SIZE (2)
1102
1103 #define LM_MAX_MC_TABLE_SIZE (LM_MC_NDIS_TABLE_SIZE + LM_MC_FCOE_TABLE_SIZE)
1104 #define LM_KEEP_CURRENT_CAM_VALUE (0xFFFF)
1105 #define LM_INVALID_CAM_BASE_IDX (0xFF)
1106
1107 u8_t rss_caps; /* rss hash calculation types supported */
1108 #define LM_RSS_CAP_IPV4 1
1109 #define LM_RSS_CAP_IPV6 2
1110
1111 u8_t rss_chain_cnt; /* number of rss chains. lm wise, if rss_chain_cnt==1 then rss is disabled */
1112 u8_t tss_chain_cnt; /* number of tss chains. should be identical to rss_chain_cnt. */
1113
1114 /* TODO FIX MAX RSS Chains with new HC SB management*/
1115 u8_t max_rss_chains;
1116 #define LM_MAX_RSS_CHAINS(pdev) (pdev)->params.max_rss_chains
1117
1118 /** 32-bit aligned * */
1119 /* for registry */
1120 u32_t override_rss_chain_cnt; /* value for overriding configured rss_chain_cnt */
1121
1122 #define RSS_ID_TO_SB_ID(_rss_id) (_rss_id) /* Mapping between rss-id to sb-id */
1123 #define RSS_ID_TO_CID(_rss_id) (_rss_id) /* Mapping between rss-id to cid */
1124 #define TSS_ID_TO_CID(_tss_id) (_tss_id) /* Mapping between rss-id to cid */
1125 #define CHAIN_TO_RSS_ID(_pdev, _chain) (lm_mp_get_reg_chain_from_chain(_pdev, _chain)) /* Mapping between rss-id to cid */
1126
1127 #define LM_CLI_RX_FILTER_MASK(pdev, cid) (1 << LM_FW_CLI_ID(pdev, cid))
1128
1129 #define LM_RX_FILTER_ALL_MASK(pdev, ret_val) \
1130 { \
1131 ret_val |= LM_CLI_RX_FILTER_MASK((pdev), NDIS_CID(pdev)); \
1132 ret_val |= LM_CLI_RX_FILTER_MASK((pdev), ISCSI_CID(pdev));\
1133 ret_val |= LM_CLI_RX_FILTER_MASK((pdev), RDMA_CID(pdev)); \
1134 ret_val |= LM_CLI_RX_FILTER_MASK((pdev), FCOE_CID(pdev)); \
1135 }
1136
1137 #define LM_SW_LEADING_SB_ID 0
1138 #define LM_SW_LEADING_RSS_CID(pdev) 0
1139
1140 #define LM_INVALID_ETH_CID (0xFF)
1141
1142 u8_t map_client_to_cid[LM_CLI_IDX_MAX];
1143 #define NDIS_CID(_pdev) (_pdev)->params.map_client_to_cid[LM_CLI_IDX_NDIS]
1144 #define ISCSI_CID(_pdev) (_pdev)->params.map_client_to_cid[LM_CLI_IDX_ISCSI]
1145 #define FCOE_CID(_pdev) (_pdev)->params.map_client_to_cid[LM_CLI_IDX_FCOE]
1146 #define RDMA_CID(_pdev) (_pdev)->params.map_client_to_cid[LM_CLI_IDX_RDMA]
1147 #define FWD_CID(_pdev) (_pdev)->params.map_client_to_cid[LM_CLI_IDX_FWD]
1148 #define OOO_CID(_pdev) (_pdev)->params.map_client_to_cid[LM_CLI_IDX_OOO]
1149
1150 #define LM_CLI_CID(_pdev, lm_cli_idx) ((_pdev)->params.map_client_to_cid[lm_cli_idx])
1151
1152 #define LM_CHAIN_IDX_CLI(pdev, cid) ((lm_chain_type_not_cos != lm_mp_get_chain_type(pdev, cid)) ? LM_CLI_IDX_NDIS : \
1153 ((cid == ISCSI_CID(pdev) ? LM_CLI_IDX_ISCSI : \
1154 ((cid == FCOE_CID(pdev) ? LM_CLI_IDX_FCOE : \
1155 ((cid == FWD_CID(pdev) ? LM_CLI_IDX_FWD : \
1156 ((cid == OOO_CID(pdev) ? LM_CLI_IDX_OOO : \
1157 (((cid >= (pdev)->params.max_pf_fw_client_cnt) && (cid < (pdev)->params.fw_client_cnt)) ? LM_CLI_IDX_NDIS : \
1158 LM_CLI_IDX_MAX))))))))))
1159
1160
1161 #define LM_CHAIN_IDX_TRAFFIC_TYPE(pdev, cid) ((lm_chain_type_not_cos != lm_mp_get_chain_type(pdev, cid)) ? LLFC_TRAFFIC_TYPE_NW : \
1162 ((cid == ISCSI_CID(pdev) ? LLFC_TRAFFIC_TYPE_ISCSI : \
1163 ((cid == FCOE_CID(pdev) ? LLFC_TRAFFIC_TYPE_FCOE : \
1164 ((cid == FWD_CID(pdev) ? LLFC_TRAFFIC_TYPE_NW : \
1165 ((cid == OOO_CID(pdev) ? LLFC_TRAFFIC_TYPE_ISCSI : \
1166 (((cid >= (pdev)->params.max_pf_fw_client_cnt) && (cid < (pdev)->params.fw_client_cnt)) ? LLFC_TRAFFIC_TYPE_NW : \
1167 MAX_TRAFFIC_TYPE))))))))))
1168
1169 #define LM_FW_CLI_ID(pdev, cid) (pdev->params.base_fw_client_id + cid)
1170
1171 /* A bit about E2 Qzone-IDs: qzone is a new area in internal memory where the FW stores producers + dynamic-host-coalesing (dhc) values.
1172 * It is a separate area than areas the have arrays for clients / status-blocks. Technically, the driver can decide to have separate entries
1173 * for producers + dhc entries (it has to do with permissions in PXP for VFs..., for now there is no reason to do this. And we'll use the same
1174 * id, but note that QZONE_ID is intended for fp ring producers. DHC_QZONE_ID is intended for status-block, and thus the parameter they receive.
1175 */
1176 #define LM_FW_QZONE_ID(pdev, cid) (pdev->params.fw_qzone_id[cid])
1177 #define LM_FW_AUX_QZONE_ID(pdev, rel_non_rss_cid) (pdev->params.aux_fw_qzone_id + rel_non_rss_cid)
1178 #define LM_FW_DHC_QZONE_ID(pdev, sb_id) (pdev->params.fw_qzone_id[sb_id])
1179 #define LM_FW_SB_ID(pdev, sb_id) ((sb_id == DEF_STATUS_BLOCK_INDEX)? DEF_STATUS_BLOCK_INDEX: pdev->params.base_fw_ndsb + sb_id)
1180 #define LM_FW_STATS_ID(pdev,cid) (pdev->params.base_fw_stats_id + cid)
1181 #define LM_CLIENT_BIT_VECTOR(pdev, lm_cli_idx) (1 << (LM_FW_CLI_ID(pdev, LM_CLI_CID(pdev, lm_cli_idx))))
1182 #define LM_CID_BIT_VECTOR(pdev, cid) (1 << (LM_FW_CLI_ID(pdev, cid)))
1183
1184
1185 /* 'for loop' macros on rss/tss chains */
1186 #define LM_FOREACH_RSS_IDX(pdev, rss_idx) \
1187 for ((rss_idx) = 0; (rss_idx) < (pdev)->params.rss_chain_cnt; (rss_idx)++)
1188 #define LM_FOREACH_TSS_IDX(pdev, tss_idx) \
1189 for ((tss_idx) = 0; (tss_idx) < (pdev)->params.tss_chain_cnt; (tss_idx)++)
1190 #define LM_FOREACH_RSS_IDX_SKIP_LEADING(pdev, rss_idx) \
1191 for ((rss_idx) = 1; (u8_t)(rss_idx) < (pdev)->params.rss_chain_cnt; (rss_idx)++)
1192 #define LM_FOREACH_TSS_IDX_SKIP_LEADING(pdev, tss_idx) \
1193 for ((tss_idx) = 1; (u8_t)(tss_idx) < (pdev)->params.tss_chain_cnt; (tss_idx)++)
1194
1195
1196 /* L4 RSS */
1197 u8_t l4_rss_chain_cnt; /* number of L4 rss chains. lm wise, if rss_chain_cnt==1 then rss is disabled */
1198 u8_t l4_tss_chain_cnt; /* number of L4 tss chains. */
1199 u8_t l4_rss_base_chain_idx; /* L4 rss base chain Where do the L4 status block start */
1200 u8_t l4_base_fw_rss_id; /* L4 rss base chain Where do the L4 status block start */
1201
1202 #define LM_TOE_BASE_RSS_ID(pdev) ((pdev)->params.l4_rss_base_chain_idx) /* that is first L4 SB */
1203 #define LM_TOE_FW_RSS_ID(pdev, rss_id) ((pdev)->params.l4_base_fw_rss_id + (IS_MULTI_VNIC(pdev) ? (CHIP_IS_E1x(pdev) ? rss_id : 0) : rss_id)) /* that is first L4 SB */
1204 #define LM_TOE_RSS_CHAIN_CNT(pdev) ((pdev)->params.l4_rss_chain_cnt)
1205 #define LM_TOE_TSS_CHAIN_CNT(pdev) ((pdev)->params.l4_tss_chain_cnt)
1206
1207
1208 /* 'for loop' macros on L4 rss/tss chains */
1209 #define LM_TOE_FOREACH_RSS_IDX(pdev, rss_idx) \
1210 for ((rss_idx) = (pdev)->params.l4_rss_base_chain_idx; (rss_idx) < (pdev)->params.l4_rss_base_chain_idx + (pdev)->params.l4_rss_chain_cnt; (rss_idx)++)
1211 #define LM_TOE_FOREACH_TSS_IDX(pdev, tss_idx) \
1212 for ((tss_idx) = (pdev)->params.l4_rss_base_chain_idx; (tss_idx) < (pdev)->params.l4_rss_base_chain_idx + (pdev)->params.l4_tss_chain_cnt; (tss_idx)++)
1213
/* for multi function mode, when 'rss_base_chain_idx' != 0 */
// In the new VBD design a chain does not equal a client, so
// we must add the client offset:
//((pdev)->params.base_fw_client_id + (val))
1218 #define LM_CHAIN_TO_FW_CLIENT(_pdev, _chain) ((_pdev)->params.base_fw_client_id + (_chain))
1219
1220 // eth configuration.
1221 u32_t keep_vlan_tag;
1222
1223 u16_t eth_align_enable;
1224
1225 // TODO: encapsulate in a connection object
1226 u32_t update_comp_cnt;
1227 u32_t update_suspend_cnt;
1228 u32_t update_toe_comp_cnt;
1229
1230 lm_address_t dmae_copy_scratchpad_phys;
1231
// congestion management parameters
1233 u32_t cmng_enable;
1234 u32_t cmng_rate_shaping_enable;
1235 u32_t cmng_fairness_enable;
1236 // safc
1237 u32_t cmng_safc_rate_thresh;
1238 u32_t cmng_activate_safc;
1239 // fairness
1240 u32_t cmng_fair_port0_rate;
1241 u32_t cmng_eth_weight;
1242 u32_t cmng_toe_weight;
1243 u32_t cmng_rdma_weight;
1244 u32_t cmng_iscsi_weight;
1245 // rate shaping
1246 u32_t cmng_eth_rate;
1247 u32_t cmng_toe_rate;
1248 u32_t cmng_rdma_rate;
1249 u32_t cmng_iscsi_rate;
1250 // Demo will be removed later
1251 u32_t cmng_toe_con_number;
1252 u32_t cmng_rdma_con_number;
1253 u32_t cmng_iscsi_con_number;
1254 // iscsi
1255 u32_t l5sc_max_pending_tasks;
1256
1257 // cls_params
1258 struct elink_params link;
1259
1260 // fw flow control
1261 u32_t l2_fw_flow_ctrl;
1262 u32_t l4_fw_flow_ctrl;
1263
1264 // preemphasis rx/tx configuration
1265 u32_t preemphasis_enable;
1266
1267 u32_t preemphasis_rx_0;
1268 u32_t preemphasis_rx_1;
1269 u32_t preemphasis_rx_2;
1270 u32_t preemphasis_rx_3;
1271
1272 u32_t preemphasis_tx_0;
1273 u32_t preemphasis_tx_1;
1274 u32_t preemphasis_tx_2;
1275 u32_t preemphasis_tx_3;
1276 u32_t l4_rss_enabled_by_os;
1277 u32_t disable_patent_using;
1278 u32_t l4_grq_filling_threshold_divider;
1279 u32_t l4_free_cid_delay_time;
1280 u32_t l4_enable_rss;
1281 u32_t l4_rss_is_possible;
1282 #define L4_RSS_DISABLED 0 /* shmulikr: l4_enable_rss is more then a flag. The various values represent the possible flavors */
1283 #define L4_RSS_DYNAMIC 1 /* Full support including support for indirection table update */
1284 u32_t l4_max_rcv_wnd_size;
1285 /* disable PCIe non-FATAL error reporting */
1286 u32_t disable_pcie_nfr;
1287
1288 u32_t mf_proto_support_flags; /* For multi-function: which protocols are supported */
1289 #define LM_PROTO_SUPPORT_ETHERNET 0x1
1290 #define LM_PROTO_SUPPORT_ISCSI 0x2
1291 #define LM_PROTO_SUPPORT_FCOE 0x4
1292
1293 /* In release this flag will prevent us from crashing in customer site */
1294 u32_t debug_cap_flags;
1295 #if DBG
1296 #define DEFAULT_DEBUG_CAP_FLAGS_VAL 0xffffffff
1297 #else
1298 #define DEFAULT_DEBUG_CAP_FLAGS_VAL 0x0
1299 #endif
1300
1301 #define DEBUG_CAP_FLAGS_STATS_FW 0x1
1302 //#define DEBUG_CAP_FLAGS_XXX 0x2
1303
1304 u32_t l4_limit_isles;
1305 #define L4_LI_NOTIFY 0x0001
1306 #define L4_LI_MAX_GEN_BUFS_IN_ISLE 0x0002
1307 #define L4_LI_MAX_GEN_BUFS_IN_ARCHIPELAGO 0x0004
1308
1309 u32_t l4_max_gen_bufs_in_isle;
1310 u32_t l4_max_gen_bufs_in_archipelago;
1311 u32_t l4_valid_gen_bufs_in_archipelago;
1312 u32_t l4_max_gen_buf_cnt; /* maximum number of generic buffers the system can allocate, duplicated from UM*/
1313
1314 u32_t l4_isles_pool_size;
1315
1316 u32_t i2c_interval_sec;
1317 elink_status_t i2c_elink_status[I2C_SECTION_MAX]; // represents last elink res per section
1318
1319 u8_t l4_num_of_blocks_per_connection;
1320 // PF_FLR
1321 u8_t is_flr;
1322 u8_t __nmb_pad[2];
1323 //LLFC should be moved to vars
1324 dcbx_port_params_t dcbx_port_params;
1325 u32_t lm_dcb_dont_break_bad_oid;
1326
1327 config_lldp_params_t lldp_config_params;
1328 config_dcbx_params_t dcbx_config_params;
1329 u32_t try_not_align_page_multiplied_memory;
1330
1331 u32_t l4_dominance_threshold; /*for firmware debug.*/
1332 u32_t l4_max_dominance_value; /* set to 0 to disable dominant connection, set to 20 (default) to enable */
1333
1334 u32_t l4_data_integrity;
1335 u32_t l4_start_port;
1336 u32_t l4_num_of_ports;
1337 u32_t l4_skip_start_bytes;
1338
1339 u32_t l4_support_pending_sp_req_complete;
1340 u32_t l4_support_upload_req_on_abortive_disconnect;
1341
1342 u32_t grc_timeout_max_ignore ;
1343 u32_t tpa_desc_cnt_per_chain;//Number of RSC pages descriptor required per-queue.
1344 u32_t b_dcb_indicate_event;//DCB indicates event towards upper layer.
1345 u32_t sriov_inc_mac;
1346 /* Virtualization related */
1347 u8_t device_type;
1348 u8_t virtualization_type;
1349 u8_t channel_type;
1350 u8_t pf_acquire_status;
1351
1352 u8_t fw_stats_init_value;
1353 u8_t int_coalesing_mode_disabled_by_ndis;
1354 u8_t mac_spoof_test;
1355
1356 u8_t run_driver_pulse;
1357 #define IS_DRIVER_PULSE_ALWAYS_ALIVE(_pdev) (!(_pdev)->params.run_driver_pulse)
1358 u8_t ___pad;
1359
1360 /* Error Recovery supported only on E2 and above. Can be controlled via registry */
1361 u32_t enable_error_recovery;
1362 #define IS_ERROR_RECOVERY_ENABLED(_pdev) ((_pdev)->params.enable_error_recovery && !CHIP_IS_E1x(_pdev))
1363 u32_t validate_sq_complete;
1364
1365 u32_t e3_cos_modes; // enum lm_cos_modes
1366 u32_t e3_network_cos_mode; // enum lm_network_cos_modes
1367
1368 /* Enables switching between non-enlighted vms under npar configuration.
1369 * vm's that don't have their mac in the tx cam can't be 'switched' between pfs
1370 * this mode actually means that all traffic will be passed on loopback channel if
1371 * there is a pf in promiscuous/accept unmatched (which is set when there are vms)
1372 * this feature hurts performance and therefore can be disabled */
1373 u32_t npar_vm_switching_enable;
1374
1375 u32_t flow_control_reporting_mode;
1376 #define LM_FLOW_CONTROL_REPORTING_MODE_DISABLED 0
1377 #define LM_FLOW_CONTROL_REPORTING_MODE_ENABLED 1
1378
1379 u32_t fw_valid_mask; // 0xeeRRnnMM
1380 u32_t vf_promiscuous_mode_restricted;
1381 u32_t max_chains_per_vf_override;
1382 u32_t record_sp;
1383 #define XSTORM_RECORD_SLOW_PATH 0x01
1384 #define CSTORM_RECORD_SLOW_PATH 0x02
1385 #define TSTORM_RECORD_SLOW_PATH 0x04
1386 #define USTORM_RECORD_SLOW_PATH 0x08
1387 u32_t start_mp_chain;
1388 u32_t debug_sriov;
1389 u32_t debug_sriov_vfs;
1390 u8_t b_inta_mode_prvided_by_os;
1391 } lm_params_t;
1392
1393
1394
1395 /*******************************************************************************
1396 * Device NVM info -- The native strapping does not support the new parts, the
1397 * software needs to reconfigure for them.
1398 ******************************************************************************/
/* NVM (flash) geometry description; used when software reconfigures
 * strapping for parts the native strapping does not support.
 * TODO(review): values still need to be verified against actual parts. */
typedef struct _flash_spec_t
{
    u32_t page_size;   /* size of a single flash page */
    u32_t total_size;  /* total size of the flash device */
} flash_spec_t;
1405
/* A single CAM (MAC address filter) table entry.
 * TODO(review): big-endian handling of cam_addr is still unresolved. */
typedef struct _lm_cam_entry_t
{
    u8_t cam_addr[ETHERNET_ADDRESS_SIZE]; /* MAC address held by this entry */
    u16_t ref_cnt;                        /* number of active references to this address */
} lm_cam_entry_t;
1412
1413
1414 #define MAX_MAC_OFFSET_IN_NIG 16
1415
1416 typedef struct _lm_nig_mirror_entry_t
1417 {
1418 s32_t refcnt; //signed to detect underflow.
1419
1420 //atomic access is not needed because this struct is modified under TOE_LOCK.
1421 #define NIG_ENTRY_INC_REFCNT(_entry) ++(_entry)->refcnt
1422 #define NIG_ENTRY_DEC_REFCNT(_entry) {--(_entry)->refcnt; DbgBreakIf((_entry)->refcnt < 0);}
1423
1424 u8_t addr[ETHERNET_ADDRESS_SIZE]; //MAC address of this entry.
1425 }lm_nig_mirror_entry_t;
1426
/* Driver-side mirror of the MAC entries programmed into the NIG,
 * one slot per NIG MAC offset. */
typedef struct _lm_nig_mirror_t
{
    lm_nig_mirror_entry_t entries[MAX_MAC_OFFSET_IN_NIG]; /* indexed by NIG MAC offset */
}lm_nig_mirror_t;
1431
1432
1433 /*******************************************************************************
1434 * Device info.
1435 ******************************************************************************/
1436
/* multi function specific */
/* Multi-function (MF) configuration of this function as read from the
 * hardware/shmem, plus accessor macros over pdev->hw_info.mf_info. */
typedef struct _lm_hardware_mf_info_t
{
    u32_t func_mf_cfg;  /* raw per-function MF configuration word */
    /* Function counts as NIV-enabled when neither the DISABLED nor the
     * DELETED flag is set in its MF configuration. */
    #define NIV_FUNCTION_ENABLED(_pdev) (GET_FLAGS((_pdev)->hw_info.mf_info.func_mf_cfg, FUNC_MF_CFG_FUNC_DISABLED|FUNC_MF_CFG_FUNC_DELETED)==0)

    u8_t vnics_per_port;    //1, 2 or 4
    u8_t multi_vnics_mode;  /* flag for multi function mode */
    u8_t path_has_ovlan;    /* the multi function mode of the path... */
    u8_t _pad;

    u8_t min_bw[MAX_VNIC_NUM];  /* per-vnic minimum bandwidth */
    u8_t max_bw[MAX_VNIC_NUM];  /* per-vnic maximum bandwidth */

    u16_t ext_id;   //vnic outer vlan or VIF ID
    /* NOTE(review): 4096 itself is outside the 0-4095 VLAN-id range; the
     * inclusive bound suggests it is used as a sentinel - confirm. */
    #define VALID_OVLAN(ovlan) ((ovlan) <= 4096)
    #define INVALID_VIF_ID 0xFFFF
    #define OVLAN(_pdev) ((_pdev)->hw_info.mf_info.ext_id)
    #define VIF_ID(_pdev) ((_pdev)->hw_info.mf_info.ext_id)

    u16_t default_vlan; /* default vlan in NIV mode */
    #define NIV_DEFAULT_VLAN(_pdev) ((_pdev)->hw_info.mf_info.default_vlan)

    u8_t niv_allowed_priorities;
    #define NIV_ALLOWED_PRIORITIES(_pdev) ((_pdev)->hw_info.mf_info.niv_allowed_priorities)

    u8_t niv_default_cos;
    #define NIV_DEFAULT_COS(_pdev) ((_pdev)->hw_info.mf_info.niv_default_cos)

    u8_t niv_mba_enabled;   /* NOTE(review): presumably MBA (boot agent) enable in NIV mode - confirm */
    u8_t _pad1;

    enum mf_cfg_afex_vlan_mode afex_vlan_mode;
    #define AFEX_VLAN_MODE(_pdev) ((_pdev)->hw_info.mf_info.afex_vlan_mode)

    u16_t flags;
    #define MF_INFO_VALID_MAC 0x0001

    u8_t mf_mode; /* Switch-dependent / Switch-Independent */
    u8_t sd_mode; /* sub-mode when mf_mode is switch-dependent (SD_* below) */
    #define SD_REGULAR_MODE 0
    #define SD_UFP_MODE 1
    #define SD_BD_MODE 2
} lm_hardware_mf_info_t;
1481
1482
/* IGU related params for status-blocks */
/* Per-VF slice of the IGU status-block allocation. */
typedef struct _lm_vf_igu_info_t
{
    u8_t igu_base_sb; /* base for all ndsb u + c */
    u8_t igu_sb_cnt;      /* number of IGU status blocks assigned to the VF */
    u8_t igu_test_sb_cnt; /* NOTE(review): status-block count used in IGU test mode - confirm */
    u8_t igu_test_mode;   /* NOTE(review): presumably non-zero when IGU test mode is active - confirm */
} lm_vf_igu_info_t;
1491
/* State of a single IGU block: status flags plus the owning function
 * and the vector currently associated with it. */
typedef struct _lm_igu_block_t
{
    u8_t status;    /* bitmask of the LM_IGU_STATUS_* flags below */
    #define LM_IGU_STATUS_AVAILABLE 0x01
    #define LM_IGU_STATUS_VALID 0x02
    #define LM_IGU_STATUS_BUSY 0x04
    #define LM_IGU_STATUS_PF 0x08

    u8_t vector_number; /* interrupt vector associated with this block */
    u8_t pf_number;     /* owning PF */
    u8_t vf_number;     /* owning VF (meaningful when LM_IGU_STATUS_PF is clear) */
    u32_t block_dump;   /* NOTE(review): raw dump/snapshot of the IGU block - confirm source */
} lm_igu_block_t;
1505
/* Map of every IGU block in the IGU mapping memory. */
typedef struct _lm_igu_map_t
{
    lm_igu_block_t igu_blocks_set[IGU_REG_MAPPING_MEMORY_SIZE]; /* indexed by IGU block number */

} lm_igu_map_t;
1511
/* IGU (Interrupt Generation Unit) information for this function:
 * status-block id bases/counts, per-VF IGU info and the full block map.
 * Accessor macros operate on pdev->hw_info.intr_blk_info.igu_info. */
typedef struct _lm_igu_info_t {
    u8_t igu_base_sb; /* base for all ndsb u + c */
    #define IGU_BASE_NDSB(pdev) ((pdev)->hw_info.intr_blk_info.igu_info.igu_base_sb)
    #define IGU_PF_NDSB(pdev, sb_id) (IGU_BASE_NDSB(pdev) + sb_id)
    u8_t igu_sb_cnt;  /* number of IGU status blocks owned by this function */
    #define LM_IGU_SB_CNT(pdev) ((pdev)->hw_info.intr_blk_info.igu_info.igu_sb_cnt)
    u8_t igu_dsb_id;  /* IGU id of the default status block */
    #define IGU_DSB_ID(pdev) ((pdev)->hw_info.intr_blk_info.igu_info.igu_dsb_id)
    u8_t igu_u_sb_offset; /* NOTE(review): offset of the USTORM ndsb part - confirm */
    #define IGU_U_NDSB_OFFSET(pdev) ((pdev)->hw_info.intr_blk_info.igu_info.igu_u_sb_offset)
    u8_t igu_func_id; /* function id as seen by the IGU */
    #define IGU_FUNC_ID(pdev) ((pdev)->hw_info.intr_blk_info.igu_info.igu_func_id)
    u8_t igu_test_sb_cnt; /* NOTE(review): status-block count used in IGU test mode - confirm */
    lm_vf_igu_info_t vf_igu_info[E2_MAX_NUM_OF_VFS]; /* per-VF IGU allocation */
    u8_t igu_sb[IGU_REG_MAPPING_MEMORY_SIZE]; /* sb_id -> IGU block number (used for VF ndsbs) */
    #define IGU_VF_NDSB(pdev, sb_id) ((pdev)->hw_info.intr_blk_info.igu_info.igu_sb[sb_id])
    lm_igu_map_t igu_map; /* state of every IGU block */
    #define IGU_SB(pdev, sb_id) ((pdev)->hw_info.intr_blk_info.igu_info.igu_map.igu_blocks_set[sb_id])
} lm_igu_info_t;
1531
/* Description of the chip's interrupt block (legacy HC or IGU):
 * type, mode, access method and the pre-computed ack addresses/values.
 * Accessor macros operate on pdev->hw_info.intr_blk_info. */
typedef struct _lm_intr_blk_info_t
{
    u8_t blk_type;  /* which interrupt controller block the chip has */
    #define INTR_BLK_HC 0
    #define INTR_BLK_IGU 1
    #define INTR_BLK_TYPE(_pdev) ((_pdev)->hw_info.intr_blk_info.blk_type)

    u8_t blk_mode;  /* backward-compatible (BC) vs. normal operating mode */
    #define INTR_BLK_MODE_BC 0
    #define INTR_BLK_MODE_NORM 1
    #define INTR_BLK_MODE(_pdev) ((_pdev)->hw_info.intr_blk_info.blk_mode)

    u8_t access_type;   /* how the block is accessed: via GRC or IGU memory (BAR) */
    #define INTR_BLK_ACCESS_GRC 1
    #define INTR_BLK_ACCESS_IGUMEM 0
    #define INTR_BLK_ACCESS(_pdev) ((_pdev)->hw_info.intr_blk_info.access_type)

    u32_t simd_addr_wmask;  /* NOTE(review): SIMD-mode address "with mask" - confirm semantics */
    #define INTR_BLK_SIMD_ADDR_WMASK(_pdev) ((_pdev)->hw_info.intr_blk_info.simd_addr_wmask)

    u32_t simd_addr_womask; /* NOTE(review): SIMD-mode address "without mask" - confirm semantics */
    #define INTR_BLK_SIMD_ADDR_WOMASK(_pdev) ((_pdev)->hw_info.intr_blk_info.simd_addr_womask)

    /* Command-control values for reads; INTR_BLK_CMD_CTRL_INVALID (0) means
     * no command-control write is required before the read. */
    u32_t cmd_ctrl_rd_wmask;
    u32_t cmd_ctrl_rd_womask;
    #define INTR_BLK_CMD_CTRL_INVALID 0
    #define INTR_BLK_REQUIRE_CMD_CTRL(_pdev) ((_pdev)->hw_info.intr_blk_info.cmd_ctrl_rd_wmask != INTR_BLK_CMD_CTRL_INVALID)
    #define INTR_BLK_CMD_CTRL_RD_WMASK(_pdev) ((_pdev)->hw_info.intr_blk_info.cmd_ctrl_rd_wmask)
    #define INTR_BLK_CMD_CTRL_RD_WOMASK(_pdev) ((_pdev)->hw_info.intr_blk_info.cmd_ctrl_rd_womask)

    /* IGU specific data */
    lm_igu_info_t igu_info;

} lm_intr_blk_info_t;
1566
/* VF counts: per-PF from the SR-IOV capability (total_vfs), per-path a
 * fixed 64.  Both collapse to 0 when VF support (VF_INVOLVED) is
 * compiled out. */
#ifdef VF_INVOLVED
#define GET_NUM_VFS_PER_PF(_pdev) ((_pdev)->hw_info.sriov_info.total_vfs)
#define GET_NUM_VFS_PER_PATH(_pdev) (64)
#else
#define GET_NUM_VFS_PER_PF(_pdev) (0)
#define GET_NUM_VFS_PER_PATH(_pdev) (0)
#endif
/* SR-IOV capability data for the PF plus driver-side VF resource
 * partitioning. */
typedef struct _lm_sriov_info_t {
    // #define MAX_VF_BAR 3 Fix it when emulation supports 3 bars
    #define MAX_VF_BAR 2
    u16_t sriov_control;    /* SR-IOV Control register value */
    u16_t total_vfs; /* maximum allowed vfs */
    u16_t num_vfs;          /* number of VFs currently enabled */
    u16_t vf_device_id;     /* PCI device id exposed by the VFs */
    u8_t max_chains_per_vf; /* chain limit per VF */
    u8_t vf_cid_wnd_size;   /* NOTE(review): VF connection-id window size - confirm units */
    u8_t vf_pool_size;
    u8_t pf_nd_pool_size;
    u32_t first_vf_in_pf;   /* index of this PF's first VF */
    u32_t vf_bar_size[MAX_VF_BAR];      /* size of each VF BAR */
    lm_address_t vf_bars[MAX_VF_BAR];   /* base address of each VF BAR */

    u32_t shmem_num_vfs_in_pf;  /* VFs-per-PF value as reported in shmem */
    u8_t b_pf_asymetric_configuration; /* [sic] NOTE(review): presumably set for asymmetric VF distribution between PFs - confirm */

} lm_sriov_info_t;
1593
1594
/* Chip port mode: no port-mode distinction, 2-port, or 4-port. */
typedef enum
{
    LM_CHIP_PORT_MODE_NONE = 0x0, /* port mode not applicable */
    LM_CHIP_PORT_MODE_2 = 0x1,    /* 2-port mode */
    LM_CHIP_PORT_MODE_4 = 0x2     /* 4-port mode */
} lm_chip_port_mode_t ;
1601
1602 typedef struct _lm_hardware_info_t
1603 {
1604 /* PCI info. */
1605 u16_t vid;
1606 u16_t did;
1607 u16_t ssid;
1608 u16_t svid;
1609
1610 u8_t irq;
1611 u8_t int_pin;
1612 u8_t latency_timer;
1613 u8_t cache_line_size;
1614 u8_t rev_id;
1615 u8_t _pad[3];
1616
1617 lm_address_t mem_base[MAX_NUM_BAR];
1618 u32_t bar_size[MAX_NUM_BAR];
1619
1620 lm_address_t mem_base1;
1621 u32_t bar_size1;
1622
1623 /* Device info. */
1624 u8_t mac_addr[8]; /* Hardware MAC address. */
1625 u8_t iscsi_mac_addr[8]; /* Hardware MAC address for iSCSI. */
1626 u8_t fcoe_mac_addr[8]; /* Hardware MAC address for FCoE. */
1627 u8_t fcoe_wwn_port_name[8]; /* Hardware MAC address for FCoE WWPN. */
1628 u8_t fcoe_wwn_node_name[8]; /* Hardware MAC address for FCoE WWNN. */
1629
1630 u32_t shmem_base; /* Firmware share memory base addr. */
1631 u32_t mf_cfg_base; /* MF cfg offset in shmem_base */
1632 u32_t shmem_base2; /* Firmware share memory 2 base addr. */
1633
1634 u32_t chip_id; /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
1635 #define CHIP_NUM_SET(_chip_id,_p) ((_chip_id) = (((_p) & 0xffff) << 16))
1636 #define CHIP_NUM(_p) (((_p)->hw_info.chip_id) & 0xffff0000)
1637 #define CHIP_NUM_5710 0x164e0000
1638 #define CHIP_NUM_5711 0x164f0000
1639 #define CHIP_NUM_5711E 0x16500000
1640 #define CHIP_NUM_5712 0x16620000
1641 #define CHIP_NUM_5712E 0x16630000
1642 #define CHIP_NUM_5713 0x16510000
1643 #define CHIP_NUM_5713E 0x16520000
1644 #define CHIP_NUM_57800 0x168a0000
1645 #define CHIP_NUM_57840_OBSOLETE 0x168d0000
1646 #define CHIP_NUM_57810 0x168e0000
1647 #define CHIP_NUM_57800_MF 0x16a50000
1648 #define CHIP_NUM_57840_MF_OBSOLETE 0x16ae0000
1649 #define CHIP_NUM_57810_MF 0x16ab0000
1650 #define CHIP_NUM_57811 0x163d0000
1651 #define CHIP_NUM_57811_MF 0x163e0000
1652 #define CHIP_NUM_57811_VF 0x163f0000
1653 #define CHIP_NUM_57840_4_10 0x16a10000
1654 #define CHIP_NUM_57840_2_20 0x16a20000
1655 #define CHIP_NUM_57840_MF 0x16a40000
1656 #define CHIP_NUM_57840_VF 0x16ad0000
1657
1658
1659 #define CHIP_IS_E1_PARAM(_chip_num) ((_chip_num) == CHIP_NUM_5710)
1660 #define CHIP_IS_E1(_p) (CHIP_IS_E1_PARAM(CHIP_NUM(_p)))
1661
1662 #define CHIP_IS_E1H_PARAM(_chip_num) (((_chip_num) == CHIP_NUM_5711) || ((_chip_num) == CHIP_NUM_5711E))
1663 #define CHIP_IS_E1H(_p) (CHIP_IS_E1H_PARAM(CHIP_NUM(_p)))
1664
1665 #define CHIP_IS_E1x_PARAM(_chip_num) (CHIP_IS_E1_PARAM(((_chip_num))) || CHIP_IS_E1H_PARAM(((_chip_num))))
1666 #define CHIP_IS_E1x(_p) (CHIP_IS_E1x_PARAM(CHIP_NUM(_p)))
1667
1668 #define CHIP_IS_E2_PARAM(_chip_num) (((_chip_num) == CHIP_NUM_5712) || ((_chip_num) == CHIP_NUM_5713) || \
1669 ((_chip_num) == CHIP_NUM_5712E) || ((_chip_num) == CHIP_NUM_5713E))
1670
1671 #define CHIP_IS_E2(_p) (CHIP_IS_E2_PARAM(CHIP_NUM(_p)))
1672
1673 #define CHIP_IS_E3_PARAM(_chip_num) ((_chip_num == CHIP_NUM_57800) || (_chip_num == CHIP_NUM_57810) || \
1674 (_chip_num == CHIP_NUM_57840_4_10) || (_chip_num == CHIP_NUM_57840_2_20) || (_chip_num == CHIP_NUM_57800_MF) || \
1675 (_chip_num == CHIP_NUM_57810_MF) || (_chip_num == CHIP_NUM_57840_MF) || \
1676 (_chip_num == CHIP_NUM_57840_OBSOLETE) || (_chip_num == CHIP_NUM_57840_MF_OBSOLETE) || \
1677 (_chip_num == CHIP_NUM_57811) || (_chip_num == CHIP_NUM_57811_MF) || \
1678 (_chip_num == CHIP_NUM_57811_VF))
1679
1680 #define CHIP_IS_E3(_p) (CHIP_IS_E3_PARAM(CHIP_NUM(_p)))
1681
1682 #define CHIP_IS_E2E3(_p) (CHIP_IS_E2(_p) || (CHIP_IS_E3(_p)))
1683
1684
1685 #define CHIP_IS_E2E3A0(_p) (CHIP_IS_E2(_p) || (CHIP_IS_E3A0(_p)))
1686
1687 #define CHIP_REV_SHIFT 12
1688 #define CHIP_REV_MASK (0xF<<CHIP_REV_SHIFT)
1689 #define CHIP_REV(_p) (((_p)->hw_info.chip_id) & CHIP_REV_MASK)
1690 #define CHIP_REV_Ax (0x0<<CHIP_REV_SHIFT)
1691 #define CHIP_REV_Bx (0x1<<CHIP_REV_SHIFT)
1692 #define CHIP_REV_Cx (0x2<<CHIP_REV_SHIFT)
1693 #define CHIP_REV_SIM_IS_FPGA (0x1<<CHIP_REV_SHIFT)
1694
1695 #define CHIP_REV_ASIC_MAX (0x5<<CHIP_REV_SHIFT)
1696 #define CHIP_REV_IS_SLOW(_p) (CHIP_REV(_p) > CHIP_REV_ASIC_MAX)
1697 #define CHIP_REV_IS_FPGA(_p) (CHIP_REV_IS_SLOW(_p) && (CHIP_REV(_p) & CHIP_REV_SIM_IS_FPGA))
1698 #define CHIP_REV_IS_EMUL(_p) (CHIP_REV_IS_SLOW(_p) && !(CHIP_REV(_p)& CHIP_REV_SIM_IS_FPGA)) //if it's simulated, and not FPGA, it's EMUL.
1699 #define CHIP_REV_IS_ASIC(_p) (!CHIP_REV_IS_SLOW(_p))
1700 #define CHIP_REV_SIM(_p) ((0xF - (CHIP_REV(_p)>>CHIP_REV_SHIFT))>>1)<<CHIP_REV_SHIFT //For EMUL: Ax=0xE, Bx=0xC, Cx=0xA. For FPGA: Ax=0xF, Bx=0xD, Cx=0xB.
1701
1702 #define CHIP_IS_E3B0(_p) (CHIP_IS_E3(_p)&&( (CHIP_REV(_p) == CHIP_REV_Bx)||(CHIP_REV_SIM(_p) == CHIP_REV_Bx)))
1703
1704 #define CHIP_IS_E3A0(_p) (CHIP_IS_E3(_p)&&( (CHIP_REV(_p) == CHIP_REV_Ax)||(CHIP_REV_SIM(_p) == CHIP_REV_Ax)))
1705
1706 #define CHIP_METAL(_p) (((_p)->hw_info.chip_id) & 0x00000ff0)
1707 #define CHIP_BONDING(_p) (((_p)->hw_info.chip_id) & 0x0000000f)
1708
1709 #define CHIP_ID(_p) (((_p)->hw_info.chip_id) & 0xfffffff0)
1710 #define CHIP_ID_5706_A0 0x57060000
1711 #define CHIP_ID_5706_A1 0x57060010
1712 #define CHIP_ID_5706_FPGA 0x5706f000
1713 #define CHIP_ID_5706_IKOS 0x5706e000
1714 #define CHIP_ID_5708_A0 0x57080000
1715 #define CHIP_ID_5708_B0 0x57081000
1716 #define CHIP_ID_5708_FPGA 0x5708f000
1717 #define CHIP_ID_5708_IKOS 0x5708e000
1718 #define CHIP_ID_5710_EMUL 0X164ed000
1719 #define CHIP_ID_5710_A0 0x164e0000
1720 #define CHIP_ID_5710_A1 0x164e0010
1721
1722 #define IS_CHIP_REV_A0(_p) (CHIP_ID(_p) == CHIP_ID_5710_A0)
1723 #define IS_CHIP_REV_A1(_p) (CHIP_ID(_p) == CHIP_ID_5710_A1)
1724
1725 #define CHIP_BOND_ID(_p) (((_p)->hw_info.chip_id) & 0xf)
1726
1727 /* A serdes chip will have the first bit of the bond id set. */
1728 #define CHIP_BOND_ID_SERDES_BIT 0x01
1729
1730 /* This bit defines if OTP process was done on chip */
1731 #define CHIP_OPT_MISC_DO_BIT 0x02
1732
1733 u8_t silent_chip_rev; /* silent chip rev:
1734 For 57711 0-A0, 1-A1 2-A2
1735 For 57710 0-A1 1-A2 */
1736 #define SILENT_CHIP_REV(_p) ((_p)->hw_info.silent_chip_rev)
1737 #define SILENT_REV_E1_A0 0xFF
1738 #define SILENT_REV_E1_A1 0x00
1739 #define SILENT_REV_E1_A2 0x01
1740
1741 #define SILENT_REV_E1H_A0 0x00
1742 #define SILENT_REV_E1H_A1 0x01
1743 #define SILENT_REV_E1H_A2 0x02
1744
1745 #define SILENT_REV_E3_B0 0x00
1746 #define SILENT_REV_E3_B1 0x01
1747
1748 /* In E2, the chip can be configured in 2-port mode (i.e. 1 port per path) or 4-port mode (i.e. 2 port per path)
1749 * the driver needs this information since it needs to configure several blocks accordingly */
1750 lm_chip_port_mode_t chip_port_mode;
1751 #define CHIP_PORT_MODE(_p) ((_p)->hw_info.chip_port_mode)
1752
1753 /* HW config from nvram. */
1754 u32_t nvm_hw_config;
1755 u32_t nvm_hw_config2;
1756
1757 /* board sn*/
1758 u8_t board_num[16];
1759
1760 /* Flash info. */
1761 flash_spec_t flash_spec;
1762
1763 /* Needed for pxp config should be done by the MCP*/
1764 u8_t max_payload_size;
1765 u8_t max_read_req_size;
1766
1767 u8_t mcp_detected;
1768
1769 // external phy fw version
1770 u8_t sz_ext_phy_fw_ver[16];// NULL terminated string populated only after a call to get ext phy fw version
1771
1772 // link config
1773 u32_t link_config[ELINK_LINK_CONFIG_SIZE];
1774
1775 // initial dual phy priority config
1776 u32_t multi_phy_config;
1777
1778 u32_t phy_force_kr_enabler; // read from shmem
1779
1780 u8_t no_10g_kr; // TRUE if the KR enforcer is active on this session
1781
1782 // pcie info
1783 u8_t pcie_lane_width;
1784 #define PCIE_WIDTH_1 1
1785 #define PCIE_WIDTH_2 2
1786 #define PCIE_WIDTH_4 4
1787 #define PCIE_WIDTH_8 8
1788 #define PCIE_WIDTH_16 16
1789 #define PCIE_WIDTH_32 32
1790
1791 u8_t pcie_lane_speed;
1792 #define PCIE_LANE_SPEED_2_5G 1
1793 #define PCIE_LANE_SPEED_5G 2
1794 #define PCIE_LANE_SPEED_8G 3
1795
1796 // In E2 chip rev A0 the PCI LANE speed are different (ERR 8)
1797 #define PCIE_LANE_SPEED_2_5G_E2_A0 0
1798 #define PCIE_LANE_SPEED_5G_E2_A0 1
1799
1800 // We need to save PF0's MPS before going to D3 and restore it when
1801 // returning to D0 to compensate for a Windows bug. See CQ57271.
1802 u32_t saved_pf0_pcie_mps;
1803 #define INVALID_MPS 0xEEEEEEEE //this will never be a valid value since MPS occupies only bits 5-7.
1804
1805 // mba features
1806 u8_t mba_features;
1807
1808 // port_feature_config bits
1809 u32_t port_feature_config;
1810
1811 // mba vlan enable bits
1812 u32_t mba_vlan_cfg ;
1813
1814 // TRUE if dcc is active
1815 u8_t is_dcc_active;
1816
1817 // bc rev
1818 u32_t bc_rev;
    // the driver should not load with a bootcode (BC) version lower than the following
1820 #define BC_REV_SUPPORTED 0x040200 //4.2.0
1821 #define BC_REV_IE_DCB_SUPPORTED 0x070200 //7.2.0
1822 #define BC_REV_IE_SRIOV_SUPPORTED 0x070400 //7.4.0
1823
1824 #define LM_GET_BC_REV_MAJOR(_p) (_p->hw_info.bc_rev>>8)
1825
1826 /* HW Licensing of Max #connections for each protocol, takes into account bar-size, licensing is 'per-port' and not 'per functions' */
1827 u32_t max_port_toe_conn;
1828 u32_t max_port_rdma_conn;
1829 u32_t max_port_iscsi_conn;
1830 u32_t max_port_fcoe_conn;
1831 u32_t max_port_conns; /* the maximum number of connections support for this port, used to configure PORT registers */
1832 u32_t max_common_conns; /* the maximum number of connections support for ALL ports, used to configure COMMON registers, only used by PORT-MASTER */
1833
1834 lm_hardware_mf_info_t mf_info;
1835
1836 /* Information on interrupt block are we working with - HC or IGU (E1/E1H or E2 and above) */
1837 lm_intr_blk_info_t intr_blk_info;
1838
1839 lm_sriov_info_t sriov_info;
1840
1841 u8_t flr_capable;
1842 u8_t pci_cfg_trust;
1843 #define PCI_CFG_NOT_TESTED_FOR_TRUST 0x00
1844 #define PCI_CFG_NOT_TRUSTED 0x01
1845 #define PCI_CFG_TRUSTED 0x02
1846
1847 u8_t pda_pm_reset_in_progress;
1848 #define SET_PDA_PM_RESET_IN_PROGRESS(_pdev) ((_pdev)->hw_info.pda_pm_reset_in_progress = TRUE)
1849 #define CLEAR_PDA_PM_RESET_IN_PROGRESS(_pdev) ((_pdev)->hw_info.pda_pm_reset_in_progress = FALSE)
1850 #define IS_PDA_PM_RESET_IN_PROGRESS(_pdev) ((_pdev)->hw_info.pda_pm_reset_in_progress)
1851
1852 u8_t ___pad;
1853 u32_t grc_didvid;
1854 u32_t pci_cfg_didvid;
1855 u32_t pcie_caps_offset;
1856 u32_t pcie_dev_capabilities;
1857 } lm_hardware_info_t;
1858
1859
1860
// Slow-path (SP) status block wrapper: host-side view of the per-function
// slow-path status block plus the data the chip needs to locate it.
typedef struct _gen_sp_status_block_t
{
    /* Physical (DMA) address of the status block. */
    lm_address_t blk_phy_address;
    /* Host-coalescing configuration data for the SP status block. */
    struct hc_sp_status_block_data sb_data;
    /* Host virtual pointer to the chip-written SP status block (volatile: DMA-updated). */
    volatile struct host_sp_status_block * hc_sp_status_blk;
} gen_sp_status_block_t;
1869
// Fast-path status block wrapper: encapsulates both the default status block
// and the RSS status blocks. Unions cover the E1x / E2+ chip layouts and the
// VF case (where only a physical address / producer index is kept).
typedef struct _gen_status_block_t
{
    union {
        struct hc_status_block_data_e1x e1x_sb_data; /* E1/E1H layout */
        struct hc_status_block_data_e2 e2_sb_data;   /* E2 and later layout */
        lm_address_t vf_sb_phy_address;              /* VF: physical address only */
    } hc_status_block_data;

    union {
        /* Pointer to default status block (E1x layout). */
        volatile struct host_hc_status_block_e1x * e1x_sb;
        /* Pointer to RSS status block (E2+ layout). */
        volatile struct host_hc_status_block_e2 * e2_sb;
        /* VF: pointer to the raw producer index. */
        volatile u16_t * vf_sb;
    } host_hc_status_block;

    /* Physical address of the status block is held in hc_status_block_data above. */
} gen_status_block_t;
1889
// Attention-group wiring: the set of AEU signal registers routed to one
// dynamic attention group. E1x chips have 4 attention registers, E2+ have 5.
typedef struct _route_cfg_sig_output
{
#define NUM_ATTN_REGS_E1X 4
#define NUM_ATTN_REGS_E2  5
#define MAX_ATTN_REGS     5

    /* One dword of attention signal bits per attention register. */
    u32_t attn_sig_dword[MAX_ATTN_REGS];

} route_cfg_sig_output;
1900
/* Interrupt / host-coalescing configuration info. */
#define HC_TIMEOUT_RESOLUTION_IN_US 4 /* HW timeout granularity in microseconds */
typedef struct _lm_int_coalesing_info {
    /* Dynamic host-coalescing configuration for the ETH protocol. */
    struct dynamic_hc_config eth_dynamic_hc_cfg;

    u32_t hc_usec_c_sb[HC_CSTORM_SB_NUM_INDICES]; /* static host coalescing period for cstorm sb indexes */
    u32_t hc_usec_u_sb[HC_USTORM_SB_NUM_INDICES]; /* static host coalescing period for ustorm sb indexes */
} lm_int_coalesing_info;
1909
1910 /*******************************************************************************
1911 * Device state variables.
1912 ******************************************************************************/
// Driver increase/decrease/set macros for the L2/L4 mirrored driver statistics.
// layer_type selects the per-protocol stats struct (drv_eth / drv_toe / ...);
// the ATOMIC variants use mm_atomic_* and are safe without an external lock.
// NOTE: the expansions end with ';', so call sites must NOT add their own.
#define LM_COMMON_DRV_STATS_ATOMIC_INC(_pdev, layer_type, field_name) \
    mm_atomic_inc(&((_pdev->vars.stats.stats_mirror.stats_drv.drv_##layer_type.field_name)));
#define LM_COMMON_DRV_STATS_ATOMIC_DEC(_pdev, layer_type, field_name) \
    mm_atomic_dec(&((_pdev->vars.stats.stats_mirror.stats_drv.drv_##layer_type.field_name)));
#define LM_COMMON_DRV_STATS_INC(_pdev, layer_type, field_name) \
    ((_pdev->vars.stats.stats_mirror.stats_drv.drv_##layer_type.field_name)++);
#define LM_COMMON_DRV_STATS_DEC(_pdev, layer_type, field_name) \
    ((_pdev->vars.stats.stats_mirror.stats_drv.drv_##layer_type.field_name)--);

#define LM_COMMON_DRV_STATS_ATOMIC_INC_TOE(_pdev, field_name) LM_COMMON_DRV_STATS_ATOMIC_INC(_pdev, toe, field_name)
#define LM_COMMON_DRV_STATS_ATOMIC_DEC_TOE(_pdev, field_name) LM_COMMON_DRV_STATS_ATOMIC_DEC(_pdev, toe, field_name)

#define LM_COMMON_DRV_STATS_INC_ETH(_pdev, field_name) LM_COMMON_DRV_STATS_INC(_pdev, eth, field_name)
#define LM_COMMON_DRV_STATS_DEC_ETH(_pdev, field_name) LM_COMMON_DRV_STATS_DEC(_pdev, eth, field_name)

/* Currently driver ETH stats that use ATOMIC_INC are not required for NDIS or BACS,
 * therefore they compile to nothing in release builds. */
#if DBG
#define LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(_pdev, field_name) LM_COMMON_DRV_STATS_ATOMIC_INC(_pdev, eth, field_name)
#define LM_COMMON_DRV_STATS_ATOMIC_DEC_ETH(_pdev, field_name) LM_COMMON_DRV_STATS_ATOMIC_DEC(_pdev, eth, field_name)
#else
#define LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(_pdev, field_name)
#define LM_COMMON_DRV_STATS_ATOMIC_DEC_ETH(_pdev, field_name)
#endif /* DBG */
1938
/* Wrapper structure for a VF-to-PF message. It contains the message itself;
 * a void pointer to the actual message lets the VBD compile without the
 * VF/PF interface definitions.
 */
typedef struct _lm_vf_pf_message_t
{
    u32_t state;                     /* channel/message state machine value */
    u32_t message_size;              /* size in bytes of the message buffer */
    void * message_virt_addr;        /* host virtual address of the message */
    lm_address_t message_phys_addr;  /* DMA address of the message */
    void * bulletin_virt_addr;       /* host virtual address of the PF bulletin board */
    lm_address_t bulletin_phys_addr; /* DMA address of the PF bulletin board */
    volatile u16 * done;             /* completion flag written asynchronously (note: declared u16, not u16_t — presumably equivalent; verify) */
    void * cookie;                   /* opaque caller context returned on completion */
    u16_t do_not_arm_trigger;
    u16_t old_version;               /* non-zero when talking to an older channel version */
#ifdef VF_INVOLVED
    /* Header of a response that failed validation, kept for debugging. */
    union
    {
        struct pf_vf_msg_hdr sw_channel_hdr;
        struct pfvf_tlv hw_channel_hdr;
    } bad_response;
#endif
}
lm_vf_pf_message_t;
1963
1964
////////////////////// Start DCBX define /////////////////////////////////////////////////////
// ETS is considered disabled when zero traffic classes are configured.
#define LM_DCBX_IE_IS_ETS_DISABLE(_num_traffic_classes)     (0 == (_num_traffic_classes))
// Bytes needed to hold _entries classification elements.
#define LM_DCBX_IE_CLASSIF_ENTRIES_TO_ALOC_SIZE(_entries)   ((_entries) * sizeof(dcb_classif_elem_t))

// Chip-side classification table sizes: regular + extension.
#define LM_DCBX_IE_CHIP_CLASSIF_NUM_ENTRIES_LOCAL       (DCBX_MAX_APP_LOCAL)
#define LM_DCBX_IE_CHIP_CLASSIF_NUM_ENTRIES_REMOTE      (DCBX_MAX_APP_PROTOCOL)
// 2 = 1 for default + 1 for ISCSI
#define LM_DCBX_IE_CLASSIF_NUM_ENTRIES_LOCAL            (LM_DCBX_IE_CHIP_CLASSIF_NUM_ENTRIES_LOCAL + 2)
#define LM_DCBX_IE_CLASSIF_NUM_ENTRIES_REMOTE           (LM_DCBX_IE_CHIP_CLASSIF_NUM_ENTRIES_REMOTE)

#define LM_DCBX_IE_CLASSIF_TABLE_ALOC_SIZE_LOCAL        (LM_DCBX_IE_CLASSIF_ENTRIES_TO_ALOC_SIZE(LM_DCBX_IE_CLASSIF_NUM_ENTRIES_LOCAL))
#define LM_DCBX_IE_CLASSIF_TABLE_ALOC_SIZE_REMOTE       (LM_DCBX_IE_CLASSIF_ENTRIES_TO_ALOC_SIZE(LM_DCBX_IE_CLASSIF_NUM_ENTRIES_REMOTE))
// For debugging purposes only; this size is arbitrary.
#define LM_DCBX_IE_CLASSIF_TABLE_ALOC_SIZE_DBG          (LM_DCBX_IE_CLASSIF_ENTRIES_TO_ALOC_SIZE(16))

#define LM_DCBX_MAX_TRAFFIC_TYPES                       (8)
#define LM_DCBX_ILLEGAL_PRI                             (MAX_PFC_PRIORITIES)

// DCB is only supported by E2 and later chips.
#define IS_DCB_SUPPORTED_BY_CHIP(_pdev) (!(CHIP_IS_E1x(_pdev)))

#define IS_DCB_SUPPORTED(_pdev) (((_pdev)->params.dcbx_config_params.dcb_enable) && \
                                 IS_DCB_SUPPORTED_BY_CHIP(_pdev))

#define IS_DCB_ENABLED(_pdev)   ((_pdev)->dcbx_info.is_enabled)

// Offset of this port's DCBX admin MIB inside the MF configuration area:
// skip the per-port LLDP parameter blocks, then index by port.
#define LM_DCBX_ADMIN_MIB_OFFSET(_pdev ,_mf_cfg_offfset)    (_mf_cfg_offfset + \
                                                             PORT_MAX * sizeof(lldp_params_t) + \
                                                             PORT_ID(_pdev) * sizeof(lldp_admin_mib_t))
1994
1995
// Priority Flow Control (PFC) frame counters reported via DCBX statistics.
typedef struct _lm_dcbx_stat
{
    u64_t pfc_frames_sent;     /* PFC pause frames transmitted */
    u64_t pfc_frames_received; /* PFC pause frames received */
}lm_dcbx_stat;
2001
// State of the DCBX function start/stop ramrod flow
// (POSTED = ramrod sent to chip, COMPLETED = completion received).
typedef enum
{
    FUNCTION_DCBX_START_POSTED    = 0,
    FUNCTION_DCBX_START_COMPLETED = 1,
    FUNCTION_DCBX_STOP_POSTED     = 2,
    FUNCTION_DCBX_STOP_COMPLETED  = 3,
} lm_dcbx_function_state_t;
2009
// Command selector for lm_dcbx_config_drv_flags(): set a bit, clear a bit,
// or reset the whole driver-flags word in shared memory.
typedef enum
{
    lm_dcbx_drv_flags_set_bit       = 0,
    lm_dcbx_drv_flags_reset_bit     = 1,
    lm_dcbx_drv_flags_reset_flags   = 2,
}lm_dcbx_drv_flags_cmd_t;
2016
// Which DCBX flavor the current ETS configuration came from (CEE vs IEEE).
typedef enum {
    lm_dcbx_ets_config_state_cee,
    lm_dcbx_ets_config_state_ieee,
}lm_dcbx_ets_config_state;
2021
// Validity/enable state of the IEEE ETS configuration parameters.
typedef enum {
    lm_dcbx_ets_ieee_config_not_valid, /* no valid IEEE ETS config present */
    lm_dcbx_ets_ieee_config_en,        /* IEEE ETS config present and enabled */
    lm_dcbx_ets_ieee_config_di,        /* IEEE ETS config present but disabled */
}lm_dcbx_ie_ets_ieee_config_state;
2027
// State for indicating DCBX events to the upper layer (OS QoS).
typedef struct _lm_dcbx_indicate_event_t
{
    // This design supports only one client bounded.
    u8_t lm_cli_idx;

    // Bitmap describing who currently owns the operational DCB configuration.
    u32_t dcb_current_oper_state_bitmap;
#define DCB_STATE_CONFIGURED_BY_OS_QOS              (1 << 0)
#define DCB_STATE_CONFIGURED_BY_OS_QOS_TO_WILLING   (1 << 1)

    // CEE vs IEEE source of the current ETS configuration.
    lm_dcbx_ets_config_state ets_config_state;

    // IEEE ETS parameters as last given by the OS (valid only if the flag is set).
    u8_t is_ets_ieee_params_os_valid;
    dcb_ets_tsa_param_t ets_ieee_params_os;

    // Configuration parameters.
    lm_dcbx_ie_ets_ieee_config_state ets_ieee_config_state;
    dcb_ets_tsa_param_t ets_ieee_params_config;

    // CEE doesn't support CONDITION_TCP_PORT.
    // If an ISCSI entry with CONDITION_TCP_PORT will be accepted (and enforced), but kept locally in the driver
    // and not passed to MCP. This entry will be used when determining iSCSI priority:
    // If the operational configuration from MCP contains an entry with 'TCP or UDP port' = 3260 use that entry,
    // Else if OS configuration contained an entry with 'TCP port' = 3260 use that entry,
    // Else use the default configuration.
    u16_t iscsi_tcp_pri;
    // Only for debug use.
    dcb_indicate_event_params_t dcb_params_given_dbg;

    dcb_indicate_event_params_t local_params;  /* locally advertised DCB parameters */
    dcb_indicate_event_params_t remote_params; /* peer-advertised DCB parameters */
}lm_dcbx_indicate_event_t;
2059
// Per-device DCBX state: ramrod bookkeeping, PFC firmware configuration,
// error flags, negotiated results and upper-layer indication state.
typedef struct _lm_dcbx_info_t
{
    dcbx_update_task_state dcbx_update_lpme_task_state;
    // The DCBX ramrod state.
    volatile u32_t dcbx_ramrod_state;
    // Flow control configuration (PFC) buffer handed to firmware.
    void *pfc_fw_cfg_virt;
    lm_address_t pfc_fw_cfg_phys;

    // Accumulated DCBX error bits.
    u32_t dcbx_error;
#define DCBX_ERROR_NO_ERROR         (0)
#define DCBX_ERROR_MCP_CMD_FAILED   (1 << 0)
#define DCBX_ERROR_SET_TIMER        (1 << 1)
#define DCBX_ERROR_REGISTER_LPME    (1 << 2)
#define DCBX_ERROR_WRONG_PORT       (1 << 3)
#define DCBX_ERROR_RESOURCE         (1 << 4)

    // This parameter can only be changed in is_dcbx_neg_received and is a one-shot parameter.
    u8_t is_dcbx_neg_received;
    u8_t is_enabled;
    u8_t _pad[2];
    lm_dcbx_indicate_event_t indicate_event;

    // Saved copy of the original admin MIB.
    // Should not be used in MF; this is only a patch until MCP will know how to return to default.
    lldp_admin_mib_t admin_mib_org;

    // Indicate event to upper layer.
    volatile u32_t is_indicate_event_en;
    /*
    1. This array will serve in order to find the correct COS in Fast path in O (1).(Instead of O(num_of_opr_cos))
    2. All entries must always contain a valid COS value that will be between "num_of_opr_cos -1".
    3. This array will be filled in slow path.
    4. Any Array change or access will not require any lock.
    */
    u8_t pri_to_cos[LM_DCBX_MAX_TRAFFIC_TYPES];

    // For debugging: number of failed LPME registrations.
    u32_t lpme_failed_cnt;

    /******************************start Debugging code not to submit**************************************/
    lldp_local_mib_t local_mib_last;
    /******************************end Debugging code not to submit****************************************/
}lm_dcbx_info_t;
2104
2105 /**
2106 * @description
2107 * Set in a shared port memory place if DCBX completion was
2108 * received. Function is needed for PMF migration in order to
2109 * synchronize the new PMF that DCBX results has ended.
2110 * @param pdev
2111 * @param is_completion_recv
2112 */
2113 void
2114 lm_dcbx_config_drv_flags(
2115 IN struct _lm_device_t *pdev,
2116 IN const lm_dcbx_drv_flags_cmd_t drv_flags_cmd,
2117 IN const u32_t bit_drv_flags);
2118
2119 ////////////////////// End DCBX define /////////////////////////////////////////////////////
2120
// Port-management-function role of this function: not the PMF, the original
// PMF, or a PMF that took over via migration.
typedef enum
{
    NOT_PMF       = 0,
    PMF_ORIGINAL  = 1,
    PMF_MIGRATION = 2,
}pmf_type_t;
2127
// Which MAC hardware block is currently in use for this port.
typedef enum
{
    MAC_TYPE_NONE = 0,
    MAC_TYPE_EMAC = 1,
    MAC_TYPE_BMAC = 2,
    MAC_TYPE_UMAC = 3,
    MAC_TYPE_XMAC = 4,
    MAC_TYPE_MAX  = 5
} mac_type_t;
2137
// Last link state reported to the OS; layout based on bdrv_if.h "l2_ioc_link_settings_t".
typedef struct _lm_reported_link_params_t
{
    lm_status_t       link;      /* link up/down status */
    lm_medium_t       medium;    /* medium type + speed/duplex */
    lm_flow_control_t flow_ctrl; /* negotiated flow control */
    u8_t cable_is_attached;      /* TRUE if a cable is plugged in */
    u8_t eee_policy;             /* Energy Efficient Ethernet policy */

} lm_reported_link_params_t;
2148
// Run-time (volatile) device state: mapped BARs, status blocks and their local
// acknowledge copies, attention wiring, link state, statistics mirrors, page
// tables for chip memories, and assorted debug counters.
typedef struct _lm_variables_t
{
#if defined(__SunOS)
    ddi_acc_handle_t reg_handle[MAX_NUM_BAR]; /* Holds the DMA registration handle */
#endif
    volatile void * mapped_bar_addr[MAX_NUM_BAR]; /* Holds the mapped BAR address.*/

    gen_sp_status_block_t gen_sp_status_block;
    gen_status_block_t status_blocks_arr[MAX_NDSB]; /* at index 16 the default status block lies */
    // Host Coalescing acknowledge numbers - this is the local copy to compare against the status index of each of the status blocks.
    u16_t u_hc_ack[MAX_NDSB]; //local copy of non-default USTORM consumer
    u16_t c_hc_ack[MAX_NDSB]; //local copy of non-default CSTORM consumer
    u16_t hc_def_ack;         //local copy of SP consumer
    u16_t _hc_pad;
    u16_t attn_def_ack;       //local copy of attention bits consumer
    u16_t attn_state;         //states for all 16 attn lines (per func) 0=ready for assertion 1=ready for deassertion
    route_cfg_sig_output attn_groups_output[MAX_DYNAMIC_ATTN_GRPS]; //dynamic attn groups wiring definitions
    u32_t attn_sig_af_inv_reg_addr[MAX_ATTN_REGS]; // addresses of the AEU_AFTER_INVERT registers
    u8_t num_attn_sig_regs;
    u32_t aeu_mask_attn_func; //mask the relevant AEU line from config register
    lm_status_t link_status;

    lm_int_coalesing_info int_coal;

    u8_t eth_init_state; /* deprecated. used only to mark if eth is already init or not. */
#define PORT_STATE_CLOSE   0
#define PORT_STATE_OPEN    1
#define PORT_STATE_CLOSING 2

    lm_medium_t       medium;
    lm_flow_control_t flow_control;
    lm_eee_policy_t   eee_policy;
    u32_t             autogreeen; // autogrEEEn status

    // lm statistics
    lm_stats_all_t stats ;

    // TRUE if read/write DMAE operations can be done (DMAE block + PXP initialized)
#define DMAE_READY(pdev) (pdev->vars.b_is_dmae_ready)
    u8_t b_is_dmae_ready ;

    // mirrored NIG MAC table - used in MF/SI mode to support VMChimney.
    lm_nig_mirror_t nig_mirror;

    //TODO MCP interface ready
    u16_t fw_wr_seq;     // MCP command sequence number
    u8_t  fw_timed_out;  // set when an MCP command timed out
    u32_t fw_port_stats_ptr; // pointer to mcp scratch pad for statistics saving (host_func_stats_t)
    u32_t fw_func_stats_ptr; // pointer to Managment statistics (host_port_stats_t)
    // NOTE(review): the host_func_stats_t/host_port_stats_t names in the two
    // comments above look swapped relative to the field names - verify.


    /* Serdes autonegotiation fallback.  For a serdes medium,
     * if we cannot get link via autonegotiation, we'll force
     * the speed to get link. */
    //TODO after specs of serdes
    mac_type_t mac_type;

    /* Target phy address used with mread and mwrite */
    u8_t phy_addr;

    /* This flag is set if the cable is attached when there
     * is no link.  The upper module could check this flag to
     * determine if there is a need to wait for link. */
    u8_t cable_is_attached;

    /* Write sequence for driver pulse. */
    u16_t drv_pulse_wr_seq;

    // The page tables for the various chip memories (virtual + physical per page).
    u32_t searcher_t1_num_pages;
    void **searcher_t1_virt_addr_table;
    lm_address_t *searcher_t1_phys_addr_table;

    u32_t searcher_t2_num_pages;
    void **searcher_t2_virt_addr_table;
    lm_address_t *searcher_t2_phys_addr_table;

    u32_t timers_linear_num_pages;
    void **timers_linear_virt_addr_table;
    lm_address_t *timers_linear_phys_addr_table;

    u32_t qm_queues_num_pages;
    void** qm_queues_virt_addr_table;
    lm_address_t *qm_queues_phys_addr_table;

    u32_t context_cdu_num_pages;
    void **context_cdu_virt_addr_table;
    lm_address_t *context_cdu_phys_addr_table;

    u32_t elt_num_pages; // must be less then 16
    void * elt_virt_addr_table[NUM_OF_ELT_PAGES];
    lm_address_t elt_phys_addr_table[NUM_OF_ELT_PAGES];

    // Zeroed buffer to use in WB zero memory
    u32_t zero_buffer[DMAE_MAX_RW_SIZE_STATIC] ;

    u32_t clk_factor ; // clock factor to multiple timeouts in non ASIC (EMUL/FPGA) cases (value is 1 for ASIC)

    u32_t inst_id; // represents Bus & Device numbers
                   // 0x0000ff00 - Bus
                   // 0x000000ff - Device
#ifndef INST_ID_TO_BUS_NUM
#define INST_ID_TO_BUS_NUM(_inst_id) (((_inst_id) >> 8)& 0xFF)
#define MAX_PCI_BUS_NUM              (256)
#endif // INST_ID_TO_BUS_NUM

    /* Emulation/FPGA doorbell full workaround is enabled.
     * The only impact on ASIC is an extra "if" command to check chip rev */
#ifndef USER_LINUX
#define EMULATION_DOORBELL_FULL_WORKAROUND
#endif // USER_LINUX

#if defined(EMULATION_DOORBELL_FULL_WORKAROUND)
    u32_t doorbells_cnt;                 // doorbells since last threshold check
#define DOORBELL_CHECK_FREQUENCY 500

#define ALLOWED_DOORBELLS_HIGH_WM 1000
#define ALLOWED_DOORBELLS_LOW_WM 700
    u8_t doorbells_blocked;              // set while doorbells are throttled
    u32_t doorbells_high_wm_reached;     /* for statistics */
#endif // EMULATION_DOORBELL_FULL_WORKAROUND
    u8_t enable_intr; /* When this flag is set process interrupt */
    u8_t dbg_intr_in_wrong_state;
    u8_t dbg_intr_in_disabled;
    u8_t dbg_intr_zero_status;

    // Is this device in charge of link support (port management function)?
    pmf_type_t is_pmf;

#define IS_PMF(_pdev)           (( PMF_ORIGINAL == (_pdev)->vars.is_pmf) || ( PMF_MIGRATION == (_pdev)->vars.is_pmf))
#define IS_PMF_ORIGINAL(_pdev)  ( PMF_ORIGINAL == (_pdev)->vars.is_pmf)
#define IS_PMF_MIGRATION(_pdev) ( PMF_MIGRATION == (_pdev)->vars.is_pmf)

    // The load-response we received from MCP when loading... needed for elink calls and convenient
    // for debugging.
    lm_loader_response load_code;

    u8_t b_in_init_reset_flow;  // set while an init/reset flow is running
    u8_t _pad[3];
    lm_reported_link_params_t last_reported_link_params;

    // cls_vars
    struct elink_vars link;     // elink (PHY/link) state
    u32_t link_chng_cnt;        // incremented on every link change
#define LM_LINK_CHNG_CNT(pdev) ((pdev)->vars.link_chng_cnt)

    u32_t shared_l5_mac_client_id;
    u64_t last_recycling_timestamp;

    /* sriov-related */
    //u8_t  num_vfs_enabled; /* number of vfs that were enabled, need this for disabling them */
    u8_t  is_igu_test_mode;
    u8_t  is_pf_restricts_lamac;
    u8_t  is_pf_rejected_lamac;
    u8_t  is_pf_provides_mac;
    u16_t pf_link_speed;
    u16_t __pad;
    u32_t vf_pf_channel_lock;
    lm_vf_pf_message_t vf_pf_mess;

    /* debug counters for various HW-block interrupt causes */
    u32_t pxp_hw_interrupts_cnt;
    u32_t dq_int_status_cnt;
    u32_t dq_int_status_discard_cnt;
    u32_t dq_int_status_vf_val_err_cnt;
    u32_t dq_vf_type_val_err_fid;
    u32_t dq_vf_type_val_err_mcid;
    u32_t cfc_int_status_cnt;
} lm_variables_t;
2317
// Ethernet TX producer values written to the chip (doorbell data).
typedef struct _eth_tx_prod_t
{
    u32_t packets_prod; /* packet producer index */
    u16_t bds_prod;     /* BD producer index */
    u16_t reserved;     /* padding/alignment */
}eth_tx_prod_t;
2324
2325 /*******************************************************************************
2326 * global chip info
2327 ******************************************************************************/
2328
// Per-chip (shared across all functions on one PCI bus) global state used to
// coordinate reset flows and GRC-timeout accounting between functions.
typedef struct _lm_chip_global_t
{
    u8_t flags;
#define LM_CHIP_GLOBAL_FLAG_RESET_IN_PROGRESS 0x1 // The flag indicates whether
                                                  // a chip reset is in progress (original comment truncated - verify exact semantics)

#define LM_CHIP_GLOBAL_FLAG_NIG_RESET_CALLED  0x2 // the flag will be set when lm_reset_path() will do nig reset
                                                  // the flag will be reset after grc timeout occurred and the cause is NIG access OR after another "no nig" reset

    u32_t cnt_grc_timeout_ignored;                  // GRC timeouts that were deliberately ignored
    u32_t grc_timeout_val[E1H_FUNC_MAX*2];          // we give each function 2 grc timeouts before we ASSERT...
    u8_t  func_en[E1H_FUNC_MAX];                    /* Used for WOL: each function needs to mark itself: whether it should be enabled when reseting nig with wol enabled */
} lm_chip_global_t;
2341
2342 extern lm_chip_global_t g_lm_chip_global[MAX_PCI_BUS_NUM];
2343
2344 /*******************************************************************************
2345 * bd chain
2346 ******************************************************************************/
2347
2348
2349 /*******************************************************************************
2350 * Transmit info.
2351 ******************************************************************************/
2352
// Per-queue transmit chain state: the BD chain, producer bookkeeping,
// coalesce-buffer accounting and the active packet descriptor list.
typedef struct _lm_tx_chain_t
{
    u32_t idx;                   /* chain (queue) index */

    lm_bd_chain_t bd_chain;      /* the TX buffer-descriptor ring */


    eth_tx_prod_t eth_tx_prods;  /* producer values written to the doorbell */


    u32_t prod_bseq;             /* running byte-sequence producer value */
    u16_t pkt_idx;               /* next packet index to assign */
    u16_t volatile *hw_con_idx_ptr; /* chip-updated consumer index (volatile: DMA-written) */

    u16_t coalesce_buf_cnt;      /* number of coalesce buffers available */
    u16_t _reserved;

    /* debug stats */
    u32_t coalesce_buf_used;
    u32_t lso_split_used;

    lm_hc_sb_info_t hc_sb_info;  /* which status block / index tracks this chain */

    s_list_t active_descq;       /* packets posted to the chip, awaiting completion */
    s_list_t coalesce_buf_list;  /* free coalesce buffers */
} lm_tx_chain_t;
2379
2380
// All transmit chains of the device plus forwarding (catchup) statistics.
typedef struct _lm_tx_info_t
{
    lm_tx_chain_t chain[3*MAX_HW_CHAINS + MAX_NON_RSS_CHAINS];
#define LM_TXQ(_pdev, _idx) (_pdev)->tx_info.chain[_idx]

    u32_t max_chain_idx;     /* highest valid chain index */
    u32_t catchup_chain_idx; /* chain used for forwarded (catchup) traffic */

    /* counters for packets sent through the forwarding channel */
    u32_t forward_packets;
    u32_t lso_forward_packets;

} lm_tx_info_t;
2393
2394 /*******************************************************************************
2395 * Receive info.
2396 ******************************************************************************/
// State shared by all receive chain flavors (regular RX and TPA).
typedef struct _lm_rx_chain_common_t
{
    u16_t    bd_prod_without_next; // bd prod without next BD taken into account
    u32_t    prod_bseq;            // running byte-sequence producer value
    u32_t    desc_cnt;             // number of descriptors currently posted
    s_list_t free_descq;           // free packet descriptors awaiting posting
} lm_rx_chain_common_t;
2404
2405 /*******************************************************/
2406 /*******************************************************************************
2407 * TPA start info.
2408 ******************************************************************************/
// Maximum simultaneous TPA (RSC/LRO) aggregations, across chip families.
#define LM_TPA_MAX_AGGS                 (max(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,ETH_MAX_AGGREGATION_QUEUES_E1))
// Maximum SGE pages per aggregation.
#define LM_TPA_MAX_AGG_SIZE             (8)
#define LM_TPA_MIN_DESC                 (LM_TPA_MAX_AGGS * LM_TPA_MAX_AGG_SIZE * 2) // TODO_RSC fine tuning Minimum TPA must be 64 for mask_array.
#define LM_TPA_BD_ELEN_SIZE             (sizeof(struct eth_rx_sge))

#define LM_TPA_PAGE_BITS                (LM_PAGE_BITS) /* 4K page. */
#define LM_TPA_PAGE_SIZE                (1 << LM_TPA_PAGE_BITS)

// Ramrod defines: SGE pause thresholds passed to firmware.
#define LM_TPA_SGE_PAUSE_THR_LOW        (150)
#define LM_TPA_SGE_PAUSE_THR_HIGH       (250)
// Per-chain TPA debug counters. (Tag name "cahin" is a historical typo kept
// for API compatibility.)
typedef struct _lm_tpa_cahin_dbg_params
{
    u64_t pck_received;         /* packets received from the chip */
    u64_t pck_received_ind;     /* packets indicated up to the OS */
    u64_t pck_ret_from_chip;    /* packet descriptors returned by the chip */
    u64_t pck_ret_abort_active; /* returns aborted while an aggregation was active */
    u64_t pck_ret_abort;        /* returns aborted */
}lm_tpa_cahin_dbg_params;
// TPA (RSC) chain state machine.
typedef enum
{
    lm_tpa_state_disable        = 0,    // VBD changes to the state only under RX lock.
                                        // In this state VBD won't accept RSC packet descriptors.
    lm_tpa_state_wait_packets   = 1,    // VBD is waiting to receive number of "tpa_info::tpa_desc_cnt_per_chain"
                                        // multiplied by "RSS queues" RSC L2 packets. After first enable.
    lm_tpa_state_enable         = 2,    // RSC is enabled.
    lm_tpa_state_invalid        = 3,
}lm_tpa_state_t;
2437
// TPA SGE (scatter-gather element) chain: the SGE BD ring, a parallel array of
// active packet descriptors, and a bit mask tracking which SGEs are in use.
typedef struct _lm_tpa_sge_chain_t
{
    lm_bd_chain_t bd_chain;  // The RSC BD chain.

#define LM_TPA_CHAIN_BD(_pdev, _idx)                        ((_pdev)->rx_info.rxq_chain[_idx].tpa_chain.sge_chain.bd_chain)
#define LM_TPA_CHAIN_BD_NUM_ELEM(_pdev, _idx)               ((_pdev)->rx_info.rxq_chain[_idx].tpa_chain.sge_chain.size)
#define LM_TPA_CHAIN_BD_MASK(_pdev, _idx)                   (LM_TPA_CHAIN_BD_NUM_ELEM(_pdev,_idx) - 1)

    lm_packet_t** active_descq_array; // Array of pointers for OOO quick access of packet descriptors.

#define LM_TPA_ACTIVE_DESCQ_ARRAY_ELEM(_pdev,_idx)          (LM_TPA_CHAIN_BD_NUM_ELEM(_pdev,_idx))
#define LM_TPA_ACTIVE_ENTRY_BOUNDARIES_VERIFY(_pdev,_idx,_entry)    DbgBreakIf((LM_TPA_ACTIVE_DESCQ_ARRAY_ELEM(_pdev,_idx) <= (_entry)))
#define LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(_pdev,_idx,_x)      ((_x) & LM_TPA_CHAIN_BD_MASK(_pdev,_idx))

    u64_t* mask_array;  // Will have exactly a bit for each entry in the tpa_chain::sge_chain::active_descq_array.
                        // Each bit represents if the RSC BD is free or used. 1 is used. 0 is free.

    /* Number of u64 elements in SGE mask array */
#define LM_TPA_MASK_LEN(_pdev,_idx)                         ((LM_TPA_CHAIN_BD_NUM_ELEM(_pdev,_idx)) / \
                                                             BIT_VEC64_ELEM_SZ)
#define LM_TPA_MASK_MASK(_pdev, _idx)                       (LM_TPA_MASK_LEN(_pdev, _idx) - 1)
#define LM_TPA_MASK_NEXT_ELEM(_pdev, _idx, el)              (((el) + 1) & LM_TPA_MASK_MASK(_pdev, _idx))


#define LM_TPA_BD_ENTRY_TO_MASK_ENTRY(_pdev,_idx,_x)        (LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(_pdev,_idx,_x) >> BIT_VEC64_ELEM_SHIFT)

#define LM_TPA_MASK_SET_ACTIVE_BIT(_pdev,_idx,_active_entry)    LM_TPA_ACTIVE_ENTRY_BOUNDARIES_VERIFY(_pdev,_idx,_active_entry); \
                                                                BIT_VEC64_SET_BIT((&LM_SGE_TPA_CHAIN(_pdev,_idx))->mask_array,_active_entry)

#define LM_TPA_MASK_CLEAR_ACTIVE_BIT(_pdev,_idx,_active_entry)  DbgBreakIf(0 == LM_TPA_MASK_TEST_ACTIVE_BIT(_pdev,_idx,_active_entry)); \
                                                                LM_TPA_ACTIVE_ENTRY_BOUNDARIES_VERIFY(_pdev,_idx,_active_entry); \
                                                                BIT_VEC64_CLEAR_BIT((&LM_SGE_TPA_CHAIN(_pdev,_idx))->mask_array,_active_entry)

#define LM_TPA_MASK_TEST_ACTIVE_BIT(_pdev,_idx,_active_entry)   (BIT_VEC64_TEST_BIT((&LM_SGE_TPA_CHAIN(_pdev,_idx))->mask_array,_active_entry))

    u16_t size;         // Limitation: number of SGE must be a multiple of 64 and a power of 2.
                        // This is derived from the implementation that we will check in resolution of 64 for optimization.
                        // sge_chain::size should be larger than tpa_desc_cnt_per_chain.

    u32_t last_max_con; // The highest SGE consumer.
}lm_tpa_sge_chain_t;
2479
/* TPA (RSC) aggregation-start tracking entry: holds the first packet
 * descriptor of an open coalescing until the aggregation stops. */
typedef struct _lm_tpa_start_coales_bd_t
{
    lm_packet_t*    packet;         // Represents an open coalescing, and saves the first packet descriptor.
    u8_t            is_entry_used;  // The entry state for debugging.
}lm_tpa_start_coales_bd_t;
2485
/* Per-RX-queue TPA (RSC) chain: open aggregations, enable state, the SGE
 * chain and the buffer used for the TPA-update ramrod. */
typedef struct _lm_tpa_chain_t
{
    lm_rx_chain_common_t        common;
    lm_tpa_start_coales_bd_t    start_coales_bd[LM_TPA_MAX_AGGS];   // Each entry represents an open coalescing,
                                                                    // and saves the first packet descriptor.
    // All the states are supposed to be synchronized; we keep them per chain and not in TPA info for reason of lock.
    // The lock in lw_recv_packets is taken per chain.
    // The RSC state. The state is initialized to tpa_state_disable.
    lm_tpa_state_t              state;
    lm_tpa_sge_chain_t          sge_chain;

    struct tpa_update_ramrod_data*  ramrod_data_virt;   // virtual address of the TPA-update ramrod data
    lm_address_t                    ramrod_data_phys;   // physical address of the TPA-update ramrod data

    // Debug information
    lm_tpa_cahin_dbg_params     dbg_params;             // NOTE(review): type name typo ("cahin") lives elsewhere; kept as-is.
}lm_tpa_chain_t;
2503
/* Device-wide TPA (RSC) info: TPA-update ramrod buffer, ramrod bookkeeping
 * and the requested/current IPv4/IPv6 enable flags. */
typedef struct _lm_tpa_info_t
{
    struct tpa_update_ramrod_data*  ramrod_data_virt;   // virtual address of the TPA-update ramrod data
    lm_address_t                    ramrod_data_phys;   // physical address of the TPA-update ramrod data

    volatile void * update_cookie;      // NOTE(review): presumably the cookie of the pending update request - confirm with callers
    volatile u32_t  ramrod_recv_cnt;    // Number of ramrods received. Decrement by using Interlockeddecrement.
    volatile u32_t  state;              // TPA_STATE_* value below
    #define TPA_STATE_NONE          0
    #define TPA_STATE_RAMROD_SENT   1

    u8_t ipvx_enabled_required;         // requested combination of TPA_IPV4/IPV6 flags
    u8_t ipvx_enabled_current;          // currently applied combination of TPA_IPV4/IPV6 flags
    #define TPA_IPVX_DISABLED (0)
    #define TPA_IPV4_ENABLED  (1<<0)
    #define TPA_IPV6_ENABLED  (1<<1)
}lm_tpa_info_t;
2521
2522
2523 /*******************************************************************************
2524 * RSC end info.
2525 ******************************************************************************/
/* Index of the sub-chains that make up one RX queue: the BD chain and the
 * optional SGE chain (SGE is valid only when lah_size != 0, see
 * LM_RXQ_IS_CHAIN_SGE_VALID). */
typedef enum
{
    LM_RXQ_CHAIN_IDX_BD  = 0,   // buffer-descriptor chain (always valid)
    LM_RXQ_CHAIN_IDX_SGE = 1,   // scatter-gather-element chain (optional)
    LM_RXQ_CHAIN_IDX_MAX = 2,
} lm_rxq_chain_idx_t ;
2532
2533
/* One RX queue: its BD/SGE chains, per-queue TPA state, byte accounting and
 * status-block hookup. */
typedef struct _lm_rx_chain_t
{
    lm_rx_chain_common_t common;
    u32_t                idx;                               // index of this RX queue
    lm_bd_chain_t        chain_arr[LM_RXQ_CHAIN_IDX_MAX];   // indexed by lm_rxq_chain_idx_t
    lm_tpa_chain_t       tpa_chain;
    u32_t                lah_size;                          // if 0 - only LM_RXQ_CHAIN_IDX_BD chain is valid
    u32_t                ret_bytes;
    u32_t                ret_bytes_last_fw_update;
    u16_t volatile       *hw_con_idx_ptr;                   // TODO - remove - check non NDIS clients

    lm_hc_sb_info_t      hc_sb_info;

    s_list_t             active_descq;
} lm_rx_chain_t;
2549
2550 /*******************************************************************************
2551 * send queue info.
2552 ******************************************************************************/
2553
/* Slow-path (command) queue chain. */
typedef struct _lm_sq_chain_t
{
    /* This is a contiguous memory block of params.l2_sq_bd_page_cnt pages
     * used for posting slow-path elements (NOTE(review): the original
     * comment said "rx completion", which appears to be a copy/paste from
     * the RCQ chain). The BD chain is arranged as a circular
     * chain where the last BD entry of a page points to the next page,
     * and the last BD entry of the last page points to the first. */
    struct slow_path_element *sq_chain_virt;
    lm_address_t bd_chain_phy;          // physical address of the chain

    u16_t prod_idx;                     // producer index
    u16_t con_idx;                      // consumer index

    struct slow_path_element *prod_bd;  // BD at the current producer position
    struct slow_path_element *last_bd;  // last BD of the chain
    u16_t bd_left;                      // number of BDs still available

} lm_sq_chain_t;
2571
2572
2573 /**
2574 * Event Queue Structure. Used for the main event-queue, and
2575 * also event queues used by iscsi + fcoe
2576 */
/* Event queue chain (main EQ and the iSCSI/FCoE event queues). */
typedef struct _lm_eq_chain_t
{
    lm_bd_chain_t bd_chain;             // the event queue BD chain
    u16_t volatile *hw_con_idx_ptr;     // where HW reports its consumer index
    u16_t iro_prod_offset;              /* The producer offset inside internal RAM */
    lm_hc_sb_info_t hc_sb_info;         // host-coalescing status-block info

} lm_eq_chain_t;
2585
2586
2587 /* the rcq chain now holds the real HSI eth_rx_cqe */
/* RX completion queue chain; holds the real HSI eth_rx_cqe entries. */
typedef struct _lm_rcq_chain_t
{
    u32_t idx;                          //this is the symmetric index of the corresponding Rx

    lm_bd_chain_t bd_chain;

    u32_t prod_bseq;                    // producer byte-sequence
    u16_t volatile *hw_con_idx_ptr;     // where HW reports its consumer index
    u16_t iro_prod_offset;              /* The producer offset inside internal RAM */

    lm_hc_sb_info_t hc_sb_info;

} lm_rcq_chain_t;
2601
/* All RX-side state: the RX queues, their completion queues, device-wide TPA
 * info and approximate-match multicast configuration. The accessor macros
 * below are the canonical way to reach a specific chain. */
typedef struct _lm_rx_info_t
{
    lm_rx_chain_t  rxq_chain[MAX_HW_CHAINS + MAX_NON_RSS_CHAINS];
    lm_rcq_chain_t rcq_chain[MAX_HW_CHAINS + MAX_NON_RSS_CHAINS];
    #define LM_RXQ(_pdev, _idx)                      (_pdev)->rx_info.rxq_chain[_idx]
    #define LM_RXQ_COMMON(_pdev, _idx)               ((_pdev)->rx_info.rxq_chain[_idx].common)
    #define LM_RXQ_CHAIN(_pdev, _idx, _rxq_chain_idx) (_pdev)->rx_info.rxq_chain[_idx].chain_arr[_rxq_chain_idx]
    #define LM_RXQ_CHAIN_BD(_pdev, _idx)             LM_RXQ_CHAIN(_pdev, _idx, LM_RXQ_CHAIN_IDX_BD )
    #define LM_RXQ_CHAIN_SGE(_pdev, _idx)            LM_RXQ_CHAIN(_pdev, _idx, LM_RXQ_CHAIN_IDX_SGE )
    #define LM_RXQ_IS_CHAIN_SGE_VALID(_pdev, _idx)   (0 != (_pdev)->rx_info.rxq_chain[_idx].lah_size)
    #define LM_RXQ_SGE_PTR_IF_VALID(_pdev, _idx)     LM_RXQ_IS_CHAIN_SGE_VALID(_pdev, _idx) ? &LM_RXQ_CHAIN_SGE(_pdev, _idx ) : NULL

    #define LM_RCQ(_pdev, _idx)                      (_pdev)->rx_info.rcq_chain[_idx]


    #define LM_TPA(_pdev, _idx)                      ((_pdev)->rx_info.rxq_chain[_idx].tpa_chain)
    #define LM_TPA_COMMON(_pdev, _idx)               ((_pdev)->rx_info.rxq_chain[_idx].tpa_chain.common)
    #define LM_SGE_TPA_CHAIN(_pdev, _idx)            ((_pdev)->rx_info.rxq_chain[_idx].tpa_chain.sge_chain)
    lm_tpa_info_t tpa_info;
    #define LM_TPA_INFO(_pdev)                       ((_pdev)->rx_info.tpa_info)
    struct tstorm_eth_approximate_match_multicast_filtering appr_mc;

} lm_rx_info_t;
2625
2626 #define MAX_RAMRODS_OUTSTANDING 2
2627
/* A pending slow-path request (set/query information) queued toward the SQ. */
typedef struct _lm_request_sp
{
    u8_t req_type;                      // REQ_* value below
    #define REQ_SET_INFORMATION   0x1
    #define REQ_QUERY_INFORMATION 0x2

    u32_t ioc;                          //IOCTL number of the request
    u8_t ok_to_indicate;                //should the request be indicated up to NDIS or not
    void *clnt_blk;                     //L2/L4 client block
    u8_t ramrod_priority;               //ramrod priority (this priority is for the 'common sq' and not for the 'per CID one outstanding' mechanism)
    struct sq_pending_command sp_list_command;
} lm_request_sp;
2640
/* Ramrod data for client initialization: either a full client-init or a
 * TX-queue-only init (the two share one physical buffer). */
typedef union _client_init_data_t{
    struct client_init_ramrod_data   init_data;
    struct tx_queue_init_ramrod_data tx_queue;
} client_init_data_t;
2645
/* Tracks an in-flight client-update ramrod: its DMA buffer and progress
 * state (LM_CLI_UPDATE_*). */
typedef struct _lm_client_info_update
{
    struct client_update_ramrod_data *data_virt;    // virtual address of the ramrod data
    lm_address_t                      data_phys;    // physical address of the ramrod data
    volatile u32_t state;                           // LM_CLI_UPDATE_* value below
    #define LM_CLI_UPDATE_NOT_USED  0
    #define LM_CLI_UPDATE_USED      1
    #define LM_CLI_UPDATE_RECV      2
}lm_client_info_update;
2655
/* Per-client (per L2 ethernet client) state: init ramrod buffer, pending
 * update, classification objects and RX-mode bookkeeping. */
typedef struct _lm_client_info_t
{
    client_init_data_t * client_init_data_virt;     // virtual address of the client-init ramrod data
    lm_address_t         client_init_data_phys;     // physical address of the client-init ramrod data

    lm_client_info_update update;                   // in-flight client-update ramrod state

    /* Classification objects used in ecore-sp-verbs */
    struct ecore_vlan_mac_obj mac_obj;
    struct ecore_vlan_mac_obj mac_vlan_obj;
    struct ecore_vlan_mac_obj vlan_obj; /* 9/21/11 MichalS :used only for default, but placed here as a preparation for
                                         * future enhancement to support per client if needed */
    u16_t current_set_vlan;

    void * volatile set_mac_cookie;                 // cookie of a pending set-MAC operation
    volatile u32_t  sp_mac_state;                   // slow-path state of the set-MAC operation

    /* RX_MODE related */
    void * volatile set_rx_mode_cookie;             // cookie of a pending RX-mode change
    volatile unsigned long sp_rxmode_state;         // slow-path state of the RX-mode change

    u32_t last_set_rx_mask;                         // last RX filter mask that was applied
    u8_t  b_any_vlan_on;
    u8_t  b_vlan_only_in_process;
} lm_client_info_t ;
2681
2682 /*************** SlowPath Queue Information: should be modified under SQ_LOCK ************/
/* Completion callback type for slow-path queue commands; registered per
 * connection type in lm_sq_info_t::sq_comp_cb. */
typedef void(*lm_sq_comp_cb_t)(struct _lm_device_t *pdev, struct sq_pending_command *pending);
2684
/* Posting state of the slow-path queue. */
typedef enum {
    SQ_STATE_NORMAL  = 0,   /* slowpath commands are posted directly to HW */
    SQ_STATE_PENDING = 1,   /* In this state slowpath will be posted but not to HW.
                             * completed by vbd work-item (Error Recovery) */
    SQ_STATE_BLOCKED = 2    /* no posting at all */
} lm_sq_state_t;
2691
/* Slow-path queue bookkeeping: the chain itself, pending lists per priority
 * and per-connection-type completion callbacks. Modify under SQ_LOCK. */
typedef struct _lm_sq_info_t
{
    lm_sq_chain_t sq_chain;
    u8_t num_pending_normal;        // count of entries on pending_normal
    u8_t num_pending_high;          // count of entries on pending_high

    d_list_t pending_normal;        // normal-priority commands not yet posted
    d_list_t pending_high;          // high-priority commands not yet posted

    /* This list contains the elements that have been posted to the SQ
     * but not completed by FW yet. Maximum list size is MAX_NUM_SPE anyway */
    d_list_t pending_complete;

    lm_sq_state_t   sq_state;       // see lm_sq_state_t
    lm_sq_comp_cb_t sq_comp_cb[MAX_CONNECTION_TYPE];    // completion callback per connection type
    u8_t sq_comp_scheduled;

} lm_sq_info_t;
2710
/* Progress of the FUNCTION_START / FUNCTION_STOP ramrods. */
typedef enum {
    FUNCTION_START_POSTED    = 0,
    FUNCTION_START_COMPLETED = 1,
    FUNCTION_STOP_POSTED     = 2,
    FUNCTION_STOP_COMPLETED  = 3
} lm_function_state_t;
2717
/* Main event-queue info plus the function start/stop ramrod state. */
typedef struct _lm_eq_info_t
{
    lm_eq_chain_t eq_chain;

    volatile u32_t function_state;  // lm_function_state_t value

} lm_eq_info_t;
2725
2726 /* for now */
2727 //TODO : need to change according to hsi enum
2728 #define MAX_PROTO (FCOE_CONNECTION_TYPE + 1)
2729 #if 0
2730 #define LM_PROTO_NIC 0
2731 #define LM_PROTO_TOE 1
2732 #endif //0
2733
2734 /*******************************************************************************
2735 * cid resources
2736 ******************************************************************************/
2737
/* Per-CID (connection ID) resources: slow-path request manager, per-protocol
 * cookies, mapped doorbell BAR and the connection state machine. */
typedef struct _lm_cid_resc_t
{
    lm_sp_req_manager_t sp_req_mgr;
    void                *cookies[MAX_PROTO];    // one opaque client cookie per protocol
    u8_t                cid_pending;
#if defined(__SunOS)
    ddi_acc_handle_t    reg_handle;             /* Holds the DMA registration handle */
#endif
    volatile void       *mapped_cid_bar_addr;   /* Holds the mapped BAR address.*/

    volatile u32_t      con_state;              // LM_CON_STATE_* value below
    #define LM_CON_STATE_CLOSE      0
    #define LM_CON_STATE_OPEN_SENT  1
    #define LM_CON_STATE_OPEN       2
    #define LM_CON_STATE_HALT_SENT  3
    #define LM_CON_STATE_HALT       4
    #define LM_CON_STATE_TERMINATE  5

} lm_cid_resc_t;
2757
/* One entry of the per-function context array: the CID resources plus
 * free-list links and searcher-hash metadata. */
struct lm_context_cookie{
    lm_cid_resc_t cid_resc;
    u32_t next;             // next entry in the free/pending list
    u32_t prev;             /* for enabling extraction */
    u8_t  invalid;          // LM_CONTEXT_VALID / LM_CONTEXT_INVALID_* value
    u8_t  ip_type;          /* for searcher mirror hash management */
    u8_t  cfc_delete_cnt;   // number of cfc-delete retries so far
    u8_t  _pad;
    u32_t h_val;            /* for searcher mirror hash management */
};
2768 #define LM_MAX_VALID_CFC_DELETIONS 3
2769
2770 #define LM_CONTEXT_VALID 0
2771 #define LM_CONTEXT_INVALID_WAIT 1
2772 #define LM_CONTEXT_INVALID_DELETE 2
2773
2774 /* The size of the context is currently 1K... this can change in the future*/
2775 #define LM_CONTEXT_SIZE 1024
2776
2777 /* structures to support searcher hash table entries */
/* One bucket of the searcher mirror hash: per-IP-version counters. */
typedef struct _lm_searcher_hash_entry {
    u8_t num_ipv4;      // number of IPv4 entries hashed to this bucket
    u8_t num_ipv6;      // number of IPv6 entries hashed to this bucket
    u8_t depth_ipv4;
} lm_searcher_hash_entry_t;
2783
/* State for mirroring the HW searcher hash in SW: the key (also expanded to
 * individual bits), scratch input bits and the bucket table. */
typedef struct _lm_searcher_hash_info {
    #define SEARCHER_KEY_LEN 40
    u8_t searcher_key[SEARCHER_KEY_LEN];            // the raw searcher key
    u8_t searcher_key_bits[SEARCHER_KEY_LEN*8];     // key expanded to one byte per bit

    /* length in bytes of IPV6 "4 tuple" */
    #define MAX_SEARCHER_IN_STR 36
    u8_t searcher_in_str_bits[MAX_SEARCHER_IN_STR*8];   // input tuple expanded to bits

    lm_searcher_hash_entry_t *searcher_table;       // hash bucket table
    u32_t num_tuples;           /* for debug */
    u8_t  hash_depth_reached;   /* for debug */
    u8_t  num_hash_bits;
} lm_searcher_hash_info_t;
2798
2799 /* per-function context data */
/* per-function context data */
typedef struct _lm_context_info {
    struct lm_context_cookie * array;       // the CID context array
    /* spinlock_t lock; lock was moved to the UM */
    u32_t proto_start[MAX_PROTO];           // first CID of each protocol's range
    u32_t proto_end[MAX_PROTO];             // last CID of each protocol's range
    u32_t proto_ffree[MAX_PROTO];           // head of each protocol's free list
    u32_t proto_pending[MAX_PROTO];         /* list of cids that are pending for cfc-delete */

    /* field added for searcher mirror hash management.
     * it is part of the context info because this hash management
     * is done as part of cid allocation/de-allocating */
    lm_searcher_hash_info_t searcher_hash;
} lm_context_info_t;
2813
2814 //#endif /* 0 */
2815
2816 /*******************************************************************************
2817 * Include the l4 header file.
2818 ******************************************************************************/
2819 #include "lm_l4st.h"
2820 #include "lm_l4if.h"
2821
2822 #include "lm_l5st.h"
2823 #include "lm_l5if.h"
2824
2825 /* lm device offload info that is common to all offloaded protocols */
/* lm device offload info that is common to all offloaded protocols */
typedef struct _lm_offload_info_t
{
    struct _lm_device_t *pdev;          // back-pointer to the owning device

    l4_ofld_params_t l4_params;

    /* Per stack offload state info. Each index correspond to a stack. */
    #define STATE_BLOCK_IDX0            0
    #define STATE_BLOCK_TOE             STATE_BLOCK_IDX0
    #define STATE_BLOCK_IDX1            1
    #define STATE_BLOCK_IDX2            2
    #define STATE_BLOCK_ISCSI           STATE_BLOCK_IDX2
    #define STATE_BLOCK_IDX3            3
    #define STATE_BLOCK_RDMA            STATE_BLOCK_IDX3
    #define STATE_BLOCK_IDX4            4
    #define STATE_BLOCK_FCOE            STATE_BLOCK_IDX4
    #define STATE_BLOCK_CNT             5
    lm_state_block_t *state_blks[STATE_BLOCK_CNT];  // one state block per offloaded stack
} lm_offload_info_t;
2845
/* Callback type registered per protocol (lm_device_t::cid_recycled_callbacks)
 * and invoked with the client cookie when a CID is recycled. */
typedef void(*lm_cid_recycled_cb_t)(struct _lm_device_t *pdev, void *cookie, s32_t cid);
2847
/* Internal-RAM offset descriptor: base address plus up to three multipliers
 * and the element size (layout dictated by the FW IRO array). */
struct iro {
    u32_t base;
    u16_t m1;
    u16_t m2;
    u16_t m3;
    u16_t size;
} ;
2855
2856 /* ecore info. Variables that are accessed from the common init code need using the defines below */
/* ecore info. Variables that are accessed from the common init code need using the defines below.
 * Holds the gunzip scratch buffer, the FW init ops/data blobs, the zipped
 * per-STORM PRAM images and the IRO array. */
typedef struct _ecore_info_t
{
    void *       gunzip_buf;    /* used for unzipping data */
    u32_t        gunzip_outlen; /* number of bytes produced by the last unzip */
    lm_address_t gunzip_phys;   /* physical address of buffer */
    #define FW_BUF_SIZE                 0x8000
    #define GUNZIP_BUF(_pdev)           (_pdev)->ecore_info.gunzip_buf
    #define GUNZIP_OUTLEN(_pdev)        (_pdev)->ecore_info.gunzip_outlen
    #define GUNZIP_PHYS(_pdev)          (_pdev)->ecore_info.gunzip_phys
    const struct raw_op *init_ops;
    /* Init blocks offsets inside init_ops */
    const u16_t *init_ops_offsets;
    /* Data blob - has 32 bit granularity */
    const u32_t *init_data;
    u32_t        init_mode_flags;
    #define INIT_MODE_FLAGS(_pdev)      (_pdev)->ecore_info.init_mode_flags
    /* Zipped PRAM blobs - raw data */
    const u8_t *tsem_int_table_data;
    const u8_t *tsem_pram_data;
    const u8_t *usem_int_table_data;
    const u8_t *usem_pram_data;
    const u8_t *xsem_int_table_data;
    const u8_t *xsem_pram_data;
    const u8_t *csem_int_table_data;
    const u8_t *csem_pram_data;
    #define INIT_OPS(_pdev)                 (_pdev)->ecore_info.init_ops
    #define INIT_DATA(_pdev)                (_pdev)->ecore_info.init_data
    #define INIT_OPS_OFFSETS(_pdev)         (_pdev)->ecore_info.init_ops_offsets
    #define INIT_TSEM_PRAM_DATA(_pdev)      (_pdev)->ecore_info.tsem_pram_data
    #define INIT_XSEM_PRAM_DATA(_pdev)      (_pdev)->ecore_info.xsem_pram_data
    #define INIT_USEM_PRAM_DATA(_pdev)      (_pdev)->ecore_info.usem_pram_data
    #define INIT_CSEM_PRAM_DATA(_pdev)      (_pdev)->ecore_info.csem_pram_data
    #define INIT_TSEM_INT_TABLE_DATA(_pdev) (_pdev)->ecore_info.tsem_int_table_data
    #define INIT_XSEM_INT_TABLE_DATA(_pdev) (_pdev)->ecore_info.xsem_int_table_data
    #define INIT_USEM_INT_TABLE_DATA(_pdev) (_pdev)->ecore_info.usem_int_table_data
    #define INIT_CSEM_INT_TABLE_DATA(_pdev) (_pdev)->ecore_info.csem_int_table_data
    const struct iro *iro_arr;
    #define INIT_IRO_ARRAY(_pdev)           (_pdev)->ecore_info.iro_arr
    #define IRO                             (PFDEV(pdev))->ecore_info.iro_arr

} ecore_info_t;
2898
/* Counters gathered while waiting for a Function Level Reset (FLR) to
 * quiesce the various HW blocks. */
typedef struct _flr_stats_t {
    u32_t is_pf;                        // non-zero when the flow ran on a PF
    u32_t default_wait_interval_ms;     // polling interval used while waiting
    u32_t cfc_usage_counter;
    u32_t qm_usage_counter;
    u32_t tm_vnic_usage_counter;
    u32_t tm_num_scans_usage_counter;
    u32_t dq_usage_counter;
    u32_t final_cleanup_complete;
    u32_t dmae_cx;
    u32_t pbf_queue[3];
    u32_t pbf_transmit_buffer[3];
} flr_stats_t;
2912
2913
/* DMA-able data buffers for the various slow-path ramrods (function start,
 * classification, RX-mode, multicast, RSS and function-update flavors).
 * Each buffer comes as a virtual/physical address pair; access them through
 * LM_SLOWPATH / LM_SLOWPATH_PHYS. */
typedef struct _lm_slowpath_data_t {
    /* Function Start Data  */
    struct function_start_data * func_start_data;
    lm_address_t                 func_start_data_phys;

    /* Classification: E1x uses the legacy MAC config, E2+ the classify rules. */
    union {
        struct mac_configuration_cmd          e1x;
        struct eth_classify_rules_ramrod_data e2;
    } * mac_rdata[LM_CLI_IDX_MAX];
    lm_address_t mac_rdata_phys[LM_CLI_IDX_MAX];

    /* TODO: MAC-VLAN PAIR!!! */

    union {
        struct tstorm_eth_mac_filter_config  e1x;
        struct eth_filter_rules_ramrod_data  e2;
    } * rx_mode_rdata[LM_CLI_IDX_MAX];
    lm_address_t rx_mode_rdata_phys[LM_CLI_IDX_MAX];    // FIXME: multi-client...

    union {
        struct mac_configuration_cmd            e1;
        struct eth_multicast_rules_ramrod_data  e2;
    } * mcast_rdata[LM_CLI_IDX_MAX];
    lm_address_t mcast_rdata_phys[LM_CLI_IDX_MAX];

    union {
        //struct eth_rss_update_ramrod_data_e1x e1x;
        struct eth_rss_update_ramrod_data     e2;
    } * rss_rdata;
    lm_address_t rss_rdata_phys;

    struct function_update_data* niv_function_update_data;
    lm_address_t                 niv_function_update_data_phys;

    struct function_update_data* l2mp_func_update_data;
    lm_address_t                 l2mp_func_update_data_phys;

    struct function_update_data* encap_function_update_data;
    lm_address_t                 encap_function_update_data_phys;

    struct function_update_data* ufp_function_update_data;
    lm_address_t                 ufp_function_update_data_phys;

} lm_slowpath_data_t ;
2959
/* Progress of the NIV (VIF) function-update ramrod flows. */
typedef enum _niv_ramrod_state_t
{
    NIV_RAMROD_NOT_POSTED,
    NIV_RAMROD_VIF_UPDATE_POSTED,
    NIV_RAMROD_VIF_LISTS_POSTED,
    NIV_RAMROD_SET_LOOPBACK_POSTED,
    NIV_RAMROD_CLEAR_LOOPBACK_POSTED,
    NIV_RAMROD_COMPLETED
}niv_ramrod_state_t;
2969
2970
/* Progress of the UFP function-update ramrod flows. */
typedef enum _ufp_ramrod_state_t
{
    UFP_RAMROD_NOT_POSTED,
    UFP_RAMROD_PF_LINK_UPDATE_POSTED,
    UFP_RAMROD_PF_UPDATE_POSTED,
    UFP_RAMROD_COMPLETED
}ufp_ramrod_state_t;
2978
/* Slow-path configuration state: the ramrod data buffers, ecore SP-verbs
 * objects (credit pools, RX-mode, multicast, RSS) and the state of the
 * various function-update ramrod flows. */
typedef struct _lm_slowpath_info_t {
    lm_slowpath_data_t slowpath_data;   // DMA buffers for slow-path ramrods

    #define LM_SLOWPATH(pdev, var)      (pdev->slowpath_info.slowpath_data.var)
    #define LM_SLOWPATH_PHYS(pdev, var) (pdev->slowpath_info.slowpath_data.var##_phys)


    /* CAM credit pools */
    struct ecore_credit_pool_obj vlans_pool;
    struct ecore_credit_pool_obj macs_pool;

    /* Rx-Mode Object */
    struct ecore_rx_mode_obj rx_mode_obj;

    /* Multi-Cast */
    struct ecore_mcast_obj mcast_obj[LM_CLI_IDX_MAX];
    volatile void *        set_mcast_cookie[LM_CLI_IDX_MAX];    // cookie of a pending multicast update
    volatile u32_t         sp_mcast_state[LM_CLI_IDX_MAX];      // slow-path state of the multicast update

    /* RSS - Only support for NDIS client ! */
    struct ecore_rss_config_obj rss_conf_obj;
    volatile void *             set_rss_cookie;     // cookie of a pending RSS update
    volatile u32_t              sp_rss_state;       // slow-path state of the RSS update

    u32_t rss_hash_key[RSS_HASH_KEY_SIZE/4];        // current RSS hash key
    u32_t last_set_rss_flags;                       // flags of the last applied RSS config
    u32_t last_set_rss_result_mask;                 // result mask of the last applied RSS config
    u8 last_set_indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];    // NOTE(review): uses 'u8' while the file otherwise uses 'u8_t'

    // possible values of the echo field
    #define FUNC_UPDATE_RAMROD_NO_SOURCE     0
    #define FUNC_UPDATE_RAMROD_SOURCE_NIV    1
    #define FUNC_UPDATE_RAMROD_SOURCE_L2MP   2
    #define FUNC_UPDATE_RAMROD_SOURCE_ENCAP  3
    #define FUNC_UPDATE_RAMROD_SOURCE_UFP    4

    volatile u32_t niv_ramrod_state;    //use enum niv_ramrod_state_t

    volatile u32_t l2mp_func_update_ramrod_state;
    #define L2MP_FUNC_UPDATE_RAMROD_NOT_POSTED 0
    #define L2MP_FUNC_UPDATE_RAMROD_POSTED     1
    #define L2MP_FUNC_UPDATE_RAMROD_COMPLETED  2

    volatile u8_t  last_vif_list_bitmap;
    volatile u32_t ufp_func_ramrod_state;   //use enum ufp_ramrod_state_t
} lm_slowpath_info_t;
3025
3026 #define MAX_ER_DEBUG_ENTRIES 10
3027
/* Snapshot of the attention registers captured during one error recovery. */
typedef struct _lm_er_debug_info_t
{
    u32_t attn_sig[MAX_ATTN_REGS];
} lm_er_debug_info_t;
3032
/* Encapsulated-packet offload on/off state. */
typedef enum _encap_ofld_state_t
{
    ENCAP_OFFLOAD_DISABLED,
    ENCAP_OFFLOAD_ENABLED
} encap_ofld_state_t;
3038
/* Requested vs. applied encapsulation offload state, plus the cookie of the
 * update in flight. */
typedef struct _lm_encap_info_t
{
    u8_t new_encap_offload_state;       // requested encap_ofld_state_t value
    u8_t current_encap_offload_state;   // currently applied encap_ofld_state_t value

    volatile void * update_cookie;      // cookie of a pending update request
}lm_encap_info_t;
3046
/* Assorted debug counters: interrupt accounting per HW chain, error-recovery
 * history and statistics kept for removed sanity checks. */
typedef struct _lm_debug_info_t
{
    u32_t ack_dis[MAX_HW_CHAINS];
    u32_t ack_en[MAX_HW_CHAINS];
    u32_t ack_def_dis;
    u32_t ack_def_en;
    u32_t rx_only_int[MAX_HW_CHAINS];
    u32_t tx_only_int[MAX_HW_CHAINS];
    u32_t both_int[MAX_HW_CHAINS];
    u32_t empty_int[MAX_HW_CHAINS];
    u32_t false_int[MAX_HW_CHAINS];
    u32_t not_porocessed_int[MAX_HW_CHAINS];    // NOTE(review): field name typo ("porocessed") kept for compatibility

    /* Debug information for error recovery. */
    /* Data for last MAX_ER_DEBUG_ENTRIES recoveries */
    lm_er_debug_info_t er_debug_info[MAX_ER_DEBUG_ENTRIES];
    u8_t curr_er_debug_idx; /* Index into array above */
    u8_t er_bit_is_set_already;
    u8_t er_bit_from_previous_sessions;
    u8_t _pad;

    /* Some temporary statistics for removed sanity checks */
    u32_t number_of_long_LSO_headers;   /* for LSO processing of packets with headers more than 120 B */
    u32_t pending_tx_packets_on_fwd;    /* There were pending tx packets on forward channel at time of abort
                                         * CQ57879 : evbda!um_abort_tx_packets while running Super Stress with Error Recovery */

    /* OS bugs worked-around in eVBD */
    u32_t pf0_mps_overwrite;

    /* TOE Rx/Tx half-complete upon ER */
    u32_t toe_rx_comp_upon_er;
    u32_t toe_tx_comp_upon_er;

    u32_t toe_prealloc_alloc_fail;

} lm_debug_info_t;
3083
3084 /*
3085 * CQ 70040
3086 * Support for NSCI get OS driver version
3087 */
/* OS driver version as published to shared memory (CQ 70040, NCSI query). */
typedef struct _lm_cli_drv_ver_to_shmem_t
{
    struct os_drv_ver cli_drv_ver;
}lm_cli_drv_ver_to_shmem_t;
3092
3093 /*******************************************************************************
3094 * Main device block.
3095 ******************************************************************************/
/* Main device block: aggregates all per-device state of the low-level (lm)
 * driver - version info, TX/RX/SQ/EQ queues, per-client and offload state,
 * HW/parameter blocks, context management and debug info. */
typedef struct _lm_device_t
{
    d_list_entry_t link;                /* Link for the device list. */

    u32_t ver_num;                      /* major:8 minor:8 fix:16 */
    u8_t  ver_str[16];                  /* null terminated version string. */
    u32_t ver_num_fw;                   /* major:8 minor:8 fix:16 */
    u8_t  product_version[4];           /* OEM product version 0xffffffff means invalid/not exists*/

    lm_variables_t      vars;
    lm_tx_info_t        tx_info;
    lm_rx_info_t        rx_info;
    lm_sq_info_t        sq_info;
    lm_eq_info_t        eq_info;
    lm_client_info_t    client_info[ETH_MAX_RX_CLIENTS_E2];
    lm_offload_info_t   ofld_info;
    lm_toe_info_t       toe_info;
    lm_dcbx_info_t      dcbx_info;
    lm_hardware_info_t  hw_info;
    lm_slowpath_info_t  slowpath_info;
    lm_dmae_info_t      dmae_info;
    lm_params_t         params;
    lm_context_info_t*  context_info;
    //lm_mc_table_t mc_table;
    lm_nwuf_list_t      nwuf_list;

    i2c_binary_info_t   i2c_binary_info;

    /* Statistics. */
    u32_t chip_reset_cnt;
    u32_t fw_timed_out_cnt;

    lm_cid_recycled_cb_t cid_recycled_callbacks[MAX_PROTO];     // per-protocol CID-recycled callbacks

    lm_iscsi_info_t iscsi_info;

    lm_fcoe_info_t fcoe_info;

    ecore_info_t ecore_info;
    struct _lm_device_t* pf_dev;        // the PF device (self for PFs)
#ifdef VF_INVOLVED
    pf_resources_set_t pf_resources;
    u8_t vf_idx;
    u8_t _vf_pad[2];
    //PF master params
    lm_vfs_set_t vfs_set;
    //VF PF Channel params
    void * pf_vf_acquiring_resp;
#endif
    flr_stats_t flr_stats;

    lm_encap_info_t encap_info;

    lm_debug_info_t debug_info;

    /*
     * 08/01/2014
     * CQ 70040
     * Support for NSCI get OS driver version
     */
    lm_cli_drv_ver_to_shmem_t lm_cli_drv_ver_to_shmem;

    /* Turned on if a panic occurred in the device... (viewed by functions that wait and get a timeout... - do not assert... )
     * not turned on yet, prep for the future...
     */
    u8_t panic;
} lm_device_t;
3163
3164
3165 // driver pulse interval calculation
3166 #define DRV_PULSE_PERIOD_MS_FACTOR(_p) CHIP_REV_IS_ASIC(_p) ? DRV_PULSE_PERIOD_MS : (DRV_PULSE_PERIOD_MS*10)
3167
3168 // dropless mode definitions
3169 #define BRB_SIZE(_pdev) (CHIP_IS_E3(_pdev) ? 1024 : 512)
3170 #define MAX_AGG_QS(_pdev) (CHIP_IS_E1(_pdev) ? \
3171 ETH_MAX_AGGREGATION_QUEUES_E1 :\
3172 ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
3173 #define FW_DROP_LEVEL(_pdev) (ETH_MIN_RX_CQES_WITHOUT_TPA + MAX_AGG_QS(_pdev))
3174 #define FW_PREFETCH_CNT 16
3175 #define DROPLESS_FC_HEADROOM 150
3176
3177 /*******************************************************************************
3178 * Functions exported between file modules.
3179 ******************************************************************************/
3180 /* Prints the entire information of all status blocks
3181 * Parameters:
3182 * pdev - LM device which holds the status blocks within
3183 */
3184 void print_sb_info(lm_device_t *pdev);
3185
3186 //__________________________________________________________________________________
3187
3188 lm_status_t lm_pretend_func( struct _lm_device_t *pdev, u16_t pretend_func_num );
3189
3190 /* returns a non-default status block according to rss ID
3191 * Parameters:
3192 * pdev - LM device which holds the status blocks within
3193 * rss_id - RSS ID for which we return the specific status block
3194 */
3195 volatile struct host_status_block * lm_get_status_block(lm_device_t *pdev, u8_t rss_id);
3196
3197 /* returns the default status block. It is unique per function.
3198 * Parameters:
3199 * pdev - LM device which holds the status blocks within
3200 */
3201 volatile struct hc_sp_status_block * lm_get_default_status_block(lm_device_t *pdev);
3202
3203 /* returns the attention status block. It is unique per function.
3204 * Parameters:
3205 * pdev - LM device which holds the status blocks within
3206 */
3207 volatile struct atten_sp_status_block * lm_get_attention_status_block(lm_device_t *pdev);
3208
3209 /**
3210 * @Description
3211 * Prepares for MCP reset: takes care of CLP
3212 * configurations.
3213 *
3214 * @param pdev
3215 * @param magic_val Old value of 'magic' bit.
3216 */
3217 lm_status_t lm_reset_mcp_prep(lm_device_t *pde, u32_t * magic_val);
3218 lm_status_t lm_reset_mcp_comp(lm_device_t *pdev, u32_t magic_val);
3219
3220
3221 /* Initialize the whole status blocks per port - overall: 1 default sb, 16 non-default sbs
3222 *
3223 * Parameters:
3224 * pdev - the LM device which holds the sbs
3225 * port - the port number
3226 */
3227 void init_status_blocks(struct _lm_device_t *pdev);
3228
3229 void lm_setup_ndsb_index(struct _lm_device_t *pdev, u8_t sb_id, u8_t idx, u8_t sm_idx, u8_t timeout, u8_t dhc_enable);
3230
3231 /**
3232 * This function sets all the status-block ack values back to
3233 * zero. Must be called BEFORE initializing the igu + before
3234 * initializing status-blocks.
3235 *
3236 * @param pdev
3237 */
3238 void lm_reset_sb_ack_values(struct _lm_device_t *pdev);
3239
3240 /* set interrupt coalesing parameters.
3241 - these settings are derived from user configured interrupt coalesing mode and tx/rx interrupts rate (lm params).
3242 - these settings are used for status blocks initialization */
3243 void lm_set_int_coal_info(struct _lm_device_t *pdev);
3244
3245 void lm_int_igu_sb_cleanup(lm_device_t *pdev, u8 igu_sb_id);
3246
3247 /**
3248 * @description
3249 * Get the HC_INDEX_ETH_TX_CQ_CONS_COSX index from chain.
3250 * @param pdev
3251 * @param chain
3252 *
3253 * @return STATIC u8_t
3254 */
3255 u8_t
3256 lm_eth_tx_hc_cq_cons_cosx_from_chain(IN lm_device_t *pdev,
3257 IN const u32_t chain);
3258
3259 /**
3260 * This function sets all the status-block ack values back to
3261 * zero. Must be called BEFORE initializing the igu + before
3262 * initializing status-blocks.
3263 *
3264 * @param pdev
3265 */
3266 void lm_reset_sb_ack_values(struct _lm_device_t *pdev);
3267
3268 /* Driver calls this function in order to ACK the default/non-default status block index(consumer) toward the chip.
3269 * This is needed by the hw in order to decide whether an interrupt should be generated by the IGU.
3270 * This is achieved via write into the INT ACK register.
 * This function also controls whether to enable/disable the interrupt line
3272 *
3273 * Parameters:
3274 * rss_id - the RSS/CPU number we are running on
3275 * pdev - this is the LM device
3276 */
3277 void lm_int_ack_sb_enable(lm_device_t *pdev, u8_t rss_id);
3278 void lm_int_ack_sb_disable(lm_device_t *pdev, u8_t rss_id);
3279 void lm_int_ack_def_sb_enable(lm_device_t *pdev);
3280 void lm_int_ack_def_sb_disable(lm_device_t *pdev);
3281
3282 #define USTORM_INTR_FLAG 1
3283 #define CSTORM_INTR_FLAG 2
3284 #define SERV_RX_INTR_FLAG 4
3285 #define SERV_TX_INTR_FLAG 8
3286
3287 #ifndef USER_LINUX
lm_get_sb_number_indexes(lm_device_t * pdev)3288 static __inline u16_t lm_get_sb_number_indexes(lm_device_t *pdev)
3289 {
3290 if (CHIP_IS_E1x(pdev))
3291 {
3292 return HC_SB_MAX_INDICES_E1X;
3293 }
3294 else
3295 {
3296 return HC_SB_MAX_INDICES_E2;
3297 }
3298 }
3299
lm_get_sb_running_index(lm_device_t * pdev,u8_t sb_id,u8_t sm_idx)3300 static __inline u16_t lm_get_sb_running_index(lm_device_t *pdev, u8_t sb_id, u8_t sm_idx)
3301 {
3302 #ifdef VF_INVOLVED
3303 if (IS_CHANNEL_VFDEV(pdev)) {
3304 return lm_vf_pf_get_sb_running_index(pdev, sb_id, sm_idx);
3305 }
3306 #endif
3307 if (CHIP_IS_E1x(pdev))
3308 {
3309 return mm_le16_to_cpu(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e1x_sb->sb.running_index[sm_idx]);
3310 }
3311 else
3312 {
3313 return mm_le16_to_cpu(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e2_sb->sb.running_index[sm_idx]);
3314 }
3315 }
lm_get_sb_index(lm_device_t * pdev,u8_t sb_id,u8_t idx)3316 static __inline u16_t lm_get_sb_index(lm_device_t *pdev, u8_t sb_id, u8_t idx)
3317 {
3318 #ifdef VF_INVOLVED
3319 if (IS_CHANNEL_VFDEV(pdev)) {
3320 return lm_vf_pf_get_sb_index(pdev, sb_id, idx);
3321 }
3322 #endif
3323 if (CHIP_IS_E1x(pdev))
3324 {
3325 return mm_le16_to_cpu(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e1x_sb->sb.index_values[idx]);
3326 }
3327 else
3328 {
3329 return mm_le16_to_cpu(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e2_sb->sb.index_values[idx]);
3330 }
3331 }
3332
3333
lm_get_sb_running_indexes(lm_device_t * pdev,u8_t sb_idx)3334 static __inline u16_t volatile * lm_get_sb_running_indexes(lm_device_t *pdev, u8_t sb_idx)
3335 {
3336 u16_t volatile * running_indexes_ptr;
3337 if (CHIP_IS_E1x(pdev))
3338 {
3339 running_indexes_ptr = &pdev->vars.status_blocks_arr[sb_idx].host_hc_status_block.e1x_sb->sb.running_index[0];
3340 }
3341 else
3342 {
3343 running_indexes_ptr = &pdev->vars.status_blocks_arr[sb_idx].host_hc_status_block.e2_sb->sb.running_index[0];
3344 }
3345 return running_indexes_ptr;
3346 }
lm_get_sb_indexes(lm_device_t * pdev,u8_t sb_idx)3347 static __inline u16_t volatile * lm_get_sb_indexes(lm_device_t *pdev, u8_t sb_idx)
3348 {
3349 u16_t volatile * indexes_ptr;
3350
3351 #ifdef VF_INVOLVED
3352 if (IS_CHANNEL_VFDEV(pdev)) {
3353 return pdev->vars.status_blocks_arr[sb_idx].host_hc_status_block.vf_sb;
3354 }
3355 #endif
3356
3357 if (CHIP_IS_E1x(pdev))
3358 {
3359 indexes_ptr = &pdev->vars.status_blocks_arr[sb_idx].host_hc_status_block.e1x_sb->sb.index_values[0];
3360 }
3361 else
3362 {
3363 indexes_ptr = &pdev->vars.status_blocks_arr[sb_idx].host_hc_status_block.e2_sb->sb.index_values[0];
3364 }
3365 return indexes_ptr;
3366 }
3367
3368
/* Maps an IGU status-block id to the driver RSS id: on IGU interrupt blocks,
 * ids at or above the U-NDSB offset are shifted back down by that offset. */
static __inline u8_t lm_map_igu_sb_id_to_drv_rss(lm_device_t *pdev, u8_t igu_sb_id)
{
    u8_t drv_sb_id = igu_sb_id;

    if ((INTR_BLK_TYPE(pdev) == INTR_BLK_IGU) &&
        (drv_sb_id >= IGU_U_NDSB_OFFSET(pdev)))
    {
        drv_sb_id -= IGU_U_NDSB_OFFSET(pdev);
    }
    /* FIXME: this doesn't have to be right - drv rss id can differ from sb-id */
    return drv_sb_id;
}
/* Determines which storm(s) an interrupt on 'igu_sb_id' belongs to, returning
 * USTORM_INTR_FLAG / CSTORM_INTR_FLAG bits and writing the driver status-block
 * id to *drv_sb_id (adjusted by the U-NDSB offset where relevant). */
static __inline u8_t lm_query_storm_intr(lm_device_t *pdev, u8_t igu_sb_id, u8_t * drv_sb_id)
{
    u8_t flags = 0;

    *drv_sb_id = igu_sb_id;

    switch (pdev->params.ndsb_type)
    {
    case LM_SINGLE_SM:
    case LM_DOUBLE_SM_SINGLE_IGU:
        /* One segment per u/c in both of these modes. */
        SET_FLAGS(flags, USTORM_INTR_FLAG);
        break;

    default:
        if (igu_sb_id >= IGU_U_NDSB_OFFSET(pdev))
        {
            *drv_sb_id -= IGU_U_NDSB_OFFSET(pdev);
            SET_FLAGS(flags, USTORM_INTR_FLAG);
        }
        else
        {
            SET_FLAGS(flags, CSTORM_INTR_FLAG);
        }
        break;
    }

    return flags;
}
3416
3417 /* Check whether a non-default status block has changed, that is,
3418 * the hw has written a new prod_idx for on or more of its storm parts.
3419 *
3420 * Parameters:
3421 * pdev - this is the LM device
3422 * sb_idx - this is the index where the status block lies in the array under the lm_device
3423 *
3424 * Return Value:
3425 * result - TRUE in case the specific status block is considered as changed.
3426 * FALSE otherwise.
3427 *
3428 * Nots:
3429 * For performance optimization, this function is static inline.
3430 */
lm_is_sb_updated(lm_device_t * pdev,u8_t igu_sb_id)3431 static __inline u8_t lm_is_sb_updated(lm_device_t *pdev, u8_t igu_sb_id)
3432 {
3433 u8_t result = FALSE;
3434 u16_t hw_sb_idx = 0;
3435 u8_t flags = 0;
3436 u8_t drv_sb_id = 0;
3437
3438 DbgBreakIfFastPath(!pdev);
3439 if (!pdev)
3440 {
3441 return FALSE;
3442 }
3443
3444 flags = lm_query_storm_intr(pdev, igu_sb_id, &drv_sb_id);
3445
3446 if (GET_FLAGS(flags, USTORM_INTR_FLAG))
3447 {
3448 hw_sb_idx = lm_get_sb_running_index(pdev, drv_sb_id, SM_RX_ID);
3449 if (hw_sb_idx != pdev->vars.u_hc_ack[drv_sb_id])
3450 {
3451 DbgMessage(pdev, INFORMi, "lm_is_sb_updated():u_sb.status_block_index:%d u_hc_ack:%d\n",
3452 hw_sb_idx, pdev->vars.u_hc_ack[drv_sb_id]);
3453
3454 result = TRUE;
3455 }
3456 }
3457
3458 if (GET_FLAGS(flags, CSTORM_INTR_FLAG))
3459 {
3460 hw_sb_idx = lm_get_sb_running_index(pdev, drv_sb_id, SM_TX_ID);
3461 if (hw_sb_idx != pdev->vars.c_hc_ack[drv_sb_id])
3462 {
3463 DbgMessage(pdev, INFORMi, "lm_is_sb_updated():c_sb.status_block_index:%d c_hc_ack:%d\n",
3464 hw_sb_idx, pdev->vars.u_hc_ack[drv_sb_id]);
3465
3466 result = TRUE;
3467 }
3468 }
3469
3470 DbgMessage(pdev, INFORMi, "lm_is_sb_updated(): result:%s\n", result? "TRUE" : "FALSE");
3471
3472 return result;
3473 }
3474 #endif // !USER_LINUX
3475
/* Check if the default status block has changed, that is,
 * the hw has written a new prod_idx for one or more of its storm parts.
3478 *
3479 * Parameters:
3480 * pdev - this is the LM device
3481 *
3482 * Return Value:
3483 * result - TRUE in case the status block is considered as changed.
3484 * FALSE otherwise.
3485 */
3486 u8_t lm_is_def_sb_updated(lm_device_t *pdev);
3487
3488
3489 /* Check if the status block has outstanding completed Rx requests
3490 *
3491 * Parameters:
3492 * pdev - this is the LM device
3493 * sb_idx - this is the index where the status block lies in the array under the lm_device
3494 *
3495 * Return Value:
3496 * result - TRUE in case the status block has new update regarding Rx completion
3497 * FALSE otherwise.
3498 */
3499 u8_t lm_is_rx_completion(lm_device_t *pdev, u8_t chain_idx);
3500
3501 /* Check if the status block has outstanding completed Tx requests
3502 *
3503 * Parameters:
3504 * pdev - this is the LM device
3505 * sb_idx - this is the index where the status block lies in the array under the lm_device
3506 *
3507 * Return Value:
3508 * result - TRUE in case the status block has new update regarding Tx completion
3509 * FALSE otherwise.
3510 */
3511 u8_t lm_is_tx_completion(lm_device_t *pdev, u8_t chain_idx);
3512
3513 /*
3514 * Handle an IGU status-block update.
3515 * Parameters:
3516 * pdev - the LM device
3517 * igu_sb_id - the igu sb id that got the interrupt / MSI-X message
3518 * rx_rss_id / tx_rss_id - matching driver chains
3519 * flags: service_rx / service_tx to know which activity occured
3520 */
3521 u8_t lm_handle_igu_sb_id(lm_device_t *pdev, u8_t igu_sb_id, OUT u8_t *rx_rss_id, OUT u8_t *tx_rss_id);
3522
3523 lm_status_t lm_update_eth_client(IN struct _lm_device_t *pdev,
3524 IN const u8_t cid,
3525 IN const u16_t silent_vlan_value,
3526 IN const u16_t silent_vlan_mask,
3527 IN const u8_t silent_vlan_removal_flg,
3528 IN const u8_t silent_vlan_change_flg
3529 );
3530 lm_status_t lm_establish_eth_con(struct _lm_device_t *pdev, u8_t const cid, u8_t sb_id, u8_t attributes_bitmap);
3531 lm_status_t lm_establish_forward_con(struct _lm_device_t *pdev);
3532 lm_status_t lm_close_forward_con(struct _lm_device_t *pdev);
3533 lm_status_t lm_close_eth_con(struct _lm_device_t *pdev, u32_t const cid,
3534 const u8_t send_halt_ramrod);
3535 lm_status_t lm_terminate_eth_con(struct _lm_device_t *pdev, u32_t const cid);
3536 lm_status_t lm_chip_stop(struct _lm_device_t *pdev);
3537
3538 int lm_set_init_arrs(lm_device_t *pdev);
3539
3540 lm_status_t
3541 lm_empty_ramrod_eth(IN struct _lm_device_t *pdev,
3542 IN const u32_t cid,
3543 IN u32_t data_cid,
3544 IN volatile u32_t *curr_state,
3545 IN u32_t new_state);
3546 /*
3547 * save client connection parameters for a given L2 client
3548 */
3549 lm_status_t
3550 lm_setup_client_con_params( IN struct _lm_device_t *pdev,
3551 IN u8_t const chain_idx,
3552 IN struct _lm_client_con_params_t *cli_params );
3553
3554 lm_status_t
3555 lm_eq_ramrod_post_sync( IN struct _lm_device_t *pdev,
3556 IN u8_t cmd_id,
3557 IN u64_t data,
3558 IN u8_t ramrod_priority,
3559 IN volatile u32_t *p_curr_state,
3560 IN u32_t curr_state,
3561 IN u32_t new_state);
3562
3563 //L2 Client conn, used for iscsi/rdma
3564 /*
3565 * allocate and setup txq, rxq, rcq and set tstrom ram values for L2 client connection of a given client index
3566 */
3567 lm_status_t
3568 lm_init_chain_con( IN struct _lm_device_t *pdev,
3569 IN u8_t const chain_idx,
3570 IN u8_t const b_alloc );
3571
3572 /*
3573 * reset txq, rxq, rcq counters for L2 client connection
3574 */
3575 lm_status_t
3576 lm_clear_eth_con_resc(
3577 IN struct _lm_device_t *pdev,
3578 IN u8_t const cid
3579 );
3580
3581 /*
3582 * clear the status block consumer index in the internal ram for a given status block index
3583 */
3584 lm_status_t
3585 lm_clear_chain_sb_cons_idx(
3586 IN struct _lm_device_t *pdev,
3587 IN u8_t sb_idx,
3588 IN struct _lm_hc_sb_info_t *hc_sb_info,
3589 IN volatile u16_t ** hw_con_idx_ptr
3590 );
3591
3592
3593 u8_t lm_is_eq_completion(lm_device_t *pdev);
3594
3595 /* Does relevant processing in case of attn signals assertion.
3596 * 1)Write '1' into attn_ack to chip(IGU) (do this in parallel for _all_ bits including the fixed 8 hard-wired via the
3597 * set_ack_bit_register
3598 * 2)MASK AEU lines via the mask_attn_func_x register (also in parallel) via GRC - for AEU lower lines 0-7 only!
3599 * 3)Only for the 8 upper fixed hard-wired AEU lines: do their relevant processing, if any.
3600 Finally, drv needs to "clean the attn in the hw block"(e.g. INT_STS_CLR) for them.
3601 *
3602 * Parameters:
3603 * pdev - this is the LM device
3604 * assertion_proc_flgs - attn lines which got asserted
3605 */
3606 void lm_handle_assertion_processing(lm_device_t *pdev, u16_t assertion_proc_flgs);
3607
3608 /* Does relevant processing in case of attn signals deassertion.
3609 * 1) Grab split access lock register of MCP (instead of SW arbiter)
3610 * 2) Read 128bit after inverter via the 4*32regs via GRC.
3611 * 3) For each dynamic group (8 lower bits only!), read the masks which were set aside to find for each group which attn bit is a member and
3612 * needs to be handled. pass all over atten bits belonged to this group and treat them accordingly.
3613 * After an attn signal was handled, drv needs to "clean the attn in the hw block"(e.g. INT_STS_CLR) for that attn bit.
3614 * 4) Release split access lock register of MCP
3615 * 5) Write '0' into attn_ack to chip(IGU) (do this in parallel for _all_ bits, including the fixed 8 hard-wired, via the set_ack_bit_register)
3616 * 6) UNMASK AEU lines via the mask_attn_func_x register (also in parallel) via GRC - for AEU lower lines 0-7 only!
3617 *
3618 * Parameters:
3619 * pdev - this is the LM device
3620 * deassertion_proc_flgs - attn lines which got deasserted
3621 */
3622 void lm_handle_deassertion_processing(lm_device_t *pdev, u16_t deassertion_proc_flgs);
3623
3624 /* Returns the attn_bits and attn_ack fields from the default status block
3625 *
3626 * Parameters:
3627 * pdev - this is the LM device
3628 * attn_bits - OUT param which receives the attn_bits from the atten part of the def sb
3629 * attn_ack - OUT param which receives the attn_ack from the atten part of the def sb
3630 */
3631 void lm_get_attn_info(lm_device_t *pdev, u16_t *attn_bits, u16_t *attn_ack);
3632
/**Generate a general attention on all functions but this one,
3634 * which causes them to update their link status and CMNG state
3635 * from SHMEM.
3636 *
3637 * @param pdev the LM device
3638 */
3639 void sync_link_status(lm_device_t *pdev);
3640 /**
3641 * @description
3642 * Calculates BW according to current linespeed and MF
3643 * configuration of the function in Mbps.
3644 * @param pdev
3645 * @param link_speed - Port rate in Mbps.
3646 * @param vnic
3647 *
3648 * @return u16
3649 * Return the max BW of the function in Mbps.
3650 */
3651 u16_t
3652 lm_get_max_bw(IN const lm_device_t *pdev,
3653 IN const u32_t link_speed,
3654 IN const u8_t vnic);
3655
3656 /**Update CMNG and link info from SHMEM and configure the
3657 * firmware to the right CMNG values if this device is the PMF.
3658 *
3659 * @note This function must be called under PHY_LOCK
3660 *
3661 * @param pdev the LM device
3662 */
3663 void lm_reload_link_and_cmng(lm_device_t *pdev);
3664
3665 /* Returns the number of toggled bits in a 32 bit integer
3666 * n - integer to count its '1' bits
3667 */
3668 u32_t count_bits(u32_t n);
3669
3670 u32_t LOG2(u32_t v);
3671
3672 /**
3673 * General function that waits for a certain state to change,
3674 * not protocol specific. It takes into account vbd-commander
3675 * and reset-is-in-progress
3676 *
3677 * @param pdev
3678 * @param curr_state -> what to poll on
3679 * @param new_state -> what we're waiting for
3680 *
3681 * @return lm_status_t TIMEOUT if state didn't change, SUCCESS
3682 * otherwise
3683 */
3684 lm_status_t lm_wait_state_change(struct _lm_device_t *pdev, volatile u32_t * curr_state, u32_t new_state);
3685
/* copy the new values of the status block prod_index for each storm into the local copy we hold in the lm_device
3687 *
3688 * Parameters:
3689 * pdev - this is the LM device
3690 * sb_idx - this is the index where the status block lies in the array under the lm_device
3691 */
3692 void lm_update_fp_hc_indices(lm_device_t *pdev, u8_t igu_sb_id, u32_t *activity_flg, u8_t *drv_rss_id);
3693 void lm_update_def_hc_indices(lm_device_t *pdev, u8_t sb_id, u32_t *activity_flg);
3694
3695 void lm_57710A0_dbg_intr( struct _lm_device_t * pdev );
3696
3697 /* mdio access functions*/
3698 lm_status_t
3699 lm_mwrite(
3700 lm_device_t *pdev,
3701 u32_t reg,
3702 u32_t val);
3703
3704 lm_status_t
3705 lm_mread(
3706 lm_device_t *pdev,
3707 u32_t reg,
3708 u32_t *ret_val);
3709
3710 lm_status_t
3711 lm_m45write(
3712 lm_device_t *pdev,
3713 u32_t reg,
3714 u32_t addr,
3715 u32_t val);
3716
3717 lm_status_t
3718 lm_m45read(
3719 lm_device_t *pdev,
3720 u32_t reg,
3721 u32_t addr,
3722 u32_t *ret_val);
3723
3724 lm_status_t
3725 lm_phy45_read(
3726 lm_device_t *pdev,
3727 u8_t phy_addr,
3728 u8_t dev_addr,
3729 u16_t reg, // offset
3730 u16_t *ret_val);
3731
3732 lm_status_t
3733 lm_phy45_write(
3734 lm_device_t *pdev,
3735 u8_t phy_addr,
3736 u8_t dev_addr,
3737 u16_t reg, // offset
3738 u16_t val);
3739
3740 lm_status_t
3741 lm_set_phy_addr(
3742 lm_device_t *pdev,
3743 u8_t addr);
3744
3745 void
3746 lm_reset_link(lm_device_t *pdev);
3747
3748 u32_t
3749 lm_nvram_query(
3750 lm_device_t *pdev,
3751 u8_t reset_flash_block,
3752 u8_t no_hw_mod);
3753
3754 void
3755 lm_nvram_init(
3756 lm_device_t *pdev,
3757 u8_t reset_flash_block);
3758
3759 lm_status_t
3760 lm_nvram_read(
3761 lm_device_t *pdev,
3762 u32_t offset,
3763 u32_t *ret_buf,
3764 u32_t buf_size); /* Must be a multiple of 4. */
3765
3766 lm_status_t
3767 lm_nvram_write(
3768 lm_device_t *pdev,
3769 u32_t offset,
3770 u32_t *data_buf,
3771 u32_t buf_size); /* Must be a multiple of 4. */
3772
3773 void
3774 lm_reg_rd_ind(
3775 lm_device_t *pdev,
3776 u32_t offset,
3777 u32_t *ret);
3778
3779 void
3780 lm_reg_wr_ind(
3781 lm_device_t *pdev,
3782 u32_t offset,
3783 u32_t val);
3784
3785 void
3786 lm_reg_rd_ind_imp(
3787 lm_device_t *pdev,
3788 u32_t offset,
3789 u32_t *ret);
3790
3791 void
3792 lm_reg_wr_ind_imp(
3793 lm_device_t *pdev,
3794 u32_t offset,
3795 u32_t val);
3796
3797 lm_status_t
3798 lm_init_mac_link(
3799 lm_device_t *pdev);
3800
3801 //TODO check if we need that when MCP ready
3802 u8_t
3803 fw_reset_sync(
3804 lm_device_t *pdev,
3805 lm_reason_t reason,
3806 u32_t msg_data,
3807 u32_t fw_ack_timeout_us); /* timeout in microseconds. */
3808
3809 // mcp interface
3810 lm_status_t
3811 lm_mcp_submit_cmd(
3812 lm_device_t *pdev,
3813 u32_t drv_msg);
3814
3815 lm_status_t
3816 lm_mcp_get_resp(
3817 lm_device_t *pdev);
3818
3819
3820 lm_coalesce_buffer_t *
3821 lm_get_coalesce_buffer(
3822 IN lm_device_t *pdev,
3823 IN lm_tx_chain_t *txq,
3824 IN u32_t buf_size);
3825
3826
3827 void
3828 lm_put_coalesce_buffer(
3829 IN lm_device_t *pdev,
3830 IN lm_tx_chain_t *txq,
3831 IN lm_coalesce_buffer_t *coalesce_buf);
3832
3833 void lm_reset_device_if_undi_active(
3834 IN struct _lm_device_t *pdev);
3835
3836 void
3837 lm_cmng_init(
3838 struct _lm_device_t *pdev,
3839 u32_t port_rate);
3840
3841 lm_status_t lm_get_pcicfg_mps_mrrs(lm_device_t * pdev);
3842
3843 void lm_set_pcie_nfe_report( lm_device_t *pdev);
3844
3845
3846 void lm_clear_non_def_status_block(struct _lm_device_t *pdev,
3847 u8_t sb_id);
3848
3849 void lm_init_non_def_status_block(struct _lm_device_t *pdev,
3850 u8_t sb_id,
3851 u8_t port);
3852
3853 void lm_eth_init_command_comp(struct _lm_device_t *pdev, struct common_ramrod_eth_rx_cqe *cqe);
3854
3855 u8_t lm_is_nig_reset_called(struct _lm_device_t *pdev);
3856 void lm_clear_nig_reset_called(struct _lm_device_t *pdev);
3857
3858 void lm_setup_fan_failure_detection(struct _lm_device_t *pdev);
3859 void enable_blocks_attention(struct _lm_device_t *pdev);
3860 u32_t lm_inc_cnt_grc_timeout_ignore(struct _lm_device_t *pdev, u32_t val);
3861
3862 //acquire split MCP access lock register
3863 lm_status_t acquire_split_alr(lm_device_t *pdev);
3864 //Release split MCP access lock register
3865 void release_split_alr(lm_device_t *pdev);
3866
3867 /*******************************************************************************
3868 * Description:
3869 *
3870 * Return:
3871 ******************************************************************************/
3872
3873 #ifdef __BIG_ENDIAN
3874 #define CHANGE_ENDIANITY TRUE
3875 #else
3876 #define CHANGE_ENDIANITY FALSE
3877 #endif
3878
3879 // do not call this macro directly from the code!
3880 #define REG_WR_DMAE_LEN_IMP(_pdev,_reg_offset, _addr_src, _b_src_is_zeroed, _len, le32_swap) lm_dmae_reg_wr(_pdev, \
3881 lm_dmae_get(_pdev, LM_DMAE_DEFAULT)->context, \
3882 (void*)_addr_src, \
3883 _reg_offset,\
3884 (u16_t)_len,\
3885 _b_src_is_zeroed,\
3886 le32_swap)
3887
3888 // do not call this macro directly from the code!
3889 #define REG_RD_DMAE_LEN_IMP(_pdev,_reg_offset, _addr_dst, _len) lm_dmae_reg_rd( _pdev, \
3890 lm_dmae_get(_pdev, LM_DMAE_DEFAULT)->context, \
3891 _reg_offset, \
3892 _addr_dst,\
3893 _len,\
3894 FALSE)
3895
3896 // Macro for writing a buffer to destination address using DMAE when data given is in VIRTUAL ADDRESS,
3897 #define VIRT_WR_DMAE_LEN(_pdev, _src_addr, _dst_addr, _len, le32_swap) REG_WR_DMAE_LEN_IMP(_pdev, _dst_addr, _src_addr, FALSE, _len, le32_swap)
3898
3899 // Macro for writing a buffer to destination address using DMAE when data given is in PHYSICAL ADDRESS,
3900 #define PHYS_WR_DMAE_LEN(_pdev, _src_addr, _dst_addr, _len) lm_dmae_reg_wr_phys( _pdev, \
3901 lm_dmae_get(_pdev, LM_DMAE_DEFAULT)->context, \
3902 _src_addr, \
3903 _dst_addr,\
3904 (u16_t)_len)
3905
3906 // Macro for copying physical buffer using DMAE,
3907 #define PHYS_COPY_DMAE_LEN(_pdev, _src_addr, _dst_addr, _len) lm_dmae_copy_phys_buffer_unsafe( _pdev,\
3908 lm_dmae_get(_pdev, LM_DMAE_TOE)->context,\
3909 _src_addr,\
3910 _dst_addr,\
3911 (u16_t)_len)
3912 // write a buffer to destination address using DMAE
3913 #define REG_WR_DMAE_LEN(_pdev,_reg_offset, _addr_src, _len) REG_WR_DMAE_LEN_IMP(_pdev, _reg_offset, _addr_src, FALSE, _len, FALSE)
3914
3915 // read from a buffer to destination address using DMAE
3916 #define REG_RD_DMAE_LEN(_pdev,_reg_offset, _addr_dst, _len) REG_RD_DMAE_LEN_IMP(_pdev,_reg_offset, _addr_dst, _len)
3917
3918 // write a zeroed buffer to destination address using DMAE
3919 #define REG_WR_DMAE_LEN_ZERO(_pdev,_reg_offset, _len) REG_WR_DMAE_LEN_IMP(_pdev,_reg_offset, pdev->vars.zero_buffer, TRUE, _len, FALSE)
3920
// Write to registers, value of length 64 bit
3922 #define REG_WR_DMAE(_pdev,_reg_offset, _addr_src ) REG_WR_DMAE_LEN(_pdev,_reg_offset, _addr_src, 2)
3923
// Read from registers, value of length 64 bit
3925 #define REG_RD_DMAE(_pdev,_reg_offset, _addr_dst ) REG_RD_DMAE_LEN(_pdev,_reg_offset, _addr_dst, 2)
3926
3927
3928
3929
3930 /* Indirect register access. */
3931 #define REG_RD_IND(_pdev, _reg_offset, _ret) lm_reg_rd_ind(_pdev, (_reg_offset), _ret)
3932 #define REG_WR_IND(_pdev, _reg_offset, _val) lm_reg_wr_ind(_pdev, (_reg_offset), _val)
3933
3934 #ifndef __LINUX
3935 /* BAR write32 via register address */
3936 #define LM_BAR_WR32_ADDRESS(_pdev, _address, _val) \
3937 *((u32_t volatile *) (_address))=(_val); \
3938 mm_write_barrier()
3939 #else
3940 /* BAR write32 via register address */
3941 #define LM_BAR_WR32_ADDRESS(_pdev, _address, _val) \
3942 mm_io_write_dword(_pdev, _address, _val)
3943 #endif
3944
3945
3946 #if !(defined(UEFI) || defined(__SunOS) || defined(__LINUX)) || defined(__SunOS_MDB)
3947
3948 #ifdef _VBD_CMD_
3949 void vbd_cmd_on_bar_access(lm_device_t* pdev, u8_t bar, u32_t offset);
3950 #define VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset) vbd_cmd_on_bar_access(_pdev, _bar, _offset);
3951 #else
3952 #define VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)
3953 #endif
3954
3955
3956 /* BAR read8 via register offset and specific bar */
3957 #define LM_BAR_RD8_OFFSET(_pdev, _bar, _offset, _ret) \
3958 do { \
3959 mm_read_barrier(); \
3960 VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)\
3961 *(_ret) = *((u8_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)] + (_offset))); \
3962 } while (0)
3963 /* BAR read16 via register offset and specific bar */
3964 #define LM_BAR_RD16_OFFSET(_pdev, _bar, _offset, _ret) \
3965 do { \
3966 mm_read_barrier(); \
3967 VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)\
3968 *(_ret) = *((u16_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)]+(_offset))); \
3969 } while (0)
3970
3971 /* BAR read32 via register offset and specific bar */
3972 #define LM_BAR_RD32_OFFSET(_pdev, _bar, _offset, _ret) \
3973 do { \
3974 mm_read_barrier(); \
3975 VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)\
3976 *(_ret) = *((u32_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)]+(_offset))); \
3977 } while (0)
3978
3979 /* BAR read64 via register offset and specific bar */
3980 #define LM_BAR_RD64_OFFSET(_pdev, _bar, _offset, _ret) \
3981 do { \
3982 mm_read_barrier(); \
3983 VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)\
3984 *(_ret) = *((u64_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)]+(_offset))); \
3985 } while (0)
3986
3987 /* BAR write8 via register offset and specific bar */
3988 #define LM_BAR_WR8_OFFSET(_pdev, _bar, _offset, _val) \
3989 do { \
3990 VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)\
3991 *((u8_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)]+(_offset)))=(_val); \
3992 mm_write_barrier(); \
3993 } while (0)
3994
3995 /* BAR write16 via register offset and specific bar */
3996 #define LM_BAR_WR16_OFFSET(_pdev, _bar, _offset, _val) \
3997 do { \
3998 VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)\
3999 *((u16_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)]+(_offset)))=(_val); \
4000 mm_write_barrier(); \
4001 } while (0)
4002
4003 /* BAR write32 via register offset and specific bar */
4004 #define LM_BAR_WR32_OFFSET(_pdev, _bar, _offset, _val) \
4005 do { \
4006 VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)\
4007 *((u32_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)]+(_offset)))=(_val); \
4008 mm_write_barrier(); \
4009 } while (0)
4010
4011 /* BAR write64 via register offset and specific bar */
4012 #define LM_BAR_WR64_OFFSET(_pdev, _bar, _offset, _val) \
4013 do { \
4014 VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)\
4015 *((u64_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)]+(_offset)))=(_val); \
4016 mm_write_barrier(); \
4017 } while (0)
4018
/* BAR copy buffer to specific bar address (dword at a time).
 * Fix: the loop previously referenced 'size' and 'buf_ptr', which are not
 * the macro parameters (_size_ / _buf_ptr) and would fail to compile when
 * the macro is expanded. Loop variable is underscore-prefixed to avoid
 * capturing an identifier from the caller's scope. */
#define LM_BAR_COPY_BUFFER(_pdev, _bar, _offset, _size_, _buf_ptr) \
    do { \
        u32_t _bar_copy_i; \
        for (_bar_copy_i = 0; _bar_copy_i < (_size_); _bar_copy_i++) { \
            VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, ((_offset) + _bar_copy_i*4) )\
            *((u32_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)]+(_offset)+_bar_copy_i*4))=*((_buf_ptr)+_bar_copy_i); \
        } \
    } while (0)
4028
4029 #else
4030 #define LM_BAR_RD8_OFFSET(_pdev, _bar, _offset, _ret) \
4031 mm_bar_read_byte(_pdev, _bar, _offset, _ret)
4032 #define LM_BAR_RD16_OFFSET(_pdev, _bar, _offset, _ret) \
4033 mm_bar_read_word(_pdev, _bar, _offset, _ret)
4034 #define LM_BAR_RD32_OFFSET(_pdev, _bar, _offset, _ret) \
4035 mm_bar_read_dword(_pdev, _bar, _offset, _ret)
4036 #define LM_BAR_RD64_OFFSET(_pdev, _bar, _offset, _ret) \
4037 mm_bar_read_ddword(_pdev, _bar, _offset, _ret)
4038 #define LM_BAR_WR8_OFFSET(_pdev, _bar, _offset, _val) \
4039 mm_bar_write_byte(_pdev, _bar, _offset, _val)
4040 #define LM_BAR_WR16_OFFSET(_pdev, _bar, _offset, _val) \
4041 mm_bar_write_word(_pdev, _bar, _offset, _val)
4042 #define LM_BAR_WR32_OFFSET(_pdev, _bar, _offset, _val) \
4043 mm_bar_write_dword(_pdev, _bar, _offset, _val)
4044 #define LM_BAR_WR64_OFFSET(_pdev, _bar, _offset, _val) \
4045 mm_bar_write_ddword(_pdev, _bar, _offset, _val)
4046 #define LM_BAR_COPY_BUFFER(_pdev, _bar, _offset, _size, _buf_ptr) \
4047 mm_bar_copy_buffer(_pdev, _bar, _offset, _size, _buf_ptr)
4048 #endif
4049
4050 #ifndef USER_LINUX
4051
4052 #if DBG && LOG_REG_ACCESS
4053
4054 #define LOG_REG_RD(_pdev, _offset, _val) \
4055 if((_pdev)->params.test_mode & TEST_MODE_LOG_REG_ACCESS) \
4056 { \
4057 DbgMessage(_pdev, INFORM, "rd 0x%04x = 0x%08x\n", _offset, _val); \
4058 }
4059
4060 #define LOG_REG_WR(_pdev, _offset, _val) \
4061 if((_pdev)->params.test_mode & TEST_MODE_LOG_REG_ACCESS) \
4062 { \
4063 DbgMessage(_pdev, INFORM, "wr 0x%04x 0x%08x\n", _offset, _val); \
4064 }
4065
4066 #else
4067
4068 #define LOG_REG_RD(_pdev, _offset, _val)
4069 #define LOG_REG_WR(_pdev, _offset, _val)
4070
4071 #endif /* DBG */
4072
4073 #endif /* USER_LINUX */
4074
4075 #if defined(__SunOS)
4076
4077 #ifdef __SunOS_MDB
4078
4079 /* Solaris debugger (MDB) doesn't have access to ddi_get/put routines */
4080
/* MDB build: read a 32-bit register from BAR 0 directly through the
 * mapped address (ddi_get routines are unavailable in the debugger). */
static __inline u32_t _reg_rd(struct _lm_device_t * pdev, u32_t reg_offset)
{
    u32_t value = 0;

    LM_BAR_RD32_OFFSET(pdev, BAR_0, reg_offset, &value);

    return value;
}
4087
4088 #define REG_RD(_pdev, _reg_offset) _reg_rd(_pdev, _reg_offset)
4089 #define VF_REG_RD(_pdev, _reg_offset) _reg_rd(_pdev, _reg_offset)
4090
4091 #define REG_WR(_pdev, _reg_offset, _val) \
4092 do { \
4093 LOG_REG_WR(_pdev, (u32_t)(_reg_offset), _val); \
4094 LM_BAR_WR32_OFFSET(_pdev, BAR_0, _reg_offset, _val); \
4095 } while (0)
4096
4097 #define VF_REG_WR(_pdev, _reg_offset, _val) REG_WR(_pdev, _reg_offset, _val)
4098
4099 #else /* __SunOS && !__SunOS_MDB */
4100
4101 #define REG_RD(_pdev, _reg_offset) \
4102 ddi_get32((_pdev)->vars.reg_handle[BAR_0], \
4103 (uint32_t *)((caddr_t)(_pdev)->vars.mapped_bar_addr[BAR_0] + \
4104 (_reg_offset)))
4105
4106 #define REG_WR(_pdev, _reg_offset, _val) \
4107 ddi_put32((_pdev)->vars.reg_handle[BAR_0], \
4108 (uint32_t *)((caddr_t)(_pdev)->vars.mapped_bar_addr[BAR_0] + \
4109 (_reg_offset)), \
4110 (_val)) \
4111
4112 #define VF_REG_RD(_pdev, _reg_offset) \
4113 ddi_get32((_pdev)->vars.reg_handle[BAR_0], \
4114 (uint32_t *)((caddr_t)(_pdev)->vars.mapped_bar_addr[BAR_0] + \
4115 (_reg_offset)))
4116
4117 #define VF_REG_WR(_pdev, _reg_offset, _val) \
4118 ddi_put32((_pdev)->vars.reg_handle[BAR_0], \
4119 (uint32_t *)((caddr_t)(_pdev)->vars.mapped_bar_addr[BAR_0] + \
4120 (_reg_offset)), \
4121 (_val))
4122
4123 #endif /* __SunOS_MDB */
4124
4125 #elif defined (_VBD_CMD_)
4126
4127 //we repeat this function's signature here because including everest_sim.h leads to a circular dependency.
4128 void vbd_cmd_on_reg_write(lm_device_t* pdev, u32_t offset);
4129
/* Read a 32-bit register from BAR 0 and log the access.
 * PF-only path: asserts the device is not a VF. */
static __inline u32_t _reg_rd(struct _lm_device_t * pdev, u32_t reg_offset)
{
    u32_t value = 0;

    DbgBreakIf(IS_VFDEV(pdev));

    LM_BAR_RD32_OFFSET(pdev, BAR_0, reg_offset, &value);
    LOG_REG_RD(pdev, (reg_offset), value);

    return value;
}
4138
4139 /* Register access via register name. Macro returns a value */
4140 #define REG_RD(_pdev, _reg_offset) _reg_rd(_pdev, _reg_offset)
4141
/* VF variant of _reg_rd: same BAR-0 read and access logging, but
 * without the PF-only DbgBreakIf(IS_VFDEV()) assertion. */
static __inline u32_t _vf_reg_rd(struct _lm_device_t * pdev, u32_t reg_offset)
{
    u32_t value = 0;

    LM_BAR_RD32_OFFSET(pdev, BAR_0, reg_offset, &value);
    LOG_REG_RD(pdev, (reg_offset), value);

    return value;
}
4149
4150 #define VF_REG_RD(_pdev, _reg_offset) _reg_rd(_pdev, _reg_offset)
4151
4152 // Offset passed to LOG_REG_WR is now without the bar address!
4153 #define REG_WR(_pdev, _reg_offset, _val) \
4154 do { \
4155 DbgBreakIf(IS_VFDEV(_pdev)); \
4156 LOG_REG_WR(_pdev, (u32_t)(_reg_offset), _val); \
4157 LM_BAR_WR32_OFFSET(_pdev, BAR_0, _reg_offset, _val); \
4158 vbd_cmd_on_reg_write(_pdev, _reg_offset);\
4159 } while (0)
4160
4161 #define VF_REG_WR(_pdev, _reg_offset, _val) \
4162 do { \
4163 LOG_REG_WR(_pdev, (u32_t)(_reg_offset), _val); \
4164 LM_BAR_WR32_OFFSET(_pdev, BAR_0, _reg_offset, _val); \
4165 vbd_cmd_on_reg_write(_pdev, _reg_offset);\
4166 } while (0)
4167
4168
4169 #elif !defined(USER_LINUX)
4170
/* Read a 32-bit register from BAR 0 and log the access.
 * PF-only path: asserts the device is not a VF. */
static __inline u32_t _reg_rd(struct _lm_device_t * pdev, u32_t reg_offset)
{
    u32_t value = 0;

    DbgBreakIf(IS_VFDEV(pdev));

    LM_BAR_RD32_OFFSET(pdev, BAR_0, reg_offset, &value);
    LOG_REG_RD(pdev, (reg_offset), value);

    return value;
}
4179
4180 /* Register access via register name. Macro returns a value */
4181 #define REG_RD(_pdev, _reg_offset) _reg_rd(_pdev, _reg_offset)
4182
/* VF variant of _reg_rd: same BAR-0 read and access logging, but
 * without the PF-only DbgBreakIf(IS_VFDEV()) assertion. */
static __inline u32_t _vf_reg_rd(struct _lm_device_t * pdev, u32_t reg_offset)
{
    u32_t value = 0;

    LM_BAR_RD32_OFFSET(pdev, BAR_0, reg_offset, &value);
    LOG_REG_RD(pdev, (reg_offset), value);

    return value;
}
4190
4191 #define VF_REG_RD(_pdev, _reg_offset) _reg_rd(_pdev, _reg_offset)
4192
4193 // Offset passed to LOG_REG_WR is now without the bar address!
4194 #define REG_WR(_pdev, _reg_offset, _val) \
4195 do { \
4196 DbgBreakIf(IS_VFDEV(_pdev)); \
4197 LOG_REG_WR(_pdev, (u32_t)(_reg_offset), _val); \
4198 LM_BAR_WR32_OFFSET(_pdev, BAR_0, _reg_offset, _val); \
4199 } while (0)
4200
4201 #define VF_REG_WR(_pdev, _reg_offset, _val) \
4202 do { \
4203 LOG_REG_WR(_pdev, (u32_t)(_reg_offset), _val); \
4204 LM_BAR_WR32_OFFSET(_pdev, BAR_0, _reg_offset, _val); \
4205 } while (0)
4206
4207 #endif /* USER_LINUX */
4208
4209 /* TBA: optionally add LOG_REG_WR as in Teton to write 8/16/32*/
4210
4211 // special macros for reading from shared memory
4212
4213 /* TBD - E1H: all shmen read/write operations currenly use FUNC_ID for offset calculatio. This may not be right! MCP TBD*/
4214 #define LM_SHMEM_READ_IMP(_pdev,_offset,_ret,_shmem_base_name) \
4215 LM_BAR_RD32_OFFSET((_pdev),BAR_0,(_pdev)->hw_info._shmem_base_name + _offset,(_ret));
4216
4217 #define LM_SHMEM_READ(_pdev,_offset,_ret) LM_SHMEM_READ_IMP(_pdev,_offset,_ret, shmem_base );
4218 #define LM_SHMEM2_READ(_pdev,_offset,_ret) LM_SHMEM_READ_IMP(_pdev,_offset,_ret, shmem_base2 );
4219 #define LM_MFCFG_READ(_pdev,_offset,_ret) LM_SHMEM_READ_IMP(_pdev,_offset,_ret, mf_cfg_base );
4220
4221 #define LM_SHMEM_WRITE_IMP(_pdev,_offset,_val,_shmem_base_name) \
4222 LM_BAR_WR32_OFFSET((_pdev),BAR_0,(_pdev)->hw_info._shmem_base_name + _offset,(_val));
4223
4224 #define LM_SHMEM_WRITE(_pdev,_offset,_val) LM_SHMEM_WRITE_IMP(_pdev,_offset,_val,shmem_base);
4225 #define LM_SHMEM2_WRITE(_pdev,_offset,_val) LM_SHMEM_WRITE_IMP(_pdev,_offset,_val,shmem_base2);
4226 #define LM_MFCFG_WRITE(_pdev,_offset,_val) LM_SHMEM_WRITE_IMP(_pdev,_offset,_val,mf_cfg_base);
4227
4228 #define LM_SHMEM2_ADDR(_pdev, field) (_pdev->hw_info.shmem_base2 + OFFSETOF(struct shmem2_region, field))
4229 #define LM_SHMEM2_HAS(_pdev, field) ((_pdev)->hw_info.shmem_base2 && \
4230 (REG_RD(_pdev, LM_SHMEM2_ADDR(_pdev, size)) > OFFSETOF(struct shmem2_region, field)))
4231
4232
4233 /* Macros for read/write to internal memory of storms */
4234 #define LM_INTMEM_READ8(_pdev,_offset,_ret,_type) \
4235 DbgMessage(pdev, INFORMi, "LM_INTMEM_READ8() inside! storm:%s address:0x%x\n",#_type,_type); \
4236 LM_BAR_RD8_OFFSET((_pdev),BAR_0,((_type)+(_offset)),(_ret));
4237
4238 #define LM_INTMEM_WRITE8(_pdev,_offset,_val,_type) \
4239 DbgMessage(pdev, INFORMi, "LM_INTMEM_WRITE8() inside! storm:%s address:0x%x\n",#_type,_type); \
4240 LM_BAR_WR8_OFFSET((_pdev),BAR_0,((_type)+(_offset)),(_val));
4241
4242 #define LM_INTMEM_READ16(_pdev,_offset,_ret,_type) \
4243 DbgMessage(pdev, INFORMi, "LM_INTMEM_READ16() inside! storm:%s address:0x%x\n",#_type,_type); \
4244 LM_BAR_RD16_OFFSET((_pdev),BAR_0,((_type)+(_offset)),(_ret));
4245
4246 #define LM_INTMEM_WRITE16(_pdev,_offset,_val,_type) \
4247 DbgMessage(pdev, INFORMi, "LM_INTMEM_WRITE16() inside! storm:%s address:0x%x offset=%x val=%x\n",#_type,_type, _offset, _val); \
4248 LM_BAR_WR16_OFFSET((_pdev),BAR_0,((_type)+(_offset)),(_val));
4249
4250 #define LM_INTMEM_READ32(_pdev,_offset,_ret,_type) \
4251 DbgMessage(pdev, INFORMi, "LM_INTMEM_READ32() inside! storm:%s address:0x%x\n",#_type,_type); \
4252 LM_BAR_RD32_OFFSET((_pdev),BAR_0,((_type)+(_offset)),(_ret));
4253
4254 #define LM_INTMEM_WRITE32(_pdev,_offset,_val,_type) \
4255 DbgMessage(pdev, INFORMi, "LM_INTMEM_WRITE32() inside! storm:%s address:0x%x\n",#_type,_type); \
4256 LM_BAR_WR32_OFFSET((_pdev),BAR_0,((_type)+(_offset)),(_val));
4257
4258 #define LM_INTMEM_READ64(_pdev,_offset,_ret,_type) \
4259 DbgMessage(pdev, INFORMi, "LM_INTMEM_READ64() inside! storm:%s address:0x%x\n",#_type,_type); \
4260 LM_BAR_RD64_OFFSET((_pdev),BAR_0,((_type)+(_offset)),(_ret));
4261
4262 #define LM_INTMEM_WRITE64(_pdev,_offset,_val,_type) \
4263 DbgMessage(pdev, INFORMi, "LM_INTMEM_WRITE64() inside! storm:%s address:0x%x\n",#_type,_type); \
4264 LM_BAR_WR64_OFFSET((_pdev),BAR_0,((_type)+(_offset)),(_val));
4265 //________________________________________________________________________________
4266
4267
4268 #define DEFAULT_WAIT_INTERVAL_MICSEC 30 // wait interval microseconds
4269
4270 u32_t reg_wait_verify_val(struct _lm_device_t * pdev, u32_t reg_offset, u32_t excpected_val, u32_t total_wait_time_ms );
4271 #if !defined(_VBD_CMD_)
4272 #define REG_WAIT_VERIFY_VAL(_pdev, _reg_offset, _excpected_val, _total_wait_time_ms ) \
4273 reg_wait_verify_val(_pdev, _reg_offset, _excpected_val, _total_wait_time_ms );
4274 #else
4275 /* For VBD_CMD: we don't verify values written... */
4276 #define REG_WAIT_VERIFY_VAL(_pdev, _reg_offset, _excpected_val, _total_wait_time_ms ) 0
4277 #endif
4278
4279 #define DPM_TRIGER_TYPE 0x40
4280
#if defined(EMULATION_DOORBELL_FULL_WORKAROUND)
/* Raw doorbell write: post VAL for connection CID through doorbell BAR_1. */
#define _DOORBELL(PDEV,CID,VAL) do{\
    MM_WRITE_DOORBELL(PDEV,BAR_1,CID,VAL);\
    } while(0)

/*
 * Ring the doorbell for connection 'cid' with value 'val'.
 *
 * Emulation/FPGA-only workaround: on those platforms the doorbell queue can
 * fill up, so every DOORBELL_CHECK_FREQUENCY-th ring samples the DQ fill
 * level; above ALLOWED_DOORBELLS_HIGH_WM all further doorbells are blocked
 * (vars.doorbells_blocked) and this caller busy-waits until the level drains
 * below ALLOWED_DOORBELLS_LOW_WM. On other chip revisions this reduces to a
 * plain _DOORBELL().
 */
static __inline void DOORBELL(lm_device_t *pdev, u32_t cid, u32_t val)
{
    u32_t db_fill;
    u32_t wait_cnt = 0;

    if (CHIP_REV_IS_EMUL(pdev) || CHIP_REV_IS_FPGA(pdev)) {
        /* the DQ fill-level register is read through the PF device;
         * fall back to pdev itself when no separate PF device exists */
        lm_device_t *pf_dev = pdev->pf_dev;
        if (!pf_dev) {
            pf_dev = pdev;
        }
        /* wait while doorbells are blocked (another caller is draining the queue) */
        while(pdev->vars.doorbells_blocked) {
            wait_cnt++; /* counter required to avoid Watcom warning */
        }

        /* only sample the fill level once per DOORBELL_CHECK_FREQUENCY rings */
        if(mm_atomic_dec(&pdev->vars.doorbells_cnt) == 0) {

            mm_atomic_set(&pdev->vars.doorbells_cnt, DOORBELL_CHECK_FREQUENCY);

            db_fill=REG_RD(pf_dev,DORQ_REG_DQ_FILL_LVLF);

            if (db_fill > ALLOWED_DOORBELLS_HIGH_WM) {

                DbgMessage(pdev, WARN,
                            "EMULATION_DOORBELL_FULL_WORKAROUND: db_fill=%d, doorbell in busy wait!\n",
                            db_fill);

                /* block additional doorbells */
                pdev->vars.doorbells_blocked = 1;

                /* busy wait for doorbell capacity */

                do {
                    db_fill=REG_RD(pf_dev,DORQ_REG_DQ_FILL_LVLF);
                    /* all-ones read: presumably the register is inaccessible -
                     * give up on the wait. NOTE(review): confirm failure mode. */
                    if (db_fill == 0xffffffff) {
                        DbgMessage(pdev, FATAL, "DOORBELL: fill level 0xffffffff\n");
                        break;
                    }
                } while (db_fill > ALLOWED_DOORBELLS_LOW_WM);

                /* incr statistics */
                pdev->vars.doorbells_high_wm_reached++;

                /* unblock additional doorbells */
                pdev->vars.doorbells_blocked = 0;
            }
        }
    }

    _DOORBELL(pdev,cid,val);
}

#else

/* need to change LM_PAGE_SIZE to OS page size; once there are 2 BARs,
 * BAR_DOORBELL_OFFSET will no longer be needed. */
#define DOORBELL(PDEV,CID,VAL) do{\
    MM_WRITE_DOORBELL(PDEV,BAR_1,CID,VAL);\
    } while(0)

#endif /* defined(EMULATION_DOORBELL_FULL_WORKAROUND) */
4346
4347
/* Build the HW view of a SW CID: the chip encodes the port id starting at
 * bit 23 and the vnic id starting at bit 17 on top of the SW CID. */
#define HW_CID(pdev,x) (x |(PORT_ID(pdev) << 23 | VNIC_ID(pdev) << 17))
// used on a CID received from the HW - ignore bits 17, 18 and 23 (though 19-22 can be ignored as well)
#define SW_CID(x) (x & COMMON_RAMROD_ETH_RX_CQE_CID & ~0x860000)


u64_t lm_get_timestamp_of_recent_cid_recycling(struct _lm_device_t *pdev);
4354
lm_sb_id_from_chain(struct _lm_device_t * pdev,u32_t chain_idx)4355 static u8_t __inline lm_sb_id_from_chain(struct _lm_device_t *pdev, u32_t chain_idx)
4356 {
4357 u8_t sb_id = 0 ;
4358
4359 if (CHAIN_TO_RSS_ID(pdev,(u32_t)chain_idx) >= LM_SB_CNT(pdev)) //LM_MAX_RSS_CHAINS(pdev))
4360 {
4361 /* mapping iscsi / fcoe cids to the default status block */
4362 sb_id = DEF_STATUS_BLOCK_INDEX;
4363 }
4364 else
4365 {
4366 sb_id = (u8_t)RSS_ID_TO_SB_ID(CHAIN_TO_RSS_ID(pdev,(u32_t)chain_idx));
4367 }
4368 return sb_id;
4369 }
/* Record the device's virtualization mode (PF/VF + virt flavor) in
 * pdev->params. The mode may only be programmed once: it is accepted only
 * while the params are still at their initial (PF, VT_NONE) state. Invalid
 * combinations log a FATAL message and DbgBreak(). */
static void __inline lm_set_virt_mode(struct _lm_device_t *pdev, u8_t device_type, u8_t virtualization_type)
{
    if (CHK_NULL(pdev))
    {
        DbgBreakMsg("lm_set_virt_mode pdev is null");
        return;
    }

    if ((pdev->params.device_type != DEVICE_TYPE_PF) || (pdev->params.virtualization_type != VT_NONE))
    {
        /* mode was already programmed earlier - refuse to change it */
        DbgMessage(pdev, FATAL, "Virt.mode is set already (%d,%d)\n",device_type,virtualization_type);
    }
    else if (device_type == DEVICE_TYPE_PF)
    {
        pdev->params.device_type = device_type;
        if ((virtualization_type == VT_BASIC_VF) ||
            (virtualization_type == VT_CHANNEL_VF) ||
            (virtualization_type == VT_ASSIGNED_TO_VM_PF))
        {
            pdev->params.virtualization_type = virtualization_type;
        }
        else if (virtualization_type != VT_NONE)
        {
            /* VT_NONE is a legal no-op for a PF; anything else is invalid */
            DbgMessage(pdev, FATAL, "Master PF mode %d is not supported in virt.mode\n",virtualization_type);
            DbgBreak();
        }
    }
    else if (device_type == DEVICE_TYPE_VF)
    {
        pdev->params.device_type = device_type;
        if ((virtualization_type == VT_BASIC_VF) || (virtualization_type == VT_CHANNEL_VF))
        {
            pdev->params.virtualization_type = virtualization_type;
        }
        else if (virtualization_type == VT_NONE)
        {
            /* a VF must state which VF flavor it runs in */
            DbgMessage(pdev, FATAL, "VF mode is mandatory parameter\n");
            DbgBreak();
        }
        else
        {
            DbgMessage(pdev, FATAL, "VF mode %d is not supported\n",virtualization_type);
            DbgBreak();
        }
    }
    else
    {
        DbgMessage(pdev, FATAL, "Device type %d is not supported in virt.mode\n",device_type);
        DbgBreak();
    }
    DbgMessage(pdev, WARN, "Virt.mode is set as (%d,%d)\n", pdev->params.device_type, pdev->params.virtualization_type);
}
4422
/* Record the PF<->VF communication channel type in pdev->params.
 * An unrecognized value logs a warning, DbgBreak()s, and falls back to
 * VT_HW_CHANNEL_TYPE. */
static void __inline lm_set_virt_channel_type(struct _lm_device_t *pdev, u8_t channel_type)
{
    if (CHK_NULL(pdev))
    {
        DbgBreakMsg("lm_set_virt_channel_type pdev is null");
        return;
    }
    if ((channel_type != VT_HW_CHANNEL_TYPE) && (channel_type != VT_SW_CHANNEL_TYPE))
    {
        DbgMessage(pdev, WARN, "Unknown channel type (%d)\n", channel_type);
        DbgBreak();
        channel_type = VT_HW_CHANNEL_TYPE; /* safe default */
    }
    pdev->params.channel_type = channel_type;
    DbgMessage(pdev, WARN, "Channel type is set as (%d)\n", pdev->params.channel_type);
}
4442
lm_reset_virt_mode(struct _lm_device_t * pdev)4443 static void __inline lm_reset_virt_mode(struct _lm_device_t *pdev)
4444 {
4445 if (CHK_NULL(pdev))
4446 {
4447 DbgBreakMsg("lm_reset_virt_mode pdev is null");
4448 return;
4449 }
4450 if (pdev->params.device_type == DEVICE_TYPE_PF) {
4451 pdev->params.device_type = DEVICE_TYPE_PF;
4452 pdev->params.virtualization_type = VT_NONE;
4453 DbgMessage(pdev, FATAL, "Vrtualization mode is reset to simple PF\n");
4454 } else {
4455 DbgMessage(pdev, FATAL, "Virtualization mode reset is is valid only for PF\n");
4456 }
4457 }
4458
/* Miscellaneous device queries (implemented in the lm_*.c files).
 * NOTE(review): "cashed" below is a long-standing identifier typo for
 * "cached" - kept, since renaming would break callers. */
u32_t lm_get_num_of_cashed_grq_bds(struct _lm_device_t *pdev);
void lm_set_waitp(lm_device_t *pdev);
u8_t lm_get_port_id_from_func_abs( const u32_t chip_num, const lm_chip_port_mode_t lm_chip_port_mode, const u8_t abs_func );
u8_t lm_get_abs_func_vector( const u32_t chip_num, const lm_chip_port_mode_t chip_port_mode, const u8_t b_multi_vnics_mode, const u8_t path_id );
u8_t lm_check_if_pf_assigned_to_vm(struct _lm_device_t *pdev);
u8_t lm_is_fw_version_valid(struct _lm_device_t *pdev);
lm_status_t lm_set_cli_drv_ver_to_shmem(struct _lm_device_t *lmdev);

#ifdef VF_INVOLVED
/* PF-side SR-IOV support: VF info lookup and PF<->VF channel transfers. */
lm_vf_info_t * lm_pf_find_vf_info_by_rel_id(struct _lm_device_t *pdev, u16_t relative_vf_id);
lm_vf_info_t * lm_pf_find_vf_info_by_abs_id(struct _lm_device_t *pdev, u8_t abs_vf_id);
lm_status_t lm_pf_download_standard_request(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, void* virt_buffer, u32_t length);
lm_status_t lm_pf_upload_standard_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, void* virt_buffer, u32_t length);

lm_status_t lm_pf_upload_standard_request(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, lm_address_t * phys_buffer, u32_t length);
lm_status_t lm_pf_download_standard_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, lm_address_t * phys_buffer, u32_t length);
/* VF lifecycle: allocation, init, teardown, and per-VF context setup. */
lm_status_t lm_pf_allocate_vfs(struct _lm_device_t *pdev);
lm_status_t lm_pf_init_vfs(struct _lm_device_t *pdev, u16_t num_vfs);
lm_status_t lm_pf_clear_vfs(struct _lm_device_t * pdev);
lm_status_t lm_pf_set_vf_ctx(struct _lm_device_t *pdev, u16_t vf_id, void* ctx);
/* NOTE(review): dead declarations, kept under #if 0 for reference. */
#if 0
lm_status_t lm_pf_set_vf_client_id(struct _lm_device_t *pdev,
                                   u16_t vf_id,
                                   u8_t base_fw_client_id,
                                   u8_t base_sw_client_id);
lm_status_t lm_pf_set_vf_ndsb(struct _lm_device_t *pdev,
                              u16_t vf_id,
                              u8_t base_fw_ndsb,
                              u8_t base_sw_ndsb,
                              u8_t base_fw_dhc_qzone_id);
lm_status_t lm_pf_set_vf_qzone_id(struct _lm_device_t *pdev,
                                  u16_t vf_id,
                                  u8_t base_fw_qzone_id);
#endif

lm_status_t lm_pf_set_vf_stat_id(struct _lm_device_t *pdev,
                                 u16_t vf_id,
                                 u8_t base_fw_stats_id);

u8_t lm_pf_is_vf_mac_set(struct _lm_device_t *pdev, u16_t vf_id);

lm_status_t lm_pf_set_vf_base_cam_idx(struct _lm_device_t *pdev, u16_t vf_id, u32_t base_cam_idx);

u32_t lm_pf_get_sw_client_idx_from_cid(struct _lm_device_t *pdev, u32_t cid);
u32_t lm_pf_get_fw_client_idx_from_cid(struct _lm_device_t *pdev, u32_t cid);

/* Per-VF resource bookkeeping: chains, IGU status blocks, CAM entries. */
u8_t lm_pf_acquire_vf_chains_resources(struct _lm_device_t *pdev, u16_t vf_id, u32_t num_chains);
void lm_pf_release_vf_chains_resources(struct _lm_device_t *pdev, u16_t vf_id);
void lm_pf_release_separate_vf_chain_resources(struct _lm_device_t *pdev, u16_t vf_id, u8_t chain_num);
u8_t lm_pf_is_sriov_valid(struct _lm_device_t *pdev);
u8_t lm_pf_allocate_vf_igu_sbs(struct _lm_device_t *pdev, struct _lm_vf_info_t *vf_info, u8_t num_of_igu_sbs);
void lm_pf_release_vf_igu_sbs(struct _lm_device_t *pdev, struct _lm_vf_info_t *vf_info);
u8_t lm_pf_get_max_number_of_vf_igu_sbs(struct _lm_device_t *pdev);
u8_t lm_pf_get_next_free_igu_block_id(struct _lm_device_t *pdev, u8_t starting_from);
void lm_pf_clear_vf_igu_blocks(struct _lm_device_t *pdev);
u8_t lm_pf_release_vf_igu_block(struct _lm_device_t *pdev, u8_t igu_sb_idx);
u8_t lm_pf_acquire_vf_igu_block(struct _lm_device_t *pdev, u8_t igu_sb_idx, u8_t abs_vf_id, u8_t vector_number);
u8_t lm_pf_get_vf_available_igu_blocks(struct _lm_device_t *pdev);
/* Runtime VF reconfiguration: default/silent VLAN and interrupt moderation. */
lm_status_t lm_pf_update_vf_default_vlan(IN struct _lm_device_t *pdev, IN struct _lm_vf_info_t * vf_info,
                              IN const u16_t silent_vlan_value,
                              IN const u16_t silent_vlan_mask,
                              IN const u8_t silent_vlan_removal_flg,
                              IN const u8_t silent_vlan_change_flg,
                              IN const u16_t default_vlan,
                              IN const u8_t default_vlan_enable_flg,
                              IN const u8_t default_vlan_change_flg);

lm_status_t lm_pf_update_vf_ndsb(IN struct _lm_device_t *pdev,
                                  IN struct _lm_vf_info_t *vf_info,
                                  IN u8_t relative_in_vf_ndsb,
                                  IN u16_t interrupt_mod_level);

lm_status_t lm_pf_update_vf_ndsbs(IN struct _lm_device_t *pdev,
                                  IN struct _lm_vf_info_t *vf_info,
                                  IN u16_t interrupt_mod_level);

#endif
4536
4537 #endif /* _LM5710_H */
4538