/*
 * vxge-config.h: iPXE driver for Neterion Inc's X3100 Series 10GbE
 *              PCIe I/O Virtualized Server Adapter.
 *
 * Copyright(c) 2002-2010 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by
 * reference.  Drivers based on or derived from this code fall under
 * the GPL and must retain the authorship, copyright and license
 * notice.
 *
 */

FILE_LICENCE(GPL2_ONLY);

#ifndef VXGE_CONFIG_H
#define VXGE_CONFIG_H

#include <stdint.h>
#include <ipxe/list.h>
#include <ipxe/pci.h>

#ifndef VXGE_CACHE_LINE_SIZE
#define VXGE_CACHE_LINE_SIZE 4096
#endif

#define WAIT_FACTOR          1

#define VXGE_HW_MAC_MAX_WIRE_PORTS      2
#define VXGE_HW_MAC_MAX_AGGR_PORTS      2
#define VXGE_HW_MAC_MAX_PORTS           3

#define VXGE_HW_MIN_MTU				68
#define VXGE_HW_MAX_MTU				9600
#define VXGE_HW_DEFAULT_MTU			1500

#ifndef __iomem
#define __iomem
#endif

#ifndef ____cacheline_aligned
#define ____cacheline_aligned
#endif
/**
 * Debug filtering masks
 */
#define	VXGE_NONE	0x00
#define	VXGE_INFO	0x01
#define	VXGE_INTR	0x02
#define	VXGE_XMIT	0x04
#define VXGE_POLL	0x08
#define	VXGE_ERR	0x10
#define VXGE_TRACE	0x20
#define VXGE_ALL	(VXGE_INFO|VXGE_INTR|VXGE_XMIT \
			|VXGE_POLL|VXGE_ERR|VXGE_TRACE)

#define NULL_VPID					0xFFFFFFFF

#define VXGE_HW_EVENT_BASE                      0
#define VXGE_LL_EVENT_BASE                      100

#define VXGE_HW_BASE_INF	100
#define VXGE_HW_BASE_ERR	200
#define VXGE_HW_BASE_BADCFG	300
#define VXGE_HW_DEF_DEVICE_POLL_MILLIS            1000
#define VXGE_HW_MAX_PAYLOAD_SIZE_512            2

enum vxge_hw_status {
	VXGE_HW_OK				  = 0,
	VXGE_HW_FAIL				  = 1,
	VXGE_HW_PENDING				  = 2,
	VXGE_HW_COMPLETIONS_REMAIN		  = 3,

	VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
	VXGE_HW_INF_OUT_OF_DESCRIPTORS		  = VXGE_HW_BASE_INF + 2,
	VXGE_HW_INF_SW_LRO_BEGIN		  = VXGE_HW_BASE_INF + 3,
	VXGE_HW_INF_SW_LRO_CONT			  = VXGE_HW_BASE_INF + 4,
	VXGE_HW_INF_SW_LRO_UNCAPABLE		  = VXGE_HW_BASE_INF + 5,
	VXGE_HW_INF_SW_LRO_FLUSH_SESSION	  = VXGE_HW_BASE_INF + 6,
	VXGE_HW_INF_SW_LRO_FLUSH_BOTH		  = VXGE_HW_BASE_INF + 7,

	VXGE_HW_ERR_INVALID_HANDLE		  = VXGE_HW_BASE_ERR + 1,
	VXGE_HW_ERR_OUT_OF_MEMORY		  = VXGE_HW_BASE_ERR + 2,
	VXGE_HW_ERR_VPATH_NOT_AVAILABLE		  = VXGE_HW_BASE_ERR + 3,
	VXGE_HW_ERR_VPATH_NOT_OPEN		  = VXGE_HW_BASE_ERR + 4,
	VXGE_HW_ERR_WRONG_IRQ			  = VXGE_HW_BASE_ERR + 5,
	VXGE_HW_ERR_SWAPPER_CTRL		  = VXGE_HW_BASE_ERR + 6,
	VXGE_HW_ERR_INVALID_MTU_SIZE		  = VXGE_HW_BASE_ERR + 7,
	VXGE_HW_ERR_INVALID_INDEX		  = VXGE_HW_BASE_ERR + 8,
	VXGE_HW_ERR_INVALID_TYPE		  = VXGE_HW_BASE_ERR + 9,
	VXGE_HW_ERR_INVALID_OFFSET		  = VXGE_HW_BASE_ERR + 10,
	VXGE_HW_ERR_INVALID_DEVICE		  = VXGE_HW_BASE_ERR + 11,
	VXGE_HW_ERR_VERSION_CONFLICT		  = VXGE_HW_BASE_ERR + 12,
	VXGE_HW_ERR_INVALID_PCI_INFO		  = VXGE_HW_BASE_ERR + 13,
	VXGE_HW_ERR_INVALID_TCODE		  = VXGE_HW_BASE_ERR + 14,
	VXGE_HW_ERR_INVALID_BLOCK_SIZE		  = VXGE_HW_BASE_ERR + 15,
	VXGE_HW_ERR_INVALID_STATE		  = VXGE_HW_BASE_ERR + 16,
	VXGE_HW_ERR_PRIVILAGED_OPEARATION	  = VXGE_HW_BASE_ERR + 17,
	VXGE_HW_ERR_INVALID_PORT		  = VXGE_HW_BASE_ERR + 18,
	VXGE_HW_ERR_FIFO			  = VXGE_HW_BASE_ERR + 19,
	VXGE_HW_ERR_VPATH			  = VXGE_HW_BASE_ERR + 20,
	VXGE_HW_ERR_CRITICAL			  = VXGE_HW_BASE_ERR + 21,
	VXGE_HW_ERR_SLOT_FREEZE			  = VXGE_HW_BASE_ERR + 22,
	VXGE_HW_ERR_INVALID_MIN_BANDWIDTH	  = VXGE_HW_BASE_ERR + 25,
	VXGE_HW_ERR_INVALID_MAX_BANDWIDTH	  = VXGE_HW_BASE_ERR + 26,
	VXGE_HW_ERR_INVALID_TOTAL_BANDWIDTH	  = VXGE_HW_BASE_ERR + 27,
	VXGE_HW_ERR_INVALID_BANDWIDTH_LIMIT	  = VXGE_HW_BASE_ERR + 28,
	VXGE_HW_ERR_RESET_IN_PROGRESS		  = VXGE_HW_BASE_ERR + 29,
	VXGE_HW_ERR_OUT_OF_SPACE		  = VXGE_HW_BASE_ERR + 30,
	VXGE_HW_ERR_INVALID_FUNC_MODE		  = VXGE_HW_BASE_ERR + 31,
	VXGE_HW_ERR_INVALID_DP_MODE		  = VXGE_HW_BASE_ERR + 32,
	VXGE_HW_ERR_INVALID_FAILURE_BEHAVIOUR	  = VXGE_HW_BASE_ERR + 33,
	VXGE_HW_ERR_INVALID_L2_SWITCH_STATE	  = VXGE_HW_BASE_ERR + 34,
	VXGE_HW_ERR_INVALID_CATCH_BASIN_MODE	  = VXGE_HW_BASE_ERR + 35,

	VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS	  = VXGE_HW_BASE_BADCFG + 1,
	VXGE_HW_BADCFG_FIFO_BLOCKS		  = VXGE_HW_BASE_BADCFG + 2,
	VXGE_HW_BADCFG_VPATH_MTU		  = VXGE_HW_BASE_BADCFG + 3,
	VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG	  = VXGE_HW_BASE_BADCFG + 4,
	VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH	  = VXGE_HW_BASE_BADCFG + 5,
	VXGE_HW_BADCFG_VPATH_BANDWIDTH_LIMIT	  = VXGE_HW_BASE_BADCFG + 6,
	VXGE_HW_BADCFG_INTR_MODE		  = VXGE_HW_BASE_BADCFG + 7,
	VXGE_HW_BADCFG_RTS_MAC_EN		  = VXGE_HW_BASE_BADCFG + 8,
	VXGE_HW_BADCFG_VPATH_AGGR_ACK		  = VXGE_HW_BASE_BADCFG + 9,
	VXGE_HW_BADCFG_VPATH_PRIORITY		  = VXGE_HW_BASE_BADCFG + 10,

	VXGE_HW_EOF_TRACE_BUF			  = -1
};
/**
 * enum vxge_hw_device_link_state - Link state enumeration.
 * @VXGE_HW_LINK_NONE: Invalid link state.
 * @VXGE_HW_LINK_DOWN: Link is down.
 * @VXGE_HW_LINK_UP: Link is up.
 */
enum vxge_hw_device_link_state {
	VXGE_HW_LINK_NONE,
	VXGE_HW_LINK_DOWN,
	VXGE_HW_LINK_UP
};

/* Forward declarations */
struct vxge_vpath;
struct __vxge_hw_virtualpath;

/**
 * struct vxge_hw_ring_rxd_1 - One-buffer mode RxD for the ring
 *
 * Layout of a one-buffer mode receive descriptor (RxD).
 */
struct vxge_hw_ring_rxd_1 {
	u64 host_control;
	u64 control_0;
#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0)		vxge_bVALn(ctrl0, 0, 7)

#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER		vxge_mBIT(7)

#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0)	vxge_bVALn(ctrl0, 8, 1)

#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0)	vxge_bVALn(ctrl0, 9, 1)

#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0)	vxge_bVALn(ctrl0, 10, 1)

#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0)		vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_RING_RXD_T_CODE(val)			vxge_vBIT(val, 12, 4)

#define VXGE_HW_RING_RXD_T_CODE_UNUSED		VXGE_HW_RING_T_CODE_UNUSED

#define VXGE_HW_RING_RXD_SYN_GET(ctrl0)		vxge_bVALn(ctrl0, 16, 1)

#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0)		vxge_bVALn(ctrl0, 17, 1)

#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0)	vxge_bVALn(ctrl0, 18, 1)

#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0)		vxge_bVALn(ctrl0, 19, 1)

#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0)	vxge_bVALn(ctrl0, 20, 4)

#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0)		vxge_bVALn(ctrl0, 24, 1)

#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0)		vxge_bVALn(ctrl0, 25, 2)

#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0)		vxge_bVALn(ctrl0, 27, 5)

#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0)	vxge_bVALn(ctrl0, 32, 16)

#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0)	vxge_bVALn(ctrl0, 48, 16)

	u64 control_1;

#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1)	vxge_bVALn(ctrl1, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK		vxge_vBIT(0x3FFF, 2, 14)

#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1)    vxge_bVALn(ctrl1, 16, 32)

#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1)	vxge_bVALn(ctrl1, 48, 16)

	u64 buffer0_ptr;
};
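
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * how a completed one-buffer-mode RxD would be decoded with the accessor
 * macros above.  Ownership is checked first; the transfer code and the
 * received length are then extracted from control_0/control_1.  Assumes
 * the vxge_mBIT()/vxge_bVALn() helpers are in scope, as they are wherever
 * this header is normally included.
 */
static inline int
vxge_hw_ring_rxd_1_example_decode(struct vxge_hw_ring_rxd_1 *rxdp,
				  u32 *len, u8 *t_code)
{
	/* Adapter still owns the descriptor: no completion to reap yet */
	if (rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)
		return -1;

	/* Completion (transfer) code and received frame length */
	*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
	*len = (u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
	return 0;
}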

/**
 * struct vxge_hw_fifo_txd - Transmit Descriptor
 *
 * Transmit descriptor (TxD). A fifo descriptor list (TxDL) contains a
 * configured number of TxDs. For more details please refer to the Titan
 * User Guide, Section 5.4.2 "Transmit Descriptor (TxD) Format".
 */
struct vxge_hw_fifo_txd {
	u64 control_0;
#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER		vxge_mBIT(7)

#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0)		vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE(val)			vxge_vBIT(val, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED		VXGE_HW_FIFO_T_CODE_UNUSED

#define VXGE_HW_FIFO_TXD_GATHER_CODE(val)		vxge_vBIT(val, 22, 2)
#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST	VXGE_HW_FIFO_GATHER_CODE_FIRST
#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST	VXGE_HW_FIFO_GATHER_CODE_LAST

#define VXGE_HW_FIFO_TXD_LSO_EN				vxge_mBIT(30)
#define VXGE_HW_FIFO_TXD_LSO_MSS(val)			vxge_vBIT(val, 34, 14)
#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val)		vxge_vBIT(val, 48, 16)

	u64 control_1;
#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN			vxge_mBIT(5)
#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN			vxge_mBIT(6)
#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN			vxge_mBIT(7)
#define VXGE_HW_FIFO_TXD_VLAN_ENABLE			vxge_mBIT(15)

#define VXGE_HW_FIFO_TXD_VLAN_TAG(val)			vxge_vBIT(val, 16, 16)
#define VXGE_HW_FIFO_TXD_NO_BW_LIMIT			vxge_mBIT(43)

#define VXGE_HW_FIFO_TXD_INT_NUMBER(val)		vxge_vBIT(val, 34, 6)

#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST		vxge_mBIT(46)
#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ			vxge_mBIT(47)

	u64 buffer_pointer;

	u64 host_control;
};
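
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * filling a TxD for a single-buffer frame.  It assumes, as in the Linux
 * vxge driver, that OR-ing the FIRST and LAST gather codes marks a
 * one-descriptor TxDL.  The ownership bit is set last so the adapter
 * never sees a half-built descriptor; the descriptor still has to be
 * handed over via the non-offload doorbell before the device acts on it.
 */
static inline void
vxge_hw_fifo_txd_example_fill(struct vxge_hw_fifo_txd *txdp,
			      u64 dma_addr, u32 len)
{
	txdp->buffer_pointer = dma_addr;
	txdp->control_1 = 0;
	txdp->control_0 = VXGE_HW_FIFO_TXD_BUFFER_SIZE(len) |
		VXGE_HW_FIFO_TXD_GATHER_CODE(
			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST |
			VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
	/* Hand the descriptor to the adapter */
	txdp->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
}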

/**
 * struct vxge_hw_device_date - Date Format
 * @day: Day
 * @month: Month
 * @year: Year
 * @date: Date in string format
 *
 * Structure for returning date
 */

#define VXGE_HW_FW_STRLEN	32
struct vxge_hw_device_date {
	u32     day;
	u32     month;
	u32     year;
	char    date[VXGE_HW_FW_STRLEN];
};

struct vxge_hw_device_version {
	u32     major;
	u32     minor;
	u32     build;
	char    version[VXGE_HW_FW_STRLEN];
};

u64 __vxge_hw_vpath_pci_func_mode_get(
	u32 vp_id,
	struct vxge_hw_vpath_reg __iomem *vpath_reg);
/*
 * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
 * @control_0: Bits 0 to 7 - Doorbell type.
 *             Bits 8 to 31 - Reserved.
 *             Bits 32 to 39 - The highest TxD in this TxDL.
 *             Bits 40 to 47 - Reserved.
 *             Bits 48 to 55 - Reserved.
 *             Bits 56 to 63 - No snoop flags.
 * @txdl_ptr:  The starting location of the TxDL in host memory.
 *
 * Created by the host and written to the adapter via PIO to a Kernel
 * Doorbell FIFO. All non-offload doorbell wrapper fields must be written
 * by the host as part of a doorbell write. Consumed by the adapter, but
 * never written back by it.
 */
struct __vxge_hw_non_offload_db_wrapper {
	u64		control_0;
#define	VXGE_HW_NODBW_GET_TYPE(ctrl0)			vxge_bVALn(ctrl0, 0, 8)
#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
#define	VXGE_HW_NODBW_TYPE_NODBW				0

#define	VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0)	vxge_bVALn(ctrl0, 32, 8)
#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)

#define	VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0)		vxge_bVALn(ctrl0, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
#define	VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE		0x2
#define	VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ		0x1

	u64		txdl_ptr;
};

/*
 * struct __vxge_hw_fifo - Fifo.
 * @vp_id: Virtual path id
 * @tx_intr_num: Interrupt number associated with the TX
 * @txdl: Start pointer of the TxDL list of this fifo.
 *        iPXE does not support TX fragmentation, so only one TxD
 *        is needed per list.
 * @depth: Total number of lists in this fifo
 * @hw_offset: TxD index from which the adapter owns the TxD list
 * @sw_offset: TxD index from which the driver owns the TxD list
 * @stats: Statistics of this fifo
 */
struct __vxge_hw_fifo {
	struct vxge_hw_vpath_reg		*vp_reg;
	struct __vxge_hw_non_offload_db_wrapper	*nofl_db;
	u32					vp_id;
	u32					tx_intr_num;

	struct vxge_hw_fifo_txd		*txdl;
#define VXGE_HW_FIFO_TXD_DEPTH 128
	u16				depth;
	u16				hw_offset;
	u16				sw_offset;

	struct __vxge_hw_virtualpath    *vpathh;
};

/* Structure that represents the Rx descriptor block, which holds 127
 * RxDs; the slot of the 128th descriptor carries the end-of-block
 * marker and the link to the next block.
 */
struct __vxge_hw_ring_block {
#define VXGE_HW_MAX_RXDS_PER_BLOCK_1            127
	struct vxge_hw_ring_rxd_1 rxd[VXGE_HW_MAX_RXDS_PER_BLOCK_1];

	u64 reserved_0;
#define END_OF_BLOCK    0xFEFFFFFFFFFFFFFFULL
	/* 0xFEFFFFFFFFFFFFFF to mark the last RxD in this block */
	u64 reserved_1;
	/* Logical pointer to the next block */
	u64 reserved_2_pNext_RxD_block;
	/* Physical pointer to the next block; occupies the Buff0_ptr
	 * slot of the last RxD. On a 32-bit arch the upper 32 bits
	 * should be 0 */
	u64 pNext_RxD_Blk_physical;
};
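
/*
 * Size note (illustrative): each RxD is four qwords (32 bytes), so 127
 * RxDs plus the four link/reserved qwords come to exactly 128 * 32 =
 * 4096 bytes, i.e. one VXGE_CACHE_LINE_SIZE-sized block.
 */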

/*
 * struct __vxge_hw_ring - Ring channel.
 *
 * Note: The structure is cache line aligned to better utilize
 *       CPU cache performance.
 */
struct __vxge_hw_ring {
	struct vxge_hw_vpath_reg		*vp_reg;
	struct vxge_hw_common_reg		*common_reg;
	u32					vp_id;
#define VXGE_HW_RING_RXD_QWORDS_MODE_1	4
	u32					doorbell_cnt;
	u32					total_db_cnt;
#define VXGE_HW_RING_RXD_QWORD_LIMIT	16
	u64					rxd_qword_limit;

	struct __vxge_hw_ring_block		*rxdl;
#define VXGE_HW_RING_BUF_PER_BLOCK	9
	u16					buf_per_block;
	u16					rxd_offset;

#define VXGE_HW_RING_RX_POLL_WEIGHT	8
	u16					rx_poll_weight;

	struct io_buffer *iobuf[VXGE_HW_RING_BUF_PER_BLOCK + 1];
	struct __vxge_hw_virtualpath *vpathh;
};

/*
 * struct __vxge_hw_virtualpath - Virtual Path
 *
 * Virtual path structure to encapsulate the data related to a virtual path.
 * Virtual paths are allocated by the HW upon getting configuration from the
 * driver and inserted into the list of virtual paths.
 */
struct __vxge_hw_virtualpath {
	u32				vp_id;

	u32				vp_open;
#define VXGE_HW_VP_NOT_OPEN	0
#define	VXGE_HW_VP_OPEN		1

	struct __vxge_hw_device		*hldev;
	struct vxge_hw_vpath_reg	*vp_reg;
	struct vxge_hw_vpmgmt_reg	*vpmgmt_reg;
	struct __vxge_hw_non_offload_db_wrapper	*nofl_db;

	u32				max_mtu;
	u32				vsport_number;
	u32				max_kdfc_db;
	u32				max_nofl_db;

	struct __vxge_hw_ring ringh;
	struct __vxge_hw_fifo fifoh;
};

#define VXGE_HW_INFO_LEN	64
#define VXGE_HW_PMD_INFO_LEN	16
#define VXGE_MAX_PRINT_BUF_SIZE	128
/**
 * struct vxge_hw_device_hw_info - Device information
 * @host_type: Host Type
 * @func_id: Function Id
 * @vpath_mask: vpath bit mask
 * @fw_version: Firmware version
 * @fw_date: Firmware date
 * @flash_version: Flash version
 * @flash_date: Flash date
 * @mac_addrs: MAC addresses for each vpath
 * @mac_addr_masks: MAC address masks for each vpath
 *
 * Contains the vpath mask that has the bits set for each vpath allocated
 * for the driver, and the first MAC address for each vpath
 */
struct vxge_hw_device_hw_info {
	u32		host_type;
#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION			0
#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION			1
#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0				2
#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION			3
#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG			4
#define VXGE_HW_SR_VH_FUNCTION0					5
#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION				6
#define VXGE_HW_VH_NORMAL_FUNCTION				7
	u64		function_mode;
#define VXGE_HW_FUNCTION_MODE_MIN				0
#define VXGE_HW_FUNCTION_MODE_MAX				11

#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION			0
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION			1
#define VXGE_HW_FUNCTION_MODE_SRIOV				2
#define VXGE_HW_FUNCTION_MODE_MRIOV				3
#define VXGE_HW_FUNCTION_MODE_MRIOV_8				4
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17			5
#define VXGE_HW_FUNCTION_MODE_SRIOV_8				6
#define VXGE_HW_FUNCTION_MODE_SRIOV_4				7
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2			8
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4			9
#define VXGE_HW_FUNCTION_MODE_MRIOV_4				10
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_DIRECT_IO		11

	u32		func_id;
	u64		vpath_mask;
	struct vxge_hw_device_version fw_version;
	struct vxge_hw_device_date    fw_date;
	struct vxge_hw_device_version flash_version;
	struct vxge_hw_device_date    flash_date;
	u8		serial_number[VXGE_HW_INFO_LEN];
	u8		part_number[VXGE_HW_INFO_LEN];
	u8		product_desc[VXGE_HW_INFO_LEN];
	u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
	u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
};

/**
 * struct __vxge_hw_device - HAL device object
 * @magic: Magic Number
 * @bar0: BAR0 virtual address.
 * @pdev: Physical device handle
 * @config: Configuration passed by the LL driver at initialization
 * @link_state: Link state
 *
 * HW device object. Represents a Titan adapter
 */
struct __vxge_hw_device {
	u32				magic;
#define VXGE_HW_DEVICE_MAGIC		0x12345678
#define VXGE_HW_DEVICE_DEAD		0xDEADDEAD
	void __iomem			*bar0;
	struct pci_device		*pdev;
	struct net_device		*ndev;
	struct vxgedev			*vdev;

	enum vxge_hw_device_link_state	link_state;

	u32				host_type;
	u32				func_id;
	u8				titan1;
	u32				access_rights;
#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH      0x1
#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM     0x2
#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM     0x4
	struct vxge_hw_legacy_reg	*legacy_reg;
	struct vxge_hw_toc_reg		*toc_reg;
	struct vxge_hw_common_reg	*common_reg;
	struct vxge_hw_mrpcim_reg	*mrpcim_reg;
	struct vxge_hw_srpcim_reg	*srpcim_reg
					[VXGE_HW_TITAN_SRPCIM_REG_SPACES];
	struct vxge_hw_vpmgmt_reg	*vpmgmt_reg
					[VXGE_HW_TITAN_VPMGMT_REG_SPACES];
	struct vxge_hw_vpath_reg	*vpath_reg
					[VXGE_HW_TITAN_VPATH_REG_SPACES];
	u8				*kdfc;
	u8				*usdc;
	struct __vxge_hw_virtualpath	virtual_path;
	u64				vpath_assignments;
	u64				vpaths_deployed;
	u32				first_vp_id;
	u64				tim_int_mask0[4];
	u32				tim_int_mask1[4];

	struct vxge_hw_device_hw_info   hw_info;
};

#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)

#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) {	\
	if (i < 16) {					\
		m0[0] |= vxge_vBIT(0x8, (i*4), 4);	\
		m0[1] |= vxge_vBIT(0x4, (i*4), 4);	\
	} else {					\
		m1[0] = 0x80000000;			\
		m1[1] = 0x40000000;			\
	}						\
}

#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) {	\
	if (i < 16) {					\
		m0[0] &= ~vxge_vBIT(0x8, (i*4), 4);	\
		m0[1] &= ~vxge_vBIT(0x4, (i*4), 4);	\
	} else {					\
		m1[0] = 0;				\
		m1[1] = 0;				\
	}						\
}
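
/*
 * Illustrative usage (hypothetical call site): enable the TIM interrupt
 * mask bits for a TX interrupt number on a device, e.g.
 *
 *	VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
 *					hldev->tim_int_mask1,
 *					vpath->fifoh.tx_intr_num);
 */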

/**
 * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
 * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
 * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
 * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
 * device.
 * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
 * filling-in and posting later.
 *
 * Titan/HW descriptor states.
 */
enum vxge_hw_txdl_state {
	VXGE_HW_TXDL_STATE_NONE	= 0,
	VXGE_HW_TXDL_STATE_AVAIL	= 1,
	VXGE_HW_TXDL_STATE_POSTED	= 2,
	VXGE_HW_TXDL_STATE_FREED	= 3
};


/* Fifo and ring circular-buffer offset-tracking APIs */
static inline void __vxge_hw_desc_offset_up(u16 upper_limit,
			u16 *offset)
{
	if (++(*offset) >= upper_limit)
		*offset = 0;
}

/* RxD offset handling APIs */
static inline void vxge_hw_ring_rxd_offset_up(u16 *offset)
{
	__vxge_hw_desc_offset_up(VXGE_HW_MAX_RXDS_PER_BLOCK_1,
			offset);
}

/* TxD offset handling APIs */
static inline void vxge_hw_fifo_txd_offset_up(u16 *offset)
{
	__vxge_hw_desc_offset_up(VXGE_HW_FIFO_TXD_DEPTH, offset);
}
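
/*
 * Illustrative usage: after reaping a completed TxD at fifo->hw_offset,
 * advance the hardware-owned index, wrapping at the list depth, e.g.
 *
 *	vxge_hw_fifo_txd_offset_up(&fifo->hw_offset);
 */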

/**
 * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
 * @rxdp: Descriptor pointer.
 * @iob: I/O buffer carrying the receive data. By the time
 * vxge_hw_ring_rxd_1b_set is called, the receive buffer should already
 * be mapped to the device.
 * @size: Size of the receive buffer.
 *
 * Prepare a 1-buffer-mode Rx descriptor for posting
 * (via vxge_hw_ring_rxd_post()).
 *
 * This inline helper function does not return a value and always
 * succeeds.
 */
static inline
void vxge_hw_ring_rxd_1b_set(struct vxge_hw_ring_rxd_1 *rxdp,
	struct io_buffer *iob, u32 size)
{
	rxdp->host_control = (intptr_t)(iob);
	rxdp->buffer0_ptr = virt_to_bus(iob->data);
	rxdp->control_1	&= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
	rxdp->control_1	|= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
}
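
/*
 * Illustrative usage (hypothetical, error handling elided): allocate an
 * I/O buffer and attach it to a free RxD before posting, e.g.
 *
 *	struct io_buffer *iob = alloc_iob(VXGE_HW_MAX_MTU);
 *	if (iob)
 *		vxge_hw_ring_rxd_1b_set(rxdp, iob, iob_tailroom(iob));
 */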

enum vxge_hw_status vxge_hw_device_hw_info_get(
	struct pci_device *pdev,
	void __iomem *bar0,
	struct vxge_hw_device_hw_info *hw_info);

enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(
	struct vxge_hw_vpath_reg __iomem *vpath_reg,
	struct vxge_hw_device_hw_info *hw_info);

enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
	struct vxge_hw_vpath_reg __iomem *vpath_reg,
	struct vxge_hw_device_hw_info *hw_info);

/**
 * vxge_hw_device_link_state_get - Get link state.
 * @devh: HW device handle.
 *
 * Get link state.
 * Returns: link state.
 */
static inline
enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
	struct __vxge_hw_device *devh)
{
	return devh->link_state;
}

void vxge_hw_device_terminate(struct __vxge_hw_device *devh);

enum vxge_hw_status vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	void *bar0,
	struct pci_device *pdev,
	u8 titan1);

enum vxge_hw_status
vxge_hw_vpath_open(struct __vxge_hw_device *hldev, struct vxge_vpath *vpath);

enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);

enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_virtualpath *vpath);

enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_virtualpath *vpath);

enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(struct __vxge_hw_virtualpath *vpath);

void
vxge_hw_vpath_enable(struct __vxge_hw_virtualpath *vpath);

enum vxge_hw_status
vxge_hw_vpath_mtu_set(struct __vxge_hw_virtualpath *vpath, u32 new_mtu);

void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_virtualpath *vpath);

void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);

enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);

enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);

enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
	struct vxge_hw_vpath_reg __iomem *vpath_reg);

enum vxge_hw_status
__vxge_hw_device_register_poll(
	void __iomem	*reg,
	u64 mask, u32 max_millis);

#ifndef readq
/* Fallback 64-bit MMIO read: two 32-bit reads, upper dword first.
 * Note that the combined read is not atomic. */
static inline u64 readq(void __iomem *addr)
{
	u64 ret = 0;
	ret = readl(addr + 4);
	ret <<= 32;
	ret |= readl(addr);

	return ret;
}
#endif

#ifndef writeq
/* Fallback 64-bit MMIO write: two 32-bit writes, lower dword first.
 * Note that the combined write is not atomic. */
static inline void writeq(u64 val, void __iomem *addr)
{
	writel((u32) (val), addr);
	writel((u32) (val >> 32), (addr + 4));
}
#endif

static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
{
	writel(val, addr + 4);
}

static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
{
	writel(val, addr);
}

/* Write a 64-bit register as two ordered 32-bit halves, then poll the
 * register against @mask for up to @max_millis milliseconds. */
static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
			  u64 mask, u32 max_millis)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
	wmb();
	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
	wmb();

	status = __vxge_hw_device_register_poll(addr, mask, max_millis);
	return status;
}
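
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * ringing the non-offload doorbell for a one-descriptor TxDL.  The
 * control word goes out first, then the TxDL pointer, mirroring the
 * wrapper layout described above; LAST_TXD_NUMBER is 0 because the only
 * TxD in the list is TxD 0.
 */
static inline void
__vxge_hw_example_non_offload_db_post(
	struct __vxge_hw_non_offload_db_wrapper __iomem *db, u64 txdl_dma)
{
	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
	       VXGE_HW_NODBW_LAST_TXD_NUMBER(0), &db->control_0);
	wmb();
	writeq(txdl_dma, &db->txdl_ptr);
}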

void
__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);

enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);

enum vxge_hw_status
__vxge_hw_vpath_pci_read(
	struct __vxge_hw_virtualpath	*vpath,
	u32			phy_func_0,
	u32			offset,
	u32			*val);

enum vxge_hw_status
__vxge_hw_vpath_addr_get(
	struct vxge_hw_vpath_reg __iomem *vpath_reg,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN]);

u32
__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);

enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);

enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);

/**
 * vxge_debug
 * @mask: Mask for the debug level
 * @fmt: printf-like format string
 *
 * Note: no trailing semicolon in the macro bodies, so that
 * "vxge_debug(...);" and "vxge_trace();" expand to a single statement.
 */
static const u16 debug_filter = VXGE_ERR;
#define vxge_debug(mask, fmt...)	do {	\
		if (debug_filter & mask)	\
			DBG(fmt);		\
	} while (0)

#define vxge_trace()	vxge_debug(VXGE_TRACE, "%s:%d\n", __func__, __LINE__)
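
/*
 * Illustrative usage: a message is emitted only when its mask is set in
 * debug_filter, e.g.
 *
 *	vxge_debug(VXGE_INFO, "vpath %d opened\n", vpath->vp_id);
 */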

enum vxge_hw_status
vxge_hw_get_func_mode(struct __vxge_hw_device *hldev, u32 *func_mode);

enum vxge_hw_status
vxge_hw_set_fw_api(struct __vxge_hw_device *hldev,
		u64 vp_id, u32 action,
		u32 offset, u64 data0, u64 data1);

void
vxge_hw_vpath_set_zero_rx_frm_len(struct __vxge_hw_device *hldev);

#endif