xref: /dragonfly/sys/dev/netif/igb/if_igb.h (revision d3e43a7a)
/*
 * Copyright (c) 2001-2013, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _IF_IGB_H_
#define _IF_IGB_H_

/* Tunables */

/*
 * Max ring count
 */
#define IGB_MAX_RING_I210	4
#define IGB_MAX_RING_I211	2
#define IGB_MAX_RING_I350	8
#define IGB_MAX_RING_I354	8
#define IGB_MAX_RING_82580	8
#define IGB_MAX_RING_82576	16
#define IGB_MAX_RING_82575	4
#define IGB_MIN_RING		1
#define IGB_MIN_RING_RSS	2

/*
 * Max TX/RX interrupt bits
 */
#define IGB_MAX_TXRXINT_I210	4
#define IGB_MAX_TXRXINT_I211	4
#define IGB_MAX_TXRXINT_I350	8
#define IGB_MAX_TXRXINT_I354	8
#define IGB_MAX_TXRXINT_82580	8
#define IGB_MAX_TXRXINT_82576	16
#define IGB_MAX_TXRXINT_82575	4	/* XXX not used */
#define IGB_MIN_TXRXINT		2	/* XXX VF? */

/*
 * Max IVAR count
 */
#define IGB_MAX_IVAR_I210	4
#define IGB_MAX_IVAR_I211	4
#define IGB_MAX_IVAR_I350	4
#define IGB_MAX_IVAR_I354	4
#define IGB_MAX_IVAR_82580	4
#define IGB_MAX_IVAR_82576	8
#define IGB_MAX_IVAR_VF		1

/*
 * Default number of segments received before writing to RX related registers
 */
#define IGB_DEF_RXWREG_NSEGS	32

/*
 * Default number of segments sent before writing to TX related registers
 */
#define IGB_DEF_TXWREG_NSEGS	8

/*
 * IGB_TXD: Maximum number of Transmit Descriptors
 *
 *   This value is the number of transmit descriptors allocated by the driver.
 *   Increasing this value allows the driver to queue more transmits.  Each
 *   descriptor is 16 bytes.
 *   Since TDLEN must be a multiple of 128 bytes, the number of transmit
 *   descriptors must meet the following condition:
 *      (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
 */
#define IGB_MIN_TXD		256
#define IGB_DEFAULT_TXD		1024
#define IGB_MAX_TXD		4096

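/*
 * Worked example (illustrative only): since each TX descriptor is 16 bytes,
 * the condition above reduces to num_tx_desc being a multiple of 8.
 * IGB_DEFAULT_TXD satisfies it: 1024 * 16 = 16384 = 128 * 128.
 */
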
/*
 * IGB_RXD: Maximum number of Receive Descriptors
 *
 *   This value is the number of receive descriptors allocated by the driver.
 *   Increasing this value allows the driver to buffer more incoming packets.
 *   Each descriptor is 16 bytes.  A receive buffer is also allocated for each
 *   descriptor.  The maximum MTU size is 16110.
 *   Since RDLEN must be a multiple of 128 bytes, the number of receive
 *   descriptors must meet the following condition:
 *      (num_rx_desc * sizeof(union e1000_adv_rx_desc)) % 128 == 0
 */
#define IGB_MIN_RXD		256
#define IGB_DEFAULT_RXD		512
#define IGB_MAX_RXD		4096

/*
 * This parameter controls when the driver calls the routine to reclaim
 * transmit descriptors. Cleaning earlier seems a win.
 */
#define IGB_TX_CLEANUP_THRESHOLD(sc)	((sc)->num_tx_desc / 2)

/*
 * This parameter controls whether or not autonegotiation is enabled.
 *              0 - Disable autonegotiation
 *              1 - Enable autonegotiation
 */
#define DO_AUTO_NEG		1

/*
 * This parameter controls whether or not the driver will wait for
 * autonegotiation to complete.
 *              1 - Wait for autonegotiation to complete
 *              0 - Don't wait for autonegotiation to complete
 */
#define WAIT_FOR_AUTO_NEG_DEFAULT	0

/* Tunables -- End */

#define AUTONEG_ADV_DEFAULT	(ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
				 ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
				 ADVERTISE_1000_FULL)

#define AUTO_ALL_MODES			0

/* PHY master/slave setting */
#define IGB_MASTER_SLAVE		e1000_ms_hw_default

/*
 * Miscellaneous constants
 */
#define IGB_VENDOR_ID			0x8086

#define IGB_JUMBO_PBA			0x00000028
#define IGB_DEFAULT_PBA			0x00000030
#define IGB_SMARTSPEED_DOWNSHIFT	3
#define IGB_SMARTSPEED_MAX		15
#define IGB_MAX_LOOP			10

#define IGB_RX_PTHRESH			((hw->mac.type == e1000_i354) ? 12 : \
					  ((hw->mac.type <= e1000_82576) ? 16 : 8))
#define IGB_RX_HTHRESH			8
#define IGB_RX_WTHRESH			((hw->mac.type == e1000_82576 && \
					  sc->msix_mem_res) ? 1 : 4)

#define IGB_TX_PTHRESH			((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_TX_HTHRESH			1
#define IGB_TX_WTHRESH			16

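/*
 * These prefetch/host/write-back thresholds end up packed into the per-queue
 * RXDCTL/TXDCTL registers.  A minimal sketch for the TX side, assuming the
 * usual 82575-family TXDCTL layout (PTHRESH bits 5:0, HTHRESH bits 13:8,
 * WTHRESH bits 21:16) and the shared e1000 register accessors:
 *
 *	uint32_t txdctl = 0;
 *
 *	txdctl |= IGB_TX_PTHRESH;
 *	txdctl |= IGB_TX_HTHRESH << 8;
 *	txdctl |= IGB_TX_WTHRESH << 16;
 *	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
 *	E1000_WRITE_REG(hw, E1000_TXDCTL(txr->me), txdctl);
 */
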
#define MAX_NUM_MULTICAST_ADDRESSES	128
#define IGB_FC_PAUSE_TIME		0x0680

#define IGB_INTR_RATE			6000
#define IGB_MSIX_RX_RATE		6000
#define IGB_MSIX_TX_RATE		4000

/*
 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
 * a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary
 * instead.  This also optimizes the cache line size effect; the hardware
 * supports cache line sizes up to 128 bytes.
 */
#define IGB_DBA_ALIGN			128

/* PCI Config defines */
#define IGB_MSIX_BAR			3
#define IGB_MSIX_BAR_ALT		4

#define IGB_VFTA_SIZE			128
#define IGB_TSO_SIZE			(IP_MAXPACKET + \
					 sizeof(struct ether_vlan_header))
#define IGB_HDR_BUF			128
#define IGB_TXPBSIZE			20408
#define IGB_PKTTYPE_MASK		0x0000FFF0

#define IGB_CSUM_FEATURES		(CSUM_IP | CSUM_TCP | CSUM_UDP)

/* One for the TX csum offloading descriptor; the other 2 are reserved */
#define IGB_TX_RESERVED			3

/* Large enough for 64K TSO */
#define IGB_MAX_SCATTER			33

#define IGB_NRSSRK			10
#define IGB_RSSRK_SIZE			4
#define IGB_RSSRK_VAL(key, i)		(key[(i) * IGB_RSSRK_SIZE] | \
					 key[(i) * IGB_RSSRK_SIZE + 1] << 8 | \
					 key[(i) * IGB_RSSRK_SIZE + 2] << 16 | \
					 key[(i) * IGB_RSSRK_SIZE + 3] << 24)

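/*
 * IGB_RSSRK_VAL() assembles bytes (i*4 .. i*4+3) of an RSS key into the
 * little-endian 32-bit value expected by the i-th RSS random key register.
 * A minimal sketch of loading a 40-byte key, assuming E1000_RSSRK() and
 * E1000_WRITE_REG() from the shared e1000 code and the kernel's Toeplitz
 * key helper:
 *
 *	uint8_t rss_key[IGB_NRSSRK * IGB_RSSRK_SIZE];
 *	int i;
 *
 *	toeplitz_get_key(rss_key, sizeof(rss_key));
 *	for (i = 0; i < IGB_NRSSRK; ++i)
 *		E1000_WRITE_REG(hw, E1000_RSSRK(i), IGB_RSSRK_VAL(rss_key, i));
 */
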
#define IGB_NRETA			32
#define IGB_RETA_SIZE			4
#define IGB_RETA_SHIFT			0
#define IGB_RETA_SHIFT_82575		6

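/*
 * Each 32-bit RETA register holds IGB_RETA_SIZE one-byte redirection entries,
 * and IGB_NRETA registers make up the 128-entry table.  A minimal sketch of
 * spreading packets over the RX rings in use (queue index placed at
 * IGB_RETA_SHIFT within each byte, or IGB_RETA_SHIFT_82575 on 82575):
 *
 *	uint32_t reta;
 *	int i, j;
 *
 *	for (i = 0; i < IGB_NRETA; ++i) {
 *		reta = 0;
 *		for (j = 0; j < IGB_RETA_SIZE; ++j) {
 *			uint32_t q = (i * IGB_RETA_SIZE + j) % sc->rx_ring_inuse;
 *			reta |= q << (8 * j + IGB_RETA_SHIFT);
 *		}
 *		E1000_WRITE_REG(hw, E1000_RETA(i), reta);
 *	}
 */
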
#define IGB_EITR_INTVL_MASK		0x7ffc
#define IGB_EITR_INTVL_SHIFT		2

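/*
 * The EITR interval field occupies bits 14:2 (IGB_EITR_INTVL_MASK).  A
 * minimal sketch of converting a desired interrupt rate to an EITR value,
 * assuming the 82576-and-later layout where the interval field counts in
 * microseconds ("rate" is interrupts/second and assumed > 0):
 *
 *	uint32_t eitr;
 *
 *	eitr = (1000000 / rate) << IGB_EITR_INTVL_SHIFT;
 *	eitr &= IGB_EITR_INTVL_MASK;
 */
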
/* Disable DMA Coalesce Flush */
#define IGB_DMCTLX_DCFLUSH_DIS		0x80000000

struct igb_softc;

/*
 * Bus dma information structure
 */
struct igb_dma {
	bus_addr_t		dma_paddr;
	void			*dma_vaddr;
	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		dma_map;
};

/*
 * Transmit ring: one per queue
 */
struct igb_tx_ring {
	struct lwkt_serialize	tx_serialize;
	struct igb_softc	*sc;
	struct ifaltq_subque	*ifsq;
	uint32_t		me;
	uint32_t		tx_flags;
#define IGB_TXFLAG_TSO_IPLEN0	0x1
#define IGB_TXFLAG_ENABLED	0x2
	struct e1000_tx_desc	*tx_base;
	int			num_tx_desc;
	uint32_t		next_avail_desc;
	uint32_t		next_to_clean;
	uint32_t		*tx_hdr;
	int			tx_avail;
	struct igb_tx_buf	*tx_buf;
	bus_dma_tag_t		tx_tag;
	int			tx_nsegs;
	int			intr_nsegs;
	int			wreg_nsegs;
	int			tx_intr_vec;
	uint32_t		tx_intr_mask;
	struct ifsubq_watchdog	tx_watchdog;

	/* Soft stats */
	u_long			tx_packets;

	struct igb_dma		txdma;
	bus_dma_tag_t		tx_hdr_dtag;
	bus_dmamap_t		tx_hdr_dmap;
	bus_addr_t		tx_hdr_paddr;
	int			tx_intr_cpuid;
} __cachealign;

/*
 * Receive ring: one per queue
 */
struct igb_rx_ring {
	struct lwkt_serialize	rx_serialize;
	struct igb_softc	*sc;
	uint32_t		me;
	union e1000_adv_rx_desc	*rx_base;
	boolean_t		discard;
	int			num_rx_desc;
	uint32_t		next_to_check;
	struct igb_rx_buf	*rx_buf;
	bus_dma_tag_t		rx_tag;
	bus_dmamap_t		rx_sparemap;
	int			rx_intr_vec;
	uint32_t		rx_intr_mask;

	/*
	 * First/last mbuf pointers, for
	 * collecting multisegment RX packets.
	 */
	struct mbuf		*fmp;
	struct mbuf		*lmp;
	int			wreg_nsegs;

	struct igb_tx_ring	*rx_txr;	/* piggybacked TX ring */

	/* Soft stats */
	u_long			rx_packets;

	struct igb_dma		rxdma;
} __cachealign;

struct igb_intr_data {
	struct lwkt_serialize	*intr_serialize;
	driver_intr_t		*intr_func;
	void			*intr_hand;
	struct resource		*intr_res;
	void			*intr_funcarg;
	int			intr_rid;
	int			intr_cpuid;
	int			intr_rate;
	int			intr_use;
#define IGB_INTR_USE_RXTX	0
#define IGB_INTR_USE_STATUS	1
#define IGB_INTR_USE_RX		2
#define IGB_INTR_USE_TX		3
	const char		*intr_desc;
	char			intr_desc0[64];
};

struct igb_softc {
	struct arpcom		arpcom;
	struct e1000_hw		hw;

	struct e1000_osdep	osdep;
	device_t		dev;
	uint32_t		flags;
#define IGB_FLAG_SHARED_INTR	0x1
#define IGB_FLAG_HAS_MGMT	0x2

	bus_dma_tag_t		parent_tag;

	int			mem_rid;
	struct resource		*mem_res;

	struct ifmedia		media;
	struct callout		timer;
	int			timer_cpuid;

	int			if_flags;
	int			max_frame_size;
	int			pause_frames;
	uint16_t		vf_ifp;	/* a VF interface */

	/* Management and WOL features */
	int			wol;

	/* Info about the interface */
	uint8_t			link_active;
	uint16_t		link_speed;
	uint16_t		link_duplex;
	uint32_t		smartspeed;
	uint32_t		dma_coalesce;

	/* Multicast array pointer */
	uint8_t			*mta;

	int			rx_npoll_off;
	int			tx_npoll_off;
	int			serialize_cnt;
	struct lwkt_serialize	**serializes;
	struct lwkt_serialize	main_serialize;

	int			intr_type;
	uint32_t		intr_mask;
	int			sts_msix_vec;
	uint32_t		sts_intr_mask;

	/*
	 * Transmit rings
	 */
	int			tx_ring_cnt;
	int			tx_ring_msix;
	int			tx_ring_inuse;
	struct igb_tx_ring	*tx_rings;

	/*
	 * Receive rings
	 */
	int			rss_debug;
	int			rx_ring_cnt;
	int			rx_ring_msix;
	int			rx_ring_inuse;
	struct igb_rx_ring	*rx_rings;

	int			ifm_flowctrl;

	/* Misc stats maintained by the driver */
	u_long			dropped_pkts;
	u_long			mbuf_defrag_failed;
	u_long			no_tx_dma_setup;
	u_long			watchdog_events;
	u_long			rx_overruns;
	u_long			device_control;
	u_long			rx_control;
	u_long			int_mask;
	u_long			eint_mask;
	u_long			packet_buf_alloc_rx;
	u_long			packet_buf_alloc_tx;

	void			*stats;

	int			msix_mem_rid;
	struct resource		*msix_mem_res;

	int			intr_cnt;
	struct igb_intr_data	*intr_data;
};

#define IGB_ENABLE_HWRSS(sc)	((sc)->rx_ring_cnt > 1)
#define IGB_ENABLE_HWTSS(sc)	((sc)->tx_ring_cnt > 1)

struct igb_tx_buf {
	struct mbuf	*m_head;
	bus_dmamap_t	map;		/* bus_dma map for packet */
};

struct igb_rx_buf {
	struct mbuf	*m_head;
	bus_dmamap_t	map;	/* bus_dma map for packet */
	bus_addr_t	paddr;
};

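/*
 * Maintain a 64-bit soft counter ("cur") on top of a 32-bit rolling hardware
 * register: when the freshly read value is smaller than the previously read
 * one ("last"), the register has wrapped, so the upper 32 bits of the soft
 * counter are bumped before the new low word is merged in.  Intended to be
 * invoked with "hw" in scope, passing a statistics register together with
 * its matching last/cumulative counter variables.
 */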
#define UPDATE_VF_REG(reg, last, cur)		\
{						\
	uint32_t new = E1000_READ_REG(hw, reg);	\
	if (new < last)				\
		cur += 0x100000000LL;		\
	last = new;				\
	cur &= 0xFFFFFFFF00000000LL;		\
	cur |= new;				\
}

#define IGB_I210_LINK_DELAY	1000	/* unit: ms */

#endif /* _IF_IGB_H_ */