/*
 * Copyright (c) 2001-2013, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _IF_IGB_H_
#define _IF_IGB_H_

/* Tunables */

/*
 * Max ring count
 */
#define IGB_MAX_RING_I210	4
#define IGB_MAX_RING_I211	2
#define IGB_MAX_RING_I350	8
#define IGB_MAX_RING_I354	8
#define IGB_MAX_RING_82580	8
#define IGB_MAX_RING_82576	16
#define IGB_MAX_RING_82575	4
#define IGB_MIN_RING		1
#define IGB_MIN_RING_RSS	2

/*
 * Max TX/RX interrupt bits
 */
#define IGB_MAX_TXRXINT_I210	4
#define IGB_MAX_TXRXINT_I211	4
#define IGB_MAX_TXRXINT_I350	8
#define IGB_MAX_TXRXINT_I354	8
#define IGB_MAX_TXRXINT_82580	8
#define IGB_MAX_TXRXINT_82576	16
#define IGB_MAX_TXRXINT_82575	4	/* XXX not used */
#define IGB_MIN_TXRXINT		2	/* XXX VF? */

/*
 * Max IVAR count
 */
#define IGB_MAX_IVAR_I210	4
#define IGB_MAX_IVAR_I211	4
#define IGB_MAX_IVAR_I350	4
#define IGB_MAX_IVAR_I354	4
#define IGB_MAX_IVAR_82580	4
#define IGB_MAX_IVAR_82576	8
#define IGB_MAX_IVAR_VF		1

/*
 * Default number of segments received before writing to RX related registers
 */
#define IGB_DEF_RXWREG_NSEGS	32

/*
 * Default number of segments sent before writing to TX related registers
 */
#define IGB_DEF_TXWREG_NSEGS	8

/*
 * IGB_TXD: Maximum number of Transmit Descriptors
 *
 *   This value is the number of transmit descriptors allocated by the driver.
 *   Increasing this value allows the driver to queue more transmits.  Each
 *   descriptor is 16 bytes.
 *   Since TDLEN must be a multiple of 128 bytes, the number of transmit
 *   descriptors must satisfy the following condition:
 *      (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
 */
#define IGB_MIN_TXD		256
#define IGB_DEFAULT_TXD		1024
#define IGB_MAX_TXD		4096
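
/*
 * For illustration, the default of 1024 transmit descriptors meets the
 * TDLEN constraint above:
 *      1024 * 16 = 16384 bytes = 128 * 128, so TDLEN % 128 == 0
 */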

/*
 * IGB_RXD: Maximum number of Receive Descriptors
 *
 *   This value is the number of receive descriptors allocated by the driver.
 *   Increasing this value allows the driver to buffer more incoming packets.
 *   Each descriptor is 16 bytes.  A receive buffer is also allocated for each
 *   descriptor. The maximum MTU size is 16110.
 *   Since RDLEN must be a multiple of 128 bytes, the number of receive
 *   descriptors must satisfy the following condition:
 *      (num_rx_desc * sizeof(union e1000_adv_rx_desc)) % 128 == 0
 */
#define IGB_MIN_RXD		256
#define IGB_DEFAULT_RXD		512
#define IGB_MAX_RXD		4096
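
/*
 * Likewise, the default of 512 receive descriptors meets the RDLEN
 * constraint:
 *      512 * 16 = 8192 bytes = 64 * 128, so RDLEN % 128 == 0
 */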

/*
 * This parameter controls when the driver calls the routine to reclaim
 * transmit descriptors. Cleaning earlier seems a win.
 */
#define IGB_TX_CLEANUP_THRESHOLD(sc)	((sc)->num_tx_desc / 2)
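
/*
 * With the default of 1024 transmit descriptors this threshold evaluates to
 * 512; in practice that means descriptor reclamation starts once roughly
 * half of the ring's descriptors are outstanding.
 */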

/*
 * This parameter controls whether or not autonegotiation is enabled.
 *              0 - Disable autonegotiation
 *              1 - Enable  autonegotiation
 */
#define DO_AUTO_NEG		1

/*
 * This parameter controls whether or not the driver will wait for
 * autonegotiation to complete.
 *              1 - Wait for autonegotiation to complete
 *              0 - Don't wait for autonegotiation to complete
 */
#define WAIT_FOR_AUTO_NEG_DEFAULT	0

/* Tunables -- End */

#define AUTONEG_ADV_DEFAULT	(ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
				 ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
				 ADVERTISE_1000_FULL)

#define AUTO_ALL_MODES			0

/* PHY master/slave setting */
#define IGB_MASTER_SLAVE		e1000_ms_hw_default

/*
 * Miscellaneous constants
 */
#define IGB_VENDOR_ID			0x8086

#define IGB_JUMBO_PBA			0x00000028
#define IGB_DEFAULT_PBA			0x00000030
#define IGB_SMARTSPEED_DOWNSHIFT	3
#define IGB_SMARTSPEED_MAX		15
#define IGB_MAX_LOOP			10

#define IGB_RX_PTHRESH			((hw->mac.type == e1000_i354) ? 12 : \
					  ((hw->mac.type <= e1000_82576) ? 16 : 8))
#define IGB_RX_HTHRESH			8
#define IGB_RX_WTHRESH			((hw->mac.type == e1000_82576 && \
					  sc->msix_mem_res) ? 1 : 4)

#define IGB_TX_PTHRESH			((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_TX_HTHRESH			1
#define IGB_TX_WTHRESH			16
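
/*
 * The PTHRESH/HTHRESH/WTHRESH values above are the prefetch, host and
 * write-back thresholds programmed into the RXDCTL/TXDCTL descriptor
 * control registers.  Several of these macros expand references to local
 * `hw' (struct e1000_hw *) and `sc' (struct igb_softc *) variables, so
 * they can only be used where those are in scope.
 */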

#define MAX_NUM_MULTICAST_ADDRESSES	128
#define IGB_FC_PAUSE_TIME		0x0680

#define IGB_INTR_RATE			6000
#define IGB_MSIX_RX_RATE		6000
#define IGB_MSIX_TX_RATE		4000

/*
 * TDBA/RDBA need only be aligned on a 16-byte boundary, but TDLEN/RDLEN must
 * be a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary
 * instead.  This also plays well with the cache line size; the hardware
 * supports cache line sizes up to 128 bytes.
 */
#define IGB_DBA_ALIGN			128

/* PCI Config defines */
#define IGB_MSIX_BAR			3
#define IGB_MSIX_BAR_ALT		4

#define IGB_VFTA_SIZE			128
#define IGB_TSO_SIZE			(IP_MAXPACKET + \
					 sizeof(struct ether_vlan_header))
#define IGB_HDR_BUF			128
#define IGB_TXPBSIZE			20408
#define IGB_PKTTYPE_MASK		0x0000FFF0

#define IGB_CSUM_FEATURES		(CSUM_IP | CSUM_TCP | CSUM_UDP)

/* One for TX csum offloading desc, the other 2 are reserved */
#define IGB_TX_RESERVED			3

/* Large enough for 64K TSO */
#define IGB_MAX_SCATTER			33

#define IGB_NRSSRK			10
#define IGB_RSSRK_SIZE			4
#define IGB_RSSRK_VAL(key, i)		(key[(i) * IGB_RSSRK_SIZE] | \
					 key[(i) * IGB_RSSRK_SIZE + 1] << 8 | \
					 key[(i) * IGB_RSSRK_SIZE + 2] << 16 | \
					 key[(i) * IGB_RSSRK_SIZE + 3] << 24)
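
/*
 * IGB_RSSRK_VAL(key, i) packs the four RSS key bytes key[4*i .. 4*i+3] into
 * one 32-bit RSSRK register value, least-significant byte first.  With
 * IGB_NRSSRK (10) registers of IGB_RSSRK_SIZE (4) bytes each, this covers a
 * 40-byte RSS hash key.
 */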

#define IGB_NRETA			32
#define IGB_RETA_SIZE			4
#define IGB_RETA_SHIFT			0
#define IGB_RETA_SHIFT_82575		6

#define IGB_RDRTABLE_SIZE		(IGB_NRETA * IGB_RETA_SIZE)
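
/*
 * The RSS redirection table is spread across IGB_NRETA (32) RETA registers
 * holding IGB_RETA_SIZE (4) entries each, giving IGB_RDRTABLE_SIZE = 128
 * entries in total; rdr_table[] in struct igb_softc below is sized to match.
 */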

#define IGB_EITR_INTVL_MASK		0x7ffc
#define IGB_EITR_INTVL_SHIFT		2
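
/*
 * In the EITR interrupt throttling registers the interval field occupies
 * bits [14:2]; the mask and shift above extract and place that field.
 */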

/* Disable DMA Coalesce Flush */
#define IGB_DMCTLX_DCFLUSH_DIS		0x80000000

struct igb_softc;

/*
 * Bus dma information structure
 */
struct igb_dma {
	bus_addr_t		dma_paddr;
	void			*dma_vaddr;
	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		dma_map;
};

/*
 * Transmit ring: one per queue
 */
struct igb_tx_ring {
	struct lwkt_serialize	tx_serialize;
	struct igb_softc	*sc;
	struct ifaltq_subque	*ifsq;
	uint32_t		me;
	int16_t			tx_running;
#define IGB_TX_RUNNING		100
#define IGB_TX_RUNNING_DEC	25
	uint16_t		tx_flags;
#define IGB_TXFLAG_TSO_IPLEN0	0x1
#define IGB_TXFLAG_ENABLED	0x2
	struct e1000_tx_desc	*tx_base;
	int			num_tx_desc;
	uint32_t		next_avail_desc;
	uint32_t		next_to_clean;
	int			tx_avail;
	int			tx_nmbuf;
	uint32_t		*tx_hdr;
	struct igb_tx_buf	*tx_buf;
	bus_dma_tag_t		tx_tag;
	int			tx_nsegs;
	int			intr_nsegs;
	int			wreg_nsegs;
	int			tx_intr_vec;
	uint32_t		tx_intr_mask;
	struct ifsubq_watchdog	tx_watchdog;
	struct callout		tx_gc_timer;

	/* Soft stats */
	u_long			tx_packets;
	u_long			tx_gc;

	struct igb_dma		txdma;
	bus_dma_tag_t		tx_hdr_dtag;
	bus_dmamap_t		tx_hdr_dmap;
	bus_addr_t		tx_hdr_paddr;
	int			tx_intr_cpuid;
} __cachealign;

/*
 * Receive ring: one per queue
 */
struct igb_rx_ring {
	struct lwkt_serialize	rx_serialize;
	struct igb_softc	*sc;
	uint32_t		me;
	union e1000_adv_rx_desc	*rx_base;
	boolean_t		discard;
	int			num_rx_desc;
	uint32_t		next_to_check;
	struct igb_rx_buf	*rx_buf;
	bus_dma_tag_t		rx_tag;
	bus_dmamap_t		rx_sparemap;
	int			rx_intr_vec;
	uint32_t		rx_intr_mask;

	/*
	 * First/last mbuf pointers, for
	 * collecting multisegment RX packets.
	 */
	struct mbuf		*fmp;
	struct mbuf		*lmp;
	int			wreg_nsegs;

	struct igb_tx_ring	*rx_txr;	/* piggybacked TX ring */

	/* Soft stats */
	u_long			rx_packets;

	struct igb_dma		rxdma;
} __cachealign;

struct igb_intr_data {
	struct lwkt_serialize	*intr_serialize;
	driver_intr_t		*intr_func;
	void			*intr_hand;
	struct resource		*intr_res;
	void			*intr_funcarg;
	int			intr_rid;
	int			intr_cpuid;
	int			intr_rate;
	int			intr_use;
#define IGB_INTR_USE_RXTX	0
#define IGB_INTR_USE_STATUS	1
#define IGB_INTR_USE_RX		2
#define IGB_INTR_USE_TX		3
	const char		*intr_desc;
	char			intr_desc0[64];
};

struct igb_softc {
	struct arpcom		arpcom;
	struct e1000_hw		hw;

	struct e1000_osdep	osdep;
	device_t		dev;
	uint32_t		flags;
#define IGB_FLAG_SHARED_INTR	0x1
#define IGB_FLAG_HAS_MGMT	0x2

	bus_dma_tag_t		parent_tag;

	int			mem_rid;
	struct resource 	*mem_res;

	struct ifmedia		media;
	struct callout		timer;
	int			timer_cpuid;

	int			if_flags;
	int			max_frame_size;
	int			pause_frames;
	uint16_t		vf_ifp;	/* a VF interface */

	/* Management and WOL features */
	int			wol;

	/* Info about the interface */
	uint8_t			link_active;
	uint16_t		link_speed;
	uint16_t		link_duplex;
	uint32_t		smartspeed;
	uint32_t		dma_coalesce;

	/* Multicast array pointer */
	uint8_t			*mta;

	int			serialize_cnt;
	struct lwkt_serialize	**serializes;
	struct lwkt_serialize	main_serialize;

	int			intr_type;
	uint32_t		intr_mask;
	int			sts_msix_vec;
	uint32_t		sts_intr_mask;

	/*
	 * Transmit rings
	 */
	int			tx_ring_cnt;
	int			tx_ring_msix;
	int			tx_ring_inuse;
	struct igb_tx_ring	*tx_rings;

	/*
	 * Receive rings
	 */
	int			rss_debug;
	int			rx_ring_cnt;
	int			rx_ring_msix;
	int			rx_ring_inuse;
	struct igb_rx_ring	*rx_rings;

	int			ifm_flowctrl;

	/* Misc stats maintained by the driver */
	u_long			dropped_pkts;
	u_long			mbuf_defrag_failed;
	u_long			no_tx_dma_setup;
	u_long			watchdog_events;
	u_long			rx_overruns;
	u_long			device_control;
	u_long			rx_control;
	u_long			int_mask;
	u_long			eint_mask;
	u_long			packet_buf_alloc_rx;
	u_long			packet_buf_alloc_tx;

	void 			*stats;

	int			msix_mem_rid;
	struct resource 	*msix_mem_res;

	int			intr_cnt;
	struct igb_intr_data	*intr_data;

	struct if_ringmap	*rx_rmap;
	struct if_ringmap	*rx_rmap_intr;
	struct if_ringmap	*tx_rmap;
	struct if_ringmap	*tx_rmap_intr;

	int			rdr_table[IGB_RDRTABLE_SIZE];
};

#define IGB_ENABLE_HWRSS(sc)	((sc)->rx_ring_cnt > 1)
#define IGB_ENABLE_HWTSS(sc)	((sc)->tx_ring_cnt > 1)

struct igb_tx_buf {
	struct mbuf	*m_head;
	bus_dmamap_t	map;		/* bus_dma map for packet */
};

struct igb_rx_buf {
	struct mbuf	*m_head;
	bus_dmamap_t	map;	/* bus_dma map for packet */
	bus_addr_t	paddr;
};

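/*
 * UPDATE_VF_REG extends a 32-bit hardware counter into a 64-bit software
 * counter: when the freshly read register value is smaller than the last
 * value seen, the 32-bit counter has wrapped and the high half of `cur' is
 * bumped; the low half of `cur' is then replaced with the new reading.
 * A local `hw' (struct e1000_hw *) must be in scope.  Illustrative use
 * (register and field names are examples only):
 *	UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc);
 */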
#define UPDATE_VF_REG(reg, last, cur)		\
{						\
	uint32_t new = E1000_READ_REG(hw, reg);	\
	if (new < last)				\
		cur += 0x100000000LL;		\
	last = new;				\
	cur &= 0xFFFFFFFF00000000LL;		\
	cur |= new;				\
}

#define IGB_I210_LINK_DELAY	1000	/* unit: ms */

#endif /* _IF_IGB_H_ */