xref: /dragonfly/sys/dev/netif/igb/if_igb.h (revision 38c2ea22)
/*
 * Copyright (c) 2001-2011, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _IF_IGB_H_
#define _IF_IGB_H_

/* Tunables */

/*
 * IGB_TXD: Maximum number of Transmit Descriptors
 *
 *   This value is the number of transmit descriptors allocated by the driver.
 *   Increasing this value allows the driver to queue more transmits. Each
 *   descriptor is 16 bytes.
 *   Since TDLEN should be a multiple of 128 bytes, the number of transmit
 *   descriptors should meet the following condition:
 *      (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
 */
#define IGB_MIN_TXD		256
#define IGB_DEFAULT_TXD		1024
#define IGB_MAX_TXD		4096

/*
 * IGB_RXD: Maximum number of Receive Descriptors
 *
 *   This value is the number of receive descriptors allocated by the driver.
 *   Increasing this value allows the driver to buffer more incoming packets.
 *   Each descriptor is 16 bytes.  A receive buffer is also allocated for each
 *   descriptor. The maximum MTU size is 16110.
 *   Since RDLEN should be a multiple of 128 bytes, the number of receive
 *   descriptors should meet the following condition:
 *      (num_rx_desc * sizeof(union e1000_adv_rx_desc)) % 128 == 0
 */
#define IGB_MIN_RXD		256
#define IGB_DEFAULT_RXD		1024
#define IGB_MAX_RXD		4096

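/*
 * Illustrative only: a compile-time check of the descriptor-count rule
 * above for the default ring sizes, assuming a CTASSERT()-style assertion
 * macro is available to the includer and that both descriptor formats are
 * 16 bytes.
 */
#if 0
CTASSERT((IGB_DEFAULT_TXD * sizeof(struct e1000_tx_desc)) % 128 == 0);
CTASSERT((IGB_DEFAULT_RXD * sizeof(union e1000_adv_rx_desc)) % 128 == 0);
#endif
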
/*
 * This parameter controls when the driver calls the routine to reclaim
 * transmit descriptors. Cleaning earlier seems a win.
 */
#define IGB_TX_CLEANUP_THRESHOLD(sc)	((sc)->num_tx_desc / 2)

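/*
 * A minimal sketch of how this threshold gates descriptor reclamation in
 * the transmit path: once the number of free descriptors drops to the
 * threshold, the reclaim routine is run.  igb_txeof() is only an assumed
 * name for that routine here.
 */
#if 0
	if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD(txr->sc))
		igb_txeof(txr);
#endif
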
/*
 * This parameter controls whether or not autonegotiation is enabled.
 *              0 - Disable autonegotiation
 *              1 - Enable  autonegotiation
 */
#define DO_AUTO_NEG		1

/*
 * This parameter controls whether or not the driver will wait for
 * autonegotiation to complete.
 *              1 - Wait for autonegotiation to complete
 *              0 - Don't wait for autonegotiation to complete
 */
#define WAIT_FOR_AUTO_NEG_DEFAULT	0

/* Tunables -- End */

#define AUTONEG_ADV_DEFAULT	(ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
				 ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
				 ADVERTISE_1000_FULL)

#define AUTO_ALL_MODES			0

/* PHY master/slave setting */
#define IGB_MASTER_SLAVE		e1000_ms_hw_default

/*
 * Miscellaneous constants
 */
#define IGB_VENDOR_ID			0x8086

#define IGB_JUMBO_PBA			0x00000028
#define IGB_DEFAULT_PBA			0x00000030
#define IGB_SMARTSPEED_DOWNSHIFT	3
#define IGB_SMARTSPEED_MAX		15
#define IGB_MAX_LOOP			10

#define IGB_RX_PTHRESH			(hw->mac.type <= e1000_82576 ? 16 : 8)
#define IGB_RX_HTHRESH			8
#define IGB_RX_WTHRESH			1

#define IGB_TX_PTHRESH			8
#define IGB_TX_HTHRESH			1
#define IGB_TX_WTHRESH			((hw->mac.type != e1000_82575 && \
                                          sc->msix_mem) ? 1 : 16)

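/*
 * Note: the threshold macros above expect locals named "hw" and "sc" to be
 * in scope where they are used.  A rough sketch (not the driver's exact
 * code) of how the prefetch/host/write-back thresholds are packed into a
 * queue's TXDCTL register, per the 82575/82576 descriptor control layout:
 */
#if 0
	uint32_t txdctl = 0;

	txdctl |= IGB_TX_PTHRESH;		/* PTHRESH, bits 5:0 */
	txdctl |= IGB_TX_HTHRESH << 8;		/* HTHRESH, bits 13:8 */
	txdctl |= IGB_TX_WTHRESH << 16;		/* WTHRESH, bits 20:16 */
	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	E1000_WRITE_REG(hw, E1000_TXDCTL(txr->me), txdctl);
#endif
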
#define MAX_NUM_MULTICAST_ADDRESSES	128
#define IGB_FC_PAUSE_TIME		0x0680

#define IGB_INTR_RATE			10000

/*
 * TDBA/RDBA should be aligned on a 16 byte boundary, but TDLEN/RDLEN should
 * be a multiple of 128 bytes.  So we align TDBA/RDBA on a 128 byte boundary,
 * which also optimizes the cache line size effect.  The hardware supports
 * cache line sizes of up to 128 bytes.
 */
#define IGB_DBA_ALIGN			128

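/*
 * A sketch of how the descriptor ring byte sizes are typically rounded up
 * to this alignment before the descriptor DMA memory is allocated;
 * roundup2() comes from <sys/param.h>.  Illustrative only:
 */
#if 0
	size_t tsize = roundup2(sc->num_tx_desc * sizeof(struct e1000_tx_desc),
	    IGB_DBA_ALIGN);
	size_t rsize = roundup2(sc->num_rx_desc * sizeof(union e1000_adv_rx_desc),
	    IGB_DBA_ALIGN);
#endif
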
/* PCI Config defines */
#define IGB_MSIX_BAR			3

#define IGB_MAX_SCATTER			64
#define IGB_VFTA_SIZE			128
#define IGB_TSO_SIZE			(65535 + \
					 sizeof(struct ether_vlan_header))
#define IGB_TSO_SEG_SIZE		4096	/* Max dma segment size */
#define IGB_HDR_BUF			128
#define IGB_PKTTYPE_MASK		0x0000FFF0

#define IGB_CSUM_FEATURES		(CSUM_IP | CSUM_TCP | CSUM_UDP)
#define IGB_IPVHL_SIZE			1 /* sizeof(ip.ip_vhl) */
#define IGB_TXCSUM_MINHL		(ETHER_HDR_LEN + EVL_ENCAPLEN + \
					 IGB_IPVHL_SIZE)

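/*
 * IGB_TXCSUM_MINHL is the minimum number of contiguous bytes (Ethernet
 * header, a possible VLAN tag, plus the first byte of the IP header, which
 * carries ip_vhl) needed to set up a TX checksum offload context.  A rough
 * sketch of the kind of pullup this implies; not the driver's exact code:
 */
#if 0
	if (mp->m_len < IGB_TXCSUM_MINHL) {
		mp = m_pullup(mp, IGB_TXCSUM_MINHL);
		if (mp == NULL)
			return ENOBUFS;
	}
#endif
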
/* One for the TX csum offloading descriptor, the other 2 are reserved */
#define IGB_TX_RESERVED			3

/* Large enough for 64K TSO */
#define IGB_TX_SPARE			32

#define IGB_TX_OACTIVE_MAX		64

struct igb_softc;

/*
 * Bus dma information structure
 */
struct igb_dma {
	bus_addr_t		dma_paddr;
	void			*dma_vaddr;
	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		dma_map;
};

/*
 * Driver queue struct: this is the interrupt container
 * for the associated tx and rx ring.
 */
struct igb_queue {
	struct igb_softc	*sc;
	uint32_t		msix;		/* This queue's MSIX vector */
	uint32_t		eims;		/* This queue's EIMS bit */
	uint32_t		eitr_setting;
	struct resource		*res;
	void			*tag;
	struct igb_tx_ring	*txr;
	struct igb_rx_ring	*rxr;
	uint64_t		irqs;
};

/*
 * Transmit ring: one per queue
 */
struct igb_tx_ring {
	struct igb_softc	*sc;
	uint32_t		me;
	struct igb_dma		txdma;
	bus_dma_tag_t		tx_hdr_dtag;
	bus_dmamap_t		tx_hdr_dmap;
	bus_addr_t		tx_hdr_paddr;
	struct e1000_tx_desc	*tx_base;
	uint32_t		next_avail_desc;
	uint32_t		next_to_clean;
	uint32_t		*tx_hdr;
	int			tx_avail;
	struct igb_tx_buf	*tx_buf;
	bus_dma_tag_t		tx_tag;
	int			tx_nsegs;
	int			spare_desc;
	int			oact_lo_desc;
	int			oact_hi_desc;
	int			intr_nsegs;
	int			tx_intr_bit;
	uint32_t		tx_intr_mask;

	u_long			no_desc_avail;
	u_long			tx_packets;

	u_long			ctx_try_pullup;
	u_long			ctx_drop1;
	u_long			ctx_drop2;
	u_long			ctx_pullup1;
	u_long			ctx_pullup1_failed;
	u_long			ctx_pullup2;
	u_long			ctx_pullup2_failed;
};

/*
 * Receive ring: one per queue
 */
struct igb_rx_ring {
	struct igb_softc	*sc;
	uint32_t		me;
	struct igb_dma		rxdma;
	union e1000_adv_rx_desc	*rx_base;
	boolean_t		discard;
	uint32_t		next_to_check;
	struct igb_rx_buf	*rx_buf;
	bus_dma_tag_t		rx_tag;
	bus_dmamap_t		rx_sparemap;
	int			rx_intr_bit;
	uint32_t		rx_intr_mask;

	/*
	 * First/last mbuf pointers, for
	 * collecting multisegment RX packets.
	 */
	struct mbuf		*fmp;
	struct mbuf		*lmp;

	/* Soft stats */
	u_long			rx_packets;
};

struct igb_softc {
	struct arpcom		arpcom;
	struct e1000_hw		hw;

	struct e1000_osdep	osdep;
	device_t		dev;
	uint32_t		flags;
#define IGB_FLAG_SHARED_INTR	0x1

	bus_dma_tag_t		parent_tag;

	int			mem_rid;
	struct resource		*mem_res;

	struct resource		*msix_mem;
	void			*tag;
	uint32_t		que_mask;

	int			linkvec;
	int			link_mask;
	int			link_irq;

	struct ifmedia		media;
	struct callout		timer;

#if 0
	int			msix;	/* total vectors allocated */
#endif
	int			intr_type;
	int			intr_rid;
	struct resource		*intr_res;
	void			*intr_tag;

	int			if_flags;
	int			max_frame_size;
	int			min_frame_size;
	int			pause_frames;
	uint16_t		num_queues;
	uint16_t		vf_ifp;	/* a VF interface */

	/* Management and WOL features */
	int			wol;
	int			has_manage;

	/* Info about the interface */
	uint8_t			link_active;
	uint16_t		link_speed;
	uint16_t		link_duplex;
	uint32_t		smartspeed;
	uint32_t		dma_coalesce;

	int			intr_rate;

	/* Interface queues */
	struct igb_queue	*queues;
	uint32_t		intr_mask;

	/*
	 * Transmit rings
	 */
	struct igb_tx_ring	*tx_rings;
	int			num_tx_desc;

	/* Multicast array pointer */
	uint8_t			*mta;

	/*
	 * Receive rings
	 */
	struct igb_rx_ring	*rx_rings;
	int			num_rx_desc;
	uint32_t		rx_mbuf_sz;
	uint32_t		rx_mask;

	/* Misc stats maintained by the driver */
	u_long			dropped_pkts;
	u_long			mbuf_defrag_failed;
	u_long			no_tx_dma_setup;
	u_long			watchdog_events;
	u_long			rx_overruns;
	u_long			device_control;
	u_long			rx_control;
	u_long			int_mask;
	u_long			eint_mask;
	u_long			packet_buf_alloc_rx;
	u_long			packet_buf_alloc_tx;

	/* sysctl tree glue */
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;

	void			*stats;
};

struct igb_tx_buf {
	struct mbuf	*m_head;
	bus_dmamap_t	map;		/* bus_dma map for packet */
};

struct igb_rx_buf {
	struct mbuf	*m_head;
	bus_dmamap_t	map;	/* bus_dma map for packet */
	bus_addr_t	paddr;
};

#define UPDATE_VF_REG(reg, last, cur)		\
{						\
	uint32_t new = E1000_READ_REG(hw, reg);	\
	if (new < last)				\
		cur += 0x100000000LL;		\
	last = new;				\
	cur &= 0xFFFFFFFF00000000LL;		\
	cur |= new;				\
}

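/*
 * UPDATE_VF_REG() accumulates a 32-bit VF statistics register that rolls
 * over into a 64-bit soft counter: "last" remembers the previous raw
 * reading, and when the new reading is smaller the upper 32 bits of "cur"
 * are bumped to account for the wrap before the low 32 bits are replaced.
 * It expects a local "hw" in scope.  A hedged usage sketch; the stats
 * field names below are assumptions, not the driver's actual VF stats
 * layout:
 */
#if 0
	UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc);
	UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc);
#endif
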
#define IGB_IS_OACTIVE(txr)	((txr)->tx_avail < (txr)->oact_lo_desc)
#define IGB_IS_NOT_OACTIVE(txr)	((txr)->tx_avail >= (txr)->oact_hi_desc)

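/*
 * These two macros give the "output active" state some hysteresis: the
 * transmit path is marked busy once free descriptors fall below
 * oact_lo_desc, and is only marked idle again after reclamation raises the
 * count to at least oact_hi_desc.  A rough sketch of the intended use; the
 * marking helpers below are hypothetical stand-ins, not the stack's API:
 */
#if 0
	/* in the transmit path, before queueing more frames */
	if (IGB_IS_OACTIVE(txr))
		mark_tx_queue_full(txr);	/* hypothetical helper */

	/* in the TX completion path, after reclaiming descriptors */
	if (IGB_IS_NOT_OACTIVE(txr))
		clear_tx_queue_full(txr);	/* hypothetical helper */
#endif
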
#endif /* _IF_IGB_H_ */