xref: /dragonfly/sys/dev/netif/igb/if_igb.h (revision 19380330)
1 /*
2  * Copyright (c) 2001-2011, Intel Corporation
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  *  1. Redistributions of source code must retain the above copyright notice,
9  *     this list of conditions and the following disclaimer.
10  *
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  3. Neither the name of the Intel Corporation nor the names of its
16  *     contributors may be used to endorse or promote products derived from
17  *     this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #ifndef _IF_IGB_H_
33 #define _IF_IGB_H_
34 
/* Tunables */

/*
 * Max ring count
 *
 * Per-MAC upper bounds on the number of TX/RX rings supported.
 */
#define IGB_MAX_RING_82575	4
#define IGB_MAX_RING_I350	8
#define IGB_MAX_RING_82580	8
#define IGB_MAX_RING_82576	16
#define IGB_MIN_RING		1
#define IGB_MIN_RING_RSS	2	/* minimum ring count when RSS is used */

/*
 * Max TX/RX interrupt bits
 */
#define IGB_MAX_TXRXINT_82575	4	/* XXX not used */
#define IGB_MAX_TXRXINT_I350	8
#define IGB_MAX_TXRXINT_82580	8
#define IGB_MAX_TXRXINT_82576	16
#define IGB_MIN_TXRXINT		2	/* XXX VF? */

/*
 * Max IVAR (interrupt vector allocation register) count
 */
#define IGB_MAX_IVAR_I350	4
#define IGB_MAX_IVAR_82580	4
#define IGB_MAX_IVAR_82576	8
#define IGB_MAX_IVAR_VF		1
63 
/*
 * IGB_TXD: Maximum number of Transmit Descriptors
 *
 *   This value is the number of transmit descriptors allocated by the driver.
 *   Increasing this value allows the driver to queue more transmits. Each
 *   descriptor is 16 bytes.
 *   Since TDLEN should be a multiple of 128 bytes, the number of transmit
 *   descriptors should meet the following condition.
 *      (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
 */
#define IGB_MIN_TXD		256
#define IGB_DEFAULT_TXD		1024
#define IGB_MAX_TXD		4096
77 
/*
 * IGB_RXD: Maximum number of Receive Descriptors
 *
 *   This value is the number of receive descriptors allocated by the driver.
 *   Increasing this value allows the driver to buffer more incoming packets.
 *   Each descriptor is 16 bytes.  A receive buffer is also allocated for each
 *   descriptor. The maximum MTU size is 16110.
 *   Since RDLEN should be a multiple of 128 bytes, the number of receive
 *   descriptors should meet the following condition.
 *      (num_rx_desc * sizeof(union e1000_adv_rx_desc)) % 128 == 0
 */
#define IGB_MIN_RXD		256
#define IGB_DEFAULT_RXD		512
#define IGB_MAX_RXD		4096
92 
/*
 * This parameter controls when the driver calls the routine to reclaim
 * transmit descriptors. Cleaning earlier seems a win.
 */
#define IGB_TX_CLEANUP_THRESHOLD(sc)	((sc)->num_tx_desc / 2)

/*
 * This parameter controls whether or not autonegotiation is enabled.
 *              0 - Disable autonegotiation
 *              1 - Enable  autonegotiation
 */
#define DO_AUTO_NEG		1

/*
 * This parameter controls whether or not the driver will wait for
 * autonegotiation to complete.
 *              1 - Wait for autonegotiation to complete
 *              0 - Don't wait for autonegotiation to complete
 */
#define WAIT_FOR_AUTO_NEG_DEFAULT	0
113 
/* Tunables -- End */

/* Link speed/duplex modes advertised during autonegotiation */
#define AUTONEG_ADV_DEFAULT	(ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
				 ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
				 ADVERTISE_1000_FULL)

#define AUTO_ALL_MODES			0

/* PHY master/slave setting */
#define IGB_MASTER_SLAVE		e1000_ms_hw_default

/*
 * Miscellaneous constants
 */
#define IGB_VENDOR_ID			0x8086	/* Intel PCI vendor ID */

#define IGB_JUMBO_PBA			0x00000028
#define IGB_DEFAULT_PBA			0x00000030
#define IGB_SMARTSPEED_DOWNSHIFT	3
#define IGB_SMARTSPEED_MAX		15
#define IGB_MAX_LOOP			10
135 
/* RX descriptor ring thresholds (PTHRESH/HTHRESH/WTHRESH fields of RXDCTL) */
#define IGB_RX_PTHRESH			(hw->mac.type <= e1000_82576 ? 16 : 8)
#define IGB_RX_HTHRESH			8
#define IGB_RX_WTHRESH			1

/* TX descriptor ring thresholds (PTHRESH/HTHRESH/WTHRESH fields of TXDCTL) */
#define IGB_TX_PTHRESH			8
#define IGB_TX_HTHRESH			1
#define IGB_TX_WTHRESH			16

#define MAX_NUM_MULTICAST_ADDRESSES	128
#define IGB_FC_PAUSE_TIME		0x0680	/* flow control pause time */

/* Default interrupt rates */
#define IGB_INTR_RATE			6000
#define IGB_MSIX_RX_RATE		6000
#define IGB_MSIX_TX_RATE		4000
150 
/*
 * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
 * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
 * also optimize cache line size effect. H/W supports up to cache line size 128.
 */
#define IGB_DBA_ALIGN			128

/* PCI Config defines */
#define IGB_MSIX_BAR			3	/* BAR holding the MSI-X table */

#define IGB_MAX_SCATTER			64	/* max DMA segments per packet */
#define IGB_VFTA_SIZE			128	/* VLAN filter table entries */
#define IGB_TSO_SIZE			(IP_MAXPACKET + \
					 sizeof(struct ether_vlan_header))
#define IGB_HDR_BUF			128
#define IGB_PKTTYPE_MASK		0x0000FFF0

/* Checksum offloads supported on transmit */
#define IGB_CSUM_FEATURES		(CSUM_IP | CSUM_TCP | CSUM_UDP)
#define IGB_IPVHL_SIZE			1 /* sizeof(ip.ip_vhl) */
/* Minimum header bytes needed to set up TX checksum offload */
#define IGB_TXCSUM_MINHL		(ETHER_HDR_LEN + EVL_ENCAPLEN + \
					 IGB_IPVHL_SIZE)

/* One for TX csum offloading desc, the other 2 are reserved */
#define IGB_TX_RESERVED			3

/* Large enough for 64K TSO */
#define IGB_TX_SPARE			33

#define IGB_TX_OACTIVE_MAX		64

/* main + 16x RX + 16x TX */
#define IGB_NSERIALIZE			33

/* RSS hash key: IGB_NRSSRK registers of IGB_RSSRK_SIZE key bytes each */
#define IGB_NRSSRK			10
#define IGB_RSSRK_SIZE			4
/* Pack 4 key bytes into one little-endian 32 bit RSSRK register value */
#define IGB_RSSRK_VAL(key, i)		(key[(i) * IGB_RSSRK_SIZE] | \
					 key[(i) * IGB_RSSRK_SIZE + 1] << 8 | \
					 key[(i) * IGB_RSSRK_SIZE + 2] << 16 | \
					 key[(i) * IGB_RSSRK_SIZE + 3] << 24)

/* RSS redirect table: IGB_NRETA registers of IGB_RETA_SIZE entries each */
#define IGB_NRETA			32
#define IGB_RETA_SIZE			4
#define IGB_RETA_SHIFT			0
#define IGB_RETA_SHIFT_82575		6

/* EITR interrupt throttling interval field */
#define IGB_EITR_INTVL_MASK		0x7ffc
#define IGB_EITR_INTVL_SHIFT		2

struct igb_softc;	/* forward declaration */
200 
/*
 * Bus dma information structure: one DMA'able memory chunk, i.e.
 * a tag/map pair plus the virtual and physical address of the memory.
 */
struct igb_dma {
	bus_addr_t		dma_paddr;	/* physical (bus) address */
	void			*dma_vaddr;	/* kernel virtual address */
	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		dma_map;
};
210 
/*
 * Transmit ring: one per queue
 */
struct igb_tx_ring {
	struct lwkt_serialize	tx_serialize;	/* protects this ring */
	struct igb_softc	*sc;		/* back pointer to softc */
	uint32_t		me;		/* ring index */
	struct e1000_tx_desc	*tx_base;	/* TX descriptor array */
	int			num_tx_desc;	/* # of descriptors in ring */
	uint32_t		next_avail_desc;
	uint32_t		next_to_clean;
	uint32_t		*tx_hdr;	/* DMA'd header buffer; backed by tx_hdr_* below */
	int			tx_avail;	/* # of free descriptors */
	struct igb_tx_buf	*tx_buf;	/* per-descriptor soft state */
	bus_dma_tag_t		tx_tag;		/* tag for packet maps */
	int			tx_nsegs;
	int			spare_desc;
	int			oact_lo_desc;	/* OACTIVE entered below this (IGB_IS_OACTIVE) */
	int			oact_hi_desc;	/* OACTIVE left at/above this (IGB_IS_NOT_OACTIVE) */
	int			intr_nsegs;
	int			tx_intr_bit;
	uint32_t		tx_intr_mask;

	/* Soft stats */
	u_long			no_desc_avail;
	u_long			tx_packets;

	/* DMA resources for the descriptor ring and the header buffer */
	struct igb_dma		txdma;
	bus_dma_tag_t		tx_hdr_dtag;
	bus_dmamap_t		tx_hdr_dmap;
	bus_addr_t		tx_hdr_paddr;
} __cachealign;
243 
/*
 * Receive ring: one per queue
 */
struct igb_rx_ring {
	struct lwkt_serialize	rx_serialize;	/* protects this ring */
	struct igb_softc	*sc;		/* back pointer to softc */
	uint32_t		me;		/* ring index */
	union e1000_adv_rx_desc	*rx_base;	/* RX descriptor array */
	boolean_t		discard;	/* NOTE(review): presumably "discard rest of current frame" — confirm in if_igb.c */
	int			num_rx_desc;	/* # of descriptors in ring */
	uint32_t		next_to_check;
	struct igb_rx_buf	*rx_buf;	/* per-descriptor soft state */
	bus_dma_tag_t		rx_tag;		/* tag for packet maps */
	bus_dmamap_t		rx_sparemap;	/* spare map for mbuf replacement */
	int			rx_intr_bit;
	uint32_t		rx_intr_mask;

	/*
	 * First/last mbuf pointers, for
	 * collecting multisegment RX packets.
	 */
	struct mbuf		*fmp;
	struct mbuf		*lmp;

	/* Soft stats */
	u_long			rx_packets;

	struct igb_dma		rxdma;		/* descriptor ring DMA resources */
} __cachealign;
273 
/*
 * Per-vector MSI-X state.
 */
struct igb_msix_data {
	struct lwkt_serialize	*msix_serialize;	/* serializer used by this vector */
	struct lwkt_serialize	msix_serialize0;	/* backing storage for the above */
	struct igb_softc	*msix_sc;		/* back pointer to softc */
	uint32_t		msix_mask;		/* interrupt mask bits for this vector */
	struct igb_rx_ring	*msix_rx;		/* RX ring serviced, if any */
	struct igb_tx_ring	*msix_tx;		/* TX ring serviced, if any */

	driver_intr_t		*msix_func;		/* interrupt handler */
	void			*msix_arg;		/* handler argument */

	int			msix_cpuid;		/* target CPU */
	char			msix_desc[32];		/* vector description string */
	int			msix_rid;		/* interrupt resource id */
	struct resource		*msix_res;		/* interrupt resource */
	void			*msix_handle;		/* interrupt handler cookie */
	u_int			msix_vector;		/* MSI-X vector number */
	int			msix_rate;		/* interrupt rate */
	char			msix_rate_desc[32];	/* rate description string */
} __cachealign;
294 
/*
 * Per-device software state.
 */
struct igb_softc {
	struct arpcom		arpcom;		/* ifnet + ethernet state */
	struct e1000_hw		hw;		/* shared e1000 hardware layer state */

	struct e1000_osdep	osdep;
	device_t		dev;
	uint32_t		flags;
#define IGB_FLAG_SHARED_INTR	0x1	/* interrupt line is shared */
#define IGB_FLAG_HAS_MGMT	0x2	/* management capability present */
#define IGB_FLAG_TSO_IPLEN0	0x4	/* NOTE(review): looks like a "TSO needs ip_len 0" quirk — confirm */

	bus_dma_tag_t		parent_tag;	/* parent of all DMA tags */

	int			mem_rid;
	struct resource 	*mem_res;	/* register memory resource */

	struct ifmedia		media;
	struct callout		timer;		/* periodic timer */

	int			intr_type;
	int			intr_rid;
	struct resource		*intr_res;
	void			*intr_tag;

	int			if_flags;	/* saved ifnet flags */
	int			max_frame_size;
	int			pause_frames;
	uint16_t		vf_ifp;	/* a VF interface */

	/* Management and WOL features */
	int			wol;

	/* Info about the interface */
	uint8_t			link_active;
	uint16_t		link_speed;
	uint16_t		link_duplex;
	uint32_t		smartspeed;
	uint32_t		dma_coalesce;

	/* Multicast array pointer */
	uint8_t			*mta;

	/* Serializer bookkeeping; see IGB_NSERIALIZE */
	int			rx_npoll_off;
	int			tx_npoll_off;
	int			serialize_cnt;
	int			tx_serialize;
	int			rx_serialize;
	struct lwkt_serialize	*serializes[IGB_NSERIALIZE];
	struct lwkt_serialize	main_serialize;

	int			intr_rate;
	uint32_t		intr_mask;
	int			sts_intr_bit;
	uint32_t		sts_intr_mask;

	/*
	 * Transmit rings
	 */
	int			tx_ring_cnt;
	struct igb_tx_ring	*tx_rings;

	/*
	 * Receive rings
	 */
	int			rss_debug;
	int			rx_ring_cnt;
	int			rx_ring_msix;
	int			rx_ring_inuse;
	struct igb_rx_ring	*rx_rings;

	/* Misc stats maintained by the driver */
	u_long			dropped_pkts;
	u_long			mbuf_defrag_failed;
	u_long			no_tx_dma_setup;
	u_long			watchdog_events;
	u_long			rx_overruns;
	u_long			device_control;
	u_long			rx_control;
	u_long			int_mask;
	u_long			eint_mask;
	u_long			packet_buf_alloc_rx;
	u_long			packet_buf_alloc_tx;

	/* sysctl tree glue */
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;

	void 			*stats;		/* opaque statistics storage */

	/* MSI-X resources */
	int			msix_tx_cpuid;
	int			msix_mem_rid;
	struct resource 	*msix_mem_res;
	int			msix_cnt;
	struct igb_msix_data	*msix_data;
};
390 
/* Hardware RSS is used whenever more than one RX ring is configured */
#define IGB_ENABLE_HWRSS(sc)	((sc)->rx_ring_cnt > 1)
392 
/* Per-descriptor TX software state */
struct igb_tx_buf {
	struct mbuf	*m_head;	/* mbuf (chain) for this descriptor */
	bus_dmamap_t	map;		/* bus_dma map for packet */
};
397 
/* Per-descriptor RX software state */
struct igb_rx_buf {
	struct mbuf	*m_head;	/* mbuf attached to this descriptor */
	bus_dmamap_t	map;	/* bus_dma map for packet */
	bus_addr_t	paddr;	/* bus address of the mbuf data */
};
403 
/*
 * Maintain a 64 bit software extension (cur) of a 32 bit hardware
 * counter register (reg):
 *   - Read the current 32 bit value.
 *   - If it is smaller than the last observed value, the hardware
 *     counter wrapped, so carry one into the upper 32 bits of cur.
 *   - Merge the new low 32 bits into cur and remember them in last.
 *
 * Expects a variable named 'hw' (struct e1000_hw *) in scope.
 *
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and is safe in unbraced if/else bodies (the old bare
 * { } block broke "if (x) UPDATE_VF_REG(...); else ...").
 */
#define UPDATE_VF_REG(reg, last, cur)		\
do {						\
	uint32_t new = E1000_READ_REG(hw, reg);	\
	if (new < last)				\
		cur += 0x100000000LL;		\
	last = new;				\
	cur &= 0xFFFFFFFF00000000LL;		\
	cur |= new;				\
} while (0)
413 
/*
 * TX ring "output active" hysteresis: the ring is considered full once
 * tx_avail drops below oact_lo_desc, and is not considered usable again
 * until tx_avail has risen back to at least oact_hi_desc.
 */
#define IGB_IS_OACTIVE(txr)	((txr)->tx_avail < (txr)->oact_lo_desc)
#define IGB_IS_NOT_OACTIVE(txr)	((txr)->tx_avail >= (txr)->oact_hi_desc)
416 
417 #endif /* _IF_IGB_H_ */
418