xref: /dragonfly/sys/dev/netif/re/if_re.c (revision 3d33658b)
1 /*
2  * Copyright (c) 2004
3  *	Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
4  *
5  * Copyright (c) 1997, 1998-2003
6  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by Bill Paul.
19  * 4. Neither the name of the author nor the names of any co-contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33  * THE POSSIBILITY OF SUCH DAMAGE.
34  *
35  * $FreeBSD: src/sys/dev/re/if_re.c,v 1.25 2004/06/09 14:34:01 naddy Exp $
36  */
37 
38 /*
39  * RealTek 8169S/8110S/8168/8111/8101E PCI NIC driver
40  *
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Networking Software Engineer
43  * Wind River Systems
44  */
45 
46 /*
47  * This driver is designed to support RealTek's next generation of
48  * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
49  * six devices in this family: the RTL8169, the RTL8169S, the RTL8110S,
50  * the RTL8168, the RTL8111 and the RTL8101E.
51  *
52  * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC:
53  *
54  *	o Descriptor based DMA mechanism.  Each descriptor represents
55  *	  a single packet fragment. Data buffers may be aligned on
56  *	  any byte boundary.
57  *
58  *	o 64-bit DMA.
59  *
60  *	o TCP/IP checksum offload for both RX and TX.
61  *
62  *	o High and normal priority transmit DMA rings.
63  *
64  *	o VLAN tag insertion and extraction.
65  *
66  *	o TCP large send (segmentation offload).
67  *
68  *	o 1000Mbps mode.
69  *
70  *	o Jumbo frames.
71  *
72  *	o GMII and TBI ports/registers for interfacing with copper
73  *	  or fiber PHYs.
74  *
75  *	o RX and TX DMA rings can have up to 1024 descriptors.
76  *
77  * The 8169 does not have a built-in PHY.  Most reference boards use a
78  * Marvell 88E1000 'Alaska' copper gigE PHY.  The plain 8169/8110 is
79  * _no longer_ supported by this driver.
80  *
81  * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
82  * (the 'S' stands for 'single-chip').  These devices have the same
83  * programming API as the older 8169, but also have some vendor-specific
84  * registers for the on-board PHY.  The 8110S is a LAN-on-motherboard
85  * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
86  *
87  * This driver takes advantage of the RX and TX checksum offload and
88  * VLAN tag insertion/extraction features.  It also implements
89  * interrupt moderation using the timer interrupt registers, which
90  * significantly reduces interrupt load.
91  */
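
/*
 * Illustrative sketch of the simulated interrupt moderation scheme
 * mentioned above: RX/TX completion interrupts are masked and the
 * chip's one-shot timer is programmed instead, so a single timer
 * interrupt batches all work that accumulated during the interval.
 * This is NOT the driver's actual re_setup_sim_im(); the names
 * RE_TIMERINT and RE_ISR_TIMEOUT_EXPIRED are assumptions based on
 * if_rereg.h, and the tick conversion is a guess.
 */
#if 0
static void
re_sim_im_sketch(struct re_softc *sc)
{
	/* Convert the moderation time (usec) into chip timer ticks. */
	uint32_t ticks = sc->re_sim_time * sc->re_bus_speed;

	CSR_WRITE_4(sc, RE_TIMERINT, ticks);	/* one-shot reload value */
	CSR_WRITE_4(sc, RE_TIMERCNT, 1);	/* kick (reload) the timer */

	/*
	 * Interrupt only on timer expiry (and fatal errors); the
	 * handler then polls both the RX and TX rings.
	 */
	sc->re_intrs = RE_ISR_TIMEOUT_EXPIRED | RE_ISR_SYSTEM_ERR;
	CSR_WRITE_2(sc, RE_IMR, sc->re_intrs);
	sc->re_flags |= RE_F_TIMER_INTR;
}
#endif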
92 
93 #define _IP_VHL
94 
95 #include "opt_ifpoll.h"
96 
97 #include <sys/param.h>
98 #include <sys/bus.h>
99 #include <sys/endian.h>
100 #include <sys/kernel.h>
101 #include <sys/in_cksum.h>
102 #include <sys/interrupt.h>
103 #include <sys/malloc.h>
104 #include <sys/mbuf.h>
105 #include <sys/rman.h>
106 #include <sys/serialize.h>
107 #include <sys/socket.h>
108 #include <sys/sockio.h>
109 #include <sys/sysctl.h>
110 
111 #include <net/bpf.h>
112 #include <net/ethernet.h>
113 #include <net/if.h>
114 #include <net/ifq_var.h>
115 #include <net/if_arp.h>
116 #include <net/if_dl.h>
117 #include <net/if_media.h>
118 #include <net/if_poll.h>
119 #include <net/if_types.h>
120 #include <net/vlan/if_vlan_var.h>
121 #include <net/vlan/if_vlan_ether.h>
122 
123 #include <netinet/ip.h>
124 
125 #include "pcidevs.h"
126 #include <bus/pci/pcireg.h>
127 #include <bus/pci/pcivar.h>
128 
129 #include <dev/netif/re/if_rereg.h>
130 #include <dev/netif/re/if_revar.h>
131 #include <dev/netif/re/re.h>
132 #include <dev/netif/re/re_dragonfly.h>
133 
134 /*
135  * Various supported device vendors/types and their names.
136  */
137 static const struct re_type {
138 	uint16_t	re_vid;
139 	uint16_t	re_did;
140 	const char	*re_name;
141 } re_devs[] = {
142 	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE528T,
143 	  "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
144 
145 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8101E,
146 	  "RealTek 810x PCIe 10/100baseTX" },
147 
148 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8168,
149 	  "RealTek 8111/8168 PCIe Gigabit Ethernet" },
150 
151 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8168_1,
152 	  "RealTek 8168 PCIe Gigabit Ethernet" },
153 
154 #ifdef notyet
155 	/*
156 	 * This driver now only supports built-in PHYs.
157 	 */
158 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169,
159 	  "RealTek 8110/8169 Gigabit Ethernet" },
160 #endif
161 
162 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169SC,
163 	  "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
164 
165 	{ PCI_VENDOR_COREGA, PCI_PRODUCT_COREGA_CG_LAPCIGT,
166 	  "Corega CG-LAPCIGT Gigabit Ethernet" },
167 
168 	{ PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1032,
169 	  "Linksys EG1032 Gigabit Ethernet" },
170 
171 	{ PCI_VENDOR_USR2, PCI_PRODUCT_USR2_997902,
172 	  "US Robotics 997902 Gigabit Ethernet" },
173 
174 	{ PCI_VENDOR_TTTECH, PCI_PRODUCT_TTTECH_MC322,
175 	  "TTTech MC322 Gigabit Ethernet" },
176 
177 	{ 0, 0, NULL }
178 };
179 
180 static int	re_probe(device_t);
181 static int	re_attach(device_t);
182 static int	re_detach(device_t);
183 static int	re_suspend(device_t);
184 static int	re_resume(device_t);
185 static void	re_shutdown(device_t);
186 
187 static int	re_allocmem(device_t);
188 static void	re_freemem(device_t);
189 static void	re_freebufmem(struct re_softc *, int, int);
190 static int	re_encap(struct re_softc *, struct mbuf **, int *);
191 static int	re_newbuf_std(struct re_softc *, int, int);
192 #ifdef RE_JUMBO
193 static int	re_newbuf_jumbo(struct re_softc *, int, int);
194 #endif
195 static void	re_setup_rxdesc(struct re_softc *, int);
196 static int	re_rx_list_init(struct re_softc *);
197 static int	re_tx_list_init(struct re_softc *);
198 static int	re_rxeof(struct re_softc *);
199 static int	re_txeof(struct re_softc *);
200 static int	re_tx_collect(struct re_softc *);
201 static void	re_intr(void *);
202 static void	re_tick(void *);
203 static void	re_tick_serialized(void *);
204 static void	re_disable_aspm(device_t);
205 static void	re_link_up(struct re_softc *);
206 static void	re_link_down(struct re_softc *);
207 
208 static void	re_start(struct ifnet *, struct ifaltq_subque *);
209 static int	re_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
210 static void	re_init(void *);
211 static void	re_stop(struct re_softc *, boolean_t);
212 static void	re_watchdog(struct ifnet *);
213 
214 static void	re_setup_hw_im(struct re_softc *);
215 static void	re_setup_sim_im(struct re_softc *);
216 static void	re_disable_hw_im(struct re_softc *);
217 static void	re_disable_sim_im(struct re_softc *);
218 static void	re_config_imtype(struct re_softc *, int);
219 static void	re_setup_intr(struct re_softc *, int, int);
220 
221 static int	re_sysctl_hwtime(SYSCTL_HANDLER_ARGS, int *);
222 static int	re_sysctl_rxtime(SYSCTL_HANDLER_ARGS);
223 static int	re_sysctl_txtime(SYSCTL_HANDLER_ARGS);
224 static int	re_sysctl_simtime(SYSCTL_HANDLER_ARGS);
225 static int	re_sysctl_imtype(SYSCTL_HANDLER_ARGS);
226 
227 static int	re_jpool_alloc(struct re_softc *);
228 static void	re_jpool_free(struct re_softc *);
229 #ifdef RE_JUMBO
230 static struct re_jbuf *re_jbuf_alloc(struct re_softc *);
231 static void	re_jbuf_free(void *);
232 static void	re_jbuf_ref(void *);
233 #endif
234 
235 #ifdef IFPOLL_ENABLE
236 static void	re_npoll(struct ifnet *, struct ifpoll_info *);
237 static void	re_npoll_compat(struct ifnet *, void *, int);
238 #endif
239 
240 static device_method_t re_methods[] = {
241 	/* Device interface */
242 	DEVMETHOD(device_probe,		re_probe),
243 	DEVMETHOD(device_attach,	re_attach),
244 	DEVMETHOD(device_detach,	re_detach),
245 	DEVMETHOD(device_suspend,	re_suspend),
246 	DEVMETHOD(device_resume,	re_resume),
247 	DEVMETHOD(device_shutdown,	re_shutdown),
248 	DEVMETHOD_END
249 };
250 
251 static driver_t re_driver = {
252 	"re",
253 	re_methods,
254 	sizeof(struct re_softc)
255 };
256 
257 static devclass_t re_devclass;
258 
259 DECLARE_DUMMY_MODULE(if_re);
260 DRIVER_MODULE(if_re, pci, re_driver, re_devclass, NULL, NULL);
261 DRIVER_MODULE(if_re, cardbus, re_driver, re_devclass, NULL, NULL);
262 
263 static int	re_rx_desc_count = RE_RX_DESC_CNT_DEF;
264 static int	re_tx_desc_count = RE_TX_DESC_CNT_DEF;
265 static int	re_msi_enable = 1;
266 
267 TUNABLE_INT("hw.re.rx_desc_count", &re_rx_desc_count);
268 TUNABLE_INT("hw.re.tx_desc_count", &re_tx_desc_count);
269 TUNABLE_INT("hw.re.msi.enable", &re_msi_enable);
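
/*
 * All three knobs can be overridden from loader.conf(5) before the
 * module is loaded; the values below are examples only:
 *
 *   hw.re.rx_desc_count="512"
 *   hw.re.tx_desc_count="512"
 *   hw.re.msi.enable="0"
 */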
270 
271 static __inline void
272 re_free_rxchain(struct re_softc *sc)
273 {
274 	if (sc->re_head != NULL) {
275 		m_freem(sc->re_head);
276 		sc->re_head = sc->re_tail = NULL;
277 	}
278 }
279 
280 static int
281 re_probe(device_t dev)
282 {
283 	const struct re_type *t;
284 	uint16_t vendor, product;
285 
286 	vendor = pci_get_vendor(dev);
287 	product = pci_get_device(dev);
288 
289 	/*
290 	 * Only attach to rev.3 of the Linksys EG1032 adapter.
291 	 * Rev.2 is supported by sk(4).
292 	 */
293 	if (vendor == PCI_VENDOR_LINKSYS &&
294 	    product == PCI_PRODUCT_LINKSYS_EG1032 &&
295 	    pci_get_subdevice(dev) != PCI_SUBDEVICE_LINKSYS_EG1032_REV3)
296 		return ENXIO;
297 
298 	for (t = re_devs; t->re_name != NULL; t++) {
299 		if (product == t->re_did && vendor == t->re_vid)
300 			break;
301 	}
302 	if (t->re_name == NULL)
303 		return ENXIO;
304 
305 	device_set_desc(dev, t->re_name);
306 	return 0;
307 }
308 
309 static int
310 re_allocmem(device_t dev)
311 {
312 	struct re_softc *sc = device_get_softc(dev);
313 	bus_dmamem_t dmem;
314 	int error, i;
315 
316 	/*
317 	 * Allocate list data
318 	 */
319 	sc->re_ldata.re_tx_mbuf =
320 	kmalloc(sc->re_tx_desc_cnt * sizeof(struct mbuf *),
321 		M_DEVBUF, M_ZERO | M_WAITOK);
322 
323 	sc->re_ldata.re_rx_mbuf =
324 	kmalloc(sc->re_rx_desc_cnt * sizeof(struct mbuf *),
325 		M_DEVBUF, M_ZERO | M_WAITOK);
326 
327 	sc->re_ldata.re_rx_paddr =
328 	kmalloc(sc->re_rx_desc_cnt * sizeof(bus_addr_t),
329 		M_DEVBUF, M_ZERO | M_WAITOK);
330 
331 	sc->re_ldata.re_tx_dmamap =
332 	kmalloc(sc->re_tx_desc_cnt * sizeof(bus_dmamap_t),
333 		M_DEVBUF, M_ZERO | M_WAITOK);
334 
335 	sc->re_ldata.re_rx_dmamap =
336 	kmalloc(sc->re_rx_desc_cnt * sizeof(bus_dmamap_t),
337 		M_DEVBUF, M_ZERO | M_WAITOK);
338 
339 	/*
340 	 * Allocate the parent bus DMA tag appropriate for PCI.
341 	 */
342 	error = bus_dma_tag_create(NULL,	/* parent */
343 			1, 0,			/* alignment, boundary */
344 			BUS_SPACE_MAXADDR,	/* lowaddr */
345 			BUS_SPACE_MAXADDR,	/* highaddr */
346 			NULL, NULL,		/* filter, filterarg */
347 			BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
348 			0,			/* nsegments */
349 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
350 			0,			/* flags */
351 			&sc->re_parent_tag);
352 	if (error) {
353 		device_printf(dev, "could not allocate parent dma tag\n");
354 		return error;
355 	}
356 
357 	/* Allocate TX descriptor list. */
358 	error = bus_dmamem_coherent(sc->re_parent_tag,
359 			RE_RING_ALIGN, 0,
360 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
361 			RE_TX_LIST_SZ(sc), BUS_DMA_WAITOK | BUS_DMA_ZERO,
362 			&dmem);
363 	if (error) {
364 		device_printf(dev, "could not allocate TX ring\n");
365 		return error;
366 	}
367 	sc->re_ldata.re_tx_list_tag = dmem.dmem_tag;
368 	sc->re_ldata.re_tx_list_map = dmem.dmem_map;
369 	sc->re_ldata.re_tx_list = dmem.dmem_addr;
370 	sc->re_ldata.re_tx_list_addr = dmem.dmem_busaddr;
371 
372 	/* Allocate RX descriptor list. */
373 	error = bus_dmamem_coherent(sc->re_parent_tag,
374 			RE_RING_ALIGN, 0,
375 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
376 			RE_RX_LIST_SZ(sc), BUS_DMA_WAITOK | BUS_DMA_ZERO,
377 			&dmem);
378 	if (error) {
379 		device_printf(dev, "could not allocate RX ring\n");
380 		return error;
381 	}
382 	sc->re_ldata.re_rx_list_tag = dmem.dmem_tag;
383 	sc->re_ldata.re_rx_list_map = dmem.dmem_map;
384 	sc->re_ldata.re_rx_list = dmem.dmem_addr;
385 	sc->re_ldata.re_rx_list_addr = dmem.dmem_busaddr;
386 
387 	/* Allocate maps for TX mbufs. */
388 	error = bus_dma_tag_create(sc->re_parent_tag,
389 			1, 0,
390 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
391 			NULL, NULL,
392 			RE_FRAMELEN_MAX, RE_MAXSEGS, MCLBYTES,
393 			BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
394 			&sc->re_ldata.re_tx_mtag);
395 	if (error) {
396 		device_printf(dev, "could not allocate TX buf dma tag\n");
397 		return(error);
398 	}
399 
400 	/* Create DMA maps for TX buffers */
401 	for (i = 0; i < sc->re_tx_desc_cnt; i++) {
402 		error = bus_dmamap_create(sc->re_ldata.re_tx_mtag,
403 				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
404 				&sc->re_ldata.re_tx_dmamap[i]);
405 		if (error) {
406 			device_printf(dev, "can't create DMA map for TX buf\n");
407 			re_freebufmem(sc, i, 0);
408 			return(error);
409 		}
410 	}
411 
412 	/* Allocate maps for RX mbufs. */
413 	error = bus_dma_tag_create(sc->re_parent_tag,
414 			RE_RXBUF_ALIGN, 0,
415 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
416 			NULL, NULL,
417 			MCLBYTES, 1, MCLBYTES,
418 			BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,
419 			&sc->re_ldata.re_rx_mtag);
420 	if (error) {
421 		device_printf(dev, "could not allocate RX buf dma tag\n");
422 		return(error);
423 	}
424 
425 	/* Create spare DMA map for RX */
426 	error = bus_dmamap_create(sc->re_ldata.re_rx_mtag, BUS_DMA_WAITOK,
427 			&sc->re_ldata.re_rx_spare);
428 	if (error) {
429 		device_printf(dev, "can't create spare DMA map for RX\n");
430 		bus_dma_tag_destroy(sc->re_ldata.re_rx_mtag);
431 		sc->re_ldata.re_rx_mtag = NULL;
432 		return error;
433 	}
434 
435 	/* Create DMA maps for RX buffers */
436 	for (i = 0; i < sc->re_rx_desc_cnt; i++) {
437 		error = bus_dmamap_create(sc->re_ldata.re_rx_mtag,
438 				BUS_DMA_WAITOK, &sc->re_ldata.re_rx_dmamap[i]);
439 		if (error) {
440 			device_printf(dev, "can't create DMA map for RX buf\n");
441 			re_freebufmem(sc, sc->re_tx_desc_cnt, i);
442 			return(error);
443 		}
444 	}
445 
446 	/* Create jumbo buffer pool for RX if required */
447 	if (sc->re_caps & RE_C_CONTIGRX) {
448 		error = re_jpool_alloc(sc);
449 		if (error) {
450 			re_jpool_free(sc);
451 #ifdef RE_JUMBO
452 			/* Disable jumbo frame support */
453 			sc->re_maxmtu = ETHERMTU;
454 #endif
455 		}
456 	}
457 	return(0);
458 }
459 
460 static void
461 re_freebufmem(struct re_softc *sc, int tx_cnt, int rx_cnt)
462 {
463 	int i;
464 
465 	/* Destroy all the RX and TX buffer maps */
466 	if (sc->re_ldata.re_tx_mtag) {
467 		for (i = 0; i < tx_cnt; i++) {
468 			bus_dmamap_destroy(sc->re_ldata.re_tx_mtag,
469 					   sc->re_ldata.re_tx_dmamap[i]);
470 		}
471 		bus_dma_tag_destroy(sc->re_ldata.re_tx_mtag);
472 		sc->re_ldata.re_tx_mtag = NULL;
473 	}
474 
475 	if (sc->re_ldata.re_rx_mtag) {
476 		for (i = 0; i < rx_cnt; i++) {
477 			bus_dmamap_destroy(sc->re_ldata.re_rx_mtag,
478 					   sc->re_ldata.re_rx_dmamap[i]);
479 		}
480 		bus_dmamap_destroy(sc->re_ldata.re_rx_mtag,
481 				   sc->re_ldata.re_rx_spare);
482 		bus_dma_tag_destroy(sc->re_ldata.re_rx_mtag);
483 		sc->re_ldata.re_rx_mtag = NULL;
484 	}
485 }
486 
487 static void
488 re_freemem(device_t dev)
489 {
490 	struct re_softc *sc = device_get_softc(dev);
491 
492 	/* Unload and free the RX DMA ring memory and map */
493 	if (sc->re_ldata.re_rx_list_tag) {
494 		bus_dmamap_unload(sc->re_ldata.re_rx_list_tag,
495 				  sc->re_ldata.re_rx_list_map);
496 		bus_dmamem_free(sc->re_ldata.re_rx_list_tag,
497 				sc->re_ldata.re_rx_list,
498 				sc->re_ldata.re_rx_list_map);
499 		bus_dma_tag_destroy(sc->re_ldata.re_rx_list_tag);
500 	}
501 
502 	/* Unload and free the TX DMA ring memory and map */
503 	if (sc->re_ldata.re_tx_list_tag) {
504 		bus_dmamap_unload(sc->re_ldata.re_tx_list_tag,
505 				  sc->re_ldata.re_tx_list_map);
506 		bus_dmamem_free(sc->re_ldata.re_tx_list_tag,
507 				sc->re_ldata.re_tx_list,
508 				sc->re_ldata.re_tx_list_map);
509 		bus_dma_tag_destroy(sc->re_ldata.re_tx_list_tag);
510 	}
511 
512 	/* Free RX/TX buffer DMA resources */
513 	re_freebufmem(sc, sc->re_tx_desc_cnt, sc->re_rx_desc_cnt);
514 
515 	/* Unload and free the stats buffer and map */
516 	if (sc->re_ldata.re_stag) {
517 		bus_dmamap_unload(sc->re_ldata.re_stag, sc->re_ldata.re_smap);
518 		bus_dmamem_free(sc->re_ldata.re_stag,
519 				sc->re_ldata.re_stats,
520 				sc->re_ldata.re_smap);
521 		bus_dma_tag_destroy(sc->re_ldata.re_stag);
522 	}
523 
524 	if (sc->re_caps & RE_C_CONTIGRX)
525 		re_jpool_free(sc);
526 
527 	if (sc->re_parent_tag)
528 		bus_dma_tag_destroy(sc->re_parent_tag);
529 
530 	if (sc->re_ldata.re_tx_mbuf != NULL)
531 		kfree(sc->re_ldata.re_tx_mbuf, M_DEVBUF);
532 	if (sc->re_ldata.re_rx_mbuf != NULL)
533 		kfree(sc->re_ldata.re_rx_mbuf, M_DEVBUF);
534 	if (sc->re_ldata.re_rx_paddr != NULL)
535 		kfree(sc->re_ldata.re_rx_paddr, M_DEVBUF);
536 	if (sc->re_ldata.re_tx_dmamap != NULL)
537 		kfree(sc->re_ldata.re_tx_dmamap, M_DEVBUF);
538 	if (sc->re_ldata.re_rx_dmamap != NULL)
539 		kfree(sc->re_ldata.re_rx_dmamap, M_DEVBUF);
540 }
541 
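/*
 * Return TRUE if the chip is a 10/100-only (Fast Ethernet) part,
 * FALSE for the gigE models; attach uses this to pick if_baudrate
 * and which media types to advertise.
 */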
542 static boolean_t
543 re_is_faste(struct re_softc *sc)
544 {
545 	if (pci_get_vendor(sc->dev) == PCI_VENDOR_REALTEK) {
546 		switch (sc->re_device_id) {
547 		case PCI_PRODUCT_REALTEK_RT8169:
548 		case PCI_PRODUCT_REALTEK_RT8169SC:
549 		case PCI_PRODUCT_REALTEK_RT8168:
550 		case PCI_PRODUCT_REALTEK_RT8168_1:
551 			return FALSE;
552 		default:
553 			return TRUE;
554 		}
555 	} else {
556 		return FALSE;
557 	}
558 }
559 
560 /*
561  * Attach the interface. Allocate softc structures, do ifmedia
562  * setup and ethernet/BPF attach.
563  */
564 static int
565 re_attach(device_t dev)
566 {
567 	struct re_softc	*sc = device_get_softc(dev);
568 	struct ifnet *ifp;
569 	struct sysctl_ctx_list *ctx;
570 	struct sysctl_oid *tree;
571 	uint8_t eaddr[ETHER_ADDR_LEN];
572 	int error = 0, qlen, msi_enable;
573 	u_int irq_flags;
574 
575 	callout_init_mp(&sc->re_timer);
576 	sc->dev = dev;
577 	sc->re_device_id = pci_get_device(dev);
578 	sc->re_unit = device_get_unit(dev);
579 	ifmedia_init(&sc->media, IFM_IMASK, rtl_ifmedia_upd, rtl_ifmedia_sts);
580 
581 	sc->re_caps = RE_C_HWIM;
582 
583 	sc->re_rx_desc_cnt = re_rx_desc_count;
584 	if (sc->re_rx_desc_cnt > RE_RX_DESC_CNT_MAX)
585 		sc->re_rx_desc_cnt = RE_RX_DESC_CNT_MAX;
586 
587 	sc->re_tx_desc_cnt = re_tx_desc_count;
588 	if (sc->re_tx_desc_cnt > RE_TX_DESC_CNT_MAX)
589 		sc->re_tx_desc_cnt = RE_TX_DESC_CNT_MAX;
590 
591 	qlen = RE_IFQ_MAXLEN;
592 	if (sc->re_tx_desc_cnt > qlen)
593 		qlen = sc->re_tx_desc_cnt;
594 
595 	sc->re_rxbuf_size = MCLBYTES;
596 	sc->re_newbuf = re_newbuf_std;
597 
598 	/*
599 	 * Hardware interrupt moderation settings.
600 	 * XXX these values are undocumented and do not seem correct.
601 	 */
602 	sc->re_tx_time = 5;		/* 125us */
603 	sc->re_rx_time = 2;		/* 50us */
604 
605 	/* Simulated interrupt moderation setting. */
606 	sc->re_sim_time = 150;		/* 150us */
607 
608 	/* Use simulated interrupt moderation by default. */
609 	sc->re_imtype = RE_IMTYPE_SIM;
610 	re_config_imtype(sc, sc->re_imtype);
611 
612 	ctx = device_get_sysctl_ctx(dev);
613 	tree = device_get_sysctl_tree(dev);
614 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
615 		       "rx_desc_count", CTLFLAG_RD, &sc->re_rx_desc_cnt,
616 		       0, "RX desc count");
617 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
618 		       "tx_desc_count", CTLFLAG_RD, &sc->re_tx_desc_cnt,
619 		       0, "TX desc count");
620 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "sim_time",
621 			CTLTYPE_INT | CTLFLAG_RW,
622 			sc, 0, re_sysctl_simtime, "I",
623 			"Simulated interrupt moderation time (usec).");
624 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "imtype",
625 			CTLTYPE_INT | CTLFLAG_RW,
626 			sc, 0, re_sysctl_imtype, "I",
627 			"Interrupt moderation type -- "
628 			"0:disable, 1:simulated, "
629 			"2:hardware(if supported)");
630 	if (sc->re_caps & RE_C_HWIM) {
631 		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
632 				OID_AUTO, "hw_rxtime",
633 				CTLTYPE_INT | CTLFLAG_RW,
634 				sc, 0, re_sysctl_rxtime, "I",
635 				"Hardware RX interrupt moderation time "
636 				"(unit: 25 usec).");
637 		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
638 				OID_AUTO, "hw_txtime",
639 				CTLTYPE_INT | CTLFLAG_RW,
640 				sc, 0, re_sysctl_txtime, "I",
641 				"Hardware TX interrupt moderation time "
642 				"(unit: 25 usec).");
643 	}
644 
645 #ifndef BURN_BRIDGES
646 	/*
647 	 * Handle power management nonsense.
648 	 */
649 
650 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
651 		uint32_t membase, irq;
652 
653 		/* Save important PCI config data. */
654 		membase = pci_read_config(dev, RE_PCI_LOMEM, 4);
655 		irq = pci_read_config(dev, PCIR_INTLINE, 4);
656 
657 		/* Reset the power state. */
658 		device_printf(dev, "chip is in D%d power mode "
659 		    "-- setting to D0\n", pci_get_powerstate(dev));
660 
661 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
662 
663 		/* Restore PCI config data. */
664 		pci_write_config(dev, RE_PCI_LOMEM, membase, 4);
665 		pci_write_config(dev, PCIR_INTLINE, irq, 4);
666 	}
667 #endif
668 	/*
669 	 * Map control/status registers.
670 	 */
671 	pci_enable_busmaster(dev);
672 
673 	if (pci_is_pcie(dev)) {
674 		sc->re_res_rid = PCIR_BAR(2);
675 		sc->re_res_type = SYS_RES_MEMORY;
676 	} else {
677 		sc->re_res_rid = PCIR_BAR(0);
678 		sc->re_res_type = SYS_RES_IOPORT;
679 	}
680 	sc->re_res = bus_alloc_resource_any(dev, sc->re_res_type,
681 	    &sc->re_res_rid, RF_ACTIVE);
682 	if (sc->re_res == NULL) {
683 		device_printf(dev, "couldn't map IO\n");
684 		error = ENXIO;
685 		goto fail;
686 	}
687 
688 	sc->re_btag = rman_get_bustag(sc->re_res);
689 	sc->re_bhandle = rman_get_bushandle(sc->re_res);
690 
691 	error = rtl_check_mac_version(sc);
692 	if (error) {
693 		device_printf(dev, "check mac version failed\n");
694 		goto fail;
695 	}
696 
697 	rtl_init_software_variable(sc);
698 	if (pci_is_pcie(dev))
699 		sc->re_if_flags |= RL_FLAG_PCIE;
700 	else
701 		sc->re_if_flags &= ~RL_FLAG_PCIE;
702 	device_printf(dev, "MAC version 0x%08x, MACFG %u%s%s%s\n",
703 	    (CSR_READ_4(sc, RE_TXCFG) & 0xFCF00000), sc->re_type,
704 	    sc->re_coalesce_tx_pkt ? ", software TX defrag" : "",
705 	    sc->re_pad_runt ? ", pad runt" : "",
706 	    sc->re_hw_enable_msi_msix ? ", support MSI" : "");
707 
708 	/*
709 	 * Allocate interrupt
710 	 */
711 	if (pci_is_pcie(dev) && sc->re_hw_enable_msi_msix)
712 		msi_enable = re_msi_enable;
713 	else
714 		msi_enable = 0;
715 	sc->re_irq_type = pci_alloc_1intr(dev, msi_enable,
716 	    &sc->re_irq_rid, &irq_flags);
717 
718 	sc->re_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->re_irq_rid,
719 					    irq_flags);
720 	if (sc->re_irq == NULL) {
721 		device_printf(dev, "couldn't map interrupt\n");
722 		error = ENXIO;
723 		goto fail;
724 	}
725 
726 	/* Disable ASPM */
727 	re_disable_aspm(dev);
728 
729 	rtl_exit_oob(sc);
730 	rtl_hw_init(sc);
731 
732 	/* Reset the adapter. */
733 	rtl_reset(sc);
734 
735 	rtl_get_hw_mac_address(sc, eaddr);
736 	if (sc->re_type == MACFG_3)	/* Change PCI latency timer */
737 		pci_write_config(dev, PCIR_LATTIMER, 0x40, 1);
738 
739 	/* Allocate DMA resources */
740 	error = re_allocmem(dev);
741 	if (error)
742 		goto fail;
743 
744 	if (pci_is_pcie(dev)) {
745 		sc->re_bus_speed = 125;
746 	} else {
747 		uint8_t cfg2;
748 
749 		cfg2 = CSR_READ_1(sc, RE_CFG2);
750 		switch (cfg2 & RE_CFG2_PCICLK_MASK) {
751 		case RE_CFG2_PCICLK_33MHZ:
752 			sc->re_bus_speed = 33;
753 			break;
754 		case RE_CFG2_PCICLK_66MHZ:
755 			sc->re_bus_speed = 66;
756 			break;
757 		default:
758 			device_printf(dev, "unknown bus speed, assume 33MHz\n");
759 			sc->re_bus_speed = 33;
760 			break;
761 		}
762 	}
763 	device_printf(dev, "bus speed %dMHz\n", sc->re_bus_speed);
764 
765 	rtl_phy_power_up(sc);
766 	rtl_hw_phy_config(sc);
767 	rtl_clrwol(sc);
768 
769 	/* TODO: jumbo frame */
770 	CSR_WRITE_2(sc, RE_RxMaxSize, sc->re_rxbuf_size);
771 
772 	/* Enable hardware checksum if available. */
773 	sc->re_tx_cstag = 1;
774 	sc->re_rx_cstag = 1;
775 
776 	ifp = &sc->arpcom.ac_if;
777 	ifp->if_softc = sc;
778 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
779 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
780 	ifp->if_ioctl = re_ioctl;
781 	ifp->if_start = re_start;
782 #ifdef IFPOLL_ENABLE
783 	ifp->if_npoll = re_npoll;
784 #endif
785 	ifp->if_watchdog = re_watchdog;
786 	ifp->if_init = re_init;
787 	if (!re_is_faste(sc))
788 		ifp->if_baudrate = 1000000000;
789 	else
790 		ifp->if_baudrate = 100000000;
791 	ifp->if_nmbclusters = sc->re_rx_desc_cnt;
792 	ifq_set_maxlen(&ifp->if_snd, qlen);
793 	ifq_set_ready(&ifp->if_snd);
794 
795 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
796 	    IFCAP_RXCSUM | IFCAP_TXCSUM;
797 	ifp->if_capenable = ifp->if_capabilities;
798 	/* NOTE: if_hwassist will be setup after the interface is up. */
799 
800 	/*
801 	 * Call MI attach routine.
802 	 */
803 	ether_ifattach(ifp, eaddr, NULL);
804 
805 	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->re_irq));
806 
807 #ifdef IFPOLL_ENABLE
808 	ifpoll_compat_setup(&sc->re_npoll, ctx, (struct sysctl_oid *)tree,
809 	    device_get_unit(dev), ifp->if_serializer);
810 #endif
811 
812 	/* Hook interrupt last to avoid having to lock softc */
813 	error = bus_setup_intr(dev, sc->re_irq, INTR_MPSAFE | INTR_HIFREQ,
814 	    re_intr, sc, &sc->re_intrhand, ifp->if_serializer);
815 	if (error) {
816 		device_printf(dev, "couldn't set up irq\n");
817 		ether_ifdetach(ifp);
818 		goto fail;
819 	}
820 
821 	ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
822 	ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
823 	ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
824 	ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
825 	if (!re_is_faste(sc)) {
826 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
827 		    0, NULL);
828 	}
829 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
830 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
831 	rtl_ifmedia_upd(ifp);
832 
833 fail:
834 	if (error)
835 		re_detach(dev);
836 
837 	return (error);
838 }
839 
840 /*
841  * Shutdown hardware and free up resources. This can be called any
842  * time after the mutex has been initialized. It is called in both
843  * the error case in attach and the normal detach case so it needs
844  * to be careful about only freeing resources that have actually been
845  * allocated.
846  */
847 static int
848 re_detach(device_t dev)
849 {
850 	struct re_softc *sc = device_get_softc(dev);
851 	struct ifnet *ifp = &sc->arpcom.ac_if;
852 
853 	/* These should only be active if attach succeeded */
854 	if (device_is_attached(dev)) {
855 		lwkt_serialize_enter(ifp->if_serializer);
856 		re_stop(sc, TRUE);
857 		bus_teardown_intr(dev, sc->re_irq, sc->re_intrhand);
858 		lwkt_serialize_exit(ifp->if_serializer);
859 
860 		ether_ifdetach(ifp);
861 	}
862 	ifmedia_removeall(&sc->media);
863 
864 	if (sc->re_irq)
865 		bus_release_resource(dev, SYS_RES_IRQ, sc->re_irq_rid,
866 				     sc->re_irq);
867 
868 	if (sc->re_irq_type == PCI_INTR_TYPE_MSI)
869 		pci_release_msi(dev);
870 
871 	if (sc->re_res) {
872 		bus_release_resource(dev, sc->re_res_type, sc->re_res_rid,
873 		    sc->re_res);
874 	}
875 	rtl_cmac_unmap(sc);
876 
877 	/* Free DMA resources */
878 	re_freemem(dev);
879 
880 	return(0);
881 }
882 
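/*
 * Point RX descriptor 'idx' at its preloaded buffer address and hand
 * it back to the chip (OWN bit), setting end-of-ring on the last slot.
 */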
883 static void
884 re_setup_rxdesc(struct re_softc *sc, int idx)
885 {
886 	bus_addr_t paddr;
887 	uint32_t cmdstat;
888 	struct re_desc *d;
889 
890 	paddr = sc->re_ldata.re_rx_paddr[idx];
891 	d = &sc->re_ldata.re_rx_list[idx];
892 
893 	d->re_bufaddr_lo = htole32(RE_ADDR_LO(paddr));
894 	d->re_bufaddr_hi = htole32(RE_ADDR_HI(paddr));
895 
896 	cmdstat = sc->re_rxbuf_size | RE_RDESC_CMD_OWN;
897 	if (idx == (sc->re_rx_desc_cnt - 1))
898 		cmdstat |= RE_RDESC_CMD_EOR;
899 	d->re_cmdstat = htole32(cmdstat);
900 }
901 
902 static int
903 re_newbuf_std(struct re_softc *sc, int idx, int init)
904 {
905 	bus_dma_segment_t seg;
906 	bus_dmamap_t map;
907 	struct mbuf *m;
908 	int error, nsegs;
909 
910 	m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
911 	if (m == NULL) {
912 		error = ENOBUFS;
913 
914 		if (init) {
915 			if_printf(&sc->arpcom.ac_if, "m_getcl failed\n");
916 			return error;
917 		} else {
918 			goto back;
919 		}
920 	}
921 	m->m_len = m->m_pkthdr.len = MCLBYTES;
922 
923 	/*
924 	 * NOTE:
925 	 * re(4) chips need the address of the receive buffer to be 8-byte
926 	 * aligned, so don't call m_adj(m, ETHER_ALIGN) here.
927 	 */
928 
929 	error = bus_dmamap_load_mbuf_segment(sc->re_ldata.re_rx_mtag,
930 			sc->re_ldata.re_rx_spare, m,
931 			&seg, 1, &nsegs, BUS_DMA_NOWAIT);
932 	if (error) {
933 		m_freem(m);
934 		if (init) {
935 			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
936 			return error;
937 		} else {
938 			goto back;
939 		}
940 	}
941 
942 	if (!init) {
943 		bus_dmamap_sync(sc->re_ldata.re_rx_mtag,
944 				sc->re_ldata.re_rx_dmamap[idx],
945 				BUS_DMASYNC_POSTREAD);
946 		bus_dmamap_unload(sc->re_ldata.re_rx_mtag,
947 				  sc->re_ldata.re_rx_dmamap[idx]);
948 	}
949 	sc->re_ldata.re_rx_mbuf[idx] = m;
950 	sc->re_ldata.re_rx_paddr[idx] = seg.ds_addr;
951 
952 	map = sc->re_ldata.re_rx_dmamap[idx];
953 	sc->re_ldata.re_rx_dmamap[idx] = sc->re_ldata.re_rx_spare;
954 	sc->re_ldata.re_rx_spare = map;
955 back:
956 	re_setup_rxdesc(sc, idx);
957 	return error;
958 }
959 
960 #ifdef RE_JUMBO
961 static int
962 re_newbuf_jumbo(struct re_softc *sc, int idx, int init)
963 {
964 	struct mbuf *m;
965 	struct re_jbuf *jbuf;
966 	int error = 0;
967 
968 	MGETHDR(m, init ? M_WAITOK : M_NOWAIT, MT_DATA);
969 	if (m == NULL) {
970 		error = ENOBUFS;
971 		if (init) {
972 			if_printf(&sc->arpcom.ac_if, "MGETHDR failed\n");
973 			return error;
974 		} else {
975 			goto back;
976 		}
977 	}
978 
979 	jbuf = re_jbuf_alloc(sc);
980 	if (jbuf == NULL) {
981 		m_freem(m);
982 
983 		error = ENOBUFS;
984 		if (init) {
985 			if_printf(&sc->arpcom.ac_if, "jpool is empty\n");
986 			return error;
987 		} else {
988 			goto back;
989 		}
990 	}
991 
992 	m->m_ext.ext_arg = jbuf;
993 	m->m_ext.ext_buf = jbuf->re_buf;
994 	m->m_ext.ext_free = re_jbuf_free;
995 	m->m_ext.ext_ref = re_jbuf_ref;
996 	m->m_ext.ext_size = sc->re_rxbuf_size;
997 
998 	m->m_data = m->m_ext.ext_buf;
999 	m->m_flags |= M_EXT;
1000 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1001 
1002 	/*
1003 	 * NOTE:
1004 	 * Some re(4) chips (e.g. RTL8101E) need the address of the receive
1005 	 * buffer to be 8-byte aligned, so don't call m_adj(m, ETHER_ALIGN) here.
1006 	 */
1007 
1008 	sc->re_ldata.re_rx_mbuf[idx] = m;
1009 	sc->re_ldata.re_rx_paddr[idx] = jbuf->re_paddr;
1010 back:
1011 	re_setup_rxdesc(sc, idx);
1012 	return error;
1013 }
1014 #endif	/* RE_JUMBO */
1015 
1016 static int
1017 re_tx_list_init(struct re_softc *sc)
1018 {
1019 	bzero(sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc));
1020 
1021 	sc->re_ldata.re_tx_prodidx = 0;
1022 	sc->re_ldata.re_tx_considx = 0;
1023 	sc->re_ldata.re_tx_free = sc->re_tx_desc_cnt;
1024 
1025 	return(0);
1026 }
1027 
1028 static int
1029 re_rx_list_init(struct re_softc *sc)
1030 {
1031 	int i, error;
1032 
1033 	bzero(sc->re_ldata.re_rx_list, RE_RX_LIST_SZ(sc));
1034 
1035 	for (i = 0; i < sc->re_rx_desc_cnt; i++) {
1036 		error = sc->re_newbuf(sc, i, 1);
1037 		if (error)
1038 			return(error);
1039 	}
1040 
1041 	sc->re_ldata.re_rx_prodidx = 0;
1042 	sc->re_head = sc->re_tail = NULL;
1043 
1044 	return(0);
1045 }
1046 
1047 #define RE_IP4_PACKET	0x1
1048 #define RE_TCP_PACKET	0x2
1049 #define RE_UDP_PACKET	0x4
1050 
1051 static __inline uint8_t
1052 re_packet_type(struct re_softc *sc, uint32_t rxstat, uint32_t rxctrl)
1053 {
1054 	uint8_t packet_type = 0;
1055 
1056 	if (sc->re_if_flags & RL_FLAG_DESCV2) {
1057 		if (rxctrl & RE_RDESC_CTL_PROTOIP4)
1058 			packet_type |= RE_IP4_PACKET;
1059 	} else {
1060 		if (rxstat & RE_RDESC_STAT_PROTOID)
1061 			packet_type |= RE_IP4_PACKET;
1062 	}
1063 	if (RE_TCPPKT(rxstat))
1064 		packet_type |= RE_TCP_PACKET;
1065 	else if (RE_UDPPKT(rxstat))
1066 		packet_type |= RE_UDP_PACKET;
1067 	return packet_type;
1068 }
1069 
1070 /*
1071  * RX handler for C+ and 8169. For the gigE chips, we support
1072  * the reception of jumbo frames that have been fragmented
1073  * across multiple 2K mbuf cluster buffers.
1074  */
1075 static int
1076 re_rxeof(struct re_softc *sc)
1077 {
1078 	struct ifnet *ifp = &sc->arpcom.ac_if;
1079 	struct mbuf *m;
1080 	struct re_desc 	*cur_rx;
1081 	uint32_t rxstat, rxctrl;
1082 	int i, total_len, rx = 0;
1083 
1084 	for (i = sc->re_ldata.re_rx_prodidx;
1085 	     RE_OWN(&sc->re_ldata.re_rx_list[i]) == 0; RE_RXDESC_INC(sc, i)) {
1086 		cur_rx = &sc->re_ldata.re_rx_list[i];
1087 		m = sc->re_ldata.re_rx_mbuf[i];
1088 		total_len = RE_RXBYTES(cur_rx);
1089 		rxstat = le32toh(cur_rx->re_cmdstat);
1090 		rxctrl = le32toh(cur_rx->re_control);
1091 
1092 		rx = 1;
1093 
1094 #ifdef INVARIANTS
1095 		if (sc->re_flags & RE_F_USE_JPOOL)
1096 			KKASSERT(rxstat & RE_RDESC_STAT_EOF);
1097 #endif
1098 
1099 		if ((rxstat & RE_RDESC_STAT_EOF) == 0) {
1100 			if (sc->re_flags & RE_F_DROP_RXFRAG) {
1101 				re_setup_rxdesc(sc, i);
1102 				continue;
1103 			}
1104 
1105 			if (sc->re_newbuf(sc, i, 0)) {
1106 				/* Drop upcoming fragments */
1107 				sc->re_flags |= RE_F_DROP_RXFRAG;
1108 				continue;
1109 			}
1110 
1111 			m->m_len = MCLBYTES;
1112 			if (sc->re_head == NULL) {
1113 				sc->re_head = sc->re_tail = m;
1114 			} else {
1115 				sc->re_tail->m_next = m;
1116 				sc->re_tail = m;
1117 			}
1118 			continue;
1119 		} else if (sc->re_flags & RE_F_DROP_RXFRAG) {
1120 			/*
1121 			 * Last fragment of a multi-fragment packet.
1122 			 *
1123 			 * Since error already happened, this fragment
1124 			 * must be dropped as well as the fragment chain.
1125 			 */
1126 			re_setup_rxdesc(sc, i);
1127 			re_free_rxchain(sc);
1128 			sc->re_flags &= ~RE_F_DROP_RXFRAG;
1129 			continue;
1130 		}
1131 
1132 		rxstat >>= 1;
1133 		if (rxstat & RE_RDESC_STAT_RXERRSUM) {
1134 			IFNET_STAT_INC(ifp, ierrors, 1);
1135 			/*
1136 			 * If this is part of a multi-fragment packet,
1137 			 * discard all the pieces.
1138 			 */
1139 			re_free_rxchain(sc);
1140 			re_setup_rxdesc(sc, i);
1141 			continue;
1142 		}
1143 
1144 		/*
1145 		 * If allocating a replacement mbuf fails,
1146 		 * reload the current one.
1147 		 */
1148 
1149 		if (sc->re_newbuf(sc, i, 0)) {
1150 			IFNET_STAT_INC(ifp, ierrors, 1);
1151 			continue;
1152 		}
1153 
1154 		if (sc->re_head != NULL) {
1155 			m->m_len = total_len % MCLBYTES;
1156 			/*
1157 			 * Special case: if there are 4 bytes or less
1158 			 * in this buffer, the mbuf can be discarded:
1159 			 * the last 4 bytes are the CRC, which we don't
1160 			 * care about anyway.
1161 			 */
1162 			if (m->m_len <= ETHER_CRC_LEN) {
1163 				sc->re_tail->m_len -=
1164 				    (ETHER_CRC_LEN - m->m_len);
1165 				m_freem(m);
1166 			} else {
1167 				m->m_len -= ETHER_CRC_LEN;
1168 				sc->re_tail->m_next = m;
1169 			}
1170 			m = sc->re_head;
1171 			sc->re_head = sc->re_tail = NULL;
1172 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1173 		} else {
1174 			m->m_pkthdr.len = m->m_len =
1175 			    (total_len - ETHER_CRC_LEN);
1176 		}
1177 
1178 		IFNET_STAT_INC(ifp, ipackets, 1);
1179 		m->m_pkthdr.rcvif = ifp;
1180 
1181 		/* Do RX checksumming if enabled */
1182 
1183 		if (ifp->if_capenable & IFCAP_RXCSUM) {
1184 			uint8_t packet_type;
1185 
1186 			packet_type = re_packet_type(sc, rxstat, rxctrl);
1187 
1188 			/* Check IP header checksum */
1189 			if (packet_type & RE_IP4_PACKET) {
1190 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1191 				if ((rxstat & RE_RDESC_STAT_IPSUMBAD) == 0)
1192 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1193 			}
1194 
1195 			/* Check TCP/UDP checksum */
1196 			if (((packet_type & RE_TCP_PACKET) &&
1197 			     (rxstat & RE_RDESC_STAT_TCPSUMBAD) == 0) ||
1198 			    ((packet_type & RE_UDP_PACKET) &&
1199 			     (rxstat & RE_RDESC_STAT_UDPSUMBAD) == 0)) {
1200 				m->m_pkthdr.csum_flags |=
1201 				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR|
1202 				    CSUM_FRAG_NOT_CHECKED;
1203 				m->m_pkthdr.csum_data = 0xffff;
1204 			}
1205 		}
1206 
1207 		if (rxctrl & RE_RDESC_CTL_HASTAG) {
1208 			m->m_flags |= M_VLANTAG;
1209 			m->m_pkthdr.ether_vlantag =
1210 				be16toh((rxctrl & RE_RDESC_CTL_TAGDATA));
1211 		}
1212 		ifp->if_input(ifp, m, NULL, -1);
1213 	}
1214 
1215 	sc->re_ldata.re_rx_prodidx = i;
1216 
1217 	return rx;
1218 }
1219 
1220 #undef RE_IP4_PACKET
1221 #undef RE_TCP_PACKET
1222 #undef RE_UDP_PACKET
1223 
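/*
 * Reclaim TX descriptors the chip has finished with, starting at the
 * consumer index: free the mbuf stashed in the last descriptor of each
 * chain and update the statistics.  Returns non-zero if any descriptor
 * was reclaimed.
 */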
1224 static int
1225 re_tx_collect(struct re_softc *sc)
1226 {
1227 	struct ifnet *ifp = &sc->arpcom.ac_if;
1228 	uint32_t txstat;
1229 	int idx, tx = 0;
1230 
1231 	for (idx = sc->re_ldata.re_tx_considx;
1232 	     sc->re_ldata.re_tx_free < sc->re_tx_desc_cnt;
1233 	     RE_TXDESC_INC(sc, idx)) {
1234 		txstat = le32toh(sc->re_ldata.re_tx_list[idx].re_cmdstat);
1235 		if (txstat & RE_TDESC_CMD_OWN)
1236 			break;
1237 
1238 		tx = 1;
1239 
1240 		sc->re_ldata.re_tx_list[idx].re_bufaddr_lo = 0;
1241 
1242 		/*
1243 		 * We only stash mbufs in the last descriptor
1244 		 * in a fragment chain, which also happens to
1245 		 * be the only place where the TX status bits
1246 		 * are valid.
1247 		 */
1248 		if (txstat & RE_TDESC_CMD_EOF) {
1249 			bus_dmamap_unload(sc->re_ldata.re_tx_mtag,
1250 			    sc->re_ldata.re_tx_dmamap[idx]);
1251 			m_freem(sc->re_ldata.re_tx_mbuf[idx]);
1252 			sc->re_ldata.re_tx_mbuf[idx] = NULL;
1253 			if (txstat & (RE_TDESC_STAT_EXCESSCOL|
1254 			    RE_TDESC_STAT_COLCNT))
1255 				IFNET_STAT_INC(ifp, collisions, 1);
1256 			if (txstat & RE_TDESC_STAT_TXERRSUM)
1257 				IFNET_STAT_INC(ifp, oerrors, 1);
1258 			else
1259 				IFNET_STAT_INC(ifp, opackets, 1);
1260 		}
1261 		sc->re_ldata.re_tx_free++;
1262 	}
1263 	sc->re_ldata.re_tx_considx = idx;
1264 
1265 	return tx;
1266 }
1267 
1268 static int
1269 re_txeof(struct re_softc *sc)
1270 {
1271 	struct ifnet *ifp = &sc->arpcom.ac_if;
1272 	int tx;
1273 
1274 	tx = re_tx_collect(sc);
1275 
1276 	/* There are enough free TX descriptors. */
1277 	if (sc->re_ldata.re_tx_free > RE_TXDESC_SPARE)
1278 		ifq_clr_oactive(&ifp->if_snd);
1279 
1280 	/*
1281 	 * Some chips will ignore a second TX request issued while an
1282 	 * existing transmission is in progress. If the transmitter goes
1283 	 * idle but there are still packets waiting to be sent, we need
1284 	 * to restart the channel here to flush them out. This only seems
1285 	 * to be required with the PCIe devices.
1286 	 */
1287 	if (sc->re_ldata.re_tx_free < sc->re_tx_desc_cnt)
1288 		CSR_WRITE_1(sc, RE_TPPOLL, RE_NPQ);
1289 	else
1290 		ifp->if_timer = 0;
1291 
1292 	return tx;
1293 }
1294 
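/*
 * Once-a-second callout: track the PHY link state and call
 * re_link_up()/re_link_down() when it changes.
 */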
1295 static void
1296 re_tick(void *xsc)
1297 {
1298 	struct re_softc *sc = xsc;
1299 
1300 	lwkt_serialize_enter(sc->arpcom.ac_if.if_serializer);
1301 	re_tick_serialized(xsc);
1302 	lwkt_serialize_exit(sc->arpcom.ac_if.if_serializer);
1303 }
1304 
1305 static void
1306 re_tick_serialized(void *xsc)
1307 {
1308 	struct re_softc *sc = xsc;
1309 	struct ifnet *ifp = &sc->arpcom.ac_if;
1310 
1311 	ASSERT_SERIALIZED(ifp->if_serializer);
1312 
1313 	if ((ifp->if_flags & IFF_RUNNING) == 0)
1314 		return;
1315 
1316 	if (rtl_link_ok(sc)) {
1317 		if ((sc->re_flags & RE_F_LINKED) == 0)
1318 			re_link_up(sc);
1319 	} else if (sc->re_flags & RE_F_LINKED) {
1320 		re_link_down(sc);
1321 	}
1322 	callout_reset(&sc->re_timer, hz, re_tick, sc);
1323 }
1324 
1325 #ifdef IFPOLL_ENABLE
1326 
1327 static void
1328 re_npoll_compat(struct ifnet *ifp, void *arg __unused, int count)
1329 {
1330 	struct re_softc *sc = ifp->if_softc;
1331 
1332 	ASSERT_SERIALIZED(ifp->if_serializer);
1333 
1334 	if (sc->re_npoll.ifpc_stcount-- == 0) {
1335 		uint16_t status;
1336 
1337 		sc->re_npoll.ifpc_stcount = sc->re_npoll.ifpc_stfrac;
1338 
1339 		status = CSR_READ_2(sc, RE_ISR);
1340 		if (status)
1341 			CSR_WRITE_2(sc, RE_ISR, status);
1342 
1343 		/*
1344 		 * XXX check behaviour on receiver stalls.
1345 		 */
1346 
1347 		if (status & RE_ISR_SYSTEM_ERR) {
1348 			rtl_reset(sc);
1349 			re_init(sc);
1350 			/* Done! */
1351 			return;
1352 		}
1353 	}
1354 
1355 	sc->rxcycles = count;
1356 	re_rxeof(sc);
1357 	re_txeof(sc);
1358 
1359 	if (!ifq_is_empty(&ifp->if_snd))
1360 		if_devstart(ifp);
1361 }
1362 
1363 static void
1364 re_npoll(struct ifnet *ifp, struct ifpoll_info *info)
1365 {
1366 	struct re_softc *sc = ifp->if_softc;
1367 
1368 	ASSERT_SERIALIZED(ifp->if_serializer);
1369 
1370 	if (info != NULL) {
1371 		int cpuid = sc->re_npoll.ifpc_cpuid;
1372 
1373 		info->ifpi_rx[cpuid].poll_func = re_npoll_compat;
1374 		info->ifpi_rx[cpuid].arg = NULL;
1375 		info->ifpi_rx[cpuid].serializer = ifp->if_serializer;
1376 
1377 		if (ifp->if_flags & IFF_RUNNING)
1378 			re_setup_intr(sc, 0, RE_IMTYPE_NONE);
1379 		ifq_set_cpuid(&ifp->if_snd, cpuid);
1380 	} else {
1381 		if (ifp->if_flags & IFF_RUNNING)
1382 			re_setup_intr(sc, 1, sc->re_imtype);
1383 		ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->re_irq));
1384 	}
1385 }
1386 #endif /* IFPOLL_ENABLE */
1387 
1388 static void
1389 re_intr(void *arg)
1390 {
1391 	struct re_softc	*sc = arg;
1392 	struct ifnet *ifp = &sc->arpcom.ac_if;
1393 	uint16_t status;
1394 	int proc;
1395 
1396 	ASSERT_SERIALIZED(ifp->if_serializer);
1397 
1398 	if ((sc->re_flags & RE_F_SUSPENDED) ||
1399 	    (ifp->if_flags & IFF_RUNNING) == 0)
1400 		return;
1401 
1402 	/* Disable interrupts. */
1403 	CSR_WRITE_2(sc, RE_IMR, 0);
1404 
1405 	status = CSR_READ_2(sc, RE_ISR);
1406 again:
1407 	proc = 0;
1408 	if (status)
1409 		CSR_WRITE_2(sc, RE_ISR, status);
1410 	if (status & sc->re_intrs) {
1411 		if (status & RE_ISR_SYSTEM_ERR) {
1412 			rtl_reset(sc);
1413 			re_init(sc);
1414 			/* Done! */
1415 			return;
1416 		}
1417 		proc |= re_rxeof(sc);
1418 		proc |= re_txeof(sc);
1419 	}
1420 
1421 	if (sc->re_imtype == RE_IMTYPE_SIM) {
1422 		if ((sc->re_flags & RE_F_TIMER_INTR)) {
1423 			if (!proc) {
1424 				/*
1425 				 * Nothing needs to be processed; fall back
1426 				 * to plain TX/RX interrupts.
1427 				 *
1428 				 * NOTE: This will re-enable interrupts.
1429 				 */
1430 				re_setup_intr(sc, 1, RE_IMTYPE_NONE);
1431 
1432 				/*
1433 				 * Recollect, mainly to avoid the possible
1434 				 * race introduced by changing interrupt
1435 				 * masks.
1436 				 */
1437 				re_rxeof(sc);
1438 				re_txeof(sc);
1439 			} else {
1440 				/* Re-enable interrupts. */
1441 				CSR_WRITE_2(sc, RE_IMR, sc->re_intrs);
1442 				CSR_WRITE_4(sc, RE_TIMERCNT, 1); /* reload */
1443 			}
1444 		} else if (proc) {
1445 			/*
1446 			 * Assume that using simulated interrupt moderation
1447 			 * (hardware timer based) could reduce the interrupt
1448 			 * rate.
1449 			 *
1450 			 * NOTE: This will re-enable interrupts.
1451 			 */
1452 			re_setup_intr(sc, 1, RE_IMTYPE_SIM);
1453 		} else {
1454 			/* Re-enable interrupts. */
1455 			CSR_WRITE_2(sc, RE_IMR, sc->re_intrs);
1456 		}
1457 	} else {
1458 		status = CSR_READ_2(sc, RE_ISR);
1459 		if (status & sc->re_intrs) {
1460 			if (!ifq_is_empty(&ifp->if_snd))
1461 				if_devstart(ifp);
1462 			/* NOTE: Interrupts are still disabled. */
1463 			goto again;
1464 		}
1465 		/* Re-enable interrupts. */
1466 		CSR_WRITE_2(sc, RE_IMR, sc->re_intrs);
1467 	}
1468 
1469 	if (!ifq_is_empty(&ifp->if_snd))
1470 		if_devstart(ifp);
1471 }
1472 
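/*
 * Encapsulate *m_head into the TX ring starting at *idx0: set up the
 * checksum/VLAN bits, DMA-map the mbuf chain (defragmenting it if
 * necessary) and fill in one descriptor per segment.  On success *idx0
 * is advanced past the chain; on failure the mbuf is freed.
 */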
1473 static int
1474 re_encap(struct re_softc *sc, struct mbuf **m_head, int *idx0)
1475 {
1476 	struct mbuf *m = *m_head;
1477 	bus_dma_segment_t segs[RE_MAXSEGS];
1478 	bus_dmamap_t map;
1479 	int error, maxsegs, idx, i, nsegs;
1480 	struct re_desc *d, *tx_ring;
1481 	uint32_t cmd_csum, ctl_csum, vlantag;
1482 
1483 	KASSERT(sc->re_ldata.re_tx_free > RE_TXDESC_SPARE,
1484 		("not enough free TX desc"));
1485 
1486 	if (sc->re_coalesce_tx_pkt && m->m_pkthdr.len != m->m_len) {
1487 		struct mbuf *m_new;
1488 
1489 		m_new = m_defrag(m, M_NOWAIT);
1490 		if (m_new == NULL) {
1491 			error = ENOBUFS;
1492 			goto back;
1493 		} else {
1494 			*m_head = m = m_new;
1495 			if (m->m_pkthdr.len != m->m_len) {
1496 				/* Still not contiguous; give up. */
1497 				error = ENOBUFS;
1498 				goto back;
1499 			}
1500 		}
1501 	}
1502 
1503 	map = sc->re_ldata.re_tx_dmamap[*idx0];
1504 
1505 	/*
1506 	 * Set up checksum offload. Note: checksum offload bits must
1507 	 * appear in all descriptors of a multi-descriptor transmit
1508 	 * attempt. (This is according to testing done with an 8169
1509 	 * chip. I'm not sure if this is a requirement or a bug.)
1510 	 */
1511 	cmd_csum = ctl_csum = 0;
1512 	if (m->m_pkthdr.csum_flags & CSUM_IP) {
1513 		cmd_csum |= RE_TDESC_CMD_IPCSUM;
1514 		ctl_csum |= RE_TDESC_CTL_IPCSUM;
1515 	}
1516 	if (m->m_pkthdr.csum_flags & CSUM_TCP) {
1517 		cmd_csum |= RE_TDESC_CMD_TCPCSUM;
1518 		ctl_csum |= RE_TDESC_CTL_TCPCSUM;
1519 	}
1520 	if (m->m_pkthdr.csum_flags & CSUM_UDP) {
1521 		cmd_csum |= RE_TDESC_CMD_UDPCSUM;
1522 		ctl_csum |= RE_TDESC_CTL_UDPCSUM;
1523 	}
1524 
1525 	/* For version2 descriptor, csum flags are set on re_control */
1526 	if (sc->re_if_flags & RL_FLAG_DESCV2)
1527 		cmd_csum = 0;
1528 	else
1529 		ctl_csum = 0;
1530 
1531 	if (sc->re_pad_runt) {
1532 		/*
1533 		 * With some of the RealTek chips, using the checksum offload
1534 		 * support in conjunction with the autopadding feature results
1535 		 * in the transmission of corrupt frames. For example, if we
1536 		 * need to send a really small IP fragment that's less than 60
1537 		 * bytes in size, and IP header checksumming is enabled, the
1538 		 * resulting ethernet frame that appears on the wire will
1539 		 * have a garbled payload. To work around this, if TX checksum
1540 		 * offload is enabled, we always manually pad short frames out
1541 		 * to the minimum ethernet frame size.
1542 		 *
1543 		 * Note: this appears unnecessary for TCP, and doing it for TCP
1544 		 * with PCIe adapters seems to result in bad checksums.
1545 		 */
1546 		if ((m->m_pkthdr.csum_flags &
1547 		     (CSUM_DELAY_IP | CSUM_DELAY_DATA)) &&
1548 		    (m->m_pkthdr.csum_flags & CSUM_TCP) == 0 &&
1549 		    m->m_pkthdr.len < RE_MIN_FRAMELEN) {
1550 			error = m_devpad(m, RE_MIN_FRAMELEN);
1551 			if (error)
1552 				goto back;
1553 		}
1554 	}
1555 
1556 	vlantag = 0;
1557 	if (m->m_flags & M_VLANTAG) {
1558 		vlantag = htobe16(m->m_pkthdr.ether_vlantag) |
1559 			  RE_TDESC_CTL_INSTAG;
1560 	}
1561 
1562 	maxsegs = sc->re_ldata.re_tx_free;
1563 	if (maxsegs > RE_MAXSEGS)
1564 		maxsegs = RE_MAXSEGS;
1565 
1566 	error = bus_dmamap_load_mbuf_defrag(sc->re_ldata.re_tx_mtag, map,
1567 			m_head, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1568 	if (error)
1569 		goto back;
1570 
1571 	m = *m_head;
1572 	bus_dmamap_sync(sc->re_ldata.re_tx_mtag, map, BUS_DMASYNC_PREWRITE);
1573 
1574 	/*
1575 	 * Map the segment array into descriptors.  We also keep track
1576 	 * of the end of the ring and set the end-of-ring bits as needed,
1577 	 * and we set the ownership bits in all except the very first
1578 	 * descriptor, whose ownership bits will be turned on later.
1579 	 */
1580 	tx_ring = sc->re_ldata.re_tx_list;
1581 	idx = *idx0;
1582 	i = 0;
1583 	for (;;) {
1584 		uint32_t cmdstat;
1585 
1586 		d = &tx_ring[idx];
1587 
1588 		cmdstat = segs[i].ds_len;
1589 		d->re_bufaddr_lo = htole32(RE_ADDR_LO(segs[i].ds_addr));
1590 		d->re_bufaddr_hi = htole32(RE_ADDR_HI(segs[i].ds_addr));
1591 		if (i == 0)
1592 			cmdstat |= RE_TDESC_CMD_SOF;
1593 		else
1594 			cmdstat |= RE_TDESC_CMD_OWN;
1595 		if (idx == (sc->re_tx_desc_cnt - 1))
1596 			cmdstat |= RE_TDESC_CMD_EOR;
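		/*
		 * Note that the checksum bits are ORed into every
		 * descriptor of the chain, per the comment at the
		 * top of this function.
		 */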
1597 		d->re_cmdstat = htole32(cmdstat | cmd_csum);
1598 		d->re_control = htole32(ctl_csum | vlantag);
1599 
1600 		i++;
1601 		if (i == nsegs)
1602 			break;
1603 		RE_TXDESC_INC(sc, idx);
1604 	}
1605 	d->re_cmdstat |= htole32(RE_TDESC_CMD_EOF);
1606 
1607 	/* Transfer ownership of packet to the chip. */
1608 	d->re_cmdstat |= htole32(RE_TDESC_CMD_OWN);
1609 	if (*idx0 != idx)
1610 		tx_ring[*idx0].re_cmdstat |= htole32(RE_TDESC_CMD_OWN);
1611 
1612 	/*
1613 	 * Ensure that the map for this transmission
1614 	 * is placed at the array index of the last descriptor
1615 	 * in this chain.
1616 	 */
1617 	sc->re_ldata.re_tx_dmamap[*idx0] = sc->re_ldata.re_tx_dmamap[idx];
1618 	sc->re_ldata.re_tx_dmamap[idx] = map;
1619 
1620 	sc->re_ldata.re_tx_mbuf[idx] = m;
1621 	sc->re_ldata.re_tx_free -= nsegs;
1622 
1623 	RE_TXDESC_INC(sc, idx);
1624 	*idx0 = idx;
1625 back:
1626 	if (error) {
1627 		m_freem(*m_head);
1628 		*m_head = NULL;
1629 	}
1630 	return error;
1631 }
1632 
1633 /*
1634  * Main transmit routine for C+ and gigE NICs.
1635  */
1636 
1637 static void
1638 re_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
1639 {
1640 	struct re_softc	*sc = ifp->if_softc;
1641 	struct mbuf *m_head;
1642 	int idx, need_trans, oactive, error;
1643 
1644 	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
1645 	ASSERT_SERIALIZED(ifp->if_serializer);
1646 
1647 	if ((sc->re_flags & RE_F_LINKED) == 0) {
1648 		ifq_purge(&ifp->if_snd);
1649 		return;
1650 	}
1651 
1652 	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
1653 		return;
1654 
1655 	idx = sc->re_ldata.re_tx_prodidx;
1656 
1657 	need_trans = 0;
1658 	oactive = 0;
1659 	while (sc->re_ldata.re_tx_mbuf[idx] == NULL) {
1660 		if (sc->re_ldata.re_tx_free <= RE_TXDESC_SPARE) {
1661 			if (!oactive) {
1662 				if (re_tx_collect(sc)) {
1663 					oactive = 1;
1664 					continue;
1665 				}
1666 			}
1667 			ifq_set_oactive(&ifp->if_snd);
1668 			break;
1669 		}
1670 
1671 		m_head = ifq_dequeue(&ifp->if_snd);
1672 		if (m_head == NULL)
1673 			break;
1674 
1675 		error = re_encap(sc, &m_head, &idx);
1676 		if (error) {
1677 			/* m_head has been freed by re_encap() if we reach here. */
1678 			IFNET_STAT_INC(ifp, oerrors, 1);
1679 
1680 			if (error == EFBIG && !oactive) {
1681 				if (re_tx_collect(sc)) {
1682 					oactive = 1;
1683 					continue;
1684 				}
1685 			}
1686 			ifq_set_oactive(&ifp->if_snd);
1687 			break;
1688 		}
1689 
1690 		oactive = 0;
1691 		need_trans = 1;
1692 
1693 		/*
1694 		 * If there's a BPF listener, bounce a copy of this frame
1695 		 * to him.
1696 		 */
1697 		ETHER_BPF_MTAP(ifp, m_head);
1698 	}
1699 
1700 	/*
1701 	 * If sc->re_ldata.re_tx_mbuf[idx] is not NULL it is possible
1702 	 * for OACTIVE to not be properly set when we also do not
1703 	 * have sufficient free TX descriptors, leaving packets in
1704 	 * ifp->if_snd.  This can cause if_start_dispatch() to loop
1705 	 * infinitely, so make sure OACTIVE is set properly.
1706 	 */
1707 	if (sc->re_ldata.re_tx_free <= RE_TXDESC_SPARE) {
1708 		if (!ifq_is_oactive(&ifp->if_snd)) {
1709 #if 0
1710 			if_printf(ifp, "Debug: OACTIVE was not set when "
1711 			    "re_tx_free was below minimum!\n");
1712 #endif
1713 			ifq_set_oactive(&ifp->if_snd);
1714 		}
1715 	}
1716 	if (!need_trans)
1717 		return;
1718 
1719 	sc->re_ldata.re_tx_prodidx = idx;
1720 
1721 	/*
1722 	 * RealTek put the TX poll request register in a different
1723 	 * location on the 8169 gigE chip. I don't know why.
1724 	 */
1725 	CSR_WRITE_1(sc, RE_TPPOLL, RE_NPQ);
1726 
1727 	/*
1728 	 * Set a timeout in case the chip goes out to lunch.
1729 	 */
1730 	ifp->if_timer = 5;
1731 }
1732 
1733 static void
1734 re_link_up(struct re_softc *sc)
1735 {
1736 	struct ifnet *ifp = &sc->arpcom.ac_if;
1737 	int error;
1738 
1739 	ASSERT_SERIALIZED(ifp->if_serializer);
1740 
1741 	rtl_link_on_patch(sc);
1742 	re_stop(sc, FALSE);
1743 	rtl_set_eaddr(sc);
1744 
1745 	error = re_rx_list_init(sc);
1746 	if (error) {
1747 		re_stop(sc, TRUE);
1748 		return;
1749 	}
1750 	error = re_tx_list_init(sc);
1751 	if (error) {
1752 		re_stop(sc, TRUE);
1753 		return;
1754 	}
1755 
1756 	/*
1757 	 * Load the addresses of the RX and TX lists into the chip.
1758 	 */
1759 	CSR_WRITE_4(sc, RE_RXLIST_ADDR_HI,
1760 	    RE_ADDR_HI(sc->re_ldata.re_rx_list_addr));
1761 	CSR_WRITE_4(sc, RE_RXLIST_ADDR_LO,
1762 	    RE_ADDR_LO(sc->re_ldata.re_rx_list_addr));
1763 
1764 	CSR_WRITE_4(sc, RE_TXLIST_ADDR_HI,
1765 	    RE_ADDR_HI(sc->re_ldata.re_tx_list_addr));
1766 	CSR_WRITE_4(sc, RE_TXLIST_ADDR_LO,
1767 	    RE_ADDR_LO(sc->re_ldata.re_tx_list_addr));
1768 
1769 	rtl_hw_start(sc);
1770 
1771 #ifdef IFPOLL_ENABLE
1772 	/*
1773 	 * Disable interrupts if we are polling.
1774 	 */
1775 	if (ifp->if_flags & IFF_NPOLLING)
1776 		re_setup_intr(sc, 0, RE_IMTYPE_NONE);
1777 	else	/* otherwise ... */
1778 #endif /* IFPOLL_ENABLE */
1779 	/*
1780 	 * Enable interrupts.
1781 	 */
1782 	re_setup_intr(sc, 1, sc->re_imtype);
1783 	CSR_WRITE_2(sc, RE_ISR, sc->re_intrs);
1784 
1785 	sc->re_flags |= RE_F_LINKED;
1786 	ifp->if_link_state = LINK_STATE_UP;
1787 	if_link_state_change(ifp);
1788 
1789 	if (bootverbose)
1790 		if_printf(ifp, "link UP\n");
1791 
1792 	if (!ifq_is_empty(&ifp->if_snd))
1793 		if_devstart(ifp);
1794 }
1795 
1796 static void
1797 re_link_down(struct re_softc *sc)
1798 {
1799 	struct ifnet *ifp = &sc->arpcom.ac_if;
1800 
1801 	/* NOTE: re_stop() will reset RE_F_LINKED. */
1802 	ifp->if_link_state = LINK_STATE_DOWN;
1803 	if_link_state_change(ifp);
1804 
1805 	re_stop(sc, FALSE);
1806 	rtl_ifmedia_upd(ifp);
1807 
1808 	if (bootverbose)
1809 		if_printf(ifp, "link DOWN\n");
1810 }
1811 
1812 static void
1813 re_init(void *xsc)
1814 {
1815 	struct re_softc *sc = xsc;
1816 	struct ifnet *ifp = &sc->arpcom.ac_if;
1817 
1818 	ASSERT_SERIALIZED(ifp->if_serializer);
1819 
1820 	re_stop(sc, TRUE);
1821 	if (rtl_link_ok(sc)) {
1822 		if (bootverbose)
1823 			if_printf(ifp, "link is UP in if_init\n");
1824 		re_link_up(sc);
1825 	}
1826 
1827 	ifp->if_flags |= IFF_RUNNING;
1828 	ifq_clr_oactive(&ifp->if_snd);
1829 
1830 	callout_reset(&sc->re_timer, hz, re_tick, sc);
1831 }
1832 
1833 static int
1834 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
1835 {
1836 	struct re_softc *sc = ifp->if_softc;
1837 	struct ifreq *ifr = (struct ifreq *)data;
1838 	int error = 0, mask;
1839 
1840 	ASSERT_SERIALIZED(ifp->if_serializer);
1841 
1842 	switch(command) {
1843 	case SIOCSIFMTU:
1844 #ifdef RE_JUMBO
1845 		if (ifr->ifr_mtu > sc->re_maxmtu) {
1846 			error = EINVAL;
1847 		} else if (ifp->if_mtu != ifr->ifr_mtu) {
1848 			ifp->if_mtu = ifr->ifr_mtu;
1849 			if (ifp->if_flags & IFF_RUNNING)
1850 				ifp->if_init(sc);
1851 		}
1852 #else
1853 		error = EOPNOTSUPP;
1854 #endif
1855 		break;
1856 
1857 	case SIOCSIFFLAGS:
1858 		if (ifp->if_flags & IFF_UP) {
1859 			if (ifp->if_flags & IFF_RUNNING) {
1860 				if ((ifp->if_flags ^ sc->re_saved_ifflags) &
1861 				    (IFF_PROMISC | IFF_ALLMULTI))
1862 					rtl_set_rx_packet_filter(sc);
1863 			} else {
1864 				re_init(sc);
1865 			}
1866 		} else if (ifp->if_flags & IFF_RUNNING) {
1867 			re_stop(sc, TRUE);
1868 		}
1869 		sc->re_saved_ifflags = ifp->if_flags;
1870 		break;
1871 
1872 	case SIOCADDMULTI:
1873 	case SIOCDELMULTI:
1874 		rtl_set_rx_packet_filter(sc);
1875 		break;
1876 
1877 	case SIOCGIFMEDIA:
1878 	case SIOCSIFMEDIA:
1879 		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
1880 		break;
1881 
1882 	case SIOCSIFCAP:
1883 		mask = (ifr->ifr_reqcap ^ ifp->if_capenable) &
1884 		       ifp->if_capabilities;
1885 		ifp->if_capenable ^= mask;
1886 
1887 		/* NOTE: re_init will setup if_hwassist. */
1888 		ifp->if_hwassist = 0;
1889 
1890 		/* Setup flags for the backend. */
1891 		if (ifp->if_capenable & IFCAP_RXCSUM)
1892 			sc->re_rx_cstag = 1;
1893 		else
1894 			sc->re_rx_cstag = 0;
1895 		if (ifp->if_capenable & IFCAP_TXCSUM)
1896 			sc->re_tx_cstag = 1;
1897 		else
1898 			sc->re_tx_cstag = 0;
1899 
1900 		if (mask && (ifp->if_flags & IFF_RUNNING))
1901 			re_init(sc);
1902 		break;
1903 
1904 	default:
1905 		error = ether_ioctl(ifp, command, data);
1906 		break;
1907 	}
1908 	return(error);
1909 }
1910 
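/*
 * TX watchdog: a transmit has been pending for too long.  Reclaim
 * completed RX/TX descriptors first; if TX descriptors are still
 * outstanding, reset and reinitialize the chip.
 */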
1911 static void
1912 re_watchdog(struct ifnet *ifp)
1913 {
1914 	struct re_softc *sc = ifp->if_softc;
1915 
1916 	ASSERT_SERIALIZED(ifp->if_serializer);
1917 
1918 	IFNET_STAT_INC(ifp, oerrors, 1);
1919 
1920 	re_txeof(sc);
1921 	re_rxeof(sc);
1922 
1923 	if (sc->re_ldata.re_tx_free != sc->re_tx_desc_cnt) {
1924 		if_printf(ifp, "watchdog timeout, txd free %d\n",
1925 		    sc->re_ldata.re_tx_free);
1926 		rtl_reset(sc);
1927 		re_init(sc);
1928 	}
1929 }
1930 
1931 /*
1932  * Stop the adapter and free any mbufs allocated to the
1933  * RX and TX lists.
1934  */
1935 static void
1936 re_stop(struct re_softc *sc, boolean_t full_stop)
1937 {
1938 	struct ifnet *ifp = &sc->arpcom.ac_if;
1939 	int i;
1940 
1941 	ASSERT_SERIALIZED(ifp->if_serializer);
1942 
1943 	/* Stop the adapter. */
1944 	rtl_stop(sc);
1945 
1946 	ifp->if_timer = 0;
1947 	if (full_stop) {
1948 		callout_stop(&sc->re_timer);
1949 		ifp->if_flags &= ~IFF_RUNNING;
1950 	}
1951 	ifq_clr_oactive(&ifp->if_snd);
1952 	sc->re_flags &= ~(RE_F_TIMER_INTR | RE_F_DROP_RXFRAG | RE_F_LINKED);
1953 
1954 	re_free_rxchain(sc);
1955 
1956 	/* Free the TX list buffers. */
1957 	for (i = 0; i < sc->re_tx_desc_cnt; i++) {
1958 		if (sc->re_ldata.re_tx_mbuf[i] != NULL) {
1959 			bus_dmamap_unload(sc->re_ldata.re_tx_mtag,
1960 					  sc->re_ldata.re_tx_dmamap[i]);
1961 			m_freem(sc->re_ldata.re_tx_mbuf[i]);
1962 			sc->re_ldata.re_tx_mbuf[i] = NULL;
1963 		}
1964 	}
1965 
1966 	/* Free the RX list buffers. */
1967 	for (i = 0; i < sc->re_rx_desc_cnt; i++) {
1968 		if (sc->re_ldata.re_rx_mbuf[i] != NULL) {
1969 			if ((sc->re_flags & RE_F_USE_JPOOL) == 0) {
1970 				bus_dmamap_unload(sc->re_ldata.re_rx_mtag,
1971 						  sc->re_ldata.re_rx_dmamap[i]);
1972 			}
1973 			m_freem(sc->re_ldata.re_rx_mbuf[i]);
1974 			sc->re_ldata.re_rx_mbuf[i] = NULL;
1975 		}
1976 	}
1977 }
1978 
1979 /*
1980  * Device suspend routine.  Stop the interface and save some PCI
1981  * settings in case the BIOS doesn't restore them properly on
1982  * resume.
1983  */
1984 static int
1985 re_suspend(device_t dev)
1986 {
1987 #ifndef BURN_BRIDGES
1988 	int i;
1989 #endif
1990 	struct re_softc *sc = device_get_softc(dev);
1991 	struct ifnet *ifp = &sc->arpcom.ac_if;
1992 
1993 	lwkt_serialize_enter(ifp->if_serializer);
1994 
1995 	re_stop(sc, TRUE);
1996 
1997 #ifndef BURN_BRIDGES
1998 	for (i = 0; i < 5; i++)
1999 		sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
2000 	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
2001 	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
2002 	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
2003 	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
2004 #endif
2005 
2006 	sc->re_flags |= RE_F_SUSPENDED;
2007 
2008 	lwkt_serialize_exit(ifp->if_serializer);
2009 
2010 	return (0);
2011 }
2012 
2013 /*
2014  * Device resume routine.  Restore some PCI settings in case the BIOS
2015  * doesn't, re-enable busmastering, and restart the interface if
2016  * appropriate.
2017  */
2018 static int
2019 re_resume(device_t dev)
2020 {
2021 	struct re_softc *sc = device_get_softc(dev);
2022 	struct ifnet *ifp = &sc->arpcom.ac_if;
2023 #ifndef BURN_BRIDGES
2024 	int i;
2025 #endif
2026 
2027 	lwkt_serialize_enter(ifp->if_serializer);
2028 
2029 #ifndef BURN_BRIDGES
2030 	/* better way to do this? */
2031 	for (i = 0; i < 5; i++)
2032 		pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
2033 	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
2034 	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
2035 	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
2036 	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
2037 
2038 	/* re-enable bus mastering */
2039 	pci_enable_busmaster(dev);
2040 	pci_enable_io(dev, SYS_RES_IOPORT);
2041 #endif
2042 
2043 	/* reinitialize interface if necessary */
2044 	if (ifp->if_flags & IFF_UP)
2045 		re_init(sc);
2046 
2047 	sc->re_flags &= ~RE_F_SUSPENDED;
2048 
2049 	lwkt_serialize_exit(ifp->if_serializer);
2050 
2051 	return (0);
2052 }
2053 
2054 /*
2055  * Stop all chip I/O so that the kernel's probe routines don't
2056  * get confused by errant DMAs when rebooting.
2057  */
2058 static void
2059 re_shutdown(device_t dev)
2060 {
2061 	struct re_softc *sc = device_get_softc(dev);
2062 	struct ifnet *ifp = &sc->arpcom.ac_if;
2063 
2064 	lwkt_serialize_enter(ifp->if_serializer);
2065 	re_stop(sc, TRUE);
2066 	rtl_hw_d3_para(sc);
2067 	rtl_phy_power_down(sc);
2068 	lwkt_serialize_exit(ifp->if_serializer);
2069 }
2070 
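/*
 * Sysctl handlers for the RX/TX hardware interrupt moderation
 * timers; both delegate to re_sysctl_hwtime().
 */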
2071 static int
2072 re_sysctl_rxtime(SYSCTL_HANDLER_ARGS)
2073 {
2074 	struct re_softc *sc = arg1;
2075 
2076 	return re_sysctl_hwtime(oidp, arg1, arg2, req, &sc->re_rx_time);
2077 }
2078 
2079 static int
2080 re_sysctl_txtime(SYSCTL_HANDLER_ARGS)
2081 {
2082 	struct re_softc *sc = arg1;
2083 
2084 	return re_sysctl_hwtime(oidp, arg1, arg2, req, &sc->re_tx_time);
2085 }
2086 
2087 static int
2088 re_sysctl_hwtime(SYSCTL_HANDLER_ARGS, int *hwtime)
2089 {
2090 	struct re_softc *sc = arg1;
2091 	struct ifnet *ifp = &sc->arpcom.ac_if;
2092 	int error, v;
2093 
2094 	lwkt_serialize_enter(ifp->if_serializer);
2095 
2096 	v = *hwtime;
2097 	error = sysctl_handle_int(oidp, &v, 0, req);
2098 	if (error || req->newptr == NULL)
2099 		goto back;
2100 
2101 	if (v <= 0) {
2102 		error = EINVAL;
2103 		goto back;
2104 	}
2105 
2106 	if (v != *hwtime) {
2107 		*hwtime = v;
2108 
2109 		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
2110 		    IFF_RUNNING && sc->re_imtype == RE_IMTYPE_HW)
2111 			re_setup_hw_im(sc);
2112 	}
2113 back:
2114 	lwkt_serialize_exit(ifp->if_serializer);
2115 	return error;
2116 }
2117 
2118 static int
2119 re_sysctl_simtime(SYSCTL_HANDLER_ARGS)
2120 {
2121 	struct re_softc *sc = arg1;
2122 	struct ifnet *ifp = &sc->arpcom.ac_if;
2123 	int error, v;
2124 
2125 	lwkt_serialize_enter(ifp->if_serializer);
2126 
2127 	v = sc->re_sim_time;
2128 	error = sysctl_handle_int(oidp, &v, 0, req);
2129 	if (error || req->newptr == NULL)
2130 		goto back;
2131 
2132 	if (v <= 0) {
2133 		error = EINVAL;
2134 		goto back;
2135 	}
2136 
2137 	if (v != sc->re_sim_time) {
2138 		sc->re_sim_time = v;
2139 
2140 		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
2141 		    IFF_RUNNING && sc->re_imtype == RE_IMTYPE_SIM) {
2142 #ifdef foo
2143 			/*
2144 			 * The following code causes various strange
2145 			 * performance problems, so it is disabled.
2146 			 */
2147 			CSR_WRITE_2(sc, RE_IMR, 0);
2148 			CSR_WRITE_4(sc, RE_TIMERINT, 0);
2149 			CSR_READ_4(sc, RE_TIMERINT); /* flush */
2150 
2151 			CSR_WRITE_2(sc, RE_IMR, sc->re_intrs);
2152 			re_setup_sim_im(sc);
2153 #else
2154 			re_setup_intr(sc, 0, RE_IMTYPE_NONE);
2155 			DELAY(10);
2156 			re_setup_intr(sc, 1, RE_IMTYPE_SIM);
2157 #endif
2158 		}
2159 	}
2160 back:
2161 	lwkt_serialize_exit(ifp->if_serializer);
2162 	return error;
2163 }
2164 
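/*
 * Sysctl handler for the interrupt moderation type.  Accepts
 * RE_IMTYPE_NONE, RE_IMTYPE_HW and RE_IMTYPE_SIM; RE_IMTYPE_HW is
 * rejected if the chip lacks the RE_C_HWIM capability.
 */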
2165 static int
2166 re_sysctl_imtype(SYSCTL_HANDLER_ARGS)
2167 {
2168 	struct re_softc *sc = arg1;
2169 	struct ifnet *ifp = &sc->arpcom.ac_if;
2170 	int error, v;
2171 
2172 	lwkt_serialize_enter(ifp->if_serializer);
2173 
2174 	v = sc->re_imtype;
2175 	error = sysctl_handle_int(oidp, &v, 0, req);
2176 	if (error || req->newptr == NULL)
2177 		goto back;
2178 
2179 	if (v != RE_IMTYPE_HW && v != RE_IMTYPE_SIM && v != RE_IMTYPE_NONE) {
2180 		error = EINVAL;
2181 		goto back;
2182 	}
2183 	if (v == RE_IMTYPE_HW && (sc->re_caps & RE_C_HWIM) == 0) {
2184 		/* Can't do hardware interrupt moderation */
2185 		error = EOPNOTSUPP;
2186 		goto back;
2187 	}
2188 
2189 	if (v != sc->re_imtype) {
2190 		sc->re_imtype = v;
2191 		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
2192 		    IFF_RUNNING)
2193 			re_setup_intr(sc, 1, sc->re_imtype);
2194 	}
2195 back:
2196 	lwkt_serialize_exit(ifp->if_serializer);
2197 	return error;
2198 }
2199 
2200 static void
2201 re_setup_hw_im(struct re_softc *sc)
2202 {
2203 	KKASSERT(sc->re_caps & RE_C_HWIM);
2204 
2205 	/*
2206 	 * Interrupt moderation
2207 	 *
2208 	 * 0xABCD
2209 	 * A - unknown (maybe TX related)
2210 	 * B - TX timer (unit: 25us)
2211 	 * C - unknown (maybe RX related)
2212 	 * D - RX timer (unit: 25us)
2213 	 *
2215 	 * re(4)'s interrupt moderation is actually controlled by
2216 	 * two variables, like most other NICs (bge, bce, etc.):
2217 	 * o  timer
2218 	 * o  number of packets [P]
2219 	 *
2220 	 * The logical relationship between these two variables is
2221 	 * similar to other NICs too:
2222 	 * if (timer expires || packets > [P])
2223 	 *     Interrupt is delivered
2224 	 *
2225 	 * Currently we only know how to set 'timer', but not
2226 	 * 'number of packets', which appears to be ~30 as far as
2227 	 * I have tested (sinking ~900Kpps gives a ~30KHz interrupt rate)
2228 	 */
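	/*
	 * Worked example (illustrative only; the exact macro
	 * encodings are assumed to follow the nibble layout
	 * sketched above): with re_rx_time = 4 and re_tx_time = 8,
	 * the RX timer fires after 4 * 25us = 100us and the TX
	 * timer after 8 * 25us = 200us, so the value written
	 * resembles 0x0804 plus whatever RE_IM_MAGIC sets in the
	 * unknown 'A'/'C' nibbles.
	 */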
2229 	CSR_WRITE_2(sc, RE_IM,
2230 		    RE_IM_RXTIME(sc->re_rx_time) |
2231 		    RE_IM_TXTIME(sc->re_tx_time) |
2232 		    RE_IM_MAGIC);
2233 }
2234 
2235 static void
2236 re_disable_hw_im(struct re_softc *sc)
2237 {
2238 	if (sc->re_caps & RE_C_HWIM)
2239 		CSR_WRITE_2(sc, RE_IM, 0);
2240 }
2241 
2242 static void
2243 re_setup_sim_im(struct re_softc *sc)
2244 {
2245 	uint32_t ticks;
2246 
2247 	if (sc->re_if_flags & RL_FLAG_PCIE) {
2248 		ticks = sc->re_sim_time * sc->re_bus_speed;
2249 	} else {
2250 		/*
2251 		 * The datasheet says the tick counter decrements
2252 		 * at bus speed, but the clock seems to run a bit
2253 		 * faster, so we compensate here.
2254 		 */
2255 		ticks = (sc->re_sim_time * sc->re_bus_speed * 8) / 5;
2256 	}
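	/*
	 * Worked example (illustrative; re_sim_time assumed to be
	 * in us, re_bus_speed in MHz): on PCIe with re_sim_time =
	 * 150 and re_bus_speed = 125, ticks = 150 * 125 = 18750;
	 * on a 33MHz PCI bus the compensated value would be
	 * (150 * 33 * 8) / 5 = 7920.
	 */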
2257 	CSR_WRITE_4(sc, RE_TIMERINT, ticks);
2258 
2259 	CSR_WRITE_4(sc, RE_TIMERCNT, 1); /* reload */
2260 	sc->re_flags |= RE_F_TIMER_INTR;
2261 }
2262 
2263 static void
2264 re_disable_sim_im(struct re_softc *sc)
2265 {
2266 	CSR_WRITE_4(sc, RE_TIMERINT, 0);
2267 	sc->re_flags &= ~RE_F_TIMER_INTR;
2268 }
2269 
2270 static void
2271 re_config_imtype(struct re_softc *sc, int imtype)
2272 {
2273 	switch (imtype) {
2274 	case RE_IMTYPE_HW:
2275 		KKASSERT(sc->re_caps & RE_C_HWIM);
2276 		/* FALL THROUGH */
2277 	case RE_IMTYPE_NONE:
2278 		sc->re_intrs = RE_INTRS;
2279 		sc->re_rx_ack = RE_ISR_RX_OK | RE_ISR_FIFO_OFLOW |
2280 				RE_ISR_RX_OVERRUN;
2281 		sc->re_tx_ack = RE_ISR_TX_OK;
2282 		break;
2283 
2284 	case RE_IMTYPE_SIM:
2285 		sc->re_intrs = RE_INTRS_TIMER;
2286 		sc->re_rx_ack = RE_ISR_PCS_TIMEOUT;
2287 		sc->re_tx_ack = RE_ISR_PCS_TIMEOUT;
2288 		break;
2289 
2290 	default:
2291 		panic("%s: unknown imtype %d",
2292 		      sc->arpcom.ac_if.if_xname, imtype);
2293 	}
2294 }
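
/*
 * A minimal sketch (not the actual re_intr()) of how an interrupt
 * handler consumes the fields set up in re_config_imtype() above:
 *
 *	uint16_t status = CSR_READ_2(sc, RE_ISR);
 *
 *	if (status & sc->re_rx_ack)
 *		re_rxeof(sc);
 *	if (status & sc->re_tx_ack)
 *		re_txeof(sc);
 *
 * Under RE_IMTYPE_SIM both ack masks reduce to RE_ISR_PCS_TIMEOUT,
 * so the timer interrupt services both RX and TX.
 */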
2295 
2296 static void
2297 re_setup_intr(struct re_softc *sc, int enable_intrs, int imtype)
2298 {
2299 	re_config_imtype(sc, imtype);
2300 
2301 	if (enable_intrs)
2302 		CSR_WRITE_2(sc, RE_IMR, sc->re_intrs);
2303 	else
2304 		CSR_WRITE_2(sc, RE_IMR, 0);
2305 
2306 	sc->re_npoll.ifpc_stcount = 0;
2307 
2308 	switch (imtype) {
2309 	case RE_IMTYPE_NONE:
2310 		re_disable_sim_im(sc);
2311 		re_disable_hw_im(sc);
2312 		break;
2313 
2314 	case RE_IMTYPE_HW:
2315 		KKASSERT(sc->re_caps & RE_C_HWIM);
2316 		re_disable_sim_im(sc);
2317 		re_setup_hw_im(sc);
2318 		break;
2319 
2320 	case RE_IMTYPE_SIM:
2321 		re_disable_hw_im(sc);
2322 		re_setup_sim_im(sc);
2323 		break;
2324 
2325 	default:
2326 		panic("%s: unknown imtype %d",
2327 		      sc->arpcom.ac_if.if_xname, imtype);
2328 	}
2329 }
2330 
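/*
 * Allocate one physically contiguous, coherent DMA block and carve
 * it into RE_JBUF_COUNT(sc) jumbo RX buffers, threaded onto the
 * re_jbuf_free list.
 */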
2331 static int
2332 re_jpool_alloc(struct re_softc *sc)
2333 {
2334 	struct re_list_data *ldata = &sc->re_ldata;
2335 	struct re_jbuf *jbuf;
2336 	bus_addr_t paddr;
2337 	bus_size_t jpool_size;
2338 	bus_dmamem_t dmem;
2339 	caddr_t buf;
2340 	int i, error;
2341 
2342 	lwkt_serialize_init(&ldata->re_jbuf_serializer);
2343 
2344 	ldata->re_jbuf = kmalloc(sizeof(struct re_jbuf) * RE_JBUF_COUNT(sc),
2345 				 M_DEVBUF, M_WAITOK | M_ZERO);
2346 
2347 	jpool_size = RE_JBUF_COUNT(sc) * RE_JBUF_SIZE;
2348 
2349 	error = bus_dmamem_coherent(sc->re_parent_tag,
2350 			RE_RXBUF_ALIGN, 0,
2351 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2352 			jpool_size, BUS_DMA_WAITOK, &dmem);
2353 	if (error) {
2354 		device_printf(sc->dev, "could not allocate jumbo memory\n");
2355 		return error;
2356 	}
2357 	ldata->re_jpool_tag = dmem.dmem_tag;
2358 	ldata->re_jpool_map = dmem.dmem_map;
2359 	ldata->re_jpool = dmem.dmem_addr;
2360 	paddr = dmem.dmem_busaddr;
2361 
2362 	/* ... and split it into RE_JBUF_SIZE (9KB) chunks */
2363 	SLIST_INIT(&ldata->re_jbuf_free);
2364 
2365 	buf = ldata->re_jpool;
2366 	for (i = 0; i < RE_JBUF_COUNT(sc); i++) {
2367 		jbuf = &ldata->re_jbuf[i];
2368 
2369 		jbuf->re_sc = sc;
2370 		jbuf->re_inuse = 0;
2371 		jbuf->re_slot = i;
2372 		jbuf->re_buf = buf;
2373 		jbuf->re_paddr = paddr;
2374 
2375 		SLIST_INSERT_HEAD(&ldata->re_jbuf_free, jbuf, re_link);
2376 
2377 		buf += RE_JBUF_SIZE;
2378 		paddr += RE_JBUF_SIZE;
2379 	}
2380 	return 0;
2381 }
2382 
2383 static void
2384 re_jpool_free(struct re_softc *sc)
2385 {
2386 	struct re_list_data *ldata = &sc->re_ldata;
2387 
2388 	if (ldata->re_jpool_tag != NULL) {
2389 		bus_dmamap_unload(ldata->re_jpool_tag, ldata->re_jpool_map);
2390 		bus_dmamem_free(ldata->re_jpool_tag, ldata->re_jpool,
2391 				ldata->re_jpool_map);
2392 		bus_dma_tag_destroy(ldata->re_jpool_tag);
2393 		ldata->re_jpool_tag = NULL;
2394 	}
2395 
2396 	if (ldata->re_jbuf != NULL) {
2397 		kfree(ldata->re_jbuf, M_DEVBUF);
2398 		ldata->re_jbuf = NULL;
2399 	}
2400 }
2401 
2402 #ifdef RE_JUMBO
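/*
 * Pop a jumbo buffer off the free list; returns NULL when the pool
 * is exhausted.  The list is guarded by re_jbuf_serializer, since
 * re_jbuf_free() may be called from a different context.
 */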
2403 static struct re_jbuf *
2404 re_jbuf_alloc(struct re_softc *sc)
2405 {
2406 	struct re_list_data *ldata = &sc->re_ldata;
2407 	struct re_jbuf *jbuf;
2408 
2409 	lwkt_serialize_enter(&ldata->re_jbuf_serializer);
2410 
2411 	jbuf = SLIST_FIRST(&ldata->re_jbuf_free);
2412 	if (jbuf != NULL) {
2413 		SLIST_REMOVE_HEAD(&ldata->re_jbuf_free, re_link);
2414 		jbuf->re_inuse = 1;
2415 	}
2416 
2417 	lwkt_serialize_exit(&ldata->re_jbuf_serializer);
2418 
2419 	return jbuf;
2420 }
2421 
2422 static void
2423 re_jbuf_free(void *arg)
2424 {
2425 	struct re_jbuf *jbuf = arg;
2426 	struct re_softc *sc = jbuf->re_sc;
2427 	struct re_list_data *ldata = &sc->re_ldata;
2428 
2429 	if (&ldata->re_jbuf[jbuf->re_slot] != jbuf) {
2430 		panic("%s: free wrong jumbo buffer",
2431 		      sc->arpcom.ac_if.if_xname);
2432 	} else if (jbuf->re_inuse == 0) {
2433 		panic("%s: jumbo buffer already freed",
2434 		      sc->arpcom.ac_if.if_xname);
2435 	}
2436 
2437 	lwkt_serialize_enter(&ldata->re_jbuf_serializer);
2438 	atomic_subtract_int(&jbuf->re_inuse, 1);
2439 	if (jbuf->re_inuse == 0)
2440 		SLIST_INSERT_HEAD(&ldata->re_jbuf_free, jbuf, re_link);
2441 	lwkt_serialize_exit(&ldata->re_jbuf_serializer);
2442 }
2443 
2444 static void
2445 re_jbuf_ref(void *arg)
2446 {
2447 	struct re_jbuf *jbuf = arg;
2448 	struct re_softc *sc = jbuf->re_sc;
2449 	struct re_list_data *ldata = &sc->re_ldata;
2450 
2451 	if (&ldata->re_jbuf[jbuf->re_slot] != jbuf) {
2452 		panic("%s: ref wrong jumbo buffer",
2453 		      sc->arpcom.ac_if.if_xname);
2454 	} else if (jbuf->re_inuse == 0) {
2455 		panic("%s: jumbo buffer already freed",
2456 		      sc->arpcom.ac_if.if_xname);
2457 	}
2458 	atomic_add_int(&jbuf->re_inuse, 1);
2459 }
2460 #endif	/* RE_JUMBO */
2461 
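/*
 * Clear the ASPM L0s/L1 enable bits in the PCIe link control
 * register so the device never enters those low-power link states.
 */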
2462 static void
2463 re_disable_aspm(device_t dev)
2464 {
2465 	uint16_t link_cap, link_ctrl;
2466 	uint8_t pcie_ptr, reg;
2467 
2468 	pcie_ptr = pci_get_pciecap_ptr(dev);
2469 	if (pcie_ptr == 0)
2470 		return;
2471 
2472 	link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2);
2473 	if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0)
2474 		return;
2475 
2476 	if (bootverbose)
2477 		device_printf(dev, "disable ASPM\n");
2478 
2479 	reg = pcie_ptr + PCIER_LINKCTRL;
2480 	link_ctrl = pci_read_config(dev, reg, 2);
2481 	link_ctrl &= ~(PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1);
2482 	pci_write_config(dev, reg, link_ctrl, 2);
2483 }
2484