xref: /freebsd/sys/dev/nfe/if_nfe.c (revision 1d386b48)
1 /*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
5  * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6  * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
22 
23 #include <sys/cdefs.h>
24 #ifdef HAVE_KERNEL_OPTION_HEADERS
25 #include "opt_device_polling.h"
26 #endif
27 
28 #include <sys/param.h>
29 #include <sys/endian.h>
30 #include <sys/systm.h>
31 #include <sys/sockio.h>
32 #include <sys/mbuf.h>
33 #include <sys/malloc.h>
34 #include <sys/module.h>
35 #include <sys/kernel.h>
36 #include <sys/queue.h>
37 #include <sys/socket.h>
38 #include <sys/sysctl.h>
39 #include <sys/taskqueue.h>
40 
41 #include <net/if.h>
42 #include <net/if_var.h>
43 #include <net/if_arp.h>
44 #include <net/ethernet.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
47 #include <net/if_types.h>
48 #include <net/if_vlan_var.h>
49 
50 #include <net/bpf.h>
51 
52 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <sys/bus.h>
55 #include <sys/rman.h>
56 
57 #include <dev/mii/mii.h>
58 #include <dev/mii/miivar.h>
59 
60 #include <dev/pci/pcireg.h>
61 #include <dev/pci/pcivar.h>
62 
63 #include <dev/nfe/if_nfereg.h>
64 #include <dev/nfe/if_nfevar.h>
65 
66 MODULE_DEPEND(nfe, pci, 1, 1, 1);
67 MODULE_DEPEND(nfe, ether, 1, 1, 1);
68 MODULE_DEPEND(nfe, miibus, 1, 1, 1);
69 
70 /* "device miibus" required.  See GENERIC if you get errors here. */
71 #include "miibus_if.h"
72 
73 static int  nfe_probe(device_t);
74 static int  nfe_attach(device_t);
75 static int  nfe_detach(device_t);
76 static int  nfe_suspend(device_t);
77 static int  nfe_resume(device_t);
78 static int nfe_shutdown(device_t);
79 static int  nfe_can_use_msix(struct nfe_softc *);
80 static int  nfe_detect_msik9(struct nfe_softc *);
81 static void nfe_power(struct nfe_softc *);
82 static int  nfe_miibus_readreg(device_t, int, int);
83 static int  nfe_miibus_writereg(device_t, int, int, int);
84 static void nfe_miibus_statchg(device_t);
85 static void nfe_mac_config(struct nfe_softc *, struct mii_data *);
86 static void nfe_set_intr(struct nfe_softc *);
87 static __inline void nfe_enable_intr(struct nfe_softc *);
88 static __inline void nfe_disable_intr(struct nfe_softc *);
89 static int  nfe_ioctl(if_t, u_long, caddr_t);
90 static void nfe_alloc_msix(struct nfe_softc *, int);
91 static int nfe_intr(void *);
92 static void nfe_int_task(void *, int);
93 static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
94 static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
95 static int nfe_newbuf(struct nfe_softc *, int);
96 static int nfe_jnewbuf(struct nfe_softc *, int);
97 static int  nfe_rxeof(struct nfe_softc *, int, int *);
98 static int  nfe_jrxeof(struct nfe_softc *, int, int *);
99 static void nfe_txeof(struct nfe_softc *);
100 static int  nfe_encap(struct nfe_softc *, struct mbuf **);
101 static void nfe_setmulti(struct nfe_softc *);
102 static void nfe_start(if_t);
103 static void nfe_start_locked(if_t);
104 static void nfe_watchdog(if_t);
105 static void nfe_init(void *);
106 static void nfe_init_locked(void *);
107 static void nfe_stop(if_t);
108 static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
109 static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
110 static int  nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
111 static int  nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
112 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
113 static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
114 static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
115 static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
116 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
117 static int  nfe_ifmedia_upd(if_t);
118 static void nfe_ifmedia_sts(if_t, struct ifmediareq *);
119 static void nfe_tick(void *);
120 static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
121 static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
122 static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
123 
124 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
125 static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
126 static void nfe_sysctl_node(struct nfe_softc *);
127 static void nfe_stats_clear(struct nfe_softc *);
128 static void nfe_stats_update(struct nfe_softc *);
129 static void nfe_set_linkspeed(struct nfe_softc *);
130 static void nfe_set_wol(struct nfe_softc *);
131 
132 #ifdef NFE_DEBUG
133 static int nfedebug = 0;
134 #define	DPRINTF(sc, ...)	do {				\
135 	if (nfedebug)						\
136 		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
137 } while (0)
138 #define	DPRINTFN(sc, n, ...)	do {				\
139 	if (nfedebug >= (n))					\
140 		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
141 } while (0)
142 #else
143 #define	DPRINTF(sc, ...)
144 #define	DPRINTFN(sc, n, ...)
145 #endif
146 
147 #define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
148 #define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
149 #define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
150 
151 /* Tunables. */
152 static int msi_disable = 0;
153 static int msix_disable = 0;
154 static int jumbo_disable = 0;
155 TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
156 TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
157 TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
158 
159 static device_method_t nfe_methods[] = {
160 	/* Device interface */
161 	DEVMETHOD(device_probe,		nfe_probe),
162 	DEVMETHOD(device_attach,	nfe_attach),
163 	DEVMETHOD(device_detach,	nfe_detach),
164 	DEVMETHOD(device_suspend,	nfe_suspend),
165 	DEVMETHOD(device_resume,	nfe_resume),
166 	DEVMETHOD(device_shutdown,	nfe_shutdown),
167 
168 	/* MII interface */
169 	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
170 	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
171 	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),
172 
173 	DEVMETHOD_END
174 };
175 
176 static driver_t nfe_driver = {
177 	"nfe",
178 	nfe_methods,
179 	sizeof(struct nfe_softc)
180 };
181 
182 DRIVER_MODULE(nfe, pci, nfe_driver, 0, 0);
183 DRIVER_MODULE(miibus, nfe, miibus_driver, 0, 0);
184 
185 static struct nfe_type nfe_devs[] = {
186 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
187 	    "NVIDIA nForce MCP Networking Adapter"},
188 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
189 	    "NVIDIA nForce2 MCP2 Networking Adapter"},
190 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
191 	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
192 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
193 	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
194 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
195 	    "NVIDIA nForce3 MCP3 Networking Adapter"},
196 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
197 	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
198 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
199 	    "NVIDIA nForce3 MCP7 Networking Adapter"},
200 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
201 	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
202 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
203 	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
204 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
205 	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP10 */
206 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
207 	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP11 */
208 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
209 	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
210 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
211 	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
212 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
213 	    "NVIDIA nForce MCP55 Networking Adapter"},
214 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
215 	    "NVIDIA nForce MCP55 Networking Adapter"},
216 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
217 	    "NVIDIA nForce MCP61 Networking Adapter"},
218 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
219 	    "NVIDIA nForce MCP61 Networking Adapter"},
220 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
221 	    "NVIDIA nForce MCP61 Networking Adapter"},
222 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
223 	    "NVIDIA nForce MCP61 Networking Adapter"},
224 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
225 	    "NVIDIA nForce MCP65 Networking Adapter"},
226 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
227 	    "NVIDIA nForce MCP65 Networking Adapter"},
228 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
229 	    "NVIDIA nForce MCP65 Networking Adapter"},
230 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
231 	    "NVIDIA nForce MCP65 Networking Adapter"},
232 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
233 	    "NVIDIA nForce MCP67 Networking Adapter"},
234 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
235 	    "NVIDIA nForce MCP67 Networking Adapter"},
236 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
237 	    "NVIDIA nForce MCP67 Networking Adapter"},
238 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
239 	    "NVIDIA nForce MCP67 Networking Adapter"},
240 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
241 	    "NVIDIA nForce MCP73 Networking Adapter"},
242 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
243 	    "NVIDIA nForce MCP73 Networking Adapter"},
244 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
245 	    "NVIDIA nForce MCP73 Networking Adapter"},
246 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
247 	    "NVIDIA nForce MCP73 Networking Adapter"},
248 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
249 	    "NVIDIA nForce MCP77 Networking Adapter"},
250 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
251 	    "NVIDIA nForce MCP77 Networking Adapter"},
252 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
253 	    "NVIDIA nForce MCP77 Networking Adapter"},
254 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
255 	    "NVIDIA nForce MCP77 Networking Adapter"},
256 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
257 	    "NVIDIA nForce MCP79 Networking Adapter"},
258 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
259 	    "NVIDIA nForce MCP79 Networking Adapter"},
260 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
261 	    "NVIDIA nForce MCP79 Networking Adapter"},
262 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
263 	    "NVIDIA nForce MCP79 Networking Adapter"},
264 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN,
265 	    "NVIDIA nForce MCP89 Networking Adapter"},
266 	{0, 0, NULL}
267 };
268 
269 /* Probe for supported hardware IDs */
270 static int
271 nfe_probe(device_t dev)
272 {
273 	struct nfe_type *t;
274 
275 	t = nfe_devs;
276 	/* Check for matching PCI device IDs */
277 	while (t->name != NULL) {
278 		if ((pci_get_vendor(dev) == t->vid_id) &&
279 		    (pci_get_device(dev) == t->dev_id)) {
280 			device_set_desc(dev, t->name);
281 			return (BUS_PROBE_DEFAULT);
282 		}
283 		t++;
284 	}
285 
286 	return (ENXIO);
287 }
288 
289 static void
290 nfe_alloc_msix(struct nfe_softc *sc, int count)
291 {
292 	int rid;
293 
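	/*
	 * The MSI-X vector table sits behind BAR(2) and the pending bit
	 * array (PBA) behind BAR(3); both are mapped here before the
	 * vectors are allocated.
	 */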
294 	rid = PCIR_BAR(2);
295 	sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
296 	    &rid, RF_ACTIVE);
297 	if (sc->nfe_msix_res == NULL) {
298 		device_printf(sc->nfe_dev,
299 		    "couldn't allocate MSIX table resource\n");
300 		return;
301 	}
302 	rid = PCIR_BAR(3);
303 	sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
304 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
305 	if (sc->nfe_msix_pba_res == NULL) {
306 		device_printf(sc->nfe_dev,
307 		    "couldn't allocate MSIX PBA resource\n");
308 		bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
309 		    sc->nfe_msix_res);
310 		sc->nfe_msix_res = NULL;
311 		return;
312 	}
313 
314 	if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
315 		if (count == NFE_MSI_MESSAGES) {
316 			if (bootverbose)
317 				device_printf(sc->nfe_dev,
318 				    "Using %d MSIX messages\n", count);
319 			sc->nfe_msix = 1;
320 		} else {
321 			if (bootverbose)
322 				device_printf(sc->nfe_dev,
323 				    "couldn't allocate MSIX\n");
324 			pci_release_msi(sc->nfe_dev);
325 			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
326 			    PCIR_BAR(3), sc->nfe_msix_pba_res);
327 			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
328 			    PCIR_BAR(2), sc->nfe_msix_res);
329 			sc->nfe_msix_pba_res = NULL;
330 			sc->nfe_msix_res = NULL;
331 		}
332 	}
333 }
334 
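/*
 * Identify the MSI K9N6PGM2-V2 (MS-7309) board from the SMBIOS planar
 * strings; nfe_attach() restricts the PHY location to 0 on that board.
 */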
335 static int
336 nfe_detect_msik9(struct nfe_softc *sc)
337 {
338 	static const char *maker = "MSI";
339 	static const char *product = "K9N6PGM2-V2 (MS-7309)";
340 	char *m, *p;
341 	int found;
342 
343 	found = 0;
344 	m = kern_getenv("smbios.planar.maker");
345 	p = kern_getenv("smbios.planar.product");
346 	if (m != NULL && p != NULL) {
347 		if (strcmp(m, maker) == 0 && strcmp(p, product) == 0)
348 			found = 1;
349 	}
350 	if (m != NULL)
351 		freeenv(m);
352 	if (p != NULL)
353 		freeenv(p);
354 
355 	return (found);
356 }
357 
358 static int
359 nfe_attach(device_t dev)
360 {
361 	struct nfe_softc *sc;
362 	if_t ifp;
363 	bus_addr_t dma_addr_max;
364 	int error = 0, i, msic, phyloc, reg, rid;
365 
366 	sc = device_get_softc(dev);
367 	sc->nfe_dev = dev;
368 
369 	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
370 	    MTX_DEF);
371 	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
372 
373 	pci_enable_busmaster(dev);
374 
375 	rid = PCIR_BAR(0);
376 	sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
377 	    RF_ACTIVE);
378 	if (sc->nfe_res[0] == NULL) {
379 		device_printf(dev, "couldn't map memory resources\n");
380 		mtx_destroy(&sc->nfe_mtx);
381 		return (ENXIO);
382 	}
383 
384 	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
385 		uint16_t v, width;
386 
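		/*
		 * Offsets from the PCIe capability: 0x08 is the device
		 * control register, 0x0c the link capabilities register and
		 * 0x12 the link status register.
		 */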
387 		v = pci_read_config(dev, reg + 0x08, 2);
388 		/* Change max. read request size to 4096. */
389 		v &= ~(7 << 12);
390 		v |= (5 << 12);
391 		pci_write_config(dev, reg + 0x08, v, 2);
392 
393 		v = pci_read_config(dev, reg + 0x0c, 2);
394 		/* link capability */
395 		v = (v >> 4) & 0x0f;
396 		width = pci_read_config(dev, reg + 0x12, 2);
397 		/* negotiated link width */
398 		width = (width >> 4) & 0x3f;
399 		if (v != width)
400 			device_printf(sc->nfe_dev,
401 			    "warning, negotiated width of link(x%d) != "
402 			    "max. width of link(x%d)\n", width, v);
403 	}
404 
405 	if (nfe_can_use_msix(sc) == 0) {
406 		device_printf(sc->nfe_dev,
407 		    "MSI/MSI-X capability black-listed, will use INTx\n");
408 		msix_disable = 1;
409 		msi_disable = 1;
410 	}
411 
412 	/* Allocate interrupt */
413 	if (msix_disable == 0 || msi_disable == 0) {
414 		if (msix_disable == 0 &&
415 		    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
416 			nfe_alloc_msix(sc, msic);
417 		if (msi_disable == 0 && sc->nfe_msix == 0 &&
418 		    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
419 		    pci_alloc_msi(dev, &msic) == 0) {
420 			if (msic == NFE_MSI_MESSAGES) {
421 				if (bootverbose)
422 					device_printf(dev,
423 					    "Using %d MSI messages\n", msic);
424 				sc->nfe_msi = 1;
425 			} else
426 				pci_release_msi(dev);
427 		}
428 	}
429 
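	/*
	 * Fall back to a shared legacy INTx interrupt when neither MSI
	 * nor MSI-X is in use.
	 */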
430 	if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
431 		rid = 0;
432 		sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
433 		    RF_SHAREABLE | RF_ACTIVE);
434 		if (sc->nfe_irq[0] == NULL) {
435 			device_printf(dev, "couldn't allocate IRQ resources\n");
436 			error = ENXIO;
437 			goto fail;
438 		}
439 	} else {
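		/*
		 * MSI/MSI-X vectors appear as SYS_RES_IRQ resources with
		 * rids starting at 1.
		 */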
440 		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
441 			sc->nfe_irq[i] = bus_alloc_resource_any(dev,
442 			    SYS_RES_IRQ, &rid, RF_ACTIVE);
443 			if (sc->nfe_irq[i] == NULL) {
444 				device_printf(dev,
445 				    "couldn't allocate IRQ resources for "
446 				    "message %d\n", rid);
447 				error = ENXIO;
448 				goto fail;
449 			}
450 		}
451 		/* Map interrupts to vector 0. */
452 		if (sc->nfe_msix != 0) {
453 			NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
454 			NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
455 		} else if (sc->nfe_msi != 0) {
456 			NFE_WRITE(sc, NFE_MSI_MAP0, 0);
457 			NFE_WRITE(sc, NFE_MSI_MAP1, 0);
458 		}
459 	}
460 
461 	/* Set IRQ status/mask register. */
462 	sc->nfe_irq_status = NFE_IRQ_STATUS;
463 	sc->nfe_irq_mask = NFE_IRQ_MASK;
464 	sc->nfe_intrs = NFE_IRQ_WANTED;
465 	sc->nfe_nointrs = 0;
466 	if (sc->nfe_msix != 0) {
467 		sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
468 		sc->nfe_nointrs = NFE_IRQ_WANTED;
469 	} else if (sc->nfe_msi != 0) {
470 		sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
471 		sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
472 	}
473 
474 	sc->nfe_devid = pci_get_device(dev);
475 	sc->nfe_revid = pci_get_revid(dev);
476 	sc->nfe_flags = 0;
477 
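	/*
	 * Set per-chip feature flags: jumbo frame support, 40-bit DMA
	 * addressing, hardware checksum/VLAN offload, power management,
	 * Tx flow control and MIB counter revision.
	 */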
478 	switch (sc->nfe_devid) {
479 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
480 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
481 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
482 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
483 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
484 		break;
485 	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
486 	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
487 		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
488 		break;
489 	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
490 	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
491 	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
492 	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
493 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
494 		    NFE_MIB_V1;
495 		break;
496 	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
497 	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
498 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
499 		    NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
500 		break;
501 
502 	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
503 	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
504 	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
505 	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
506 	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
507 	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
508 	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
509 	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
510 	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
511 	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
512 	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
513 	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
514 		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
515 		    NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
516 		break;
517 	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
518 	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
519 	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
520 	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
521 		/* XXX flow control */
522 		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
523 		    NFE_CORRECT_MACADDR | NFE_MIB_V3;
524 		break;
525 	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
526 	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
527 	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
528 	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
529 	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
530 		/* XXX flow control */
531 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
532 		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
533 		break;
534 	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
535 	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
536 	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
537 	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
538 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
539 		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
540 		    NFE_MIB_V2;
541 		break;
542 	}
543 
544 	nfe_power(sc);
545 	/* Check for a reversed Ethernet address */
546 	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
547 		sc->nfe_flags |= NFE_CORRECT_MACADDR;
548 	nfe_get_macaddr(sc, sc->eaddr);
549 	/*
550 	 * Allocate the parent bus DMA tag appropriate for PCI.
551 	 */
552 	dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
553 	if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
554 		dma_addr_max = NFE_DMA_MAXADDR;
555 	error = bus_dma_tag_create(
556 	    bus_get_dma_tag(sc->nfe_dev),	/* parent */
557 	    1, 0,				/* alignment, boundary */
558 	    dma_addr_max,			/* lowaddr */
559 	    BUS_SPACE_MAXADDR,			/* highaddr */
560 	    NULL, NULL,				/* filter, filterarg */
561 	    BUS_SPACE_MAXSIZE_32BIT, 0,		/* maxsize, nsegments */
562 	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
563 	    0,					/* flags */
564 	    NULL, NULL,				/* lockfunc, lockarg */
565 	    &sc->nfe_parent_tag);
566 	if (error)
567 		goto fail;
568 
569 	ifp = sc->nfe_ifp = if_gethandle(IFT_ETHER);
570 	if (ifp == NULL) {
571 		device_printf(dev, "can not if_gethandle()\n");
572 		error = ENOSPC;
573 		goto fail;
574 	}
575 
576 	/*
577 	 * Allocate Tx and Rx rings.
578 	 */
579 	if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
580 		goto fail;
581 
582 	if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
583 		goto fail;
584 
585 	nfe_alloc_jrx_ring(sc, &sc->jrxq);
586 	/* Create sysctl node. */
587 	nfe_sysctl_node(sc);
588 
589 	if_setsoftc(ifp, sc);
590 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
591 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
592 	if_setioctlfn(ifp, nfe_ioctl);
593 	if_setstartfn(ifp, nfe_start);
594 	if_sethwassist(ifp, 0);
595 	if_setcapabilities(ifp, 0);
596 	if_setinitfn(ifp, nfe_init);
597 	if_setsendqlen(ifp, NFE_TX_RING_COUNT - 1);
598 	if_setsendqready(ifp);
599 
600 	if (sc->nfe_flags & NFE_HW_CSUM) {
601 		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_TSO4, 0);
602 		if_sethwassistbits(ifp, NFE_CSUM_FEATURES | CSUM_TSO, 0);
603 	}
604 	if_setcapenable(ifp, if_getcapabilities(ifp));
605 
606 	sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS;
607 	/* VLAN capability setup. */
608 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
609 	if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
610 		if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
611 		if ((if_getcapabilities(ifp) & IFCAP_HWCSUM) != 0)
612 			if_setcapabilitiesbit(ifp,
613 			    (IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO), 0);
614 	}
615 
616 	if (pci_find_cap(dev, PCIY_PMG, &reg) == 0)
617 		if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
618 	if_setcapenable(ifp, if_getcapabilities(ifp));
619 
620 	/*
621 	 * Tell the upper layer(s) we support long frames.
622 	 * Must appear after the call to ether_ifattach() because
623 	 * ether_ifattach() sets ifi_hdrlen to the default value.
624 	 */
625 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
626 
627 #ifdef DEVICE_POLLING
628 	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
629 #endif
630 
631 	/* Do MII setup */
632 	phyloc = MII_PHY_ANY;
633 	if (sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN1 ||
634 	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN2 ||
635 	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN3 ||
636 	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN4) {
637 		if (nfe_detect_msik9(sc) != 0)
638 			phyloc = 0;
639 	}
640 	error = mii_attach(dev, &sc->nfe_miibus, ifp,
641 	    (ifm_change_cb_t)nfe_ifmedia_upd, (ifm_stat_cb_t)nfe_ifmedia_sts,
642 	    BMSR_DEFCAPMASK, phyloc, MII_OFFSET_ANY, MIIF_DOPAUSE);
643 	if (error != 0) {
644 		device_printf(dev, "attaching PHYs failed\n");
645 		goto fail;
646 	}
647 	ether_ifattach(ifp, sc->eaddr);
648 
649 	NET_TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
650 	sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
651 	    taskqueue_thread_enqueue, &sc->nfe_tq);
652 	taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
653 	    device_get_nameunit(sc->nfe_dev));
654 	error = 0;
655 	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
656 		error = bus_setup_intr(dev, sc->nfe_irq[0],
657 		    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
658 		    &sc->nfe_intrhand[0]);
659 	} else {
660 		for (i = 0; i < NFE_MSI_MESSAGES; i++) {
661 			error = bus_setup_intr(dev, sc->nfe_irq[i],
662 			    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
663 			    &sc->nfe_intrhand[i]);
664 			if (error != 0)
665 				break;
666 		}
667 	}
668 	if (error) {
669 		device_printf(dev, "couldn't set up irq\n");
670 		taskqueue_free(sc->nfe_tq);
671 		sc->nfe_tq = NULL;
672 		ether_ifdetach(ifp);
673 		goto fail;
674 	}
675 
676 fail:
677 	if (error)
678 		nfe_detach(dev);
679 
680 	return (error);
681 }
682 
683 static int
684 nfe_detach(device_t dev)
685 {
686 	struct nfe_softc *sc;
687 	if_t ifp;
688 	uint8_t eaddr[ETHER_ADDR_LEN];
689 	int i, rid;
690 
691 	sc = device_get_softc(dev);
692 	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
693 	ifp = sc->nfe_ifp;
694 
695 #ifdef DEVICE_POLLING
696 	if (ifp != NULL && if_getcapenable(ifp) & IFCAP_POLLING)
697 		ether_poll_deregister(ifp);
698 #endif
699 	if (device_is_attached(dev)) {
700 		NFE_LOCK(sc);
701 		nfe_stop(ifp);
702 		if_setflagbits(ifp, 0, IFF_UP);
703 		NFE_UNLOCK(sc);
704 		callout_drain(&sc->nfe_stat_ch);
705 		ether_ifdetach(ifp);
706 	}
707 
708 	if (ifp) {
709 		/* restore ethernet address */
710 		if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
711 			for (i = 0; i < ETHER_ADDR_LEN; i++) {
712 				eaddr[i] = sc->eaddr[5 - i];
713 			}
714 		} else
715 			bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
716 		nfe_set_macaddr(sc, eaddr);
717 		if_free(ifp);
718 	}
719 	if (sc->nfe_miibus)
720 		device_delete_child(dev, sc->nfe_miibus);
721 	bus_generic_detach(dev);
722 	if (sc->nfe_tq != NULL) {
723 		taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
724 		taskqueue_free(sc->nfe_tq);
725 		sc->nfe_tq = NULL;
726 	}
727 
728 	for (i = 0; i < NFE_MSI_MESSAGES; i++) {
729 		if (sc->nfe_intrhand[i] != NULL) {
730 			bus_teardown_intr(dev, sc->nfe_irq[i],
731 			    sc->nfe_intrhand[i]);
732 			sc->nfe_intrhand[i] = NULL;
733 		}
734 	}
735 
736 	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
737 		if (sc->nfe_irq[0] != NULL)
738 			bus_release_resource(dev, SYS_RES_IRQ, 0,
739 			    sc->nfe_irq[0]);
740 	} else {
741 		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
742 			if (sc->nfe_irq[i] != NULL) {
743 				bus_release_resource(dev, SYS_RES_IRQ, rid,
744 				    sc->nfe_irq[i]);
745 				sc->nfe_irq[i] = NULL;
746 			}
747 		}
748 		pci_release_msi(dev);
749 	}
750 	if (sc->nfe_msix_pba_res != NULL) {
751 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
752 		    sc->nfe_msix_pba_res);
753 		sc->nfe_msix_pba_res = NULL;
754 	}
755 	if (sc->nfe_msix_res != NULL) {
756 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
757 		    sc->nfe_msix_res);
758 		sc->nfe_msix_res = NULL;
759 	}
760 	if (sc->nfe_res[0] != NULL) {
761 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
762 		    sc->nfe_res[0]);
763 		sc->nfe_res[0] = NULL;
764 	}
765 
766 	nfe_free_tx_ring(sc, &sc->txq);
767 	nfe_free_rx_ring(sc, &sc->rxq);
768 	nfe_free_jrx_ring(sc, &sc->jrxq);
769 
770 	if (sc->nfe_parent_tag) {
771 		bus_dma_tag_destroy(sc->nfe_parent_tag);
772 		sc->nfe_parent_tag = NULL;
773 	}
774 
775 	mtx_destroy(&sc->nfe_mtx);
776 
777 	return (0);
778 }
779 
780 static int
781 nfe_suspend(device_t dev)
782 {
783 	struct nfe_softc *sc;
784 
785 	sc = device_get_softc(dev);
786 
787 	NFE_LOCK(sc);
788 	nfe_stop(sc->nfe_ifp);
789 	nfe_set_wol(sc);
790 	sc->nfe_suspended = 1;
791 	NFE_UNLOCK(sc);
792 
793 	return (0);
794 }
795 
796 static int
797 nfe_resume(device_t dev)
798 {
799 	struct nfe_softc *sc;
800 	if_t ifp;
801 
802 	sc = device_get_softc(dev);
803 
804 	NFE_LOCK(sc);
805 	nfe_power(sc);
806 	ifp = sc->nfe_ifp;
807 	if (if_getflags(ifp) & IFF_UP)
808 		nfe_init_locked(sc);
809 	sc->nfe_suspended = 0;
810 	NFE_UNLOCK(sc);
811 
812 	return (0);
813 }
814 
815 static int
816 nfe_can_use_msix(struct nfe_softc *sc)
817 {
818 	static struct msix_blacklist {
819 		char	*maker;
820 		char	*product;
821 	} msix_blacklists[] = {
822 		{ "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
823 	};
824 
825 	struct msix_blacklist *mblp;
826 	char *maker, *product;
827 	int count, n, use_msix;
828 
829 	/*
830 	 * Search the baseboard manufacturer and product name table
831 	 * to see if this system has a known MSI/MSI-X issue.
832 	 */
833 	maker = kern_getenv("smbios.planar.maker");
834 	product = kern_getenv("smbios.planar.product");
835 	use_msix = 1;
836 	if (maker != NULL && product != NULL) {
837 		count = nitems(msix_blacklists);
838 		mblp = msix_blacklists;
839 		for (n = 0; n < count; n++) {
840 			if (strcmp(maker, mblp->maker) == 0 &&
841 			    strcmp(product, mblp->product) == 0) {
842 				use_msix = 0;
843 				break;
844 			}
845 			mblp++;
846 		}
847 	}
848 	if (maker != NULL)
849 		freeenv(maker);
850 	if (product != NULL)
851 		freeenv(product);
852 
853 	return (use_msix);
854 }
855 
856 /* Take PHY/NIC out of powerdown, from Linux */
857 static void
858 nfe_power(struct nfe_softc *sc)
859 {
860 	uint32_t pwr;
861 
862 	if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
863 		return;
864 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
865 	NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
866 	DELAY(100);
867 	NFE_WRITE(sc, NFE_MAC_RESET, 0);
868 	DELAY(100);
869 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
870 	pwr = NFE_READ(sc, NFE_PWR2_CTL);
871 	pwr &= ~NFE_PWR2_WAKEUP_MASK;
872 	if (sc->nfe_revid >= 0xa3 &&
873 	    (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
874 	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
875 		pwr |= NFE_PWR2_REVA3;
876 	NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
877 }
878 
879 static void
880 nfe_miibus_statchg(device_t dev)
881 {
882 	struct nfe_softc *sc;
883 	struct mii_data *mii;
884 	if_t ifp;
885 	uint32_t rxctl, txctl;
886 
887 	sc = device_get_softc(dev);
888 
889 	mii = device_get_softc(sc->nfe_miibus);
890 	ifp = sc->nfe_ifp;
891 
892 	sc->nfe_link = 0;
893 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
894 	    (IFM_ACTIVE | IFM_AVALID)) {
895 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
896 		case IFM_10_T:
897 		case IFM_100_TX:
898 		case IFM_1000_T:
899 			sc->nfe_link = 1;
900 			break;
901 		default:
902 			break;
903 		}
904 	}
905 
906 	nfe_mac_config(sc, mii);
907 	txctl = NFE_READ(sc, NFE_TX_CTL);
908 	rxctl = NFE_READ(sc, NFE_RX_CTL);
909 	if (sc->nfe_link != 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
910 		txctl |= NFE_TX_START;
911 		rxctl |= NFE_RX_START;
912 	} else {
913 		txctl &= ~NFE_TX_START;
914 		rxctl &= ~NFE_RX_START;
915 	}
916 	NFE_WRITE(sc, NFE_TX_CTL, txctl);
917 	NFE_WRITE(sc, NFE_RX_CTL, rxctl);
918 }
919 
920 static void
921 nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
922 {
923 	uint32_t link, misc, phy, seed;
924 	uint32_t val;
925 
926 	NFE_LOCK_ASSERT(sc);
927 
928 	phy = NFE_READ(sc, NFE_PHY_IFACE);
929 	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
930 
931 	seed = NFE_READ(sc, NFE_RNDSEED);
932 	seed &= ~NFE_SEED_MASK;
933 
934 	misc = NFE_MISC1_MAGIC;
935 	link = NFE_MEDIA_SET;
936 
937 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
938 		phy  |= NFE_PHY_HDX;	/* half-duplex */
939 		misc |= NFE_MISC1_HDX;
940 	}
941 
942 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
943 	case IFM_1000_T:	/* full-duplex only */
944 		link |= NFE_MEDIA_1000T;
945 		seed |= NFE_SEED_1000T;
946 		phy  |= NFE_PHY_1000T;
947 		break;
948 	case IFM_100_TX:
949 		link |= NFE_MEDIA_100TX;
950 		seed |= NFE_SEED_100TX;
951 		phy  |= NFE_PHY_100TX;
952 		break;
953 	case IFM_10_T:
954 		link |= NFE_MEDIA_10T;
955 		seed |= NFE_SEED_10T;
956 		break;
957 	}
958 
959 	if ((phy & 0x10000000) != 0) {
960 		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
961 			val = NFE_R1_MAGIC_1000;
962 		else
963 			val = NFE_R1_MAGIC_10_100;
964 	} else
965 		val = NFE_R1_MAGIC_DEFAULT;
966 	NFE_WRITE(sc, NFE_SETUP_R1, val);
967 
968 	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
969 
970 	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
971 	NFE_WRITE(sc, NFE_MISC1, misc);
972 	NFE_WRITE(sc, NFE_LINKSPEED, link);
973 
974 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
975 		/* All hardware seems to support Rx pause frames. */
976 		val = NFE_READ(sc, NFE_RXFILTER);
977 		if ((IFM_OPTIONS(mii->mii_media_active) &
978 		    IFM_ETH_RXPAUSE) != 0)
979 			val |= NFE_PFF_RX_PAUSE;
980 		else
981 			val &= ~NFE_PFF_RX_PAUSE;
982 		NFE_WRITE(sc, NFE_RXFILTER, val);
983 		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
984 			val = NFE_READ(sc, NFE_MISC1);
985 			if ((IFM_OPTIONS(mii->mii_media_active) &
986 			    IFM_ETH_TXPAUSE) != 0) {
987 				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
988 				    NFE_TX_PAUSE_FRAME_ENABLE);
989 				val |= NFE_MISC1_TX_PAUSE;
990 			} else {
991 				val &= ~NFE_MISC1_TX_PAUSE;
992 				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
993 				    NFE_TX_PAUSE_FRAME_DISABLE);
994 			}
995 			NFE_WRITE(sc, NFE_MISC1, val);
996 		}
997 	} else {
998 		/* disable rx/tx pause frames */
999 		val = NFE_READ(sc, NFE_RXFILTER);
1000 		val &= ~NFE_PFF_RX_PAUSE;
1001 		NFE_WRITE(sc, NFE_RXFILTER, val);
1002 		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
1003 			NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
1004 			    NFE_TX_PAUSE_FRAME_DISABLE);
1005 			val = NFE_READ(sc, NFE_MISC1);
1006 			val &= ~NFE_MISC1_TX_PAUSE;
1007 			NFE_WRITE(sc, NFE_MISC1, val);
1008 		}
1009 	}
1010 }
1011 
1012 static int
1013 nfe_miibus_readreg(device_t dev, int phy, int reg)
1014 {
1015 	struct nfe_softc *sc = device_get_softc(dev);
1016 	uint32_t val;
1017 	int ntries;
1018 
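	/* Clear any pending PHY status bits before starting the access. */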
1019 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1020 
1021 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
1022 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
1023 		DELAY(100);
1024 	}
1025 
1026 	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
1027 
1028 	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
1029 		DELAY(100);
1030 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
1031 			break;
1032 	}
1033 	if (ntries == NFE_TIMEOUT) {
1034 		DPRINTFN(sc, 2, "timeout waiting for PHY\n");
1035 		return 0;
1036 	}
1037 
1038 	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
1039 		DPRINTFN(sc, 2, "could not read PHY\n");
1040 		return 0;
1041 	}
1042 
1043 	val = NFE_READ(sc, NFE_PHY_DATA);
1044 	if (val != 0xffffffff && val != 0)
1045 		sc->mii_phyaddr = phy;
1046 
1047 	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
1048 
1049 	return (val);
1050 }
1051 
1052 static int
1053 nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
1054 {
1055 	struct nfe_softc *sc = device_get_softc(dev);
1056 	uint32_t ctl;
1057 	int ntries;
1058 
1059 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1060 
1061 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
1062 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
1063 		DELAY(100);
1064 	}
1065 
1066 	NFE_WRITE(sc, NFE_PHY_DATA, val);
1067 	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
1068 	NFE_WRITE(sc, NFE_PHY_CTL, ctl);
1069 
1070 	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
1071 		DELAY(100);
1072 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
1073 			break;
1074 	}
1075 #ifdef NFE_DEBUG
1076 	if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
1077 		device_printf(sc->nfe_dev, "could not write to PHY\n");
1078 #endif
1079 	return (0);
1080 }
1081 
1082 struct nfe_dmamap_arg {
1083 	bus_addr_t nfe_busaddr;
1084 };
1085 
1086 static int
1087 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1088 {
1089 	struct nfe_dmamap_arg ctx;
1090 	struct nfe_rx_data *data;
1091 	void *desc;
1092 	int i, error, descsize;
1093 
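	/*
	 * Chips with 40-bit DMA addressing use the 64-bit descriptor
	 * format; all others use the 32-bit format.
	 */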
1094 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1095 		desc = ring->desc64;
1096 		descsize = sizeof (struct nfe_desc64);
1097 	} else {
1098 		desc = ring->desc32;
1099 		descsize = sizeof (struct nfe_desc32);
1100 	}
1101 
1102 	ring->cur = ring->next = 0;
1103 
1104 	error = bus_dma_tag_create(sc->nfe_parent_tag,
1105 	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1106 	    BUS_SPACE_MAXADDR,			/* lowaddr */
1107 	    BUS_SPACE_MAXADDR,			/* highaddr */
1108 	    NULL, NULL,				/* filter, filterarg */
1109 	    NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
1110 	    NFE_RX_RING_COUNT * descsize,	/* maxsegsize */
1111 	    0,					/* flags */
1112 	    NULL, NULL,				/* lockfunc, lockarg */
1113 	    &ring->rx_desc_tag);
1114 	if (error != 0) {
1115 		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1116 		goto fail;
1117 	}
1118 
1119 	/* Allocate memory for the descriptors. */
1120 	error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
1121 	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
1122 	if (error != 0) {
1123 		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1124 		goto fail;
1125 	}
1126 	if (sc->nfe_flags & NFE_40BIT_ADDR)
1127 		ring->desc64 = desc;
1128 	else
1129 		ring->desc32 = desc;
1130 
1131 	/* Map the descriptors into device-visible address space. */
1132 	ctx.nfe_busaddr = 0;
1133 	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
1134 	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1135 	if (error != 0) {
1136 		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1137 		goto fail;
1138 	}
1139 	ring->physaddr = ctx.nfe_busaddr;
1140 
1141 	error = bus_dma_tag_create(sc->nfe_parent_tag,
1142 	    1, 0,			/* alignment, boundary */
1143 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1144 	    BUS_SPACE_MAXADDR,		/* highaddr */
1145 	    NULL, NULL,			/* filter, filterarg */
1146 	    MCLBYTES, 1,		/* maxsize, nsegments */
1147 	    MCLBYTES,			/* maxsegsize */
1148 	    0,				/* flags */
1149 	    NULL, NULL,			/* lockfunc, lockarg */
1150 	    &ring->rx_data_tag);
1151 	if (error != 0) {
1152 		device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
1153 		goto fail;
1154 	}
1155 
1156 	error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
1157 	if (error != 0) {
1158 		device_printf(sc->nfe_dev,
1159 		    "could not create Rx DMA spare map\n");
1160 		goto fail;
1161 	}
1162 
1163 	/*
1164 	 * Pre-allocate Rx buffers and populate Rx ring.
1165 	 */
1166 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1167 		data = &sc->rxq.data[i];
1168 		data->rx_data_map = NULL;
1169 		data->m = NULL;
1170 		error = bus_dmamap_create(ring->rx_data_tag, 0,
1171 		    &data->rx_data_map);
1172 		if (error != 0) {
1173 			device_printf(sc->nfe_dev,
1174 			    "could not create Rx DMA map\n");
1175 			goto fail;
1176 		}
1177 	}
1178 
1179 fail:
1180 	return (error);
1181 }
1182 
1183 static void
1184 nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1185 {
1186 	struct nfe_dmamap_arg ctx;
1187 	struct nfe_rx_data *data;
1188 	void *desc;
1189 	int i, error, descsize;
1190 
1191 	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1192 		return;
1193 	if (jumbo_disable != 0) {
1194 		device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
1195 		sc->nfe_jumbo_disable = 1;
1196 		return;
1197 	}
1198 
1199 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1200 		desc = ring->jdesc64;
1201 		descsize = sizeof (struct nfe_desc64);
1202 	} else {
1203 		desc = ring->jdesc32;
1204 		descsize = sizeof (struct nfe_desc32);
1205 	}
1206 
1207 	ring->jcur = ring->jnext = 0;
1208 
1209 	/* Create DMA tag for jumbo Rx ring. */
1210 	error = bus_dma_tag_create(sc->nfe_parent_tag,
1211 	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1212 	    BUS_SPACE_MAXADDR,			/* lowaddr */
1213 	    BUS_SPACE_MAXADDR,			/* highaddr */
1214 	    NULL, NULL,				/* filter, filterarg */
1215 	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsize */
1216 	    1, 					/* nsegments */
1217 	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsegsize */
1218 	    0,					/* flags */
1219 	    NULL, NULL,				/* lockfunc, lockarg */
1220 	    &ring->jrx_desc_tag);
1221 	if (error != 0) {
1222 		device_printf(sc->nfe_dev,
1223 		    "could not create jumbo ring DMA tag\n");
1224 		goto fail;
1225 	}
1226 
1227 	/* Create DMA tag for jumbo Rx buffers. */
1228 	error = bus_dma_tag_create(sc->nfe_parent_tag,
1229 	    1, 0,				/* alignment, boundary */
1230 	    BUS_SPACE_MAXADDR,			/* lowaddr */
1231 	    BUS_SPACE_MAXADDR,			/* highaddr */
1232 	    NULL, NULL,				/* filter, filterarg */
1233 	    MJUM9BYTES,				/* maxsize */
1234 	    1,					/* nsegments */
1235 	    MJUM9BYTES,				/* maxsegsize */
1236 	    0,					/* flags */
1237 	    NULL, NULL,				/* lockfunc, lockarg */
1238 	    &ring->jrx_data_tag);
1239 	if (error != 0) {
1240 		device_printf(sc->nfe_dev,
1241 		    "could not create jumbo Rx buffer DMA tag\n");
1242 		goto fail;
1243 	}
1244 
1245 	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
1246 	error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
1247 	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
1248 	if (error != 0) {
1249 		device_printf(sc->nfe_dev,
1250 		    "could not allocate DMA'able memory for jumbo Rx ring\n");
1251 		goto fail;
1252 	}
1253 	if (sc->nfe_flags & NFE_40BIT_ADDR)
1254 		ring->jdesc64 = desc;
1255 	else
1256 		ring->jdesc32 = desc;
1257 
1258 	ctx.nfe_busaddr = 0;
1259 	error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
1260 	    NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1261 	if (error != 0) {
1262 		device_printf(sc->nfe_dev,
1263 		    "could not load DMA'able memory for jumbo Rx ring\n");
1264 		goto fail;
1265 	}
1266 	ring->jphysaddr = ctx.nfe_busaddr;
1267 
1268 	/* Create DMA maps for jumbo Rx buffers. */
1269 	error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
1270 	if (error != 0) {
1271 		device_printf(sc->nfe_dev,
1272 		    "could not create jumbo Rx DMA spare map\n");
1273 		goto fail;
1274 	}
1275 
1276 	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1277 		data = &sc->jrxq.jdata[i];
1278 		data->rx_data_map = NULL;
1279 		data->m = NULL;
1280 		error = bus_dmamap_create(ring->jrx_data_tag, 0,
1281 		    &data->rx_data_map);
1282 		if (error != 0) {
1283 			device_printf(sc->nfe_dev,
1284 			    "could not create jumbo Rx DMA map\n");
1285 			goto fail;
1286 		}
1287 	}
1288 
1289 	return;
1290 
1291 fail:
1292 	/*
1293 	 * Running without jumbo frame support is OK in most cases,
1294 	 * so don't fail if the DMA tag/map for jumbo frames cannot be created.
1295 	 */
1296 	nfe_free_jrx_ring(sc, ring);
1297 	device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
1298 	    "resource shortage\n");
1299 	sc->nfe_jumbo_disable = 1;
1300 }
1301 
1302 static int
1303 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1304 {
1305 	void *desc;
1306 	size_t descsize;
1307 	int i;
1308 
1309 	ring->cur = ring->next = 0;
1310 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1311 		desc = ring->desc64;
1312 		descsize = sizeof (struct nfe_desc64);
1313 	} else {
1314 		desc = ring->desc32;
1315 		descsize = sizeof (struct nfe_desc32);
1316 	}
1317 	bzero(desc, descsize * NFE_RX_RING_COUNT);
1318 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1319 		if (nfe_newbuf(sc, i) != 0)
1320 			return (ENOBUFS);
1321 	}
1322 
1323 	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
1324 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1325 
1326 	return (0);
1327 }
1328 
1329 static int
1330 nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1331 {
1332 	void *desc;
1333 	size_t descsize;
1334 	int i;
1335 
1336 	ring->jcur = ring->jnext = 0;
1337 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1338 		desc = ring->jdesc64;
1339 		descsize = sizeof (struct nfe_desc64);
1340 	} else {
1341 		desc = ring->jdesc32;
1342 		descsize = sizeof (struct nfe_desc32);
1343 	}
1344 	bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
1345 	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1346 		if (nfe_jnewbuf(sc, i) != 0)
1347 			return (ENOBUFS);
1348 	}
1349 
1350 	bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
1351 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1352 
1353 	return (0);
1354 }
1355 
1356 static void
1357 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1358 {
1359 	struct nfe_rx_data *data;
1360 	void *desc;
1361 	int i;
1362 
1363 	if (sc->nfe_flags & NFE_40BIT_ADDR)
1364 		desc = ring->desc64;
1365 	else
1366 		desc = ring->desc32;
1367 
1368 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1369 		data = &ring->data[i];
1370 		if (data->rx_data_map != NULL) {
1371 			bus_dmamap_destroy(ring->rx_data_tag,
1372 			    data->rx_data_map);
1373 			data->rx_data_map = NULL;
1374 		}
1375 		if (data->m != NULL) {
1376 			m_freem(data->m);
1377 			data->m = NULL;
1378 		}
1379 	}
1380 	if (ring->rx_data_tag != NULL) {
1381 		if (ring->rx_spare_map != NULL) {
1382 			bus_dmamap_destroy(ring->rx_data_tag,
1383 			    ring->rx_spare_map);
1384 			ring->rx_spare_map = NULL;
1385 		}
1386 		bus_dma_tag_destroy(ring->rx_data_tag);
1387 		ring->rx_data_tag = NULL;
1388 	}
1389 
1390 	if (desc != NULL) {
1391 		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
1392 		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
1393 		ring->desc64 = NULL;
1394 		ring->desc32 = NULL;
1395 	}
1396 	if (ring->rx_desc_tag != NULL) {
1397 		bus_dma_tag_destroy(ring->rx_desc_tag);
1398 		ring->rx_desc_tag = NULL;
1399 	}
1400 }
1401 
1402 static void
1403 nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1404 {
1405 	struct nfe_rx_data *data;
1406 	void *desc;
1407 	int i;
1408 
1409 	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1410 		return;
1411 
1412 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1413 		desc = ring->jdesc64;
1414 	} else {
1415 		desc = ring->jdesc32;
1416 	}
1417 
1418 	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1419 		data = &ring->jdata[i];
1420 		if (data->rx_data_map != NULL) {
1421 			bus_dmamap_destroy(ring->jrx_data_tag,
1422 			    data->rx_data_map);
1423 			data->rx_data_map = NULL;
1424 		}
1425 		if (data->m != NULL) {
1426 			m_freem(data->m);
1427 			data->m = NULL;
1428 		}
1429 	}
1430 	if (ring->jrx_data_tag != NULL) {
1431 		if (ring->jrx_spare_map != NULL) {
1432 			bus_dmamap_destroy(ring->jrx_data_tag,
1433 			    ring->jrx_spare_map);
1434 			ring->jrx_spare_map = NULL;
1435 		}
1436 		bus_dma_tag_destroy(ring->jrx_data_tag);
1437 		ring->jrx_data_tag = NULL;
1438 	}
1439 
1440 	if (desc != NULL) {
1441 		bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
1442 		bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
1443 		ring->jdesc64 = NULL;
1444 		ring->jdesc32 = NULL;
1445 	}
1446 
1447 	if (ring->jrx_desc_tag != NULL) {
1448 		bus_dma_tag_destroy(ring->jrx_desc_tag);
1449 		ring->jrx_desc_tag = NULL;
1450 	}
1451 }
1452 
1453 static int
1454 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1455 {
1456 	struct nfe_dmamap_arg ctx;
1457 	int i, error;
1458 	void *desc;
1459 	int descsize;
1460 
1461 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1462 		desc = ring->desc64;
1463 		descsize = sizeof (struct nfe_desc64);
1464 	} else {
1465 		desc = ring->desc32;
1466 		descsize = sizeof (struct nfe_desc32);
1467 	}
1468 
1469 	ring->queued = 0;
1470 	ring->cur = ring->next = 0;
1471 
1472 	error = bus_dma_tag_create(sc->nfe_parent_tag,
1473 	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1474 	    BUS_SPACE_MAXADDR,			/* lowaddr */
1475 	    BUS_SPACE_MAXADDR,			/* highaddr */
1476 	    NULL, NULL,				/* filter, filterarg */
1477 	    NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
1478 	    NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
1479 	    0,					/* flags */
1480 	    NULL, NULL,				/* lockfunc, lockarg */
1481 	    &ring->tx_desc_tag);
1482 	if (error != 0) {
1483 		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1484 		goto fail;
1485 	}
1486 
1487 	error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
1488 	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
1489 	if (error != 0) {
1490 		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1491 		goto fail;
1492 	}
1493 	if (sc->nfe_flags & NFE_40BIT_ADDR)
1494 		ring->desc64 = desc;
1495 	else
1496 		ring->desc32 = desc;
1497 
1498 	ctx.nfe_busaddr = 0;
1499 	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
1500 	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1501 	if (error != 0) {
1502 		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1503 		goto fail;
1504 	}
1505 	ring->physaddr = ctx.nfe_busaddr;
1506 
1507 	error = bus_dma_tag_create(sc->nfe_parent_tag,
1508 	    1, 0,
1509 	    BUS_SPACE_MAXADDR,
1510 	    BUS_SPACE_MAXADDR,
1511 	    NULL, NULL,
1512 	    NFE_TSO_MAXSIZE,
1513 	    NFE_MAX_SCATTER,
1514 	    NFE_TSO_MAXSGSIZE,
1515 	    0,
1516 	    NULL, NULL,
1517 	    &ring->tx_data_tag);
1518 	if (error != 0) {
1519 		device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
1520 		goto fail;
1521 	}
1522 
1523 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1524 		error = bus_dmamap_create(ring->tx_data_tag, 0,
1525 		    &ring->data[i].tx_data_map);
1526 		if (error != 0) {
1527 			device_printf(sc->nfe_dev,
1528 			    "could not create Tx DMA map\n");
1529 			goto fail;
1530 		}
1531 	}
1532 
1533 fail:
1534 	return (error);
1535 }
1536 
1537 static void
1538 nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1539 {
1540 	void *desc;
1541 	size_t descsize;
1542 
1543 	sc->nfe_force_tx = 0;
1544 	ring->queued = 0;
1545 	ring->cur = ring->next = 0;
1546 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1547 		desc = ring->desc64;
1548 		descsize = sizeof (struct nfe_desc64);
1549 	} else {
1550 		desc = ring->desc32;
1551 		descsize = sizeof (struct nfe_desc32);
1552 	}
1553 	bzero(desc, descsize * NFE_TX_RING_COUNT);
1554 
1555 	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1556 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1557 }
1558 
1559 static void
1560 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1561 {
1562 	struct nfe_tx_data *data;
1563 	void *desc;
1564 	int i;
1565 
1566 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1567 		desc = ring->desc64;
1568 	} else {
1569 		desc = ring->desc32;
1570 	}
1571 
1572 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1573 		data = &ring->data[i];
1574 
1575 		if (data->m != NULL) {
1576 			bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
1577 			    BUS_DMASYNC_POSTWRITE);
1578 			bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
1579 			m_freem(data->m);
1580 			data->m = NULL;
1581 		}
1582 		if (data->tx_data_map != NULL) {
1583 			bus_dmamap_destroy(ring->tx_data_tag,
1584 			    data->tx_data_map);
1585 			data->tx_data_map = NULL;
1586 		}
1587 	}
1588 
1589 	if (ring->tx_data_tag != NULL) {
1590 		bus_dma_tag_destroy(ring->tx_data_tag);
1591 		ring->tx_data_tag = NULL;
1592 	}
1593 
1594 	if (desc != NULL) {
1595 		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1596 		    BUS_DMASYNC_POSTWRITE);
1597 		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
1598 		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
1599 		ring->desc64 = NULL;
1600 		ring->desc32 = NULL;
1601 		bus_dma_tag_destroy(ring->tx_desc_tag);
1602 		ring->tx_desc_tag = NULL;
1603 	}
1604 }
1605 
1606 #ifdef DEVICE_POLLING
1607 static poll_handler_t nfe_poll;
1608 
1609 static int
1610 nfe_poll(if_t ifp, enum poll_cmd cmd, int count)
1611 {
1612 	struct nfe_softc *sc = if_getsoftc(ifp);
1613 	uint32_t r;
1614 	int rx_npkts = 0;
1615 
1616 	NFE_LOCK(sc);
1617 
1618 	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
1619 		NFE_UNLOCK(sc);
1620 		return (rx_npkts);
1621 	}
1622 
1623 	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1624 		rx_npkts = nfe_jrxeof(sc, count, &rx_npkts);
1625 	else
1626 		rx_npkts = nfe_rxeof(sc, count, &rx_npkts);
1627 	nfe_txeof(sc);
1628 	if (!if_sendq_empty(ifp))
1629 		nfe_start_locked(ifp);
1630 
1631 	if (cmd == POLL_AND_CHECK_STATUS) {
1632 		if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1633 			NFE_UNLOCK(sc);
1634 			return (rx_npkts);
1635 		}
1636 		NFE_WRITE(sc, sc->nfe_irq_status, r);
1637 
1638 		if (r & NFE_IRQ_LINK) {
1639 			NFE_READ(sc, NFE_PHY_STATUS);
1640 			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1641 			DPRINTF(sc, "link state changed\n");
1642 		}
1643 	}
1644 	NFE_UNLOCK(sc);
1645 	return (rx_npkts);
1646 }
1647 #endif /* DEVICE_POLLING */
1648 
1649 static void
1650 nfe_set_intr(struct nfe_softc *sc)
1651 {
1652 
1653 	if (sc->nfe_msi != 0)
1654 		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1655 }
1656 
1657 /* With MSI-X, a write to the mask registers behaves as an XOR. */
1658 static __inline void
1659 nfe_enable_intr(struct nfe_softc *sc)
1660 {
1661 
1662 	if (sc->nfe_msix != 0) {
1663 		/* XXX Should have a better way to enable interrupts! */
1664 		if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
1665 			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1666 	} else
1667 		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1668 }
1669 
1670 static __inline void
1671 nfe_disable_intr(struct nfe_softc *sc)
1672 {
1673 
1674 	if (sc->nfe_msix != 0) {
1675 		/* XXX Should have a better way to disable interrupts! */
1676 		if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
1677 			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1678 	} else
1679 		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1680 }
1681 
1682 static int
1683 nfe_ioctl(if_t ifp, u_long cmd, caddr_t data)
1684 {
1685 	struct nfe_softc *sc;
1686 	struct ifreq *ifr;
1687 	struct mii_data *mii;
1688 	int error, init, mask;
1689 
1690 	sc = if_getsoftc(ifp);
1691 	ifr = (struct ifreq *) data;
1692 	error = 0;
1693 	init = 0;
1694 	switch (cmd) {
1695 	case SIOCSIFMTU:
1696 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
1697 			error = EINVAL;
1698 		else if (if_getmtu(ifp) != ifr->ifr_mtu) {
1699 			if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
1700 			    (sc->nfe_jumbo_disable != 0)) &&
1701 			    ifr->ifr_mtu > ETHERMTU)
1702 				error = EINVAL;
1703 			else {
1704 				NFE_LOCK(sc);
1705 				if_setmtu(ifp, ifr->ifr_mtu);
1706 				if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1707 					if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1708 					nfe_init_locked(sc);
1709 				}
1710 				NFE_UNLOCK(sc);
1711 			}
1712 		}
1713 		break;
1714 	case SIOCSIFFLAGS:
1715 		NFE_LOCK(sc);
1716 		if (if_getflags(ifp) & IFF_UP) {
1717 			/*
1718 			 * If only the PROMISC or ALLMULTI flag changes, then
1719 			 * don't do a full re-init of the chip, just update
1720 			 * the Rx filter.
1721 			 */
1722 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) &&
1723 			    ((if_getflags(ifp) ^ sc->nfe_if_flags) &
1724 			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1725 				nfe_setmulti(sc);
1726 			else
1727 				nfe_init_locked(sc);
1728 		} else {
1729 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1730 				nfe_stop(ifp);
1731 		}
1732 		sc->nfe_if_flags = if_getflags(ifp);
1733 		NFE_UNLOCK(sc);
1734 		error = 0;
1735 		break;
1736 	case SIOCADDMULTI:
1737 	case SIOCDELMULTI:
1738 		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
1739 			NFE_LOCK(sc);
1740 			nfe_setmulti(sc);
1741 			NFE_UNLOCK(sc);
1742 			error = 0;
1743 		}
1744 		break;
1745 	case SIOCSIFMEDIA:
1746 	case SIOCGIFMEDIA:
1747 		mii = device_get_softc(sc->nfe_miibus);
1748 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1749 		break;
1750 	case SIOCSIFCAP:
1751 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1752 #ifdef DEVICE_POLLING
1753 		if ((mask & IFCAP_POLLING) != 0) {
1754 			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1755 				error = ether_poll_register(nfe_poll, ifp);
1756 				if (error)
1757 					break;
1758 				NFE_LOCK(sc);
1759 				nfe_disable_intr(sc);
1760 				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
1761 				NFE_UNLOCK(sc);
1762 			} else {
1763 				error = ether_poll_deregister(ifp);
1764 				/* Enable interrupt even in error case */
1765 				NFE_LOCK(sc);
1766 				nfe_enable_intr(sc);
1767 				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
1768 				NFE_UNLOCK(sc);
1769 			}
1770 		}
1771 #endif /* DEVICE_POLLING */
1772 		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1773 		    (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
1774 			if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
1775 		if ((mask & IFCAP_TXCSUM) != 0 &&
1776 		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
1777 			if_togglecapenable(ifp, IFCAP_TXCSUM);
1778 			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
1779 				if_sethwassistbits(ifp, NFE_CSUM_FEATURES, 0);
1780 			else
1781 				if_sethwassistbits(ifp, 0, NFE_CSUM_FEATURES);
1782 		}
1783 		if ((mask & IFCAP_RXCSUM) != 0 &&
1784 		    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
1785 			if_togglecapenable(ifp, IFCAP_RXCSUM);
1786 			init++;
1787 		}
1788 		if ((mask & IFCAP_TSO4) != 0 &&
1789 		    (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
1790 			if_togglecapenable(ifp, IFCAP_TSO4);
1791 			if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0)
1792 				if_sethwassistbits(ifp, CSUM_TSO, 0);
1793 			else
1794 				if_sethwassistbits(ifp, 0, CSUM_TSO);
1795 		}
1796 		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
1797 		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
1798 			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
1799 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1800 		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
1801 			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
1802 			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
1803 				if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
1804 			init++;
1805 		}
1806 		/*
1807 		 * XXX
1808 		 * It seems that VLAN stripping requires Rx checksum offload.
1809 		 * Unfortunately FreeBSD has no way to disable only Rx-side
1810 		 * VLAN stripping.  So, when Rx checksum offload is known to
1811 		 * be disabled, turn the entire hardware VLAN assist off.
1812 		 */
1813 		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) == 0) {
1814 			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
1815 				init++;
1816 			if_setcapenablebit(ifp, 0,
1817 			    (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO));
1818 		}
1819 		if (init > 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
1820 			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1821 			nfe_init(sc);
1822 		}
1823 		if_vlancap(ifp);
1824 		break;
1825 	default:
1826 		error = ether_ioctl(ifp, cmd, data);
1827 		break;
1828 	}
1829 
1830 	return (error);
1831 }
1832 
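/*
 * Interrupt filter routine.  Check whether the interrupt is ours, mask
 * further interrupts and defer the actual processing to the taskqueue.
 */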
1833 static int
1834 nfe_intr(void *arg)
1835 {
1836 	struct nfe_softc *sc;
1837 	uint32_t status;
1838 
1839 	sc = (struct nfe_softc *)arg;
1840 
1841 	status = NFE_READ(sc, sc->nfe_irq_status);
1842 	if (status == 0 || status == 0xffffffff)
1843 		return (FILTER_STRAY);
1844 	nfe_disable_intr(sc);
1845 	taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task);
1846 
1847 	return (FILTER_HANDLED);
1848 }
1849 
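/*
 * Deferred interrupt handler.  Acknowledge the pending interrupt status,
 * service the Rx and Tx rings, restart transmission if needed and re-enable
 * interrupts once all work has been processed.
 */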
1850 static void
1851 nfe_int_task(void *arg, int pending)
1852 {
1853 	struct nfe_softc *sc = arg;
1854 	if_t ifp = sc->nfe_ifp;
1855 	uint32_t r;
1856 	int domore;
1857 
1858 	NFE_LOCK(sc);
1859 
1860 	if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1861 		nfe_enable_intr(sc);
1862 		NFE_UNLOCK(sc);
1863 		return;	/* not for us */
1864 	}
1865 	NFE_WRITE(sc, sc->nfe_irq_status, r);
1866 
1867 	DPRINTFN(sc, 5, "nfe_int_task: interrupt register %x\n", r);
1868 
1869 #ifdef DEVICE_POLLING
1870 	if (if_getcapenable(ifp) & IFCAP_POLLING) {
1871 		NFE_UNLOCK(sc);
1872 		return;
1873 	}
1874 #endif
1875 
1876 	if (r & NFE_IRQ_LINK) {
1877 		NFE_READ(sc, NFE_PHY_STATUS);
1878 		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1879 		DPRINTF(sc, "link state changed\n");
1880 	}
1881 
1882 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
1883 		NFE_UNLOCK(sc);
1884 		nfe_disable_intr(sc);
1885 		return;
1886 	}
1887 
1888 	domore = 0;
1889 	/* check Rx ring */
1890 	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1891 		domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
1892 	else
1893 		domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
1894 	/* check Tx ring */
1895 	nfe_txeof(sc);
1896 
1897 	if (!if_sendq_empty(ifp))
1898 		nfe_start_locked(ifp);
1899 
1900 	NFE_UNLOCK(sc);
1901 
1902 	if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
1903 		taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task);
1904 		return;
1905 	}
1906 
1907 	/* Reenable interrupts. */
1908 	nfe_enable_intr(sc);
1909 }
1910 
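/*
 * Return an Rx descriptor to the hardware, reusing the mbuf that is still
 * loaded for this slot.
 */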
1911 static __inline void
1912 nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
1913 {
1914 	struct nfe_desc32 *desc32;
1915 	struct nfe_desc64 *desc64;
1916 	struct nfe_rx_data *data;
1917 	struct mbuf *m;
1918 
1919 	data = &sc->rxq.data[idx];
1920 	m = data->m;
1921 
1922 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1923 		desc64 = &sc->rxq.desc64[idx];
1924 		/* VLAN packet may have overwritten it. */
1925 		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1926 		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1927 		desc64->length = htole16(m->m_len);
1928 		desc64->flags = htole16(NFE_RX_READY);
1929 	} else {
1930 		desc32 = &sc->rxq.desc32[idx];
1931 		desc32->length = htole16(m->m_len);
1932 		desc32->flags = htole16(NFE_RX_READY);
1933 	}
1934 }
1935 
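/*
 * Jumbo ring counterpart of nfe_discard_rxbuf().
 */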
1936 static __inline void
1937 nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
1938 {
1939 	struct nfe_desc32 *desc32;
1940 	struct nfe_desc64 *desc64;
1941 	struct nfe_rx_data *data;
1942 	struct mbuf *m;
1943 
1944 	data = &sc->jrxq.jdata[idx];
1945 	m = data->m;
1946 
1947 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1948 		desc64 = &sc->jrxq.jdesc64[idx];
1949 		/* VLAN packet may have overwritten it. */
1950 		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1951 		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1952 		desc64->length = htole16(m->m_len);
1953 		desc64->flags = htole16(NFE_RX_READY);
1954 	} else {
1955 		desc32 = &sc->jrxq.jdesc32[idx];
1956 		desc32->length = htole16(m->m_len);
1957 		desc32->flags = htole16(NFE_RX_READY);
1958 	}
1959 }
1960 
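/*
 * Allocate and DMA-load a new mbuf cluster for the given Rx ring slot and
 * make the descriptor ready for the hardware again.
 */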
1961 static int
1962 nfe_newbuf(struct nfe_softc *sc, int idx)
1963 {
1964 	struct nfe_rx_data *data;
1965 	struct nfe_desc32 *desc32;
1966 	struct nfe_desc64 *desc64;
1967 	struct mbuf *m;
1968 	bus_dma_segment_t segs[1];
1969 	bus_dmamap_t map;
1970 	int nsegs;
1971 
1972 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1973 	if (m == NULL)
1974 		return (ENOBUFS);
1975 
1976 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1977 	m_adj(m, ETHER_ALIGN);
1978 
1979 	if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
1980 	    m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
1981 		m_freem(m);
1982 		return (ENOBUFS);
1983 	}
1984 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1985 
1986 	data = &sc->rxq.data[idx];
1987 	if (data->m != NULL) {
1988 		bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1989 		    BUS_DMASYNC_POSTREAD);
1990 		bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
1991 	}
1992 	map = data->rx_data_map;
1993 	data->rx_data_map = sc->rxq.rx_spare_map;
1994 	sc->rxq.rx_spare_map = map;
1995 	bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1996 	    BUS_DMASYNC_PREREAD);
1997 	data->paddr = segs[0].ds_addr;
1998 	data->m = m;
1999 	/* update mapping address in h/w descriptor */
2000 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
2001 		desc64 = &sc->rxq.desc64[idx];
2002 		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2003 		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2004 		desc64->length = htole16(segs[0].ds_len);
2005 		desc64->flags = htole16(NFE_RX_READY);
2006 	} else {
2007 		desc32 = &sc->rxq.desc32[idx];
2008 		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2009 		desc32->length = htole16(segs[0].ds_len);
2010 		desc32->flags = htole16(NFE_RX_READY);
2011 	}
2012 
2013 	return (0);
2014 }
2015 
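/*
 * Jumbo (9k cluster) counterpart of nfe_newbuf().
 */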
2016 static int
2017 nfe_jnewbuf(struct nfe_softc *sc, int idx)
2018 {
2019 	struct nfe_rx_data *data;
2020 	struct nfe_desc32 *desc32;
2021 	struct nfe_desc64 *desc64;
2022 	struct mbuf *m;
2023 	bus_dma_segment_t segs[1];
2024 	bus_dmamap_t map;
2025 	int nsegs;
2026 
2027 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
2028 	if (m == NULL)
2029 		return (ENOBUFS);
2030 	m->m_pkthdr.len = m->m_len = MJUM9BYTES;
2031 	m_adj(m, ETHER_ALIGN);
2032 
2033 	if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
2034 	    sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
2035 		m_freem(m);
2036 		return (ENOBUFS);
2037 	}
2038 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2039 
2040 	data = &sc->jrxq.jdata[idx];
2041 	if (data->m != NULL) {
2042 		bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2043 		    BUS_DMASYNC_POSTREAD);
2044 		bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
2045 	}
2046 	map = data->rx_data_map;
2047 	data->rx_data_map = sc->jrxq.jrx_spare_map;
2048 	sc->jrxq.jrx_spare_map = map;
2049 	bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2050 	    BUS_DMASYNC_PREREAD);
2051 	data->paddr = segs[0].ds_addr;
2052 	data->m = m;
2053 	/* update mapping address in h/w descriptor */
2054 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
2055 		desc64 = &sc->jrxq.jdesc64[idx];
2056 		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2057 		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2058 		desc64->length = htole16(segs[0].ds_len);
2059 		desc64->flags = htole16(NFE_RX_READY);
2060 	} else {
2061 		desc32 = &sc->jrxq.jdesc32[idx];
2062 		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2063 		desc32->length = htole16(segs[0].ds_len);
2064 		desc32->flags = htole16(NFE_RX_READY);
2065 	}
2066 
2067 	return (0);
2068 }
2069 
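/*
 * Service the standard Rx ring: hand completed frames up the stack, refill
 * the ring and stop after at most 'count' descriptors have been processed.
 */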
2070 static int
2071 nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2072 {
2073 	if_t ifp = sc->nfe_ifp;
2074 	struct nfe_desc32 *desc32;
2075 	struct nfe_desc64 *desc64;
2076 	struct nfe_rx_data *data;
2077 	struct mbuf *m;
2078 	uint16_t flags;
2079 	int len, prog, rx_npkts;
2080 	uint32_t vtag = 0;
2081 
2082 	rx_npkts = 0;
2083 	NFE_LOCK_ASSERT(sc);
2084 
2085 	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2086 	    BUS_DMASYNC_POSTREAD);
2087 
2088 	for (prog = 0; ; NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
2089 		if (count <= 0)
2090 			break;
2091 		count--;
2092 
2093 		data = &sc->rxq.data[sc->rxq.cur];
2094 
2095 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2096 			desc64 = &sc->rxq.desc64[sc->rxq.cur];
2097 			vtag = le32toh(desc64->physaddr[1]);
2098 			flags = le16toh(desc64->flags);
2099 			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2100 		} else {
2101 			desc32 = &sc->rxq.desc32[sc->rxq.cur];
2102 			flags = le16toh(desc32->flags);
2103 			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2104 		}
2105 
2106 		if (flags & NFE_RX_READY)
2107 			break;
2108 		prog++;
2109 		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2110 			if (!(flags & NFE_RX_VALID_V1)) {
2111 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2112 				nfe_discard_rxbuf(sc, sc->rxq.cur);
2113 				continue;
2114 			}
2115 			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2116 				flags &= ~NFE_RX_ERROR;
2117 				len--;	/* fix buffer length */
2118 			}
2119 		} else {
2120 			if (!(flags & NFE_RX_VALID_V2)) {
2121 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2122 				nfe_discard_rxbuf(sc, sc->rxq.cur);
2123 				continue;
2124 			}
2125 
2126 			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2127 				flags &= ~NFE_RX_ERROR;
2128 				len--;	/* fix buffer length */
2129 			}
2130 		}
2131 
2132 		if (flags & NFE_RX_ERROR) {
2133 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2134 			nfe_discard_rxbuf(sc, sc->rxq.cur);
2135 			continue;
2136 		}
2137 
2138 		m = data->m;
2139 		if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
2140 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2141 			nfe_discard_rxbuf(sc, sc->rxq.cur);
2142 			continue;
2143 		}
2144 
2145 		if ((vtag & NFE_RX_VTAG) != 0 &&
2146 		    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
2147 			m->m_pkthdr.ether_vtag = vtag & 0xffff;
2148 			m->m_flags |= M_VLANTAG;
2149 		}
2150 
2151 		m->m_pkthdr.len = m->m_len = len;
2152 		m->m_pkthdr.rcvif = ifp;
2153 
2154 		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
2155 			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2156 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2157 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2158 				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2159 				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
2160 					m->m_pkthdr.csum_flags |=
2161 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2162 					m->m_pkthdr.csum_data = 0xffff;
2163 				}
2164 			}
2165 		}
2166 
2167 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2168 
2169 		NFE_UNLOCK(sc);
2170 		if_input(ifp, m);
2171 		NFE_LOCK(sc);
2172 		rx_npkts++;
2173 	}
2174 
2175 	if (prog > 0)
2176 		bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2177 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2178 
2179 	if (rx_npktsp != NULL)
2180 		*rx_npktsp = rx_npkts;
2181 	return (count > 0 ? 0 : EAGAIN);
2182 }
2183 
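/*
 * Jumbo ring counterpart of nfe_rxeof().
 */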
2184 static int
2185 nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2186 {
2187 	if_t ifp = sc->nfe_ifp;
2188 	struct nfe_desc32 *desc32;
2189 	struct nfe_desc64 *desc64;
2190 	struct nfe_rx_data *data;
2191 	struct mbuf *m;
2192 	uint16_t flags;
2193 	int len, prog, rx_npkts;
2194 	uint32_t vtag = 0;
2195 
2196 	rx_npkts = 0;
2197 	NFE_LOCK_ASSERT(sc);
2198 
2199 	bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2200 	    BUS_DMASYNC_POSTREAD);
2201 
2202 	for (prog = 0; ; NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
2203 	    vtag = 0) {
2204 		if (count <= 0)
2205 			break;
2206 		count--;
2207 
2208 		data = &sc->jrxq.jdata[sc->jrxq.jcur];
2209 
2210 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2211 			desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
2212 			vtag = le32toh(desc64->physaddr[1]);
2213 			flags = le16toh(desc64->flags);
2214 			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2215 		} else {
2216 			desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
2217 			flags = le16toh(desc32->flags);
2218 			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2219 		}
2220 
2221 		if (flags & NFE_RX_READY)
2222 			break;
2223 		prog++;
2224 		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2225 			if (!(flags & NFE_RX_VALID_V1)) {
2226 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2227 				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2228 				continue;
2229 			}
2230 			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2231 				flags &= ~NFE_RX_ERROR;
2232 				len--;	/* fix buffer length */
2233 			}
2234 		} else {
2235 			if (!(flags & NFE_RX_VALID_V2)) {
2236 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2237 				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2238 				continue;
2239 			}
2240 
2241 			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2242 				flags &= ~NFE_RX_ERROR;
2243 				len--;	/* fix buffer length */
2244 			}
2245 		}
2246 
2247 		if (flags & NFE_RX_ERROR) {
2248 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2249 			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2250 			continue;
2251 		}
2252 
2253 		m = data->m;
2254 		if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
2255 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2256 			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2257 			continue;
2258 		}
2259 
2260 		if ((vtag & NFE_RX_VTAG) != 0 &&
2261 		    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
2262 			m->m_pkthdr.ether_vtag = vtag & 0xffff;
2263 			m->m_flags |= M_VLANTAG;
2264 		}
2265 
2266 		m->m_pkthdr.len = m->m_len = len;
2267 		m->m_pkthdr.rcvif = ifp;
2268 
2269 		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
2270 			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2271 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2272 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2273 				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2274 				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
2275 					m->m_pkthdr.csum_flags |=
2276 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2277 					m->m_pkthdr.csum_data = 0xffff;
2278 				}
2279 			}
2280 		}
2281 
2282 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2283 
2284 		NFE_UNLOCK(sc);
2285 		if_input(ifp, m);
2286 		NFE_LOCK(sc);
2287 		rx_npkts++;
2288 	}
2289 
2290 	if (prog > 0)
2291 		bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2292 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2293 
2294 	if (rx_npktsp != NULL)
2295 		*rx_npktsp = rx_npkts;
2296 	return (count > 0 ? 0 : EAGAIN);
2297 }
2298 
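/*
 * Reclaim completed Tx descriptors: update the output counters, unload the
 * DMA maps and free the transmitted mbufs.
 */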
2299 static void
2300 nfe_txeof(struct nfe_softc *sc)
2301 {
2302 	if_t ifp = sc->nfe_ifp;
2303 	struct nfe_desc32 *desc32;
2304 	struct nfe_desc64 *desc64;
2305 	struct nfe_tx_data *data = NULL;
2306 	uint16_t flags;
2307 	int cons, prog;
2308 
2309 	NFE_LOCK_ASSERT(sc);
2310 
2311 	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2312 	    BUS_DMASYNC_POSTREAD);
2313 
2314 	prog = 0;
2315 	for (cons = sc->txq.next; cons != sc->txq.cur;
2316 	    NFE_INC(cons, NFE_TX_RING_COUNT)) {
2317 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2318 			desc64 = &sc->txq.desc64[cons];
2319 			flags = le16toh(desc64->flags);
2320 		} else {
2321 			desc32 = &sc->txq.desc32[cons];
2322 			flags = le16toh(desc32->flags);
2323 		}
2324 
2325 		if (flags & NFE_TX_VALID)
2326 			break;
2327 
2328 		prog++;
2329 		sc->txq.queued--;
2330 		data = &sc->txq.data[cons];
2331 
2332 		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2333 			if ((flags & NFE_TX_LASTFRAG_V1) == 0)
2334 				continue;
2335 			if ((flags & NFE_TX_ERROR_V1) != 0) {
2336 				device_printf(sc->nfe_dev,
2337 				    "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
2338 
2339 				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2340 			} else
2341 				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2342 		} else {
2343 			if ((flags & NFE_TX_LASTFRAG_V2) == 0)
2344 				continue;
2345 			if ((flags & NFE_TX_ERROR_V2) != 0) {
2346 				device_printf(sc->nfe_dev,
2347 				    "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
2348 				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2349 			} else
2350 				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2351 		}
2352 
2353 		/* last fragment of the mbuf chain transmitted */
2354 		KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
2355 		bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
2356 		    BUS_DMASYNC_POSTWRITE);
2357 		bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
2358 		m_freem(data->m);
2359 		data->m = NULL;
2360 	}
2361 
2362 	if (prog > 0) {
2363 		sc->nfe_force_tx = 0;
2364 		sc->txq.next = cons;
2365 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2366 		if (sc->txq.queued == 0)
2367 			sc->nfe_watchdog_timer = 0;
2368 	}
2369 }
2370 
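/*
 * Map an mbuf chain onto free Tx descriptors and set up checksum offload,
 * VLAN tag insertion and TSO as requested by the packet header.
 */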
2371 static int
2372 nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
2373 {
2374 	struct nfe_desc32 *desc32 = NULL;
2375 	struct nfe_desc64 *desc64 = NULL;
2376 	bus_dmamap_t map;
2377 	bus_dma_segment_t segs[NFE_MAX_SCATTER];
2378 	int error, i, nsegs, prod, si;
2379 	uint32_t tsosegsz;
2380 	uint16_t cflags, flags;
2381 	struct mbuf *m;
2382 
2383 	prod = si = sc->txq.cur;
2384 	map = sc->txq.data[prod].tx_data_map;
2385 
2386 	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
2387 	    &nsegs, BUS_DMA_NOWAIT);
2388 	if (error == EFBIG) {
2389 		m = m_collapse(*m_head, M_NOWAIT, NFE_MAX_SCATTER);
2390 		if (m == NULL) {
2391 			m_freem(*m_head);
2392 			*m_head = NULL;
2393 			return (ENOBUFS);
2394 		}
2395 		*m_head = m;
2396 		error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
2397 		    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2398 		if (error != 0) {
2399 			m_freem(*m_head);
2400 			*m_head = NULL;
2401 			return (ENOBUFS);
2402 		}
2403 	} else if (error != 0)
2404 		return (error);
2405 	if (nsegs == 0) {
2406 		m_freem(*m_head);
2407 		*m_head = NULL;
2408 		return (EIO);
2409 	}
2410 
2411 	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
2412 		bus_dmamap_unload(sc->txq.tx_data_tag, map);
2413 		return (ENOBUFS);
2414 	}
2415 
2416 	m = *m_head;
2417 	cflags = flags = 0;
2418 	tsosegsz = 0;
2419 	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2420 		tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz <<
2421 		    NFE_TX_TSO_SHIFT;
2422 		cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
2423 		cflags |= NFE_TX_TSO;
2424 	} else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
2425 		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2426 			cflags |= NFE_TX_IP_CSUM;
2427 		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2428 			cflags |= NFE_TX_TCP_UDP_CSUM;
2429 		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2430 			cflags |= NFE_TX_TCP_UDP_CSUM;
2431 	}
2432 
2433 	for (i = 0; i < nsegs; i++) {
2434 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2435 			desc64 = &sc->txq.desc64[prod];
2436 			desc64->physaddr[0] =
2437 			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
2438 			desc64->physaddr[1] =
2439 			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
2440 			desc64->vtag = 0;
2441 			desc64->length = htole16(segs[i].ds_len - 1);
2442 			desc64->flags = htole16(flags);
2443 		} else {
2444 			desc32 = &sc->txq.desc32[prod];
2445 			desc32->physaddr =
2446 			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
2447 			desc32->length = htole16(segs[i].ds_len - 1);
2448 			desc32->flags = htole16(flags);
2449 		}
2450 
2451 		/*
2452 		 * Setting of the valid bit in the first descriptor is
2453 		 * deferred until the whole chain is fully setup.
2454 		 */
2455 		flags |= NFE_TX_VALID;
2456 
2457 		sc->txq.queued++;
2458 		NFE_INC(prod, NFE_TX_RING_COUNT);
2459 	}
2460 
2461 	/*
2462 	 * the whole mbuf chain has been DMA mapped, fix last/first descriptor.
2463 	 * csum flags, vtag and TSO belong to the first fragment only.
2464 	 */
2465 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
2466 		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
2467 		desc64 = &sc->txq.desc64[si];
2468 		if ((m->m_flags & M_VLANTAG) != 0)
2469 			desc64->vtag = htole32(NFE_TX_VTAG |
2470 			    m->m_pkthdr.ether_vtag);
2471 		if (tsosegsz != 0) {
2472 			/*
2473 			 * XXX
2474 			 * The following indicates the descriptor element
2475 			 * is a 32bit quantity.
2476 			 */
2477 			desc64->length |= htole16((uint16_t)tsosegsz);
2478 			desc64->flags |= htole16(tsosegsz >> 16);
2479 		}
2480 		/*
2481 		 * finally, set the valid/checksum/TSO bit in the first
2482 		 * descriptor.
2483 		 */
2484 		desc64->flags |= htole16(NFE_TX_VALID | cflags);
2485 	} else {
2486 		if (sc->nfe_flags & NFE_JUMBO_SUP)
2487 			desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
2488 		else
2489 			desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
2490 		desc32 = &sc->txq.desc32[si];
2491 		if (tsosegsz != 0) {
2492 			/*
2493 			 * XXX
2494 			 * The following indicates the descriptor element
2495 			 * is a 32bit quantity.
2496 			 */
2497 			desc32->length |= htole16((uint16_t)tsosegsz);
2498 			desc32->flags |= htole16(tsosegsz >> 16);
2499 		}
2500 		/*
2501 		 * finally, set the valid/checksum/TSO bit in the first
2502 		 * descriptor.
2503 		 */
2504 		desc32->flags |= htole16(NFE_TX_VALID | cflags);
2505 	}
2506 
2507 	sc->txq.cur = prod;
2508 	prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
2509 	sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
2510 	sc->txq.data[prod].tx_data_map = map;
2511 	sc->txq.data[prod].m = m;
2512 
2513 	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
2514 
2515 	return (0);
2516 }
2517 
2518 struct nfe_hash_maddr_ctx {
2519 	uint8_t addr[ETHER_ADDR_LEN];
2520 	uint8_t mask[ETHER_ADDR_LEN];
2521 };
2522 
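/*
 * if_foreach_llmaddr() callback used by nfe_setmulti() to fold each joined
 * multicast address into the address/mask filter pair.
 */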
2523 static u_int
2524 nfe_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2525 {
2526 	struct nfe_hash_maddr_ctx *ctx = arg;
2527 	uint8_t *addrp, mcaddr;
2528 	int j;
2529 
2530 	addrp = LLADDR(sdl);
2531 	for (j = 0; j < ETHER_ADDR_LEN; j++) {
2532 		mcaddr = addrp[j];
2533 		ctx->addr[j] &= mcaddr;
2534 		ctx->mask[j] &= ~mcaddr;
2535 	}
2536 
2537 	return (1);
2538 }
2539 
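/*
 * Program the hardware Rx filter.  In promiscuous or allmulti mode the
 * multicast filter is opened up completely; otherwise an address/mask pair
 * covering all joined multicast groups is computed.
 */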
2540 static void
2541 nfe_setmulti(struct nfe_softc *sc)
2542 {
2543 	if_t ifp = sc->nfe_ifp;
2544 	struct nfe_hash_maddr_ctx ctx;
2545 	uint32_t filter;
2546 	uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
2547 		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2548 	};
2549 	int i;
2550 
2551 	NFE_LOCK_ASSERT(sc);
2552 
2553 	if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2554 		bzero(ctx.addr, ETHER_ADDR_LEN);
2555 		bzero(ctx.mask, ETHER_ADDR_LEN);
2556 		goto done;
2557 	}
2558 
2559 	bcopy(etherbroadcastaddr, ctx.addr, ETHER_ADDR_LEN);
2560 	bcopy(etherbroadcastaddr, ctx.mask, ETHER_ADDR_LEN);
2561 
2562 	if_foreach_llmaddr(ifp, nfe_hash_maddr, &ctx);
2563 
2564 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
2565 		ctx.mask[i] |= ctx.addr[i];
2566 	}
2567 
2568 done:
2569 	ctx.addr[0] |= 0x01;	/* make sure multicast bit is set */
2570 
2571 	NFE_WRITE(sc, NFE_MULTIADDR_HI, ctx.addr[3] << 24 | ctx.addr[2] << 16 |
2572 	    ctx.addr[1] << 8 | ctx.addr[0]);
2573 	NFE_WRITE(sc, NFE_MULTIADDR_LO,
2574 	    ctx.addr[5] <<  8 | ctx.addr[4]);
2575 	NFE_WRITE(sc, NFE_MULTIMASK_HI, ctx.mask[3] << 24 | ctx.mask[2] << 16 |
2576 	    ctx.mask[1] << 8 | ctx.mask[0]);
2577 	NFE_WRITE(sc, NFE_MULTIMASK_LO,
2578 	    ctx.mask[5] <<  8 | ctx.mask[4]);
2579 
2580 	filter = NFE_READ(sc, NFE_RXFILTER);
2581 	filter &= NFE_PFF_RX_PAUSE;
2582 	filter |= NFE_RXFILTER_MAGIC;
2583 	filter |= (if_getflags(ifp) & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
2584 	NFE_WRITE(sc, NFE_RXFILTER, filter);
2585 }
2586 
2587 static void
2588 nfe_start(if_t ifp)
2589 {
2590 	struct nfe_softc *sc = if_getsoftc(ifp);
2591 
2592 	NFE_LOCK(sc);
2593 	nfe_start_locked(ifp);
2594 	NFE_UNLOCK(sc);
2595 }
2596 
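/*
 * Dequeue packets from the interface send queue and hand them to
 * nfe_encap() until the queue is empty or the Tx ring fills up, then kick
 * the transmitter.
 */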
2597 static void
2598 nfe_start_locked(if_t ifp)
2599 {
2600 	struct nfe_softc *sc = if_getsoftc(ifp);
2601 	struct mbuf *m0;
2602 	int enq = 0;
2603 
2604 	NFE_LOCK_ASSERT(sc);
2605 
2606 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2607 	    IFF_DRV_RUNNING || sc->nfe_link == 0)
2608 		return;
2609 
2610 	while (!if_sendq_empty(ifp)) {
2611 		m0 = if_dequeue(ifp);
2612 
2613 		if (m0 == NULL)
2614 			break;
2615 
2616 		if (nfe_encap(sc, &m0) != 0) {
2617 			if (m0 == NULL)
2618 				break;
2619 			if_sendq_prepend(ifp, m0);
2620 			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
2621 			break;
2622 		}
2623 		enq++;
2624 		if_etherbpfmtap(ifp, m0);
2625 	}
2626 
2627 	if (enq > 0) {
2628 		bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2629 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2630 
2631 		/* kick Tx */
2632 		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2633 
2634 		/*
2635 		 * Set a timeout in case the chip goes out to lunch.
2636 		 */
2637 		sc->nfe_watchdog_timer = 5;
2638 	}
2639 }
2640 
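/*
 * Tx watchdog.  Try to recover from a missed Tx completion interrupt or a
 * lost kick-Tx command before falling back to a full reinitialization.
 */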
2641 static void
2642 nfe_watchdog(if_t ifp)
2643 {
2644 	struct nfe_softc *sc = if_getsoftc(ifp);
2645 
2646 	if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
2647 		return;
2648 
2649 	/* Check if we've lost Tx completion interrupt. */
2650 	nfe_txeof(sc);
2651 	if (sc->txq.queued == 0) {
2652 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2653 		    "-- recovering\n");
2654 		if (!if_sendq_empty(ifp))
2655 			nfe_start_locked(ifp);
2656 		return;
2657 	}
2658 	/* Check if we've lost start Tx command. */
2659 	sc->nfe_force_tx++;
2660 	if (sc->nfe_force_tx <= 3) {
2661 		/*
2662 		 * If this turns out to be the cause of the watchdog timeout,
2663 		 * the following code should go to nfe_txeof().
2664 		 */
2665 		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2666 		return;
2667 	}
2668 	sc->nfe_force_tx = 0;
2669 
2670 	if_printf(ifp, "watchdog timeout\n");
2671 
2672 	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
2673 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2674 	nfe_init_locked(sc);
2675 }
2676 
2677 static void
2678 nfe_init(void *xsc)
2679 {
2680 	struct nfe_softc *sc = xsc;
2681 
2682 	NFE_LOCK(sc);
2683 	nfe_init_locked(sc);
2684 	NFE_UNLOCK(sc);
2685 }
2686 
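/*
 * (Re)initialize the hardware: set up the Rx/Tx rings, program the MAC
 * address and Rx filter, and enable the receiver, transmitter and
 * interrupts.
 */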
2687 static void
2688 nfe_init_locked(void *xsc)
2689 {
2690 	struct nfe_softc *sc = xsc;
2691 	if_t ifp = sc->nfe_ifp;
2692 	struct mii_data *mii;
2693 	uint32_t val;
2694 	int error;
2695 
2696 	NFE_LOCK_ASSERT(sc);
2697 
2698 	mii = device_get_softc(sc->nfe_miibus);
2699 
2700 	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2701 		return;
2702 
2703 	nfe_stop(ifp);
2704 
2705 	sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS;
2706 
2707 	nfe_init_tx_ring(sc, &sc->txq);
2708 	if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
2709 		error = nfe_init_jrx_ring(sc, &sc->jrxq);
2710 	else
2711 		error = nfe_init_rx_ring(sc, &sc->rxq);
2712 	if (error != 0) {
2713 		device_printf(sc->nfe_dev,
2714 		    "initialization failed: no memory for rx buffers\n");
2715 		nfe_stop(ifp);
2716 		return;
2717 	}
2718 
2719 	val = 0;
2720 	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
2721 		val |= NFE_MAC_ADDR_INORDER;
2722 	NFE_WRITE(sc, NFE_TX_UNK, val);
2723 	NFE_WRITE(sc, NFE_STATUS, 0);
2724 
2725 	if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
2726 		NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
2727 
2728 	sc->rxtxctl = NFE_RXTX_BIT2;
2729 	if (sc->nfe_flags & NFE_40BIT_ADDR)
2730 		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
2731 	else if (sc->nfe_flags & NFE_JUMBO_SUP)
2732 		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
2733 
2734 	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
2735 		sc->rxtxctl |= NFE_RXTX_RXCSUM;
2736 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
2737 		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
2738 
2739 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
2740 	DELAY(10);
2741 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2742 
2743 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
2744 		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
2745 	else
2746 		NFE_WRITE(sc, NFE_VTAG_CTL, 0);
2747 
2748 	NFE_WRITE(sc, NFE_SETUP_R6, 0);
2749 
2750 	/* set MAC address */
2751 	nfe_set_macaddr(sc, if_getlladdr(ifp));
2752 
2753 	/* tell MAC where rings are in memory */
2754 	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
2755 		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2756 		    NFE_ADDR_HI(sc->jrxq.jphysaddr));
2757 		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2758 		    NFE_ADDR_LO(sc->jrxq.jphysaddr));
2759 	} else {
2760 		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2761 		    NFE_ADDR_HI(sc->rxq.physaddr));
2762 		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2763 		    NFE_ADDR_LO(sc->rxq.physaddr));
2764 	}
2765 	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
2766 	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
2767 
2768 	NFE_WRITE(sc, NFE_RING_SIZE,
2769 	    (NFE_RX_RING_COUNT - 1) << 16 |
2770 	    (NFE_TX_RING_COUNT - 1));
2771 
2772 	NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
2773 
2774 	/* force MAC to wakeup */
2775 	val = NFE_READ(sc, NFE_PWR_STATE);
2776 	if ((val & NFE_PWR_WAKEUP) == 0)
2777 		NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
2778 	DELAY(10);
2779 	val = NFE_READ(sc, NFE_PWR_STATE);
2780 	NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
2781 
2782 #if 1
2783 	/* configure interrupts coalescing/mitigation */
2784 	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
2785 #else
2786 	/* no interrupt mitigation: one interrupt per packet */
2787 	NFE_WRITE(sc, NFE_IMTIMER, 970);
2788 #endif
2789 
2790 	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
2791 	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
2792 	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
2793 
2794 	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
2795 	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
2796 
2797 	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
2798 	/* Disable WOL. */
2799 	NFE_WRITE(sc, NFE_WOL_CTL, 0);
2800 
2801 	sc->rxtxctl &= ~NFE_RXTX_BIT2;
2802 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2803 	DELAY(10);
2804 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2805 
2806 	/* set Rx filter */
2807 	nfe_setmulti(sc);
2808 
2809 	/* enable Rx */
2810 	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2811 
2812 	/* enable Tx */
2813 	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2814 
2815 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2816 
2817 	/* Clear hardware stats. */
2818 	nfe_stats_clear(sc);
2819 
2820 #ifdef DEVICE_POLLING
2821 	if (if_getcapenable(ifp) & IFCAP_POLLING)
2822 		nfe_disable_intr(sc);
2823 	else
2824 #endif
2825 	nfe_set_intr(sc);
2826 	nfe_enable_intr(sc); /* enable interrupts */
2827 
2828 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2829 	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2830 
2831 	sc->nfe_link = 0;
2832 	mii_mediachg(mii);
2833 
2834 	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2835 }
2836 
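/*
 * Stop the adapter: halt the receiver and transmitter, disable interrupts
 * and release any mbufs still held by the Rx, jumbo Rx and Tx rings.
 */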
2837 static void
2838 nfe_stop(if_t ifp)
2839 {
2840 	struct nfe_softc *sc = if_getsoftc(ifp);
2841 	struct nfe_rx_ring *rx_ring;
2842 	struct nfe_jrx_ring *jrx_ring;
2843 	struct nfe_tx_ring *tx_ring;
2844 	struct nfe_rx_data *rdata;
2845 	struct nfe_tx_data *tdata;
2846 	int i;
2847 
2848 	NFE_LOCK_ASSERT(sc);
2849 
2850 	sc->nfe_watchdog_timer = 0;
2851 	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2852 
2853 	callout_stop(&sc->nfe_stat_ch);
2854 
2855 	/* abort Tx */
2856 	NFE_WRITE(sc, NFE_TX_CTL, 0);
2857 
2858 	/* disable Rx */
2859 	NFE_WRITE(sc, NFE_RX_CTL, 0);
2860 
2861 	/* disable interrupts */
2862 	nfe_disable_intr(sc);
2863 
2864 	sc->nfe_link = 0;
2865 
2866 	/* free Rx and Tx mbufs still in the queues. */
2867 	rx_ring = &sc->rxq;
2868 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2869 		rdata = &rx_ring->data[i];
2870 		if (rdata->m != NULL) {
2871 			bus_dmamap_sync(rx_ring->rx_data_tag,
2872 			    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2873 			bus_dmamap_unload(rx_ring->rx_data_tag,
2874 			    rdata->rx_data_map);
2875 			m_freem(rdata->m);
2876 			rdata->m = NULL;
2877 		}
2878 	}
2879 
2880 	if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
2881 		jrx_ring = &sc->jrxq;
2882 		for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
2883 			rdata = &jrx_ring->jdata[i];
2884 			if (rdata->m != NULL) {
2885 				bus_dmamap_sync(jrx_ring->jrx_data_tag,
2886 				    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2887 				bus_dmamap_unload(jrx_ring->jrx_data_tag,
2888 				    rdata->rx_data_map);
2889 				m_freem(rdata->m);
2890 				rdata->m = NULL;
2891 			}
2892 		}
2893 	}
2894 
2895 	tx_ring = &sc->txq;
2896 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
2897 		tdata = &tx_ring->data[i];
2898 		if (tdata->m != NULL) {
2899 			bus_dmamap_sync(tx_ring->tx_data_tag,
2900 			    tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
2901 			bus_dmamap_unload(tx_ring->tx_data_tag,
2902 			    tdata->tx_data_map);
2903 			m_freem(tdata->m);
2904 			tdata->m = NULL;
2905 		}
2906 	}
2907 	/* Update hardware stats. */
2908 	nfe_stats_update(sc);
2909 }
2910 
2911 static int
2912 nfe_ifmedia_upd(if_t ifp)
2913 {
2914 	struct nfe_softc *sc = if_getsoftc(ifp);
2915 	struct mii_data *mii;
2916 
2917 	NFE_LOCK(sc);
2918 	mii = device_get_softc(sc->nfe_miibus);
2919 	mii_mediachg(mii);
2920 	NFE_UNLOCK(sc);
2921 
2922 	return (0);
2923 }
2924 
2925 static void
2926 nfe_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
2927 {
2928 	struct nfe_softc *sc;
2929 	struct mii_data *mii;
2930 
2931 	sc = if_getsoftc(ifp);
2932 
2933 	NFE_LOCK(sc);
2934 	mii = device_get_softc(sc->nfe_miibus);
2935 	mii_pollstat(mii);
2936 
2937 	ifmr->ifm_active = mii->mii_media_active;
2938 	ifmr->ifm_status = mii->mii_media_status;
2939 	NFE_UNLOCK(sc);
2940 }
2941 
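/*
 * Per-second callout: drive the MII state machine, update the hardware
 * statistics and run the Tx watchdog.
 */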
2942 void
2943 nfe_tick(void *xsc)
2944 {
2945 	struct nfe_softc *sc;
2946 	struct mii_data *mii;
2947 	if_t ifp;
2948 
2949 	sc = (struct nfe_softc *)xsc;
2950 
2951 	NFE_LOCK_ASSERT(sc);
2952 
2953 	ifp = sc->nfe_ifp;
2954 
2955 	mii = device_get_softc(sc->nfe_miibus);
2956 	mii_tick(mii);
2957 	nfe_stats_update(sc);
2958 	nfe_watchdog(ifp);
2959 	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2960 }
2961 
2962 static int
2963 nfe_shutdown(device_t dev)
2964 {
2965 
2966 	return (nfe_suspend(dev));
2967 }
2968 
2969 static void
2970 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
2971 {
2972 	uint32_t val;
2973 
2974 	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
2975 		val = NFE_READ(sc, NFE_MACADDR_LO);
2976 		addr[0] = (val >> 8) & 0xff;
2977 		addr[1] = (val & 0xff);
2978 
2979 		val = NFE_READ(sc, NFE_MACADDR_HI);
2980 		addr[2] = (val >> 24) & 0xff;
2981 		addr[3] = (val >> 16) & 0xff;
2982 		addr[4] = (val >>  8) & 0xff;
2983 		addr[5] = (val & 0xff);
2984 	} else {
2985 		val = NFE_READ(sc, NFE_MACADDR_LO);
2986 		addr[5] = (val >> 8) & 0xff;
2987 		addr[4] = (val & 0xff);
2988 
2989 		val = NFE_READ(sc, NFE_MACADDR_HI);
2990 		addr[3] = (val >> 24) & 0xff;
2991 		addr[2] = (val >> 16) & 0xff;
2992 		addr[1] = (val >>  8) & 0xff;
2993 		addr[0] = (val & 0xff);
2994 	}
2995 }
2996 
2997 static void
2998 nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
2999 {
3000 
3001 	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] <<  8 | addr[4]);
3002 	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
3003 	    addr[1] << 8 | addr[0]);
3004 }
3005 
3006 /*
3007  * Map a single buffer address.
3008  */
3009 
3010 static void
3011 nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3012 {
3013 	struct nfe_dmamap_arg *ctx;
3014 
3015 	if (error != 0)
3016 		return;
3017 
3018 	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
3019 
3020 	ctx = (struct nfe_dmamap_arg *)arg;
3021 	ctx->nfe_busaddr = segs[0].ds_addr;
3022 }
3023 
3024 static int
3025 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3026 {
3027 	int error, value;
3028 
3029 	if (!arg1)
3030 		return (EINVAL);
3031 	value = *(int *)arg1;
3032 	error = sysctl_handle_int(oidp, &value, 0, req);
3033 	if (error || !req->newptr)
3034 		return (error);
3035 	if (value < low || value > high)
3036 		return (EINVAL);
3037 	*(int *)arg1 = value;
3038 
3039 	return (0);
3040 }
3041 
3042 static int
3043 sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
3044 {
3045 
3046 	return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
3047 	    NFE_PROC_MAX));
3048 }
3049 
3050 #define	NFE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
3051 	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
3052 #define	NFE_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
3053 	    SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
3054 
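/*
 * Create the driver sysctl tree: the Rx process_limit tunable and, when the
 * chip provides MIB counters, the Rx/Tx MAC statistics nodes.
 */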
3055 static void
3056 nfe_sysctl_node(struct nfe_softc *sc)
3057 {
3058 	struct sysctl_ctx_list *ctx;
3059 	struct sysctl_oid_list *child, *parent;
3060 	struct sysctl_oid *tree;
3061 	struct nfe_hw_stats *stats;
3062 	int error;
3063 
3064 	stats = &sc->nfe_stats;
3065 	ctx = device_get_sysctl_ctx(sc->nfe_dev);
3066 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
3067 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
3068 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3069 	    &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
3070 	    "max number of Rx events to process");
3071 
3072 	sc->nfe_process_limit = NFE_PROC_DEFAULT;
3073 	error = resource_int_value(device_get_name(sc->nfe_dev),
3074 	    device_get_unit(sc->nfe_dev), "process_limit",
3075 	    &sc->nfe_process_limit);
3076 	if (error == 0) {
3077 		if (sc->nfe_process_limit < NFE_PROC_MIN ||
3078 		    sc->nfe_process_limit > NFE_PROC_MAX) {
3079 			device_printf(sc->nfe_dev,
3080 			    "process_limit value out of range; "
3081 			    "using default: %d\n", NFE_PROC_DEFAULT);
3082 			sc->nfe_process_limit = NFE_PROC_DEFAULT;
3083 		}
3084 	}
3085 
3086 	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3087 		return;
3088 
3089 	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
3090 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NFE statistics");
3091 	parent = SYSCTL_CHILDREN(tree);
3092 
3093 	/* Rx statistics. */
3094 	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
3095 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
3096 	child = SYSCTL_CHILDREN(tree);
3097 
3098 	NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
3099 	    &stats->rx_frame_errors, "Framing Errors");
3100 	NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
3101 	    &stats->rx_extra_bytes, "Extra Bytes");
3102 	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3103 	    &stats->rx_late_cols, "Late Collisions");
3104 	NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
3105 	    &stats->rx_runts, "Runts");
3106 	NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
3107 	    &stats->rx_jumbos, "Jumbos");
3108 	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
3109 	    &stats->rx_fifo_overuns, "FIFO Overruns");
3110 	NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
3111 	    &stats->rx_crc_errors, "CRC Errors");
3112 	NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
3113 	    &stats->rx_fae, "Frame Alignment Errors");
3114 	NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
3115 	    &stats->rx_len_errors, "Length Errors");
3116 	NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3117 	    &stats->rx_unicast, "Unicast Frames");
3118 	NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3119 	    &stats->rx_multicast, "Multicast Frames");
3120 	NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3121 	    &stats->rx_broadcast, "Broadcast Frames");
3122 	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3123 		NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3124 		    &stats->rx_octets, "Octets");
3125 		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3126 		    &stats->rx_pause, "Pause frames");
3127 		NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
3128 		    &stats->rx_drops, "Drop frames");
3129 	}
3130 
3131 	/* Tx statistics. */
3132 	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
3133 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
3134 	child = SYSCTL_CHILDREN(tree);
3135 	NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3136 	    &stats->tx_octets, "Octets");
3137 	NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
3138 	    &stats->tx_zero_rexmits, "Zero Retransmits");
3139 	NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
3140 	    &stats->tx_one_rexmits, "One Retransmits");
3141 	NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
3142 	    &stats->tx_multi_rexmits, "Multiple Retransmits");
3143 	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3144 	    &stats->tx_late_cols, "Late Collisions");
3145 	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
3146 	    &stats->tx_fifo_underuns, "FIFO Underruns");
3147 	NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
3148 	    &stats->tx_carrier_losts, "Carrier Losses");
3149 	NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
3150 	    &stats->tx_excess_deferals, "Excess Deferrals");
3151 	NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
3152 	    &stats->tx_retry_errors, "Retry Errors");
3153 	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3154 		NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
3155 		    &stats->tx_deferals, "Deferrals");
3156 		NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
3157 		    &stats->tx_frames, "Frames");
3158 		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3159 		    &stats->tx_pause, "Pause Frames");
3160 	}
3161 	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3162 		NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3163 		    &stats->tx_unicast, "Unicast Frames");
3164 		NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3165 		    &stats->tx_multicast, "Multicast Frames");
3166 		NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3167 		    &stats->tx_broadcast, "Broadcast Frames");
3168 	}
3169 }
3170 
3171 #undef NFE_SYSCTL_STAT_ADD32
3172 #undef NFE_SYSCTL_STAT_ADD64
3173 
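/*
 * Clear the hardware MIB counters by reading (and discarding) them.
 */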
3174 static void
3175 nfe_stats_clear(struct nfe_softc *sc)
3176 {
3177 	int i, mib_cnt;
3178 
3179 	if ((sc->nfe_flags & NFE_MIB_V1) != 0)
3180 		mib_cnt = NFE_NUM_MIB_STATV1;
3181 	else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
3182 		mib_cnt = NFE_NUM_MIB_STATV2;
3183 	else
3184 		return;
3185 
3186 	for (i = 0; i < mib_cnt; i++)
3187 		NFE_READ(sc, NFE_TX_OCTET + i * sizeof(uint32_t));
3188 
3189 	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3190 		NFE_READ(sc, NFE_TX_UNICAST);
3191 		NFE_READ(sc, NFE_TX_MULTICAST);
3192 		NFE_READ(sc, NFE_TX_BROADCAST);
3193 	}
3194 }
3195 
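/*
 * Accumulate the hardware MIB counters into the software statistics.
 */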
3196 static void
3197 nfe_stats_update(struct nfe_softc *sc)
3198 {
3199 	struct nfe_hw_stats *stats;
3200 
3201 	NFE_LOCK_ASSERT(sc);
3202 
3203 	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3204 		return;
3205 
3206 	stats = &sc->nfe_stats;
3207 	stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
3208 	stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
3209 	stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
3210 	stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
3211 	stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
3212 	stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
3213 	stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
3214 	stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
3215 	stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
3216 	stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
3217 	stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
3218 	stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
3219 	stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
3220 	stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
3221 	stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
3222 	stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
3223 	stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
3224 	stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
3225 	stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
3226 	stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
3227 	stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);
3228 
3229 	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3230 		stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
3231 		stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
3232 		stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
3233 		stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
3234 		stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
3235 		stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
3236 	}
3237 
3238 	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3239 		stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
3240 		stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
3241 		stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
3242 	}
3243 }
3244 
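/*
 * Try to establish a 10/100Mbps link for WOL.  If autonegotiation does not
 * yield one, force the MAC to 100Mbps full-duplex as a last resort.
 */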
3245 static void
3246 nfe_set_linkspeed(struct nfe_softc *sc)
3247 {
3248 	struct mii_softc *miisc;
3249 	struct mii_data *mii;
3250 	int aneg, i, phyno;
3251 
3252 	NFE_LOCK_ASSERT(sc);
3253 
3254 	mii = device_get_softc(sc->nfe_miibus);
3255 	mii_pollstat(mii);
3256 	aneg = 0;
3257 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3258 	    (IFM_ACTIVE | IFM_AVALID)) {
3259 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
3260 		case IFM_10_T:
3261 		case IFM_100_TX:
3262 			return;
3263 		case IFM_1000_T:
3264 			aneg++;
3265 			break;
3266 		default:
3267 			break;
3268 		}
3269 	}
3270 	miisc = LIST_FIRST(&mii->mii_phys);
3271 	phyno = miisc->mii_phy;
3272 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3273 		PHY_RESET(miisc);
3274 	nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0);
3275 	nfe_miibus_writereg(sc->nfe_dev, phyno,
3276 	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3277 	nfe_miibus_writereg(sc->nfe_dev, phyno,
3278 	    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
3279 	DELAY(1000);
3280 	if (aneg != 0) {
3281 		/*
3282 		 * Poll link state until nfe(4) gets a 10/100Mbps link.
3283 		 */
3284 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3285 			mii_pollstat(mii);
3286 			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3287 			    == (IFM_ACTIVE | IFM_AVALID)) {
3288 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
3289 				case IFM_10_T:
3290 				case IFM_100_TX:
3291 					nfe_mac_config(sc, mii);
3292 					return;
3293 				default:
3294 					break;
3295 				}
3296 			}
3297 			NFE_UNLOCK(sc);
3298 			pause("nfelnk", hz);
3299 			NFE_LOCK(sc);
3300 		}
3301 		if (i == MII_ANEGTICKS_GIGE)
3302 			device_printf(sc->nfe_dev,
3303 			    "establishing a link failed, WOL may not work!\n");
3304 	}
3305 	/*
3306 	 * No link, force MAC to have 100Mbps, full-duplex link.
3307 	 * This is the last resort and may/may not work.
3308 	 */
3309 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3310 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3311 	nfe_mac_config(sc, mii);
3312 }
3313 
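/*
 * Configure Wake On LAN: program the WOL control register, keep the
 * receiver running for magic packets and request PME# assertion through
 * the PCI power management registers.
 */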
3314 static void
3315 nfe_set_wol(struct nfe_softc *sc)
3316 {
3317 	if_t ifp;
3318 	uint32_t wolctl;
3319 	int pmc;
3320 	uint16_t pmstat;
3321 
3322 	NFE_LOCK_ASSERT(sc);
3323 
3324 	if (pci_find_cap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)
3325 		return;
3326 	ifp = sc->nfe_ifp;
3327 	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
3328 		wolctl = NFE_WOL_MAGIC;
3329 	else
3330 		wolctl = 0;
3331 	NFE_WRITE(sc, NFE_WOL_CTL, wolctl);
3332 	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) {
3333 		nfe_set_linkspeed(sc);
3334 		if ((sc->nfe_flags & NFE_PWR_MGMT) != 0)
3335 			NFE_WRITE(sc, NFE_PWR2_CTL,
3336 			    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS);
3337 		/* Enable RX. */
3338 		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0);
3339 		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0);
3340 		NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) |
3341 		    NFE_RX_START);
3342 	}
3343 	/* Request PME if WOL is requested. */
3344 	pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
3345 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3346 	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
3347 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3348 	pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
3349 }
3350