1 /*	$OpenBSD: if_nfe.c,v 1.110 2015/03/20 18:42:25 mpi Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
5  * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
21 
22 #include "bpfilter.h"
23 #include "vlan.h"
24 
25 #include <sys/param.h>
26 #include <sys/endian.h>
27 #include <sys/systm.h>
28 #include <sys/types.h>
29 #include <sys/sockio.h>
30 #include <sys/mbuf.h>
31 #include <sys/queue.h>
32 #include <sys/kernel.h>
33 #include <sys/device.h>
34 #include <sys/timeout.h>
35 #include <sys/socket.h>
36 
37 #include <machine/bus.h>
38 
39 #include <net/if.h>
40 #include <net/if_dl.h>
41 #include <net/if_media.h>
42 
43 #include <netinet/in.h>
44 #include <netinet/if_ether.h>
45 
46 #if NVLAN > 0
47 #include <net/if_types.h>
48 #include <net/if_vlan_var.h>
49 #endif
50 
51 #if NBPFILTER > 0
52 #include <net/bpf.h>
53 #endif
54 
55 #include <dev/mii/miivar.h>
56 
57 #include <dev/pci/pcireg.h>
58 #include <dev/pci/pcivar.h>
59 #include <dev/pci/pcidevs.h>
60 
61 #include <dev/pci/if_nfereg.h>
62 #include <dev/pci/if_nfevar.h>
63 
64 int	nfe_match(struct device *, void *, void *);
65 void	nfe_attach(struct device *, struct device *, void *);
66 int	nfe_activate(struct device *, int);
67 void	nfe_miibus_statchg(struct device *);
68 int	nfe_miibus_readreg(struct device *, int, int);
69 void	nfe_miibus_writereg(struct device *, int, int, int);
70 int	nfe_intr(void *);
71 int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
72 void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
73 void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
74 void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
75 void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
76 void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
77 void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
78 void	nfe_rxeof(struct nfe_softc *);
79 void	nfe_txeof(struct nfe_softc *);
80 int	nfe_encap(struct nfe_softc *, struct mbuf *);
81 void	nfe_start(struct ifnet *);
82 void	nfe_watchdog(struct ifnet *);
83 int	nfe_init(struct ifnet *);
84 void	nfe_stop(struct ifnet *, int);
85 int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
86 void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
87 void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
88 int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
89 void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
90 void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
91 int	nfe_ifmedia_upd(struct ifnet *);
92 void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
93 void	nfe_iff(struct nfe_softc *);
94 void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
95 void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
96 void	nfe_tick(void *);
97 #ifndef SMALL_KERNEL
98 int	nfe_wol(struct ifnet *, int);
99 #endif
100 
101 struct cfattach nfe_ca = {
102 	sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL,
103 	nfe_activate
104 };
105 
106 struct cfdriver nfe_cd = {
107 	NULL, "nfe", DV_IFNET
108 };
109 
110 #ifdef NFE_DEBUG
111 int nfedebug = 0;
112 #define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
113 #define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
114 #else
115 #define DPRINTF(x)
116 #define DPRINTFN(n,x)
117 #endif
118 
119 const struct pci_matchid nfe_devices[] = {
120 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
121 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
122 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
123 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
124 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
125 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
126 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
127 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
128 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
129 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
130 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
131 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
132 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
133 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
134 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
135 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
136 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
137 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
138 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
139 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
140 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
141 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
142 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
143 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
144 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
145 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
146 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
147 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
148 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
149 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
150 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
151 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
152 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
153 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
154 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
155 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
156 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
157 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
158 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
159 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
160 };
161 
162 int
163 nfe_match(struct device *dev, void *match, void *aux)
164 {
165 	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
166 	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
167 }
168 
169 int
170 nfe_activate(struct device *self, int act)
171 {
172 	struct nfe_softc *sc = (struct nfe_softc *)self;
173 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
174 	int rv = 0;
175 
176 	switch (act) {
177 	case DVACT_SUSPEND:
178 		if (ifp->if_flags & IFF_RUNNING)
179 			nfe_stop(ifp, 0);
180 		rv = config_activate_children(self, act);
181 		break;
182 	case DVACT_RESUME:
183 		if (ifp->if_flags & IFF_UP)
184 			nfe_init(ifp);
185 		break;
186 	default:
187 		rv = config_activate_children(self, act);
188 		break;
189 	}
190 	return (rv);
191 }
192 
193 
194 void
195 nfe_attach(struct device *parent, struct device *self, void *aux)
196 {
197 	struct nfe_softc *sc = (struct nfe_softc *)self;
198 	struct pci_attach_args *pa = aux;
199 	pci_chipset_tag_t pc = pa->pa_pc;
200 	pci_intr_handle_t ih;
201 	const char *intrstr;
202 	struct ifnet *ifp;
203 	bus_size_t memsize;
204 	pcireg_t memtype;
205 
206 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
207 	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
208 	    &sc->sc_memh, NULL, &memsize, 0)) {
209 		printf(": can't map mem space\n");
210 		return;
211 	}
212 
213 	if (pci_intr_map(pa, &ih) != 0) {
214 		printf(": can't map interrupt\n");
215 		return;
216 	}
217 
218 	intrstr = pci_intr_string(pc, ih);
219 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
220 	    sc->sc_dev.dv_xname);
221 	if (sc->sc_ih == NULL) {
222 		printf(": could not establish interrupt");
223 		if (intrstr != NULL)
224 			printf(" at %s", intrstr);
225 		printf("\n");
226 		return;
227 	}
228 	printf(": %s", intrstr);
229 
230 	sc->sc_dmat = pa->pa_dmat;
231 	sc->sc_flags = 0;
232 
233 	switch (PCI_PRODUCT(pa->pa_id)) {
234 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
235 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
236 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
237 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
238 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
239 		break;
240 	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
241 	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
242 		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
243 		break;
244 	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
245 	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
246 	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
247 	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
248 	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
249 	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
250 	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
251 	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
252 	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
253 	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
254 	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
255 	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
256 		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
257 		    NFE_PWR_MGMT;
258 		break;
259 	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
260 	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
261 	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
262 	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
263 		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
264 		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
265 		break;
266 	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
267 	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
268 	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
269 	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
270 	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
271 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
272 		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
273 		break;
274 	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
275 	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
276 	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
277 	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
278 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
279 		break;
280 	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
281 	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
282 	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
283 	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
284 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
285 		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
286 		break;
287 	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
288 	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
289 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
290 		    NFE_HW_VLAN | NFE_PWR_MGMT;
291 		break;
292 	}
293 
294 	if (sc->sc_flags & NFE_PWR_MGMT) {
295 		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
296 		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
297 		DELAY(100);
298 		NFE_WRITE(sc, NFE_MAC_RESET, 0);
299 		DELAY(100);
300 		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
301 		NFE_WRITE(sc, NFE_PWR2_CTL,
302 		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
303 	}
304 
305 	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
306 	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
307 
308 	/*
309 	 * Allocate Tx and Rx rings.
310 	 */
311 	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
312 		printf("%s: could not allocate Tx ring\n",
313 		    sc->sc_dev.dv_xname);
314 		return;
315 	}
316 
317 	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
318 		printf("%s: could not allocate Rx ring\n",
319 		    sc->sc_dev.dv_xname);
320 		nfe_free_tx_ring(sc, &sc->txq);
321 		return;
322 	}
323 
324 	ifp = &sc->sc_arpcom.ac_if;
325 	ifp->if_softc = sc;
326 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
327 	ifp->if_ioctl = nfe_ioctl;
328 	ifp->if_start = nfe_start;
329 	ifp->if_watchdog = nfe_watchdog;
330 	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
331 	IFQ_SET_READY(&ifp->if_snd);
332 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
333 
334 	ifp->if_capabilities = IFCAP_VLAN_MTU;
335 
336 #ifndef SMALL_KERNEL
337 	ifp->if_capabilities |= IFCAP_WOL;
338 	ifp->if_wol = nfe_wol;
339 	nfe_wol(ifp, 0);
340 #endif
341 
342 #if NVLAN > 0
343 	if (sc->sc_flags & NFE_HW_VLAN)
344 		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
345 #endif
346 
347 	if (sc->sc_flags & NFE_HW_CSUM) {
348 		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
349 		    IFCAP_CSUM_UDPv4;
350 	}
351 
352 	sc->sc_mii.mii_ifp = ifp;
353 	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
354 	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
355 	sc->sc_mii.mii_statchg = nfe_miibus_statchg;
356 
357 	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
358 	    nfe_ifmedia_sts);
359 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, 0);
360 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
361 		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
362 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
363 		    0, NULL);
364 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
365 	} else
366 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
367 
368 	if_attach(ifp);
369 	ether_ifattach(ifp);
370 
371 	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);
372 }
373 
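/*
 * Propagate the PHY's negotiated media to the MAC: program half/full
 * duplex and link speed into the PHY interface, seed, misc and link
 * speed registers.
 */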
374 void
375 nfe_miibus_statchg(struct device *dev)
376 {
377 	struct nfe_softc *sc = (struct nfe_softc *)dev;
378 	struct mii_data *mii = &sc->sc_mii;
379 	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
380 
381 	phy = NFE_READ(sc, NFE_PHY_IFACE);
382 	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
383 
384 	seed = NFE_READ(sc, NFE_RNDSEED);
385 	seed &= ~NFE_SEED_MASK;
386 
387 	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
388 		phy  |= NFE_PHY_HDX;	/* half-duplex */
389 		misc |= NFE_MISC1_HDX;
390 	}
391 
392 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
393 	case IFM_1000_T:	/* full-duplex only */
394 		link |= NFE_MEDIA_1000T;
395 		seed |= NFE_SEED_1000T;
396 		phy  |= NFE_PHY_1000T;
397 		break;
398 	case IFM_100_TX:
399 		link |= NFE_MEDIA_100TX;
400 		seed |= NFE_SEED_100TX;
401 		phy  |= NFE_PHY_100TX;
402 		break;
403 	case IFM_10_T:
404 		link |= NFE_MEDIA_10T;
405 		seed |= NFE_SEED_10T;
406 		break;
407 	}
408 
409 	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
410 
411 	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
412 	NFE_WRITE(sc, NFE_MISC1, misc);
413 	NFE_WRITE(sc, NFE_LINKSPEED, link);
414 }
415 
416 int
417 nfe_miibus_readreg(struct device *dev, int phy, int reg)
418 {
419 	struct nfe_softc *sc = (struct nfe_softc *)dev;
420 	uint32_t val;
421 	int ntries;
422 
423 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
424 
425 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
426 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
427 		DELAY(100);
428 	}
429 
430 	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
431 
432 	for (ntries = 0; ntries < 1000; ntries++) {
433 		DELAY(100);
434 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
435 			break;
436 	}
437 	if (ntries == 1000) {
438 		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
439 		    sc->sc_dev.dv_xname));
440 		return 0;
441 	}
442 
443 	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
444 		DPRINTFN(2, ("%s: could not read PHY\n",
445 		    sc->sc_dev.dv_xname));
446 		return 0;
447 	}
448 
449 	val = NFE_READ(sc, NFE_PHY_DATA);
450 	if (val != 0xffffffff && val != 0)
451 		sc->mii_phyaddr = phy;
452 
453 	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
454 	    sc->sc_dev.dv_xname, phy, reg, val));
455 
456 	return val;
457 }
458 
459 void
460 nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
461 {
462 	struct nfe_softc *sc = (struct nfe_softc *)dev;
463 	uint32_t ctl;
464 	int ntries;
465 
466 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
467 
468 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
469 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
470 		DELAY(100);
471 	}
472 
473 	NFE_WRITE(sc, NFE_PHY_DATA, val);
474 	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
475 	NFE_WRITE(sc, NFE_PHY_CTL, ctl);
476 
477 	for (ntries = 0; ntries < 1000; ntries++) {
478 		DELAY(100);
479 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
480 			break;
481 	}
482 #ifdef NFE_DEBUG
483 	if (nfedebug >= 2 && ntries == 1000)
484 		printf("could not write to PHY\n");
485 #endif
486 }
487 
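/*
 * Interrupt handler: acknowledge the interrupt sources we care about,
 * note link state changes, and service the Rx and Tx rings while the
 * interface is running.
 */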
488 int
489 nfe_intr(void *arg)
490 {
491 	struct nfe_softc *sc = arg;
492 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
493 	uint32_t r;
494 
495 	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
496 		return 0;	/* not for us */
497 	NFE_WRITE(sc, NFE_IRQ_STATUS, r);
498 
499 	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));
500 
501 	if (r & NFE_IRQ_LINK) {
502 		NFE_READ(sc, NFE_PHY_STATUS);
503 		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
504 		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
505 	}
506 
507 	if (ifp->if_flags & IFF_RUNNING) {
508 		/* check Rx ring */
509 		nfe_rxeof(sc);
510 
511 		/* check Tx ring */
512 		nfe_txeof(sc);
513 	}
514 
515 	return 1;
516 }
517 
518 int
519 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
520 {
521 	struct nfe_softc *sc = ifp->if_softc;
522 	struct ifaddr *ifa = (struct ifaddr *)data;
523 	struct ifreq *ifr = (struct ifreq *)data;
524 	int s, error = 0;
525 
526 	s = splnet();
527 
528 	switch (cmd) {
529 	case SIOCSIFADDR:
530 		ifp->if_flags |= IFF_UP;
531 		if (!(ifp->if_flags & IFF_RUNNING))
532 			nfe_init(ifp);
533 		if (ifa->ifa_addr->sa_family == AF_INET)
534 			arp_ifinit(&sc->sc_arpcom, ifa);
535 		break;
536 
537 	case SIOCSIFFLAGS:
538 		if (ifp->if_flags & IFF_UP) {
539 			if (ifp->if_flags & IFF_RUNNING)
540 				error = ENETRESET;
541 			else
542 				nfe_init(ifp);
543 		} else {
544 			if (ifp->if_flags & IFF_RUNNING)
545 				nfe_stop(ifp, 1);
546 		}
547 		break;
548 
549 	case SIOCSIFMEDIA:
550 	case SIOCGIFMEDIA:
551 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
552 		break;
553 
554 	default:
555 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
556 	}
557 
558 	if (error == ENETRESET) {
559 		if (ifp->if_flags & IFF_RUNNING)
560 			nfe_iff(sc);
561 		error = 0;
562 	}
563 
564 	splx(s);
565 	return error;
566 }
567 
568 void
569 nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
570 {
571 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
572 	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
573 	    sizeof (struct nfe_desc32), ops);
574 }
575 
576 void
577 nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
578 {
579 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
580 	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
581 	    sizeof (struct nfe_desc64), ops);
582 }
583 
584 void
585 nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
586 {
587 	if (end > start) {
588 		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
589 		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
590 		    (caddr_t)&sc->txq.desc32[end] -
591 		    (caddr_t)&sc->txq.desc32[start], ops);
592 		return;
593 	}
594 	/* sync from 'start' to end of ring */
595 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
596 	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
597 	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
598 	    (caddr_t)&sc->txq.desc32[start], ops);
599 
600 	/* sync from start of ring to 'end' */
601 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
602 	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
603 }
604 
605 void
606 nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
607 {
608 	if (end > start) {
609 		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
610 		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
611 		    (caddr_t)&sc->txq.desc64[end] -
612 		    (caddr_t)&sc->txq.desc64[start], ops);
613 		return;
614 	}
615 	/* sync from 'start' to end of ring */
616 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
617 	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
618 	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
619 	    (caddr_t)&sc->txq.desc64[start], ops);
620 
621 	/* sync from start of ring to 'end' */
622 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
623 	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
624 }
625 
626 void
627 nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
628 {
629 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
630 	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
631 	    sizeof (struct nfe_desc32), ops);
632 }
633 
634 void
635 nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
636 {
637 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
638 	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
639 	    sizeof (struct nfe_desc64), ops);
640 }
641 
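/*
 * Receive handler: drain completed Rx descriptors, replace each buffer
 * with a freshly allocated mbuf cluster, hand the old one up the stack,
 * then mark the descriptor ready again for the chip.
 */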
642 void
643 nfe_rxeof(struct nfe_softc *sc)
644 {
645 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
646 	struct nfe_desc32 *desc32;
647 	struct nfe_desc64 *desc64;
648 	struct nfe_rx_data *data;
649 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
650 	struct mbuf *m, *mnew;
651 	bus_addr_t physaddr;
652 #if NVLAN > 0
653 	uint32_t vtag;
654 #endif
655 	uint16_t flags;
656 	int error, len;
657 
658 	for (;;) {
659 		data = &sc->rxq.data[sc->rxq.cur];
660 
661 		if (sc->sc_flags & NFE_40BIT_ADDR) {
662 			desc64 = &sc->rxq.desc64[sc->rxq.cur];
663 			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
664 
665 			flags = letoh16(desc64->flags);
666 			len = letoh16(desc64->length) & 0x3fff;
667 #if NVLAN > 0
668 			vtag = letoh32(desc64->physaddr[1]);
669 #endif
670 		} else {
671 			desc32 = &sc->rxq.desc32[sc->rxq.cur];
672 			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
673 
674 			flags = letoh16(desc32->flags);
675 			len = letoh16(desc32->length) & 0x3fff;
676 		}
677 
678 		if (flags & NFE_RX_READY)
679 			break;
680 
681 		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
682 			if (!(flags & NFE_RX_VALID_V1))
683 				goto skip;
684 
685 			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
686 				flags &= ~NFE_RX_ERROR;
687 				len--;	/* fix buffer length */
688 			}
689 		} else {
690 			if (!(flags & NFE_RX_VALID_V2))
691 				goto skip;
692 
693 			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
694 				flags &= ~NFE_RX_ERROR;
695 				len--;	/* fix buffer length */
696 			}
697 		}
698 
699 		if (flags & NFE_RX_ERROR) {
700 			ifp->if_ierrors++;
701 			goto skip;
702 		}
703 
704 		/*
705 		 * Try to allocate a new mbuf for this ring element and load
706 		 * it before processing the current mbuf. If the ring element
707 		 * cannot be loaded, drop the received packet and reuse the
708 		 * old mbuf. In the unlikely case that the old mbuf can't be
709 		 * reloaded either, explicitly panic.
710 		 */
711 		mnew = MCLGETI(NULL, MCLBYTES, NULL, M_DONTWAIT);
712 		if (mnew == NULL) {
713 			ifp->if_ierrors++;
714 			goto skip;
715 		}
716 		mnew->m_pkthdr.len = mnew->m_len = MCLBYTES;
717 
718 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
719 		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
720 		bus_dmamap_unload(sc->sc_dmat, data->map);
721 
722 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, mnew,
723 		    BUS_DMA_READ | BUS_DMA_NOWAIT);
724 		if (error != 0) {
725 			m_freem(mnew);
726 
727 			/* try to reload the old mbuf */
728 			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map,
729 			    m, BUS_DMA_READ | BUS_DMA_NOWAIT);
730 			if (error != 0) {
731 				/* very unlikely that this will fail */
732 				panic("%s: could not load old rx mbuf",
733 				    sc->sc_dev.dv_xname);
734 			}
735 			ifp->if_ierrors++;
736 			goto skip;
737 		}
738 		physaddr = data->map->dm_segs[0].ds_addr;
739 
740 		/*
741 		 * New mbuf successfully loaded, update Rx ring and continue
742 		 * processing.
743 		 */
744 		m = data->m;
745 		data->m = mnew;
746 
747 		/* finalize mbuf */
748 		m->m_pkthdr.len = m->m_len = len;
749 
750 		if ((sc->sc_flags & NFE_HW_CSUM) &&
751 		    (flags & NFE_RX_IP_CSUMOK)) {
752 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
753 			if (flags & NFE_RX_UDP_CSUMOK)
754 				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
755 			if (flags & NFE_RX_TCP_CSUMOK)
756 				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
757 		}
758 
759 #if NVLAN > 0
760 		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
761 			m->m_pkthdr.ether_vtag = vtag & 0xffff;
762 			m->m_flags |= M_VLANTAG;
763 		}
764 #endif
765 
766 		ifp->if_ipackets++;
767 		ml_enqueue(&ml, m);
768 
769 		/* update mapping address in h/w descriptor */
770 		if (sc->sc_flags & NFE_40BIT_ADDR) {
771 #if defined(__LP64__)
772 			desc64->physaddr[0] = htole32(physaddr >> 32);
773 #endif
774 			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
775 		} else {
776 			desc32->physaddr = htole32(physaddr);
777 		}
778 
779 skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
780 			desc64->length = htole16(sc->rxq.bufsz);
781 			desc64->flags = htole16(NFE_RX_READY);
782 
783 			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
784 		} else {
785 			desc32->length = htole16(sc->rxq.bufsz);
786 			desc32->flags = htole16(NFE_RX_READY);
787 
788 			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
789 		}
790 
791 		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
792 	}
793 	if_input(ifp, &ml);
794 }
795 
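/*
 * Transmit completion handler: walk the Tx ring from 'next' to 'cur',
 * account for errors, unload and free completed mbuf chains, and restart
 * output if at least one slot was reclaimed.
 */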
796 void
797 nfe_txeof(struct nfe_softc *sc)
798 {
799 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
800 	struct nfe_desc32 *desc32;
801 	struct nfe_desc64 *desc64;
802 	struct nfe_tx_data *data = NULL;
803 	uint16_t flags;
804 
805 	while (sc->txq.next != sc->txq.cur) {
806 		if (sc->sc_flags & NFE_40BIT_ADDR) {
807 			desc64 = &sc->txq.desc64[sc->txq.next];
808 			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
809 
810 			flags = letoh16(desc64->flags);
811 		} else {
812 			desc32 = &sc->txq.desc32[sc->txq.next];
813 			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
814 
815 			flags = letoh16(desc32->flags);
816 		}
817 
818 		if (flags & NFE_TX_VALID)
819 			break;
820 
821 		data = &sc->txq.data[sc->txq.next];
822 
823 		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
824 			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
825 				goto skip;
826 
827 			if ((flags & NFE_TX_ERROR_V1) != 0) {
828 				printf("%s: tx v1 error %b\n",
829 				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
830 				ifp->if_oerrors++;
831 			} else
832 				ifp->if_opackets++;
833 		} else {
834 			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
835 				goto skip;
836 
837 			if ((flags & NFE_TX_ERROR_V2) != 0) {
838 				printf("%s: tx v2 error %b\n",
839 				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
840 				ifp->if_oerrors++;
841 			} else
842 				ifp->if_opackets++;
843 		}
844 
845 		if (data->m == NULL) {	/* should not get here */
846 			printf("%s: last fragment bit w/o associated mbuf!\n",
847 			    sc->sc_dev.dv_xname);
848 			goto skip;
849 		}
850 
851 		/* last fragment of the mbuf chain transmitted */
852 		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
853 		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
854 		bus_dmamap_unload(sc->sc_dmat, data->active);
855 		m_freem(data->m);
856 		data->m = NULL;
857 
858 		ifp->if_timer = 0;
859 
860 skip:		sc->txq.queued--;
861 		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
862 	}
863 
864 	if (data != NULL) {	/* at least one slot freed */
865 		ifp->if_flags &= ~IFF_OACTIVE;
866 		nfe_start(ifp);
867 	}
868 }
869 
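/*
 * Map an outgoing mbuf chain for DMA and fill one Tx descriptor per
 * segment.  The valid bit of the first descriptor is set last, so the
 * chip never sees a partially built chain.
 */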
870 int
871 nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
872 {
873 	struct nfe_desc32 *desc32;
874 	struct nfe_desc64 *desc64;
875 	struct nfe_tx_data *data;
876 	bus_dmamap_t map;
877 	uint16_t flags = 0;
878 	uint32_t vtag = 0;
879 	int error, i, first = sc->txq.cur;
880 
881 	map = sc->txq.data[first].map;
882 
883 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
884 	if (error != 0) {
885 		printf("%s: can't map mbuf (error %d)\n",
886 		    sc->sc_dev.dv_xname, error);
887 		return error;
888 	}
889 
890 	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
891 		bus_dmamap_unload(sc->sc_dmat, map);
892 		return ENOBUFS;
893 	}
894 
895 #if NVLAN > 0
896 	/* setup h/w VLAN tagging */
897 	if (m0->m_flags & M_VLANTAG)
898 		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
899 #endif
900 	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
901 		flags |= NFE_TX_IP_CSUM;
902 	if (m0->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
903 		flags |= NFE_TX_TCP_UDP_CSUM;
904 
905 	for (i = 0; i < map->dm_nsegs; i++) {
906 		data = &sc->txq.data[sc->txq.cur];
907 
908 		if (sc->sc_flags & NFE_40BIT_ADDR) {
909 			desc64 = &sc->txq.desc64[sc->txq.cur];
910 #if defined(__LP64__)
911 			desc64->physaddr[0] =
912 			    htole32(map->dm_segs[i].ds_addr >> 32);
913 #endif
914 			desc64->physaddr[1] =
915 			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
916 			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
917 			desc64->flags = htole16(flags);
918 			desc64->vtag = htole32(vtag);
919 		} else {
920 			desc32 = &sc->txq.desc32[sc->txq.cur];
921 
922 			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
923 			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
924 			desc32->flags = htole16(flags);
925 		}
926 
927 		if (map->dm_nsegs > 1) {
928 			/*
929 			 * Checksum flags and vtag belong to the first fragment
930 			 * only.
931 			 */
932 			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
933 			vtag = 0;
934 
935 			/*
936 			 * Setting of the valid bit in the first descriptor is
937 			 * deferred until the whole chain is fully set up.
938 			 */
939 			flags |= NFE_TX_VALID;
940 		}
941 
942 		sc->txq.queued++;
943 		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
944 	}
945 
946 	/* the whole mbuf chain has been set up */
947 	if (sc->sc_flags & NFE_40BIT_ADDR) {
948 		/* fix last descriptor */
949 		flags |= NFE_TX_LASTFRAG_V2;
950 		desc64->flags = htole16(flags);
951 
952 		/* finally, set the valid bit in the first descriptor */
953 		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
954 	} else {
955 		/* fix last descriptor */
956 		if (sc->sc_flags & NFE_JUMBO_SUP)
957 			flags |= NFE_TX_LASTFRAG_V2;
958 		else
959 			flags |= NFE_TX_LASTFRAG_V1;
960 		desc32->flags = htole16(flags);
961 
962 		/* finally, set the valid bit in the first descriptor */
963 		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
964 	}
965 
966 	data->m = m0;
967 	data->active = map;
968 
969 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
970 	    BUS_DMASYNC_PREWRITE);
971 
972 	return 0;
973 }
974 
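/*
 * Start output: dequeue packets from the send queue, encapsulate them
 * into the Tx ring, then kick the transmitter and arm the watchdog
 * timer.
 */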
975 void
976 nfe_start(struct ifnet *ifp)
977 {
978 	struct nfe_softc *sc = ifp->if_softc;
979 	int old = sc->txq.cur;
980 	struct mbuf *m0;
981 
982 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
983 		return;
984 
985 	for (;;) {
986 		IFQ_POLL(&ifp->if_snd, m0);
987 		if (m0 == NULL)
988 			break;
989 
990 		if (nfe_encap(sc, m0) != 0) {
991 			ifp->if_flags |= IFF_OACTIVE;
992 			break;
993 		}
994 
995 		/* packet put in h/w queue, remove from s/w queue */
996 		IFQ_DEQUEUE(&ifp->if_snd, m0);
997 
998 #if NBPFILTER > 0
999 		if (ifp->if_bpf != NULL)
1000 			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
1001 #endif
1002 	}
1003 	if (sc->txq.cur == old)	/* nothing sent */
1004 		return;
1005 
1006 	if (sc->sc_flags & NFE_40BIT_ADDR)
1007 		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
1008 	else
1009 		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
1010 
1011 	/* kick Tx */
1012 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
1013 
1014 	/*
1015 	 * Set a timeout in case the chip goes out to lunch.
1016 	 */
1017 	ifp->if_timer = 5;
1018 }
1019 
1020 void
1021 nfe_watchdog(struct ifnet *ifp)
1022 {
1023 	struct nfe_softc *sc = ifp->if_softc;
1024 
1025 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1026 
1027 	nfe_init(ifp);
1028 
1029 	ifp->if_oerrors++;
1030 }
1031 
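/*
 * Bring the interface up: reset the chip, program descriptor ring
 * addresses and sizes, restore the MAC address and receive filter,
 * start Rx/Tx and re-enable interrupts.
 */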
1032 int
1033 nfe_init(struct ifnet *ifp)
1034 {
1035 	struct nfe_softc *sc = ifp->if_softc;
1036 	uint32_t tmp;
1037 
1038 	nfe_stop(ifp, 0);
1039 
1040 	NFE_WRITE(sc, NFE_TX_UNK, 0);
1041 	NFE_WRITE(sc, NFE_STATUS, 0);
1042 
1043 	sc->rxtxctl = NFE_RXTX_BIT2;
1044 	if (sc->sc_flags & NFE_40BIT_ADDR)
1045 		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
1046 	else if (sc->sc_flags & NFE_JUMBO_SUP)
1047 		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
1048 
1049 	if (sc->sc_flags & NFE_HW_CSUM)
1050 		sc->rxtxctl |= NFE_RXTX_RXCSUM;
1051 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1052 		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
1053 
1054 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
1055 	DELAY(10);
1056 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1057 
1058 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1059 		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
1060 	else
1061 		NFE_WRITE(sc, NFE_VTAG_CTL, 0);
1062 
1063 	NFE_WRITE(sc, NFE_SETUP_R6, 0);
1064 
1065 	/* set MAC address */
1066 	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);
1067 
1068 	/* tell MAC where rings are in memory */
1069 #ifdef __LP64__
1070 	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
1071 #endif
1072 	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
1073 #ifdef __LP64__
1074 	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
1075 #endif
1076 	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);
1077 
1078 	NFE_WRITE(sc, NFE_RING_SIZE,
1079 	    (NFE_RX_RING_COUNT - 1) << 16 |
1080 	    (NFE_TX_RING_COUNT - 1));
1081 
1082 	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);
1083 
1084 	/* force MAC to wakeup */
1085 	tmp = NFE_READ(sc, NFE_PWR_STATE);
1086 	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
1087 	DELAY(10);
1088 	tmp = NFE_READ(sc, NFE_PWR_STATE);
1089 	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);
1090 
1091 #if 1
1092 	/* configure interrupts coalescing/mitigation */
1093 	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
1094 #else
1095 	/* no interrupt mitigation: one interrupt per packet */
1096 	NFE_WRITE(sc, NFE_IMTIMER, 970);
1097 #endif
1098 
1099 	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
1100 	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
1101 	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
1102 
1103 	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
1104 	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
1105 
1106 	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
1107 
1108 	sc->rxtxctl &= ~NFE_RXTX_BIT2;
1109 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1110 	DELAY(10);
1111 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
1112 
1113 	/* program promiscuous mode and multicast filters */
1114 	nfe_iff(sc);
1115 
1116 	nfe_ifmedia_upd(ifp);
1117 
1118 	/* enable Rx */
1119 	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
1120 
1121 	/* enable Tx */
1122 	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
1123 
1124 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1125 
1126 	/* enable interrupts */
1127 	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1128 
1129 	timeout_add_sec(&sc->sc_tick_ch, 1);
1130 
1131 	ifp->if_flags |= IFF_RUNNING;
1132 	ifp->if_flags &= ~IFF_OACTIVE;
1133 
1134 	return 0;
1135 }
1136 
1137 void
1138 nfe_stop(struct ifnet *ifp, int disable)
1139 {
1140 	struct nfe_softc *sc = ifp->if_softc;
1141 
1142 	timeout_del(&sc->sc_tick_ch);
1143 
1144 	ifp->if_timer = 0;
1145 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1146 
1147 	mii_down(&sc->sc_mii);
1148 
1149 	/* abort Tx */
1150 	NFE_WRITE(sc, NFE_TX_CTL, 0);
1151 
1152 	if ((sc->sc_flags & NFE_WOL) == 0) {
1153 		/* disable Rx */
1154 		NFE_WRITE(sc, NFE_RX_CTL, 0);
1155 
1156 		/* disable interrupts */
1157 		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1158 	}
1159 
1160 	/* reset Tx and Rx rings */
1161 	nfe_reset_tx_ring(sc, &sc->txq);
1162 	nfe_reset_rx_ring(sc, &sc->rxq);
1163 }
1164 
1165 int
1166 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1167 {
1168 	struct nfe_desc32 *desc32;
1169 	struct nfe_desc64 *desc64;
1170 	struct nfe_rx_data *data;
1171 	void **desc;
1172 	bus_addr_t physaddr;
1173 	int i, nsegs, error, descsize;
1174 
1175 	if (sc->sc_flags & NFE_40BIT_ADDR) {
1176 		desc = (void **)&ring->desc64;
1177 		descsize = sizeof (struct nfe_desc64);
1178 	} else {
1179 		desc = (void **)&ring->desc32;
1180 		descsize = sizeof (struct nfe_desc32);
1181 	}
1182 
1183 	ring->cur = ring->next = 0;
1184 	ring->bufsz = MCLBYTES;
1185 
1186 	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
1187 	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
1188 	if (error != 0) {
1189 		printf("%s: could not create desc DMA map\n",
1190 		    sc->sc_dev.dv_xname);
1191 		goto fail;
1192 	}
1193 
1194 	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
1195 	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1196 	if (error != 0) {
1197 		printf("%s: could not allocate DMA memory\n",
1198 		    sc->sc_dev.dv_xname);
1199 		goto fail;
1200 	}
1201 
1202 	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
1203 	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
1204 	if (error != 0) {
1205 		printf("%s: can't map desc DMA memory\n",
1206 		    sc->sc_dev.dv_xname);
1207 		goto fail;
1208 	}
1209 
1210 	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
1211 	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
1212 	if (error != 0) {
1213 		printf("%s: could not load desc DMA map\n",
1214 		    sc->sc_dev.dv_xname);
1215 		goto fail;
1216 	}
1217 	ring->physaddr = ring->map->dm_segs[0].ds_addr;
1218 
1219 	/*
1220 	 * Pre-allocate Rx buffers and populate Rx ring.
1221 	 */
1222 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1223 		data = &sc->rxq.data[i];
1224 
1225 		data->m = MCLGETI(NULL, MCLBYTES, NULL, M_DONTWAIT);
1226 		if (data->m == NULL) {
1227 			printf("%s: could not allocate rx mbuf\n",
1228 			    sc->sc_dev.dv_xname);
1229 			error = ENOMEM;
1230 			goto fail;
1231 		}
1232 		data->m->m_pkthdr.len = data->m->m_len = MCLBYTES;
1233 
1234 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1235 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
1236 		if (error != 0) {
1237 			printf("%s: could not create DMA map\n",
1238 			    sc->sc_dev.dv_xname);
1239 			goto fail;
1240 		}
1241 
1242 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, data->m,
1243 		    BUS_DMA_READ | BUS_DMA_NOWAIT);
1244 		if (error != 0) {
1245 			printf("%s: could not load rx buf DMA map\n",
1246 			    sc->sc_dev.dv_xname);
1247 			goto fail;
1248 		}
1249 		physaddr = data->map->dm_segs[0].ds_addr;
1250 
1251 		if (sc->sc_flags & NFE_40BIT_ADDR) {
1252 			desc64 = &sc->rxq.desc64[i];
1253 #if defined(__LP64__)
1254 			desc64->physaddr[0] = htole32(physaddr >> 32);
1255 #endif
1256 			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
1257 			desc64->length = htole16(sc->rxq.bufsz);
1258 			desc64->flags = htole16(NFE_RX_READY);
1259 		} else {
1260 			desc32 = &sc->rxq.desc32[i];
1261 			desc32->physaddr = htole32(physaddr);
1262 			desc32->length = htole16(sc->rxq.bufsz);
1263 			desc32->flags = htole16(NFE_RX_READY);
1264 		}
1265 	}
1266 
1267 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1268 	    BUS_DMASYNC_PREWRITE);
1269 
1270 	return 0;
1271 
1272 fail:	nfe_free_rx_ring(sc, ring);
1273 	return error;
1274 }
1275 
1276 void
1277 nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1278 {
1279 	int i;
1280 
1281 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1282 		if (sc->sc_flags & NFE_40BIT_ADDR) {
1283 			ring->desc64[i].length = htole16(ring->bufsz);
1284 			ring->desc64[i].flags = htole16(NFE_RX_READY);
1285 		} else {
1286 			ring->desc32[i].length = htole16(ring->bufsz);
1287 			ring->desc32[i].flags = htole16(NFE_RX_READY);
1288 		}
1289 	}
1290 
1291 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1292 	    BUS_DMASYNC_PREWRITE);
1293 
1294 	ring->cur = ring->next = 0;
1295 }
1296 
1297 void
1298 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1299 {
1300 	struct nfe_rx_data *data;
1301 	void *desc;
1302 	int i, descsize;
1303 
1304 	if (sc->sc_flags & NFE_40BIT_ADDR) {
1305 		desc = ring->desc64;
1306 		descsize = sizeof (struct nfe_desc64);
1307 	} else {
1308 		desc = ring->desc32;
1309 		descsize = sizeof (struct nfe_desc32);
1310 	}
1311 
1312 	if (desc != NULL) {
1313 		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
1314 		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1315 		bus_dmamap_unload(sc->sc_dmat, ring->map);
1316 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
1317 		    NFE_RX_RING_COUNT * descsize);
1318 		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
1319 	}
1320 
1321 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1322 		data = &ring->data[i];
1323 
1324 		if (data->map != NULL) {
1325 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1326 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1327 			bus_dmamap_unload(sc->sc_dmat, data->map);
1328 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1329 		}
1330 		if (data->m != NULL)
1331 			m_freem(data->m);
1332 	}
1333 }
1334 
1335 int
1336 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1337 {
1338 	int i, nsegs, error;
1339 	void **desc;
1340 	int descsize;
1341 
1342 	if (sc->sc_flags & NFE_40BIT_ADDR) {
1343 		desc = (void **)&ring->desc64;
1344 		descsize = sizeof (struct nfe_desc64);
1345 	} else {
1346 		desc = (void **)&ring->desc32;
1347 		descsize = sizeof (struct nfe_desc32);
1348 	}
1349 
1350 	ring->queued = 0;
1351 	ring->cur = ring->next = 0;
1352 
1353 	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
1354 	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
1355 
1356 	if (error != 0) {
1357 		printf("%s: could not create desc DMA map\n",
1358 		    sc->sc_dev.dv_xname);
1359 		goto fail;
1360 	}
1361 
1362 	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
1363 	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1364 	if (error != 0) {
1365 		printf("%s: could not allocate DMA memory\n",
1366 		    sc->sc_dev.dv_xname);
1367 		goto fail;
1368 	}
1369 
1370 	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
1371 	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
1372 	if (error != 0) {
1373 		printf("%s: can't map desc DMA memory\n",
1374 		    sc->sc_dev.dv_xname);
1375 		goto fail;
1376 	}
1377 
1378 	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
1379 	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
1380 	if (error != 0) {
1381 		printf("%s: could not load desc DMA map\n",
1382 		    sc->sc_dev.dv_xname);
1383 		goto fail;
1384 	}
1385 	ring->physaddr = ring->map->dm_segs[0].ds_addr;
1386 
1387 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1388 		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
1389 		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
1390 		    &ring->data[i].map);
1391 		if (error != 0) {
1392 			printf("%s: could not create DMA map\n",
1393 			    sc->sc_dev.dv_xname);
1394 			goto fail;
1395 		}
1396 	}
1397 
1398 	return 0;
1399 
1400 fail:	nfe_free_tx_ring(sc, ring);
1401 	return error;
1402 }
1403 
1404 void
1405 nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1406 {
1407 	struct nfe_tx_data *data;
1408 	int i;
1409 
1410 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1411 		if (sc->sc_flags & NFE_40BIT_ADDR)
1412 			ring->desc64[i].flags = 0;
1413 		else
1414 			ring->desc32[i].flags = 0;
1415 
1416 		data = &ring->data[i];
1417 
1418 		if (data->m != NULL) {
1419 			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
1420 			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1421 			bus_dmamap_unload(sc->sc_dmat, data->active);
1422 			m_freem(data->m);
1423 			data->m = NULL;
1424 		}
1425 	}
1426 
1427 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1428 	    BUS_DMASYNC_PREWRITE);
1429 
1430 	ring->queued = 0;
1431 	ring->cur = ring->next = 0;
1432 }
1433 
1434 void
1435 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1436 {
1437 	struct nfe_tx_data *data;
1438 	void *desc;
1439 	int i, descsize;
1440 
1441 	if (sc->sc_flags & NFE_40BIT_ADDR) {
1442 		desc = ring->desc64;
1443 		descsize = sizeof (struct nfe_desc64);
1444 	} else {
1445 		desc = ring->desc32;
1446 		descsize = sizeof (struct nfe_desc32);
1447 	}
1448 
1449 	if (desc != NULL) {
1450 		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
1451 		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1452 		bus_dmamap_unload(sc->sc_dmat, ring->map);
1453 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
1454 		    NFE_TX_RING_COUNT * descsize);
1455 		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
1456 	}
1457 
1458 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1459 		data = &ring->data[i];
1460 
1461 		if (data->m != NULL) {
1462 			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
1463 			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1464 			bus_dmamap_unload(sc->sc_dmat, data->active);
1465 			m_freem(data->m);
1466 		}
1467 	}
1468 
1469 	/* ...and now actually destroy the DMA mappings */
1470 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1471 		data = &ring->data[i];
1472 		if (data->map == NULL)
1473 			continue;
1474 		bus_dmamap_destroy(sc->sc_dmat, data->map);
1475 	}
1476 }
1477 
1478 int
1479 nfe_ifmedia_upd(struct ifnet *ifp)
1480 {
1481 	struct nfe_softc *sc = ifp->if_softc;
1482 	struct mii_data *mii = &sc->sc_mii;
1483 	struct mii_softc *miisc;
1484 
1485 	if (mii->mii_instance != 0) {
1486 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1487 			mii_phy_reset(miisc);
1488 	}
1489 	return mii_mediachg(mii);
1490 }
1491 
1492 void
1493 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1494 {
1495 	struct nfe_softc *sc = ifp->if_softc;
1496 	struct mii_data *mii = &sc->sc_mii;
1497 
1498 	mii_pollstat(mii);
1499 	ifmr->ifm_status = mii->mii_media_status;
1500 	ifmr->ifm_active = mii->mii_media_active;
1501 }
1502 
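/*
 * Program the receive filter: promiscuous or accept-all-multicast when
 * required, otherwise a combined address/mask pair covering broadcast
 * and all joined multicast groups.
 */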
1503 void
1504 nfe_iff(struct nfe_softc *sc)
1505 {
1506 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1507 	struct arpcom *ac = &sc->sc_arpcom;
1508 	struct ether_multi *enm;
1509 	struct ether_multistep step;
1510 	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
1511 	uint32_t filter;
1512 	int i;
1513 
1514 	filter = NFE_RXFILTER_MAGIC;
1515 	ifp->if_flags &= ~IFF_ALLMULTI;
1516 
1517 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1518 		ifp->if_flags |= IFF_ALLMULTI;
1519 		if (ifp->if_flags & IFF_PROMISC)
1520 			filter |= NFE_PROMISC;
1521 		else
1522 			filter |= NFE_U2M;
1523 		bzero(addr, ETHER_ADDR_LEN);
1524 		bzero(mask, ETHER_ADDR_LEN);
1525 	} else {
1526 		filter |= NFE_U2M;
1527 
1528 		bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
1529 		bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
1530 
1531 		ETHER_FIRST_MULTI(step, ac, enm);
1532 		while (enm != NULL) {
1533 			for (i = 0; i < ETHER_ADDR_LEN; i++) {
1534 				addr[i] &=  enm->enm_addrlo[i];
1535 				mask[i] &= ~enm->enm_addrlo[i];
1536 			}
1537 
1538 			ETHER_NEXT_MULTI(step, enm);
1539 		}
1540 
1541 		for (i = 0; i < ETHER_ADDR_LEN; i++)
1542 			mask[i] |= addr[i];
1543 	}
1544 
1545 	addr[0] |= 0x01;	/* make sure multicast bit is set */
1546 
1547 	NFE_WRITE(sc, NFE_MULTIADDR_HI,
1548 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1549 	NFE_WRITE(sc, NFE_MULTIADDR_LO,
1550 	    addr[5] <<  8 | addr[4]);
1551 	NFE_WRITE(sc, NFE_MULTIMASK_HI,
1552 	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
1553 	NFE_WRITE(sc, NFE_MULTIMASK_LO,
1554 	    mask[5] <<  8 | mask[4]);
1555 	NFE_WRITE(sc, NFE_RXFILTER, filter);
1556 }
1557 
1558 void
1559 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
1560 {
1561 	uint32_t tmp;
1562 
1563 	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
1564 		tmp = NFE_READ(sc, NFE_MACADDR_HI);
1565 		addr[0] = (tmp & 0xff);
1566 		addr[1] = (tmp >>  8) & 0xff;
1567 		addr[2] = (tmp >> 16) & 0xff;
1568 		addr[3] = (tmp >> 24) & 0xff;
1569 
1570 		tmp = NFE_READ(sc, NFE_MACADDR_LO);
1571 		addr[4] = (tmp & 0xff);
1572 		addr[5] = (tmp >> 8) & 0xff;
1573 
1574 	} else {
1575 		tmp = NFE_READ(sc, NFE_MACADDR_LO);
1576 		addr[0] = (tmp >> 8) & 0xff;
1577 		addr[1] = (tmp & 0xff);
1578 
1579 		tmp = NFE_READ(sc, NFE_MACADDR_HI);
1580 		addr[2] = (tmp >> 24) & 0xff;
1581 		addr[3] = (tmp >> 16) & 0xff;
1582 		addr[4] = (tmp >>  8) & 0xff;
1583 		addr[5] = (tmp & 0xff);
1584 	}
1585 }
1586 
1587 void
1588 nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
1589 {
1590 	NFE_WRITE(sc, NFE_MACADDR_LO,
1591 	    addr[5] <<  8 | addr[4]);
1592 	NFE_WRITE(sc, NFE_MACADDR_HI,
1593 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1594 }
1595 
1596 void
1597 nfe_tick(void *arg)
1598 {
1599 	struct nfe_softc *sc = arg;
1600 	int s;
1601 
1602 	s = splnet();
1603 	mii_tick(&sc->sc_mii);
1604 	splx(s);
1605 
1606 	timeout_add_sec(&sc->sc_tick_ch, 1);
1607 }
1608 
1609 #ifndef SMALL_KERNEL
1610 int
1611 nfe_wol(struct ifnet *ifp, int enable)
1612 {
1613 	struct nfe_softc *sc = ifp->if_softc;
1614 
1615 	if (enable) {
1616 		sc->sc_flags |= NFE_WOL;
1617 		NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);
1618 	} else {
1619 		sc->sc_flags &= ~NFE_WOL;
1620 		NFE_WRITE(sc, NFE_WOL_CTL, 0);
1621 	}
1622 
1623 	return 0;
1624 }
1625 #endif
1626