xref: /openbsd/sys/dev/pci/if_nfe.c (revision cca36db2)
1 /*	$OpenBSD: if_nfe.c,v 1.98 2011/04/05 18:01:21 henning Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
5  * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
21 
22 #include "bpfilter.h"
23 #include "vlan.h"
24 
25 #include <sys/param.h>
26 #include <sys/endian.h>
27 #include <sys/systm.h>
28 #include <sys/types.h>
29 #include <sys/sockio.h>
30 #include <sys/mbuf.h>
31 #include <sys/queue.h>
32 #include <sys/kernel.h>
33 #include <sys/device.h>
34 #include <sys/timeout.h>
35 #include <sys/socket.h>
36 
37 #include <machine/bus.h>
38 
39 #include <net/if.h>
40 #include <net/if_dl.h>
41 #include <net/if_media.h>
42 
43 #ifdef INET
44 #include <netinet/in.h>
45 #include <netinet/in_systm.h>
46 #include <netinet/in_var.h>
47 #include <netinet/ip.h>
48 #include <netinet/if_ether.h>
49 #endif
50 
51 #if NVLAN > 0
52 #include <net/if_types.h>
53 #include <net/if_vlan_var.h>
54 #endif
55 
56 #if NBPFILTER > 0
57 #include <net/bpf.h>
58 #endif
59 
60 #include <dev/mii/mii.h>
61 #include <dev/mii/miivar.h>
62 
63 #include <dev/pci/pcireg.h>
64 #include <dev/pci/pcivar.h>
65 #include <dev/pci/pcidevs.h>
66 
67 #include <dev/pci/if_nfereg.h>
68 #include <dev/pci/if_nfevar.h>
69 
70 int	nfe_match(struct device *, void *, void *);
71 void	nfe_attach(struct device *, struct device *, void *);
72 int	nfe_activate(struct device *, int);
73 void	nfe_miibus_statchg(struct device *);
74 int	nfe_miibus_readreg(struct device *, int, int);
75 void	nfe_miibus_writereg(struct device *, int, int, int);
76 int	nfe_intr(void *);
77 int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
78 void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
79 void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
80 void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
81 void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
82 void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
83 void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
84 void	nfe_rxeof(struct nfe_softc *);
85 void	nfe_txeof(struct nfe_softc *);
86 int	nfe_encap(struct nfe_softc *, struct mbuf *);
87 void	nfe_start(struct ifnet *);
88 void	nfe_watchdog(struct ifnet *);
89 int	nfe_init(struct ifnet *);
90 void	nfe_stop(struct ifnet *, int);
91 struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
92 void	nfe_jfree(caddr_t, u_int, void *);
93 int	nfe_jpool_alloc(struct nfe_softc *);
94 void	nfe_jpool_free(struct nfe_softc *);
95 int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
96 void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
97 void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
98 int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
99 void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
100 void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
101 int	nfe_ifmedia_upd(struct ifnet *);
102 void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
103 void	nfe_setmulti(struct nfe_softc *);
104 void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
105 void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
106 void	nfe_tick(void *);
107 
108 struct cfattach nfe_ca = {
109 	sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL,
110 	nfe_activate
111 };
112 
113 struct cfdriver nfe_cd = {
114 	NULL, "nfe", DV_IFNET
115 };
116 
117 #ifdef NFE_DEBUG
118 int nfedebug = 0;
119 #define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
120 #define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
121 #else
122 #define DPRINTF(x)
123 #define DPRINTFN(n,x)
124 #endif
125 
126 const struct pci_matchid nfe_devices[] = {
127 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
128 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
129 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
130 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
131 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
132 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
133 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
134 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
135 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
136 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
137 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
138 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
139 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
140 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
141 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
142 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
143 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
144 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
145 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
146 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
147 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
148 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
149 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
150 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
151 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
152 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
153 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
154 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
155 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
156 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
157 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
158 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
159 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
160 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
161 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
162 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
163 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
164 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
165 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
166 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
167 };
168 
169 int
170 nfe_match(struct device *dev, void *match, void *aux)
171 {
172 	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
173 	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
174 }
175 
176 int
177 nfe_activate(struct device *self, int act)
178 {
179 	struct nfe_softc *sc = (struct nfe_softc *)self;
180 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
181 	int rv = 0;
182 
183 	switch (act) {
184 	case DVACT_QUIESCE:
185 		rv = config_activate_children(self, act);
186 		break;
187 	case DVACT_SUSPEND:
188 		if (ifp->if_flags & IFF_RUNNING)
189 			nfe_stop(ifp, 0);
190 		rv = config_activate_children(self, act);
191 		break;
192 	case DVACT_RESUME:
193 		rv = config_activate_children(self, act);
194 		if (ifp->if_flags & IFF_UP)
195 			nfe_init(ifp);
196 		break;
197 	}
198 	return (rv);
199 }
200 
201 
202 void
203 nfe_attach(struct device *parent, struct device *self, void *aux)
204 {
205 	struct nfe_softc *sc = (struct nfe_softc *)self;
206 	struct pci_attach_args *pa = aux;
207 	pci_chipset_tag_t pc = pa->pa_pc;
208 	pci_intr_handle_t ih;
209 	const char *intrstr;
210 	struct ifnet *ifp;
211 	bus_size_t memsize;
212 	pcireg_t memtype;
213 
214 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
215 	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
216 	    &sc->sc_memh, NULL, &memsize, 0)) {
217 		printf(": can't map mem space\n");
218 		return;
219 	}
220 
221 	if (pci_intr_map(pa, &ih) != 0) {
222 		printf(": can't map interrupt\n");
223 		return;
224 	}
225 
226 	intrstr = pci_intr_string(pc, ih);
227 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
228 	    sc->sc_dev.dv_xname);
229 	if (sc->sc_ih == NULL) {
230 		printf(": could not establish interrupt");
231 		if (intrstr != NULL)
232 			printf(" at %s", intrstr);
233 		printf("\n");
234 		return;
235 	}
236 	printf(": %s", intrstr);
237 
238 	sc->sc_dmat = pa->pa_dmat;
239 	sc->sc_flags = 0;
240 
241 	switch (PCI_PRODUCT(pa->pa_id)) {
242 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
243 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
244 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
245 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
246 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
247 		break;
248 	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
249 	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
250 		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
251 		break;
252 	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
253 	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
254 	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
255 	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
256 	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
257 	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
258 	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
259 	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
260 	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
261 	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
262 	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
263 	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
264 		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
265 		    NFE_PWR_MGMT;
266 		break;
267 	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
268 	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
269 	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
270 	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
271 		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
272 		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
273 		break;
274 	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
275 	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
276 	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
277 	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
278 	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
279 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
280 		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
281 		break;
282 	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
283 	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
284 	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
285 	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
286 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
287 		break;
288 	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
289 	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
290 	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
291 	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
292 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
293 		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
294 		break;
295 	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
296 	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
297 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
298 		    NFE_HW_VLAN | NFE_PWR_MGMT;
299 		break;
300 	}
301 
302 	if (sc->sc_flags & NFE_PWR_MGMT) {
303 		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
304 		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
305 		DELAY(100);
306 		NFE_WRITE(sc, NFE_MAC_RESET, 0);
307 		DELAY(100);
308 		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
309 		NFE_WRITE(sc, NFE_PWR2_CTL,
310 		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
311 	}
312 
313 #ifdef notyet
314 	/* enable jumbo frames for adapters that support them */
315 	if (sc->sc_flags & NFE_JUMBO_SUP)
316 		sc->sc_flags |= NFE_USE_JUMBO;
317 #endif
318 
319 	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
320 	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
321 
322 	/*
323 	 * Allocate Tx and Rx rings.
324 	 */
325 	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
326 		printf("%s: could not allocate Tx ring\n",
327 		    sc->sc_dev.dv_xname);
328 		return;
329 	}
330 
331 	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
332 		printf("%s: could not allocate Rx ring\n",
333 		    sc->sc_dev.dv_xname);
334 		nfe_free_tx_ring(sc, &sc->txq);
335 		return;
336 	}
337 
338 	ifp = &sc->sc_arpcom.ac_if;
339 	ifp->if_softc = sc;
340 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
341 	ifp->if_ioctl = nfe_ioctl;
342 	ifp->if_start = nfe_start;
343 	ifp->if_watchdog = nfe_watchdog;
344 	ifp->if_baudrate = IF_Gbps(1);
345 	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
346 	IFQ_SET_READY(&ifp->if_snd);
347 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
348 
349 	ifp->if_capabilities = IFCAP_VLAN_MTU;
350 
351 	if (sc->sc_flags & NFE_USE_JUMBO)
352 		ifp->if_hardmtu = NFE_JUMBO_MTU;
353 
354 #if NVLAN > 0
355 	if (sc->sc_flags & NFE_HW_VLAN)
356 		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
357 #endif
358 
359 	if (sc->sc_flags & NFE_HW_CSUM) {
360 		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
361 		    IFCAP_CSUM_UDPv4;
362 	}
363 
364 	sc->sc_mii.mii_ifp = ifp;
365 	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
366 	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
367 	sc->sc_mii.mii_statchg = nfe_miibus_statchg;
368 
369 	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
370 	    nfe_ifmedia_sts);
371 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, 0);
372 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
373 		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
374 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
375 		    0, NULL);
376 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
377 	} else
378 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
379 
380 	if_attach(ifp);
381 	ether_ifattach(ifp);
382 
383 	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);
384 }
385 
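/*
 * Called by the MII layer whenever the negotiated media changes;
 * propagate the new speed and duplex settings from the PHY to the MAC.
 */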
386 void
387 nfe_miibus_statchg(struct device *dev)
388 {
389 	struct nfe_softc *sc = (struct nfe_softc *)dev;
390 	struct mii_data *mii = &sc->sc_mii;
391 	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
392 
393 	phy = NFE_READ(sc, NFE_PHY_IFACE);
394 	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
395 
396 	seed = NFE_READ(sc, NFE_RNDSEED);
397 	seed &= ~NFE_SEED_MASK;
398 
399 	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
400 		phy  |= NFE_PHY_HDX;	/* half-duplex */
401 		misc |= NFE_MISC1_HDX;
402 	}
403 
404 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
405 	case IFM_1000_T:	/* full-duplex only */
406 		link |= NFE_MEDIA_1000T;
407 		seed |= NFE_SEED_1000T;
408 		phy  |= NFE_PHY_1000T;
409 		break;
410 	case IFM_100_TX:
411 		link |= NFE_MEDIA_100TX;
412 		seed |= NFE_SEED_100TX;
413 		phy  |= NFE_PHY_100TX;
414 		break;
415 	case IFM_10_T:
416 		link |= NFE_MEDIA_10T;
417 		seed |= NFE_SEED_10T;
418 		break;
419 	}
420 
421 	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
422 
423 	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
424 	NFE_WRITE(sc, NFE_MISC1, misc);
425 	NFE_WRITE(sc, NFE_LINKSPEED, link);
426 }
427 
428 int
429 nfe_miibus_readreg(struct device *dev, int phy, int reg)
430 {
431 	struct nfe_softc *sc = (struct nfe_softc *)dev;
432 	uint32_t val;
433 	int ntries;
434 
435 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
436 
437 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
438 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
439 		DELAY(100);
440 	}
441 
442 	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
443 
444 	for (ntries = 0; ntries < 1000; ntries++) {
445 		DELAY(100);
446 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
447 			break;
448 	}
449 	if (ntries == 1000) {
450 		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
451 		    sc->sc_dev.dv_xname));
452 		return 0;
453 	}
454 
455 	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
456 		DPRINTFN(2, ("%s: could not read PHY\n",
457 		    sc->sc_dev.dv_xname));
458 		return 0;
459 	}
460 
461 	val = NFE_READ(sc, NFE_PHY_DATA);
462 	if (val != 0xffffffff && val != 0)
463 		sc->mii_phyaddr = phy;
464 
465 	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
466 	    sc->sc_dev.dv_xname, phy, reg, val));
467 
468 	return val;
469 }
470 
471 void
472 nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
473 {
474 	struct nfe_softc *sc = (struct nfe_softc *)dev;
475 	uint32_t ctl;
476 	int ntries;
477 
478 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
479 
480 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
481 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
482 		DELAY(100);
483 	}
484 
485 	NFE_WRITE(sc, NFE_PHY_DATA, val);
486 	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
487 	NFE_WRITE(sc, NFE_PHY_CTL, ctl);
488 
489 	for (ntries = 0; ntries < 1000; ntries++) {
490 		DELAY(100);
491 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
492 			break;
493 	}
494 #ifdef NFE_DEBUG
495 	if (nfedebug >= 2 && ntries == 1000)
496 		printf("could not write to PHY\n");
497 #endif
498 }
499 
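/*
 * Interrupt handler: acknowledge the interrupt causes we care about,
 * note link state changes and, if the interface is running, process
 * the Rx and Tx rings.
 */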
500 int
501 nfe_intr(void *arg)
502 {
503 	struct nfe_softc *sc = arg;
504 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
505 	uint32_t r;
506 
507 	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
508 		return 0;	/* not for us */
509 	NFE_WRITE(sc, NFE_IRQ_STATUS, r);
510 
511 	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));
512 
513 	if (r & NFE_IRQ_LINK) {
514 		NFE_READ(sc, NFE_PHY_STATUS);
515 		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
516 		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
517 	}
518 
519 	if (ifp->if_flags & IFF_RUNNING) {
520 		/* check Rx ring */
521 		nfe_rxeof(sc);
522 
523 		/* check Tx ring */
524 		nfe_txeof(sc);
525 	}
526 
527 	return 1;
528 }
529 
530 int
531 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
532 {
533 	struct nfe_softc *sc = ifp->if_softc;
534 	struct ifaddr *ifa = (struct ifaddr *)data;
535 	struct ifreq *ifr = (struct ifreq *)data;
536 	int s, error = 0;
537 
538 	s = splnet();
539 
540 	switch (cmd) {
541 	case SIOCSIFADDR:
542 		ifp->if_flags |= IFF_UP;
543 		if (!(ifp->if_flags & IFF_RUNNING))
544 			nfe_init(ifp);
545 #ifdef INET
546 		if (ifa->ifa_addr->sa_family == AF_INET)
547 			arp_ifinit(&sc->sc_arpcom, ifa);
548 #endif
549 		break;
550 
551 	case SIOCSIFFLAGS:
552 		if (ifp->if_flags & IFF_UP) {
553 			/*
554 			 * If only the PROMISC or ALLMULTI flag changes, then
555 			 * don't do a full re-init of the chip, just update
556 			 * the Rx filter.
557 			 */
558 			if ((ifp->if_flags & IFF_RUNNING) &&
559 			    ((ifp->if_flags ^ sc->sc_if_flags) &
560 			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
561 				nfe_setmulti(sc);
562 			} else {
563 				if (!(ifp->if_flags & IFF_RUNNING))
564 					nfe_init(ifp);
565 			}
566 		} else {
567 			if (ifp->if_flags & IFF_RUNNING)
568 				nfe_stop(ifp, 1);
569 		}
570 		sc->sc_if_flags = ifp->if_flags;
571 		break;
572 
573 	case SIOCSIFMEDIA:
574 	case SIOCGIFMEDIA:
575 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
576 		break;
577 
578 	default:
579 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
580 	}
581 
582 	if (error == ENETRESET) {
583 		if (ifp->if_flags & IFF_RUNNING)
584 			nfe_setmulti(sc);
585 		error = 0;
586 	}
587 
588 	splx(s);
589 	return error;
590 }
591 
592 void
593 nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
594 {
595 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
596 	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
597 	    sizeof (struct nfe_desc32), ops);
598 }
599 
600 void
601 nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
602 {
603 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
604 	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
605 	    sizeof (struct nfe_desc64), ops);
606 }
607 
608 void
609 nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
610 {
611 	if (end > start) {
612 		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
613 		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
614 		    (caddr_t)&sc->txq.desc32[end] -
615 		    (caddr_t)&sc->txq.desc32[start], ops);
616 		return;
617 	}
618 	/* sync from 'start' to end of ring */
619 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
620 	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
621 	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
622 	    (caddr_t)&sc->txq.desc32[start], ops);
623 
624 	/* sync from start of ring to 'end' */
625 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
626 	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
627 }
628 
629 void
630 nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
631 {
632 	if (end > start) {
633 		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
634 		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
635 		    (caddr_t)&sc->txq.desc64[end] -
636 		    (caddr_t)&sc->txq.desc64[start], ops);
637 		return;
638 	}
639 	/* sync from 'start' to end of ring */
640 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
641 	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
642 	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
643 	    (caddr_t)&sc->txq.desc64[start], ops);
644 
645 	/* sync from start of ring to 'end' */
646 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
647 	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
648 }
649 
650 void
651 nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
652 {
653 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
654 	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
655 	    sizeof (struct nfe_desc32), ops);
656 }
657 
658 void
659 nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
660 {
661 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
662 	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
663 	    sizeof (struct nfe_desc64), ops);
664 }
665 
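/*
 * Process completed Rx descriptors.  A replacement buffer is allocated
 * and loaded before the received frame is handed to the stack; if that
 * fails the frame is dropped and the old buffer is reused, so the ring
 * never loses a slot.
 */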
666 void
667 nfe_rxeof(struct nfe_softc *sc)
668 {
669 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
670 	struct nfe_desc32 *desc32;
671 	struct nfe_desc64 *desc64;
672 	struct nfe_rx_data *data;
673 	struct nfe_jbuf *jbuf;
674 	struct mbuf *m, *mnew;
675 	bus_addr_t physaddr;
676 #if NVLAN > 0
677 	uint32_t vtag;
678 #endif
679 	uint16_t flags;
680 	int error, len;
681 
682 	for (;;) {
683 		data = &sc->rxq.data[sc->rxq.cur];
684 
685 		if (sc->sc_flags & NFE_40BIT_ADDR) {
686 			desc64 = &sc->rxq.desc64[sc->rxq.cur];
687 			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
688 
689 			flags = letoh16(desc64->flags);
690 			len = letoh16(desc64->length) & 0x3fff;
691 #if NVLAN > 0
692 			vtag = letoh32(desc64->physaddr[1]);
693 #endif
694 		} else {
695 			desc32 = &sc->rxq.desc32[sc->rxq.cur];
696 			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
697 
698 			flags = letoh16(desc32->flags);
699 			len = letoh16(desc32->length) & 0x3fff;
700 		}
701 
702 		if (flags & NFE_RX_READY)
703 			break;
704 
705 		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
706 			if (!(flags & NFE_RX_VALID_V1))
707 				goto skip;
708 
709 			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
710 				flags &= ~NFE_RX_ERROR;
711 				len--;	/* fix buffer length */
712 			}
713 		} else {
714 			if (!(flags & NFE_RX_VALID_V2))
715 				goto skip;
716 
717 			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
718 				flags &= ~NFE_RX_ERROR;
719 				len--;	/* fix buffer length */
720 			}
721 		}
722 
723 		if (flags & NFE_RX_ERROR) {
724 			ifp->if_ierrors++;
725 			goto skip;
726 		}
727 
728 		/*
729 		 * Try to allocate a new mbuf for this ring element and load
730 		 * it before processing the current mbuf. If the ring element
731 		 * cannot be loaded, drop the received packet and reuse the
732 		 * old mbuf. In the unlikely case that the old mbuf can't be
733 		 * reloaded either, explicitly panic.
734 		 */
735 		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
736 		if (mnew == NULL) {
737 			ifp->if_ierrors++;
738 			goto skip;
739 		}
740 
741 		if (sc->sc_flags & NFE_USE_JUMBO) {
742 			if ((jbuf = nfe_jalloc(sc)) == NULL) {
743 				m_freem(mnew);
744 				ifp->if_ierrors++;
745 				goto skip;
746 			}
747 			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);
748 
749 			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
750 			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
751 			    BUS_DMASYNC_POSTREAD);
752 
753 			physaddr = jbuf->physaddr;
754 		} else {
755 			MCLGET(mnew, M_DONTWAIT);
756 			if (!(mnew->m_flags & M_EXT)) {
757 				m_freem(mnew);
758 				ifp->if_ierrors++;
759 				goto skip;
760 			}
761 
762 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
763 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
764 			bus_dmamap_unload(sc->sc_dmat, data->map);
765 
766 			error = bus_dmamap_load(sc->sc_dmat, data->map,
767 			    mtod(mnew, void *), MCLBYTES, NULL,
768 			    BUS_DMA_READ | BUS_DMA_NOWAIT);
769 			if (error != 0) {
770 				m_freem(mnew);
771 
772 				/* try to reload the old mbuf */
773 				error = bus_dmamap_load(sc->sc_dmat, data->map,
774 				    mtod(data->m, void *), MCLBYTES, NULL,
775 				    BUS_DMA_READ | BUS_DMA_NOWAIT);
776 				if (error != 0) {
777 					/* very unlikely that it will fail.. */
778 					panic("%s: could not load old rx mbuf",
779 					    sc->sc_dev.dv_xname);
780 				}
781 				ifp->if_ierrors++;
782 				goto skip;
783 			}
784 			physaddr = data->map->dm_segs[0].ds_addr;
785 		}
786 
787 		/*
788 		 * New mbuf successfully loaded, update Rx ring and continue
789 		 * processing.
790 		 */
791 		m = data->m;
792 		data->m = mnew;
793 
794 		/* finalize mbuf */
795 		m->m_pkthdr.len = m->m_len = len;
796 		m->m_pkthdr.rcvif = ifp;
797 
798 		if ((sc->sc_flags & NFE_HW_CSUM) &&
799 		    (flags & NFE_RX_IP_CSUMOK)) {
800 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
801 			if (flags & NFE_RX_UDP_CSUMOK)
802 				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
803 			if (flags & NFE_RX_TCP_CSUMOK)
804 				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
805 		}
806 
807 #if NVLAN > 0
808 		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
809 			m->m_pkthdr.ether_vtag = vtag & 0xffff;
810 			m->m_flags |= M_VLANTAG;
811 		}
812 #endif
813 
814 #if NBPFILTER > 0
815 		if (ifp->if_bpf)
816 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
817 #endif
818 		ifp->if_ipackets++;
819 		ether_input_mbuf(ifp, m);
820 
821 		/* update mapping address in h/w descriptor */
822 		if (sc->sc_flags & NFE_40BIT_ADDR) {
823 #if defined(__LP64__)
824 			desc64->physaddr[0] = htole32(physaddr >> 32);
825 #endif
826 			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
827 		} else {
828 			desc32->physaddr = htole32(physaddr);
829 		}
830 
831 skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
832 			desc64->length = htole16(sc->rxq.bufsz);
833 			desc64->flags = htole16(NFE_RX_READY);
834 
835 			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
836 		} else {
837 			desc32->length = htole16(sc->rxq.bufsz);
838 			desc32->flags = htole16(NFE_RX_READY);
839 
840 			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
841 		}
842 
843 		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
844 	}
845 }
846 
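/*
 * Reclaim descriptors the chip has finished transmitting: free the
 * associated mbuf chains and restart output if at least one Tx slot
 * was freed.
 */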
847 void
848 nfe_txeof(struct nfe_softc *sc)
849 {
850 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
851 	struct nfe_desc32 *desc32;
852 	struct nfe_desc64 *desc64;
853 	struct nfe_tx_data *data = NULL;
854 	uint16_t flags;
855 
856 	while (sc->txq.next != sc->txq.cur) {
857 		if (sc->sc_flags & NFE_40BIT_ADDR) {
858 			desc64 = &sc->txq.desc64[sc->txq.next];
859 			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
860 
861 			flags = letoh16(desc64->flags);
862 		} else {
863 			desc32 = &sc->txq.desc32[sc->txq.next];
864 			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
865 
866 			flags = letoh16(desc32->flags);
867 		}
868 
869 		if (flags & NFE_TX_VALID)
870 			break;
871 
872 		data = &sc->txq.data[sc->txq.next];
873 
874 		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
875 			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
876 				goto skip;
877 
878 			if ((flags & NFE_TX_ERROR_V1) != 0) {
879 				printf("%s: tx v1 error %b\n",
880 				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
881 				ifp->if_oerrors++;
882 			} else
883 				ifp->if_opackets++;
884 		} else {
885 			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
886 				goto skip;
887 
888 			if ((flags & NFE_TX_ERROR_V2) != 0) {
889 				printf("%s: tx v2 error %b\n",
890 				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
891 				ifp->if_oerrors++;
892 			} else
893 				ifp->if_opackets++;
894 		}
895 
896 		if (data->m == NULL) {	/* should not get here */
897 			printf("%s: last fragment bit w/o associated mbuf!\n",
898 			    sc->sc_dev.dv_xname);
899 			goto skip;
900 		}
901 
902 		/* last fragment of the mbuf chain transmitted */
903 		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
904 		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
905 		bus_dmamap_unload(sc->sc_dmat, data->active);
906 		m_freem(data->m);
907 		data->m = NULL;
908 
909 		ifp->if_timer = 0;
910 
911 skip:		sc->txq.queued--;
912 		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
913 	}
914 
915 	if (data != NULL) {	/* at least one slot freed */
916 		ifp->if_flags &= ~IFF_OACTIVE;
917 		nfe_start(ifp);
918 	}
919 }
920 
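/*
 * Map the outgoing mbuf chain with bus_dma and build one Tx descriptor
 * per segment.  The valid bit of the first descriptor is set only once
 * the whole chain has been set up, so the chip never sees a partially
 * built chain.
 */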
921 int
922 nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
923 {
924 	struct nfe_desc32 *desc32;
925 	struct nfe_desc64 *desc64;
926 	struct nfe_tx_data *data;
927 	bus_dmamap_t map;
928 	uint16_t flags = 0;
929 	uint32_t vtag = 0;
930 	int error, i, first = sc->txq.cur;
931 
932 	map = sc->txq.data[first].map;
933 
934 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
935 	if (error != 0) {
936 		printf("%s: can't map mbuf (error %d)\n",
937 		    sc->sc_dev.dv_xname, error);
938 		return error;
939 	}
940 
941 	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
942 		bus_dmamap_unload(sc->sc_dmat, map);
943 		return ENOBUFS;
944 	}
945 
946 #if NVLAN > 0
947 	/* setup h/w VLAN tagging */
948 	if (m0->m_flags & M_VLANTAG)
949 		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
950 #endif
951 	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
952 		flags |= NFE_TX_IP_CSUM;
953 	if (m0->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
954 		flags |= NFE_TX_TCP_UDP_CSUM;
955 
956 	for (i = 0; i < map->dm_nsegs; i++) {
957 		data = &sc->txq.data[sc->txq.cur];
958 
959 		if (sc->sc_flags & NFE_40BIT_ADDR) {
960 			desc64 = &sc->txq.desc64[sc->txq.cur];
961 #if defined(__LP64__)
962 			desc64->physaddr[0] =
963 			    htole32(map->dm_segs[i].ds_addr >> 32);
964 #endif
965 			desc64->physaddr[1] =
966 			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
967 			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
968 			desc64->flags = htole16(flags);
969 			desc64->vtag = htole32(vtag);
970 		} else {
971 			desc32 = &sc->txq.desc32[sc->txq.cur];
972 
973 			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
974 			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
975 			desc32->flags = htole16(flags);
976 		}
977 
978 		if (map->dm_nsegs > 1) {
979 			/*
980 			 * Checksum flags and vtag belong to the first fragment
981 			 * only.
982 			 */
983 			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
984 			vtag = 0;
985 
986 			/*
987 			 * Setting of the valid bit in the first descriptor is
988 			 * deferred until the whole chain is fully setup.
989 			 */
990 			flags |= NFE_TX_VALID;
991 		}
992 
993 		sc->txq.queued++;
994 		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
995 	}
996 
997 	/* the whole mbuf chain has been setup */
998 	if (sc->sc_flags & NFE_40BIT_ADDR) {
999 		/* fix last descriptor */
1000 		flags |= NFE_TX_LASTFRAG_V2;
1001 		desc64->flags = htole16(flags);
1002 
1003 		/* finally, set the valid bit in the first descriptor */
1004 		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
1005 	} else {
1006 		/* fix last descriptor */
1007 		if (sc->sc_flags & NFE_JUMBO_SUP)
1008 			flags |= NFE_TX_LASTFRAG_V2;
1009 		else
1010 			flags |= NFE_TX_LASTFRAG_V1;
1011 		desc32->flags = htole16(flags);
1012 
1013 		/* finally, set the valid bit in the first descriptor */
1014 		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
1015 	}
1016 
1017 	data->m = m0;
1018 	data->active = map;
1019 
1020 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1021 	    BUS_DMASYNC_PREWRITE);
1022 
1023 	return 0;
1024 }
1025 
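/*
 * Dequeue packets from the interface send queue, encapsulate them into
 * the Tx ring and kick the transmitter.  A watchdog timeout is armed in
 * case the chip stalls.
 */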
1026 void
1027 nfe_start(struct ifnet *ifp)
1028 {
1029 	struct nfe_softc *sc = ifp->if_softc;
1030 	int old = sc->txq.cur;
1031 	struct mbuf *m0;
1032 
1033 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1034 		return;
1035 
1036 	for (;;) {
1037 		IFQ_POLL(&ifp->if_snd, m0);
1038 		if (m0 == NULL)
1039 			break;
1040 
1041 		if (nfe_encap(sc, m0) != 0) {
1042 			ifp->if_flags |= IFF_OACTIVE;
1043 			break;
1044 		}
1045 
1046 		/* packet put in h/w queue, remove from s/w queue */
1047 		IFQ_DEQUEUE(&ifp->if_snd, m0);
1048 
1049 #if NBPFILTER > 0
1050 		if (ifp->if_bpf != NULL)
1051 			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
1052 #endif
1053 	}
1054 	if (sc->txq.cur == old)	/* nothing sent */
1055 		return;
1056 
1057 	if (sc->sc_flags & NFE_40BIT_ADDR)
1058 		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
1059 	else
1060 		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
1061 
1062 	/* kick Tx */
1063 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
1064 
1065 	/*
1066 	 * Set a timeout in case the chip goes out to lunch.
1067 	 */
1068 	ifp->if_timer = 5;
1069 }
1070 
1071 void
1072 nfe_watchdog(struct ifnet *ifp)
1073 {
1074 	struct nfe_softc *sc = ifp->if_softc;
1075 
1076 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1077 
1078 	nfe_init(ifp);
1079 
1080 	ifp->if_oerrors++;
1081 }
1082 
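/*
 * Reset and reprogram the chip: descriptor rings, MAC address, Rx
 * filter and media are set up before Rx, Tx and interrupts are enabled.
 */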
1083 int
1084 nfe_init(struct ifnet *ifp)
1085 {
1086 	struct nfe_softc *sc = ifp->if_softc;
1087 	uint32_t tmp;
1088 
1089 	nfe_stop(ifp, 0);
1090 
1091 	NFE_WRITE(sc, NFE_TX_UNK, 0);
1092 	NFE_WRITE(sc, NFE_STATUS, 0);
1093 
1094 	sc->rxtxctl = NFE_RXTX_BIT2;
1095 	if (sc->sc_flags & NFE_40BIT_ADDR)
1096 		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
1097 	else if (sc->sc_flags & NFE_JUMBO_SUP)
1098 		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
1099 
1100 	if (sc->sc_flags & NFE_HW_CSUM)
1101 		sc->rxtxctl |= NFE_RXTX_RXCSUM;
1102 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1103 		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
1104 
1105 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
1106 	DELAY(10);
1107 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1108 
1109 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1110 		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
1111 	else
1112 		NFE_WRITE(sc, NFE_VTAG_CTL, 0);
1113 
1114 	NFE_WRITE(sc, NFE_SETUP_R6, 0);
1115 
1116 	/* set MAC address */
1117 	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);
1118 
1119 	/* tell MAC where rings are in memory */
1120 #ifdef __LP64__
1121 	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
1122 #endif
1123 	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
1124 #ifdef __LP64__
1125 	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
1126 #endif
1127 	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);
1128 
1129 	NFE_WRITE(sc, NFE_RING_SIZE,
1130 	    (NFE_RX_RING_COUNT - 1) << 16 |
1131 	    (NFE_TX_RING_COUNT - 1));
1132 
1133 	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);
1134 
1135 	/* force MAC to wakeup */
1136 	tmp = NFE_READ(sc, NFE_PWR_STATE);
1137 	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
1138 	DELAY(10);
1139 	tmp = NFE_READ(sc, NFE_PWR_STATE);
1140 	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);
1141 
1142 #if 1
1143 	/* configure interrupts coalescing/mitigation */
1144 	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
1145 #else
1146 	/* no interrupt mitigation: one interrupt per packet */
1147 	NFE_WRITE(sc, NFE_IMTIMER, 970);
1148 #endif
1149 
1150 	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
1151 	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
1152 	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
1153 
1154 	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
1155 	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
1156 
1157 	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
1158 	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);
1159 
1160 	sc->rxtxctl &= ~NFE_RXTX_BIT2;
1161 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1162 	DELAY(10);
1163 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
1164 
1165 	/* set Rx filter */
1166 	nfe_setmulti(sc);
1167 
1168 	nfe_ifmedia_upd(ifp);
1169 
1170 	/* enable Rx */
1171 	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
1172 
1173 	/* enable Tx */
1174 	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
1175 
1176 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1177 
1178 	/* enable interrupts */
1179 	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1180 
1181 	timeout_add_sec(&sc->sc_tick_ch, 1);
1182 
1183 	ifp->if_flags |= IFF_RUNNING;
1184 	ifp->if_flags &= ~IFF_OACTIVE;
1185 
1186 	return 0;
1187 }
1188 
1189 void
1190 nfe_stop(struct ifnet *ifp, int disable)
1191 {
1192 	struct nfe_softc *sc = ifp->if_softc;
1193 
1194 	timeout_del(&sc->sc_tick_ch);
1195 
1196 	ifp->if_timer = 0;
1197 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1198 
1199 	mii_down(&sc->sc_mii);
1200 
1201 	/* abort Tx */
1202 	NFE_WRITE(sc, NFE_TX_CTL, 0);
1203 
1204 	/* disable Rx */
1205 	NFE_WRITE(sc, NFE_RX_CTL, 0);
1206 
1207 	/* disable interrupts */
1208 	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1209 
1210 	/* reset Tx and Rx rings */
1211 	nfe_reset_tx_ring(sc, &sc->txq);
1212 	nfe_reset_rx_ring(sc, &sc->rxq);
1213 }
1214 
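/*
 * Allocate the Rx descriptor ring (32-bit or 64-bit descriptor format
 * depending on the chip) and pre-load one buffer per descriptor.
 */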
1215 int
1216 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1217 {
1218 	struct nfe_desc32 *desc32;
1219 	struct nfe_desc64 *desc64;
1220 	struct nfe_rx_data *data;
1221 	struct nfe_jbuf *jbuf;
1222 	void **desc;
1223 	bus_addr_t physaddr;
1224 	int i, nsegs, error, descsize;
1225 
1226 	if (sc->sc_flags & NFE_40BIT_ADDR) {
1227 		desc = (void **)&ring->desc64;
1228 		descsize = sizeof (struct nfe_desc64);
1229 	} else {
1230 		desc = (void **)&ring->desc32;
1231 		descsize = sizeof (struct nfe_desc32);
1232 	}
1233 
1234 	ring->cur = ring->next = 0;
1235 	ring->bufsz = MCLBYTES;
1236 
1237 	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
1238 	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
1239 	if (error != 0) {
1240 		printf("%s: could not create desc DMA map\n",
1241 		    sc->sc_dev.dv_xname);
1242 		goto fail;
1243 	}
1244 
1245 	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
1246 	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1247 	if (error != 0) {
1248 		printf("%s: could not allocate DMA memory\n",
1249 		    sc->sc_dev.dv_xname);
1250 		goto fail;
1251 	}
1252 
1253 	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
1254 	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
1255 	if (error != 0) {
1256 		printf("%s: can't map desc DMA memory\n",
1257 		    sc->sc_dev.dv_xname);
1258 		goto fail;
1259 	}
1260 
1261 	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
1262 	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
1263 	if (error != 0) {
1264 		printf("%s: could not load desc DMA map\n",
1265 		    sc->sc_dev.dv_xname);
1266 		goto fail;
1267 	}
1268 	ring->physaddr = ring->map->dm_segs[0].ds_addr;
1269 
1270 	if (sc->sc_flags & NFE_USE_JUMBO) {
1271 		ring->bufsz = NFE_JBYTES;
1272 		if ((error = nfe_jpool_alloc(sc)) != 0) {
1273 			printf("%s: could not allocate jumbo frames\n",
1274 			    sc->sc_dev.dv_xname);
1275 			goto fail;
1276 		}
1277 	}
1278 
1279 	/*
1280 	 * Pre-allocate Rx buffers and populate Rx ring.
1281 	 */
1282 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1283 		data = &sc->rxq.data[i];
1284 
1285 		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
1286 		if (data->m == NULL) {
1287 			printf("%s: could not allocate rx mbuf\n",
1288 			    sc->sc_dev.dv_xname);
1289 			error = ENOMEM;
1290 			goto fail;
1291 		}
1292 
1293 		if (sc->sc_flags & NFE_USE_JUMBO) {
1294 			if ((jbuf = nfe_jalloc(sc)) == NULL) {
1295 				printf("%s: could not allocate jumbo buffer\n",
1296 				    sc->sc_dev.dv_xname);
1297 				goto fail;
1298 			}
1299 			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
1300 			    sc);
1301 
1302 			physaddr = jbuf->physaddr;
1303 		} else {
1304 			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1305 			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
1306 			if (error != 0) {
1307 				printf("%s: could not create DMA map\n",
1308 				    sc->sc_dev.dv_xname);
1309 				goto fail;
1310 			}
1311 			MCLGET(data->m, M_DONTWAIT);
1312 			if (!(data->m->m_flags & M_EXT)) {
1313 				printf("%s: could not allocate mbuf cluster\n",
1314 				    sc->sc_dev.dv_xname);
1315 				error = ENOMEM;
1316 				goto fail;
1317 			}
1318 
1319 			error = bus_dmamap_load(sc->sc_dmat, data->map,
1320 			    mtod(data->m, void *), MCLBYTES, NULL,
1321 			    BUS_DMA_READ | BUS_DMA_NOWAIT);
1322 			if (error != 0) {
1323 				printf("%s: could not load rx buf DMA map",
1324 				    sc->sc_dev.dv_xname);
1325 				goto fail;
1326 			}
1327 			physaddr = data->map->dm_segs[0].ds_addr;
1328 		}
1329 
1330 		if (sc->sc_flags & NFE_40BIT_ADDR) {
1331 			desc64 = &sc->rxq.desc64[i];
1332 #if defined(__LP64__)
1333 			desc64->physaddr[0] = htole32(physaddr >> 32);
1334 #endif
1335 			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
1336 			desc64->length = htole16(sc->rxq.bufsz);
1337 			desc64->flags = htole16(NFE_RX_READY);
1338 		} else {
1339 			desc32 = &sc->rxq.desc32[i];
1340 			desc32->physaddr = htole32(physaddr);
1341 			desc32->length = htole16(sc->rxq.bufsz);
1342 			desc32->flags = htole16(NFE_RX_READY);
1343 		}
1344 	}
1345 
1346 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1347 	    BUS_DMASYNC_PREWRITE);
1348 
1349 	return 0;
1350 
1351 fail:	nfe_free_rx_ring(sc, ring);
1352 	return error;
1353 }
1354 
1355 void
1356 nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1357 {
1358 	int i;
1359 
1360 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1361 		if (sc->sc_flags & NFE_40BIT_ADDR) {
1362 			ring->desc64[i].length = htole16(ring->bufsz);
1363 			ring->desc64[i].flags = htole16(NFE_RX_READY);
1364 		} else {
1365 			ring->desc32[i].length = htole16(ring->bufsz);
1366 			ring->desc32[i].flags = htole16(NFE_RX_READY);
1367 		}
1368 	}
1369 
1370 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1371 	    BUS_DMASYNC_PREWRITE);
1372 
1373 	ring->cur = ring->next = 0;
1374 }
1375 
1376 void
1377 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1378 {
1379 	struct nfe_rx_data *data;
1380 	void *desc;
1381 	int i, descsize;
1382 
1383 	if (sc->sc_flags & NFE_40BIT_ADDR) {
1384 		desc = ring->desc64;
1385 		descsize = sizeof (struct nfe_desc64);
1386 	} else {
1387 		desc = ring->desc32;
1388 		descsize = sizeof (struct nfe_desc32);
1389 	}
1390 
1391 	if (desc != NULL) {
1392 		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
1393 		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1394 		bus_dmamap_unload(sc->sc_dmat, ring->map);
1395 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
1396 		    NFE_RX_RING_COUNT * descsize);
1397 		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
1398 	}
1399 
1400 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1401 		data = &ring->data[i];
1402 
1403 		if (data->map != NULL) {
1404 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1405 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1406 			bus_dmamap_unload(sc->sc_dmat, data->map);
1407 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1408 		}
1409 		if (data->m != NULL)
1410 			m_freem(data->m);
1411 	}
1412 }
1413 
1414 struct nfe_jbuf *
1415 nfe_jalloc(struct nfe_softc *sc)
1416 {
1417 	struct nfe_jbuf *jbuf;
1418 
1419 	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
1420 	if (jbuf == NULL)
1421 		return NULL;
1422 	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
1423 	return jbuf;
1424 }
1425 
1426 /*
1427  * This is called automatically by the network stack when the mbuf is freed.
1428  * Beware that the NIC may already have been reset by the time the mbuf is
1429  * freed.
1430  */
1431 void
1432 nfe_jfree(caddr_t buf, u_int size, void *arg)
1433 {
1434 	struct nfe_softc *sc = arg;
1435 	struct nfe_jbuf *jbuf;
1436 	int i;
1437 
1438 	/* find the jbuf from the base pointer */
1439 	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
1440 	if (i < 0 || i >= NFE_JPOOL_COUNT) {
1441 		printf("%s: request to free a buffer (%p) not managed by us\n",
1442 		    sc->sc_dev.dv_xname, buf);
1443 		return;
1444 	}
1445 	jbuf = &sc->rxq.jbuf[i];
1446 
1447 	/* ..and put it back in the free list */
1448 	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
1449 }
1450 
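/*
 * Allocate one contiguous DMA area and carve it into NFE_JPOOL_COUNT
 * jumbo buffers of NFE_JBYTES each, kept on a free list (see
 * nfe_jalloc() and nfe_jfree()).
 */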
1451 int
1452 nfe_jpool_alloc(struct nfe_softc *sc)
1453 {
1454 	struct nfe_rx_ring *ring = &sc->rxq;
1455 	struct nfe_jbuf *jbuf;
1456 	bus_addr_t physaddr;
1457 	caddr_t buf;
1458 	int i, nsegs, error;
1459 
1460 	/*
1461 	 * Allocate a big chunk of DMA'able memory.
1462 	 */
1463 	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
1464 	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
1465 	if (error != 0) {
1466 		printf("%s: could not create jumbo DMA map\n",
1467 		    sc->sc_dev.dv_xname);
1468 		goto fail;
1469 	}
1470 
1471 	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
1472 	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
1473 	if (error != 0) {
1474 		printf("%s could not allocate jumbo DMA memory\n",
1475 		    sc->sc_dev.dv_xname);
1476 		goto fail;
1477 	}
1478 
1479 	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
1480 	    &ring->jpool, BUS_DMA_NOWAIT);
1481 	if (error != 0) {
1482 		printf("%s: can't map jumbo DMA memory\n",
1483 		    sc->sc_dev.dv_xname);
1484 		goto fail;
1485 	}
1486 
1487 	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
1488 	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
1489 	if (error != 0) {
1490 		printf("%s: could not load jumbo DMA map\n",
1491 		    sc->sc_dev.dv_xname);
1492 		goto fail;
1493 	}
1494 
1495 	/* ..and split it into 9KB chunks */
1496 	SLIST_INIT(&ring->jfreelist);
1497 
1498 	buf = ring->jpool;
1499 	physaddr = ring->jmap->dm_segs[0].ds_addr;
1500 	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
1501 		jbuf = &ring->jbuf[i];
1502 
1503 		jbuf->buf = buf;
1504 		jbuf->physaddr = physaddr;
1505 
1506 		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
1507 
1508 		buf += NFE_JBYTES;
1509 		physaddr += NFE_JBYTES;
1510 	}
1511 
1512 	return 0;
1513 
1514 fail:	nfe_jpool_free(sc);
1515 	return error;
1516 }
1517 
1518 void
1519 nfe_jpool_free(struct nfe_softc *sc)
1520 {
1521 	struct nfe_rx_ring *ring = &sc->rxq;
1522 
1523 	if (ring->jmap != NULL) {
1524 		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
1525 		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1526 		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
1527 		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
1528 	}
1529 	if (ring->jpool != NULL) {
1530 		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
1531 		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
1532 	}
1533 }
1534 
1535 int
1536 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1537 {
1538 	int i, nsegs, error;
1539 	void **desc;
1540 	int descsize;
1541 
1542 	if (sc->sc_flags & NFE_40BIT_ADDR) {
1543 		desc = (void **)&ring->desc64;
1544 		descsize = sizeof (struct nfe_desc64);
1545 	} else {
1546 		desc = (void **)&ring->desc32;
1547 		descsize = sizeof (struct nfe_desc32);
1548 	}
1549 
1550 	ring->queued = 0;
1551 	ring->cur = ring->next = 0;
1552 
1553 	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
1554 	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
1555 
1556 	if (error != 0) {
1557 		printf("%s: could not create desc DMA map\n",
1558 		    sc->sc_dev.dv_xname);
1559 		goto fail;
1560 	}
1561 
1562 	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
1563 	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1564 	if (error != 0) {
1565 		printf("%s: could not allocate DMA memory\n",
1566 		    sc->sc_dev.dv_xname);
1567 		goto fail;
1568 	}
1569 
1570 	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
1571 	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
1572 	if (error != 0) {
1573 		printf("%s: can't map desc DMA memory\n",
1574 		    sc->sc_dev.dv_xname);
1575 		goto fail;
1576 	}
1577 
1578 	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
1579 	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
1580 	if (error != 0) {
1581 		printf("%s: could not load desc DMA map\n",
1582 		    sc->sc_dev.dv_xname);
1583 		goto fail;
1584 	}
1585 	ring->physaddr = ring->map->dm_segs[0].ds_addr;
1586 
1587 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1588 		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
1589 		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
1590 		    &ring->data[i].map);
1591 		if (error != 0) {
1592 			printf("%s: could not create DMA map\n",
1593 			    sc->sc_dev.dv_xname);
1594 			goto fail;
1595 		}
1596 	}
1597 
1598 	return 0;
1599 
1600 fail:	nfe_free_tx_ring(sc, ring);
1601 	return error;
1602 }
1603 
1604 void
1605 nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1606 {
1607 	struct nfe_tx_data *data;
1608 	int i;
1609 
1610 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1611 		if (sc->sc_flags & NFE_40BIT_ADDR)
1612 			ring->desc64[i].flags = 0;
1613 		else
1614 			ring->desc32[i].flags = 0;
1615 
1616 		data = &ring->data[i];
1617 
1618 		if (data->m != NULL) {
1619 			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
1620 			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1621 			bus_dmamap_unload(sc->sc_dmat, data->active);
1622 			m_freem(data->m);
1623 			data->m = NULL;
1624 		}
1625 	}
1626 
1627 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1628 	    BUS_DMASYNC_PREWRITE);
1629 
1630 	ring->queued = 0;
1631 	ring->cur = ring->next = 0;
1632 }
1633 
1634 void
1635 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1636 {
1637 	struct nfe_tx_data *data;
1638 	void *desc;
1639 	int i, descsize;
1640 
1641 	if (sc->sc_flags & NFE_40BIT_ADDR) {
1642 		desc = ring->desc64;
1643 		descsize = sizeof (struct nfe_desc64);
1644 	} else {
1645 		desc = ring->desc32;
1646 		descsize = sizeof (struct nfe_desc32);
1647 	}
1648 
1649 	if (desc != NULL) {
1650 		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
1651 		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1652 		bus_dmamap_unload(sc->sc_dmat, ring->map);
1653 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
1654 		    NFE_TX_RING_COUNT * descsize);
1655 		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
1656 	}
1657 
1658 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1659 		data = &ring->data[i];
1660 
1661 		if (data->m != NULL) {
1662 			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
1663 			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1664 			bus_dmamap_unload(sc->sc_dmat, data->active);
1665 			m_freem(data->m);
1666 		}
1667 	}
1668 
1669 	/* ..and now actually destroy the DMA mappings */
1670 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1671 		data = &ring->data[i];
1672 		if (data->map == NULL)
1673 			continue;
1674 		bus_dmamap_destroy(sc->sc_dmat, data->map);
1675 	}
1676 }
1677 
1678 int
1679 nfe_ifmedia_upd(struct ifnet *ifp)
1680 {
1681 	struct nfe_softc *sc = ifp->if_softc;
1682 	struct mii_data *mii = &sc->sc_mii;
1683 	struct mii_softc *miisc;
1684 
1685 	if (mii->mii_instance != 0) {
1686 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1687 			mii_phy_reset(miisc);
1688 	}
1689 	return mii_mediachg(mii);
1690 }
1691 
1692 void
1693 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1694 {
1695 	struct nfe_softc *sc = ifp->if_softc;
1696 	struct mii_data *mii = &sc->sc_mii;
1697 
1698 	mii_pollstat(mii);
1699 	ifmr->ifm_status = mii->mii_media_status;
1700 	ifmr->ifm_active = mii->mii_media_active;
1701 }
1702 
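/*
 * Program the hardware Rx filter: compute an address/mask pair matching
 * all joined multicast groups, or fall back to ALLMULTI/PROMISC when
 * needed.
 */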
1703 void
1704 nfe_setmulti(struct nfe_softc *sc)
1705 {
1706 	struct arpcom *ac = &sc->sc_arpcom;
1707 	struct ifnet *ifp = &ac->ac_if;
1708 	struct ether_multi *enm;
1709 	struct ether_multistep step;
1710 	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
1711 	uint32_t filter = NFE_RXFILTER_MAGIC;
1712 	int i;
1713 
1714 	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1715 		bzero(addr, ETHER_ADDR_LEN);
1716 		bzero(mask, ETHER_ADDR_LEN);
1717 		goto done;
1718 	}
1719 
1720 	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
1721 	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
1722 
1723 	ETHER_FIRST_MULTI(step, ac, enm);
1724 	while (enm != NULL) {
1725 		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1726 			ifp->if_flags |= IFF_ALLMULTI;
1727 			bzero(addr, ETHER_ADDR_LEN);
1728 			bzero(mask, ETHER_ADDR_LEN);
1729 			goto done;
1730 		}
1731 		for (i = 0; i < ETHER_ADDR_LEN; i++) {
1732 			addr[i] &=  enm->enm_addrlo[i];
1733 			mask[i] &= ~enm->enm_addrlo[i];
1734 		}
1735 		ETHER_NEXT_MULTI(step, enm);
1736 	}
1737 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1738 		mask[i] |= addr[i];
1739 
1740 done:
1741 	addr[0] |= 0x01;	/* make sure multicast bit is set */
1742 
1743 	NFE_WRITE(sc, NFE_MULTIADDR_HI,
1744 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1745 	NFE_WRITE(sc, NFE_MULTIADDR_LO,
1746 	    addr[5] <<  8 | addr[4]);
1747 	NFE_WRITE(sc, NFE_MULTIMASK_HI,
1748 	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
1749 	NFE_WRITE(sc, NFE_MULTIMASK_LO,
1750 	    mask[5] <<  8 | mask[4]);
1751 
1752 	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
1753 	NFE_WRITE(sc, NFE_RXFILTER, filter);
1754 }
1755 
1756 void
1757 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
1758 {
1759 	uint32_t tmp;
1760 
1761 	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
1762 		tmp = NFE_READ(sc, NFE_MACADDR_HI);
1763 		addr[0] = (tmp & 0xff);
1764 		addr[1] = (tmp >>  8) & 0xff;
1765 		addr[2] = (tmp >> 16) & 0xff;
1766 		addr[3] = (tmp >> 24) & 0xff;
1767 
1768 		tmp = NFE_READ(sc, NFE_MACADDR_LO);
1769 		addr[4] = (tmp & 0xff);
1770 		addr[5] = (tmp >> 8) & 0xff;
1771 
1772 	} else {
1773 		tmp = NFE_READ(sc, NFE_MACADDR_LO);
1774 		addr[0] = (tmp >> 8) & 0xff;
1775 		addr[1] = (tmp & 0xff);
1776 
1777 		tmp = NFE_READ(sc, NFE_MACADDR_HI);
1778 		addr[2] = (tmp >> 24) & 0xff;
1779 		addr[3] = (tmp >> 16) & 0xff;
1780 		addr[4] = (tmp >>  8) & 0xff;
1781 		addr[5] = (tmp & 0xff);
1782 	}
1783 }
1784 
1785 void
1786 nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
1787 {
1788 	NFE_WRITE(sc, NFE_MACADDR_LO,
1789 	    addr[5] <<  8 | addr[4]);
1790 	NFE_WRITE(sc, NFE_MACADDR_HI,
1791 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1792 }
1793 
1794 void
1795 nfe_tick(void *arg)
1796 {
1797 	struct nfe_softc *sc = arg;
1798 	int s;
1799 
1800 	s = splnet();
1801 	mii_tick(&sc->sc_mii);
1802 	splx(s);
1803 
1804 	timeout_add_sec(&sc->sc_tick_ch, 1);
1805 }
1806