/*	$NetBSD: if_age.c,v 1.39 2010/07/20 09:17:24 cegger Exp $ */
/*	$OpenBSD: if_age.c,v 1.1 2009/01/16 05:00:34 kevlo Exp $	*/

/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_age.c,v 1.39 2010/07/20 09:17:24 cegger Exp $");

#include "vlan.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/if_types.h>
#include <net/if_vlanvar.h>

#include <net/bpf.h>

#include <sys/rnd.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_agereg.h>

static int	age_match(device_t, cfdata_t, void *);
static void	age_attach(device_t, device_t, void *);
static int	age_detach(device_t, int);

static bool	age_resume(device_t, const pmf_qual_t *);

static int	age_miibus_readreg(device_t, int, int);
static void	age_miibus_writereg(device_t, int, int, int);
static void	age_miibus_statchg(device_t);

static int	age_init(struct ifnet *);
static int	age_ioctl(struct ifnet *, u_long, void *);
static void	age_start(struct ifnet *);
static void	age_watchdog(struct ifnet *);
static void	age_mediastatus(struct ifnet *, struct ifmediareq *);
static int	age_mediachange(struct ifnet *);

static int	age_intr(void *);
static int	age_dma_alloc(struct age_softc *);
static void	age_dma_free(struct age_softc *);
static void	age_get_macaddr(struct age_softc *, uint8_t[]);
static void	age_phy_reset(struct age_softc *);

static int	age_encap(struct age_softc *, struct mbuf **);
static void	age_init_tx_ring(struct age_softc *);
static int	age_init_rx_ring(struct age_softc *);
static void	age_init_rr_ring(struct age_softc *);
static void	age_init_cmb_block(struct age_softc *);
static void	age_init_smb_block(struct age_softc *);
static int	age_newbuf(struct age_softc *, struct age_rxdesc *, int);
static void	age_mac_config(struct age_softc *);
static void	age_txintr(struct age_softc *, int);
static void	age_rxeof(struct age_softc *sc, struct rx_rdesc *);
static void	age_rxintr(struct age_softc *, int);
static void	age_tick(void *);
static void	age_reset(struct age_softc *);
static void	age_stop(struct ifnet *, int);
static void	age_stats_update(struct age_softc *);
static void	age_stop_txmac(struct age_softc *);
static void	age_stop_rxmac(struct age_softc *);
static void	age_rxvlan(struct age_softc *sc);
static void	age_rxfilter(struct age_softc *);

CFATTACH_DECL_NEW(age, sizeof(struct age_softc),
    age_match, age_attach, age_detach, NULL);

int agedebug = 0;
#define	DPRINTF(x)	do { if (agedebug) printf x; } while (0)

#define ETHER_ALIGN 2
#define AGE_CSUM_FEATURES	(M_CSUM_TCPv4 | M_CSUM_UDPv4)

static int
age_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_GIGA);
}

static void
age_attach(device_t parent, device_t self, void *aux)
{
	struct age_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	pcireg_t memtype;
	int error = 0;

	aprint_naive("\n");
	aprint_normal(": Attansic/Atheros L1 Gigabit Ethernet\n");

	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/*
	 * Allocate IO memory
	 */
	memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, AGE_PCIR_BAR);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		break;
	default:
		aprint_error_dev(self, "invalid base address register\n");
		break;
	}

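	/*
	 * Note that an unrecognized BAR type above only triggers a
	 * warning; the mapping below is still attempted.
	 */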
	if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size) != 0) {
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(sc->sc_pct, ih);
	sc->sc_irq_handle = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
	    age_intr, sc);
	if (sc->sc_irq_handle == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "%s\n", intrstr);

	/* Set PHY address. */
	sc->age_phyaddr = AGE_PHY_ADDR;

	/* Reset PHY. */
	age_phy_reset(sc);

	/* Reset the ethernet controller. */
	age_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->age_rev = PCI_REVISION(pa->pa_class);
	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;

	aprint_debug_dev(self, "PCI device revision : 0x%04x\n", sc->age_rev);
	aprint_debug_dev(self, "Chip id/revision : 0x%04x\n", sc->age_chip_rev);

	if (agedebug) {
		aprint_debug_dev(self, "%d Tx FIFO, %d Rx FIFO\n",
		    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
		    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));
	}

	/* Set max allowable DMA size. */
	sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
	sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;

	/* Allocate DMA structures */
	error = age_dma_alloc(sc);
	if (error)
		goto fail;

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, age_tick, sc);

	/* Load station address. */
	age_get_macaddr(sc, sc->sc_enaddr);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = age_init;
	ifp->if_ioctl = age_ioctl;
	ifp->if_start = age_start;
	ifp->if_stop = age_stop;
	ifp->if_watchdog = age_watchdog;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;

	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx |
				IFCAP_CSUM_TCPv4_Rx |
				IFCAP_CSUM_UDPv4_Rx;
#ifdef AGE_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx |
				IFCAP_CSUM_TCPv4_Tx |
				IFCAP_CSUM_UDPv4_Tx;
#endif

#if NVLAN > 0
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = age_miibus_readreg;
	sc->sc_miibus.mii_writereg = age_miibus_writereg;
	sc->sc_miibus.mii_statchg = age_miibus_statchg;

	sc->sc_ec.ec_mii = &sc->sc_miibus;
	ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange,
	    age_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	if (pmf_device_register(self, NULL, age_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

fail:
	age_dma_free(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
}

static int
age_detach(device_t self, int flags)
{
	struct age_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int s;

	pmf_device_deregister(self);
	s = splnet();
	age_stop(ifp, 0);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	age_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
	return 0;
}

/*
 *	Read a PHY register on the MII of the L1.
 */
static int
age_miibus_readreg(device_t dev, int phy, int reg)
{
	struct age_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return 0;

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return 0;
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

/*
 *	Write a PHY register on the MII of the L1.
 */
static void
age_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct age_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return;

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));

	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
	}
}

/*
 *	Callback from MII layer when media changes.
 */
static void
age_miibus_statchg(device_t dev)
{
	struct age_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mii_data *mii;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->age_flags &= ~AGE_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->age_flags |= AGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
		uint32_t reg;

		age_mac_config(sc);
		reg = CSR_READ_4(sc, AGE_MAC_CFG);
		/* Restart DMA engine and Tx/Rx MAC. */
		CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
		    DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
}

/*
 *	Get the current interface media status.
 */
static void
age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 *	Set hardware to newly-selected media.
 */
static int
age_mediachange(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return error;
}

static int
age_intr(void *arg)
{
	struct age_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct cmb *cmb;
	uint32_t status;

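	/*
	 * The interrupt line may be shared. Reading the interrupt status
	 * register tells us whether this device actually raised the
	 * interrupt before we touch anything else.
	 */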
	status = CSR_READ_4(sc, AGE_INTR_STATUS);
	if (status == 0 || (status & AGE_INTRS) == 0)
		return 0;

	cmb = sc->age_rdata.age_cmb_block;
	if (cmb == NULL) {
		/*
		 * Happens when bringing up the interface
		 * without a carrier. Ack the interrupt.
		 */
		CSR_WRITE_4(sc, AGE_INTR_STATUS, status);
		return 0;
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	status = le32toh(cmb->intr_status);
	if ((status & AGE_INTRS) == 0)
		goto back;

	sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
	    TPD_CONS_SHIFT;
	sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
	    RRD_PROD_SHIFT;

	/* Let hardware know CMB was served. */
	cmb->intr_status = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & INTR_CMB_RX)
			age_rxintr(sc, sc->age_rr_prod);

		if (status & INTR_CMB_TX)
			age_txintr(sc, sc->age_tpd_cons);

		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
			if (status & INTR_DMA_RD_TO_RST)
				printf("%s: DMA read error! -- resetting\n",
				    device_xname(sc->sc_dev));
			if (status & INTR_DMA_WR_TO_RST)
				printf("%s: DMA write error! -- resetting\n",
				    device_xname(sc->sc_dev));
			age_init(ifp);
		}

		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			age_start(ifp);

		if (status & INTR_SMB)
			age_stats_update(sc);
	}

	/* Check whether CMB was updated while serving Tx/Rx/SMB handler. */
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

back:
	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);

	return 1;
}

static void
age_get_macaddr(struct age_softc *sc, uint8_t eaddr[])
{
	uint32_t ea[2], reg;
	int i, vpdc;

	reg = CSR_READ_4(sc, AGE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		/* Get VPD stored in TWSI EEPROM. */
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
	}

	if (pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_VPD, &vpdc, NULL)) {
		/*
		 * PCI VPD capability found, let TWSI reload EEPROM.
		 * This will set Ethernet address of controller.
		 */
		CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, AGE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    device_xname(sc->sc_dev));
	} else {
		if (agedebug)
			printf("%s: PCI VPD capability not found!\n",
			    device_xname(sc->sc_dev));
	}

	ea[0] = CSR_READ_4(sc, AGE_PAR0);
	ea[1] = CSR_READ_4(sc, AGE_PAR1);

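	/*
	 * The station address is split across the two address registers:
	 * PAR1 holds the two most significant bytes and PAR0 the
	 * remaining four, as extracted below.
	 */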
	eaddr[0] = (ea[1] >> 8) & 0xFF;
	eaddr[1] = (ea[1] >> 0) & 0xFF;
	eaddr[2] = (ea[0] >> 24) & 0xFF;
	eaddr[3] = (ea[0] >> 16) & 0xFF;
	eaddr[4] = (ea[0] >> 8) & 0xFF;
	eaddr[5] = (ea[0] >> 0) & 0xFF;
}

static void
age_phy_reset(struct age_softc *sc)
{
	uint16_t reg, pn;
	int i, linkup;

	/* Reset PHY. */
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
	DELAY(2000);
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
	DELAY(2000);

#define ATPHY_DBG_ADDR		0x1D
#define ATPHY_DBG_DATA		0x1E
#define ATPHY_CDTC		0x16
#define PHY_CDTC_ENB		0x0001
#define PHY_CDTC_POFF		8
#define ATPHY_CDTS		0x1C
#define PHY_CDTS_STAT_OK	0x0000
#define PHY_CDTS_STAT_SHORT	0x0100
#define PHY_CDTS_STAT_OPEN	0x0200
#define PHY_CDTS_STAT_INVAL	0x0300
#define PHY_CDTS_STAT_MASK	0x0300

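	/*
	 * ATPHY_CDTC/ATPHY_CDTS appear to be the Atheros PHY's cable
	 * diagnostic control and status registers; the loop below runs
	 * the diagnostic on each wire pair to see whether a link
	 * partner is attached.
	 */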
	/* Check power saving mode. Magic from Linux. */
	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
	for (linkup = 0, pn = 0; pn < 4; pn++) {
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, ATPHY_CDTC,
		    (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
		for (i = 200; i > 0; i--) {
			DELAY(1000);
			reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
			    ATPHY_CDTC);
			if ((reg & PHY_CDTC_ENB) == 0)
				break;
		}
		DELAY(1000);
		reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_CDTS);
		if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {
			linkup++;
			break;
		}
	}
	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR,
	    BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
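	/*
	 * If no pair reported a termination, the PHY is presumably in
	 * its power-saving state; the writes below poke vendor debug
	 * registers (magic values taken from the Linux driver) to wake
	 * it up.
	 */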
	if (linkup == 0) {
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x124E);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 1);
		reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, reg | 0x03);
		/* XXX */
		DELAY(1500 * 1000);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x024E);
	}

#undef ATPHY_DBG_ADDR
#undef ATPHY_DBG_DATA
#undef ATPHY_CDTC
#undef PHY_CDTC_ENB
#undef PHY_CDTC_POFF
#undef ATPHY_CDTS
#undef PHY_CDTS_STAT_OK
#undef PHY_CDTS_STAT_SHORT
#undef PHY_CDTS_STAT_OPEN
#undef PHY_CDTS_STAT_INVAL
#undef PHY_CDTS_STAT_MASK
}

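/*
 * Allocate the DMA-able control structures shared with the hardware:
 * the Tx/Rx descriptor rings, the Rx return ring, the coalescing
 * message block (CMB) and the statistics message block (SMB).
 */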
static int
age_dma_alloc(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int nsegs, error, i;

	/*
	 * Create DMA structures for the Tx ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1,
	    AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map);
	if (error) {
		sc->age_cdata.age_tx_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg,
	    nsegs, AGE_TX_RING_SZ, (void **)&sc->age_rdata.age_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_tx_ring, 0, AGE_TX_RING_SZ);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
	    sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_tx_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_tx_ring_paddr =
	    sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA structures for the Rx ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1,
	    AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map);
	if (error) {
		sc->age_cdata.age_rx_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring, "
		    "error = %i.\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg,
	    nsegs, AGE_RX_RING_SZ, (void **)&sc->age_rdata.age_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_rx_ring, 0, AGE_RX_RING_SZ);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map,
	    sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring, "
		    "error = %i.\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rx_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_rx_ring_paddr =
	    sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA structures for the Rx return ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1,
	    AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map);
	if (error) {
		sc->age_cdata.age_rr_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for RX return ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx "
		    "return ring, error = %i.\n",
		    device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg,
	    nsegs, AGE_RR_RING_SZ, (void **)&sc->age_rdata.age_rr_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_rr_ring, 0, AGE_RR_RING_SZ);

	/* Load the DMA map for Rx return ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map,
	    sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx return ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rr_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_rr_ring_paddr =
	    sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA structures for the CMB block
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1,
	    AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->age_cdata.age_cmb_block_map);
	if (error) {
		sc->age_cdata.age_cmb_block_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for CMB block */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1,
	    &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "CMB block, error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg,
	    nsegs, AGE_CMB_BLOCK_SZ, (void **)&sc->age_rdata.age_cmb_block,
	    BUS_DMA_NOWAIT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_cmb_block, 0, AGE_CMB_BLOCK_SZ);

	/* Load the DMA map for CMB block. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map,
	    sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for CMB block, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_cmb_block_seg, 1);
		return error;
	}

	sc->age_rdata.age_cmb_block_paddr =
	    sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA structures for the SMB block
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1,
	    AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->age_cdata.age_smb_block_map);
	if (error) {
		sc->age_cdata.age_smb_block_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for SMB block */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1,
	    &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "SMB block, error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg,
	    nsegs, AGE_SMB_BLOCK_SZ, (void **)&sc->age_rdata.age_smb_block,
	    BUS_DMA_NOWAIT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_smb_block, 0, AGE_SMB_BLOCK_SZ);

	/* Load the DMA map for SMB block */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map,
	    sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for SMB block, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_smb_block_seg, 1);
		return error;
	}

	sc->age_rdata.age_smb_block_paddr =
	    sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE,
		    AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			txd->tx_dmamap = NULL;
			printf("%s: could not create Tx dmamap, error = %i.\n",
			    device_xname(sc->sc_dev), error);
			return error;
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap);
	if (error) {
		sc->age_cdata.age_rx_sparemap = NULL;
		printf("%s: could not create spare Rx dmamap, error = %i.\n",
		    device_xname(sc->sc_dev), error);
		return error;
	}
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			rxd->rx_dmamap = NULL;
			printf("%s: could not create Rx dmamap, error = %i.\n",
			    device_xname(sc->sc_dev), error);
			return error;
		}
	}

	return 0;
}

static void
age_dma_free(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int i;

	/* Tx buffers */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* Rx buffers */
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->age_cdata.age_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap);
		sc->age_cdata.age_rx_sparemap = NULL;
	}

	/* Tx ring. */
	if (sc->age_cdata.age_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map);
	if (sc->age_cdata.age_tx_ring_map != NULL &&
	    sc->age_rdata.age_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_tx_ring_seg, 1);
	sc->age_rdata.age_tx_ring = NULL;
	sc->age_cdata.age_tx_ring_map = NULL;

	/* Rx ring. */
	if (sc->age_cdata.age_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map);
	if (sc->age_cdata.age_rx_ring_map != NULL &&
	    sc->age_rdata.age_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rx_ring_seg, 1);
	sc->age_rdata.age_rx_ring = NULL;
	sc->age_cdata.age_rx_ring_map = NULL;

	/* Rx return ring. */
	if (sc->age_cdata.age_rr_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map);
	if (sc->age_cdata.age_rr_ring_map != NULL &&
	    sc->age_rdata.age_rr_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rr_ring_seg, 1);
	sc->age_rdata.age_rr_ring = NULL;
	sc->age_cdata.age_rr_ring_map = NULL;

	/* CMB block */
	if (sc->age_cdata.age_cmb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map);
	if (sc->age_cdata.age_cmb_block_map != NULL &&
	    sc->age_rdata.age_cmb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_cmb_block_seg, 1);
	sc->age_rdata.age_cmb_block = NULL;
	sc->age_cdata.age_cmb_block_map = NULL;

	/* SMB block */
	if (sc->age_cdata.age_smb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map);
	if (sc->age_cdata.age_smb_block_map != NULL &&
	    sc->age_rdata.age_smb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_smb_block_seg, 1);
	sc->age_rdata.age_smb_block = NULL;
	sc->age_cdata.age_smb_block_map = NULL;
}

static void
age_start(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	enq = 0;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (age_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq = 1;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap(ifp, m_head);
	}

	if (enq) {
		/* Update mbox. */
		AGE_COMMIT_MBOX(sc);
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = AGE_TX_TIMEOUT;
	}
}

static void
age_watchdog(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;

	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    device_xname(sc->sc_dev));
		ifp->if_oerrors++;
		age_init(ifp);
		return;
	}

	if (sc->age_cdata.age_tx_cnt == 0) {
		printf("%s: watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n", device_xname(sc->sc_dev));
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			age_start(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
	ifp->if_oerrors++;
	age_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		age_start(ifp);
}

static int
age_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct age_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

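	/*
	 * ether_ioctl() returns ENETRESET when the multicast filter or
	 * interface flags change in a way that requires the hardware
	 * receive filter to be reprogrammed.
	 */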
	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			age_rxfilter(sc);
		error = 0;
	}

	splx(s);
	return error;
}

static void
age_mac_config(struct age_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	mii = &sc->sc_miibus;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_FULL_DUPLEX;
	reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
	reg &= ~MAC_CFG_SPEED_MASK;

	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}

	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

static bool
age_resume(device_t dv, const pmf_qual_t *qual)
{
	struct age_softc *sc = device_private(dv);
	uint16_t cmd;

	/*
	 * Clear the INTx emulation disable bit, which some hardware
	 * sets on resume. From Linux.
	 */
	cmd = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	if ((cmd & PCI_COMMAND_INTERRUPT_DISABLE) != 0) {
		cmd &= ~PCI_COMMAND_INTERRUPT_DISABLE;
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    PCI_COMMAND_STATUS_REG, cmd);
	}

	return true;
}

static int
age_encap(struct age_softc *sc, struct mbuf **m_head)
{
	struct age_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, i, nsegs, prod;
#if NVLAN > 0
	struct m_tag *mtag;
#endif

	m = *m_head;
	cflags = vtag = 0;
	poff = 0;

	prod = sc->age_cdata.age_tx_prod;
	txd = &sc->age_cdata.age_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		error = 0;

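		/*
		 * The mbuf chain has too many segments for the DMA map.
		 * As a simple fallback, try to pull the packet into a
		 * single mbuf and load it again.
		 */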
		*m_head = m_pullup(*m_head, MHLEN);
		if (*m_head == NULL) {
			printf("%s: can't defrag TX mbuf\n",
			    device_xname(sc->sc_dev));
			return ENOBUFS;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
		    BUS_DMA_NOWAIT);

		if (error != 0) {
			printf("%s: could not load defragged TX mbuf\n",
			    device_xname(sc->sc_dev));
			m_freem(*m_head);
			*m_head = NULL;
			return error;
		}
	} else if (error) {
		printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev));
		return error;
	}

	nsegs = map->dm_nsegs;

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return EIO;
	}

	/* Check descriptor overrun. */
	if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	m = *m_head;
	/* Configure Tx IP/TCP/UDP checksum offload. */
	if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
		cflags |= AGE_TD_CSUM;
		if ((m->m_pkthdr.csum_flags & M_CSUM_TCPv4) != 0)
			cflags |= AGE_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0)
			cflags |= AGE_TD_UDPCSUM;
		/* Set checksum start offset. */
		cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
	}

#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m))) {
		vtag = AGE_TX_VLAN_TAG(htons(VLAN_TAG_VALUE(mtag)));
		vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
		cflags |= AGE_TD_INSERT_VLAN_TAG;
	}
#endif

	desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->age_rdata.age_tx_ring[prod];
		desc->addr = htole64(map->dm_segs[i].ds_addr);
		desc->len =
		    htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->age_cdata.age_tx_cnt++;
		AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->age_cdata.age_tx_prod = prod;

	/* Set EOP on the last descriptor. */
	prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT;
	desc = &sc->age_rdata.age_tx_ring[prod];
	desc->flags |= htole32(AGE_TD_EOP);

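	/*
	 * The dmamap that was loaded belongs to the slot of the first
	 * descriptor, while the mbuf pointer is recorded at the last
	 * one; swapping the two maps keeps the loaded map with its
	 * mbuf so it can be unloaded when the frame is reclaimed in
	 * age_txintr().
	 */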
	/* Swap dmamap of the first and the last. */
	txd = &sc->age_cdata.age_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return 0;
}

static void
age_txintr(struct age_softc *sc, int tpd_cons)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct age_txdesc *txd;
	int cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	cons = sc->age_cdata.age_tx_cons;
	for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
		if (sc->age_cdata.age_tx_cnt <= 0)
			break;
		prog++;
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->age_cdata.age_tx_cnt--;
		txd = &sc->age_cdata.age_txdesc[cons];
		/*
		 * Clearing Tx descriptors is not required, but it
		 * helps debugging in case of Tx issues.
		 */
		txd->tx_desc->addr = 0;
		txd->tx_desc->len = 0;
		txd->tx_desc->flags = 0;

		if (txd->tx_m == NULL)
			continue;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	if (prog > 0) {
		sc->age_cdata.age_tx_cons = cons;

		/*
		 * Unarm watchdog timer only when there are no pending
		 * Tx descriptors in queue.
		 */
		if (sc->age_cdata.age_tx_cnt == 0)
			ifp->if_timer = 0;

		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
		    sc->age_cdata.age_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}
}

/* Receive a frame. */
static void
age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct age_rxdesc *rxd;
	struct rx_desc *desc;
	struct mbuf *mp, *m;
	uint32_t status, index;
	int count, nsegs, pktlen;
	int rx_cons;

	status = le32toh(rxrd->flags);
	index = le32toh(rxrd->index);
	rx_cons = AGE_RX_CONS(index);
	nsegs = AGE_RX_NSEGS(index);

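	/*
	 * A received frame may span several Rx buffers. The return
	 * descriptor carries the index of the first buffer and the
	 * number of segments making up the frame.
	 */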
	sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len));
	if ((status & AGE_RRD_ERROR) != 0 &&
	    (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
	    AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) {
		/*
		 * We want to pass the following frames to the upper
		 * layer regardless of the error status of the Rx
		 * return ring:
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length and protocol-specific length do
		 *    not match.
		 */
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
		return;
	}

	pktlen = 0;
	for (count = 0; count < nsegs; count++,
	    AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
		rxd = &sc->age_cdata.age_rxdesc[rx_cons];
		mp = rxd->rx_m;
		desc = rxd->rx_desc;
		/* Add a new receive buffer to the ring. */
		if (age_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse Rx buffers. */
			if (sc->age_cdata.age_rxhead != NULL) {
				m_freem(sc->age_cdata.age_rxhead);
				AGE_RXCHAIN_RESET(sc);
			}
			break;
		}

		/* The length of the first mbuf is computed last. */
		if (count != 0) {
			mp->m_len = AGE_RX_BYTES(le32toh(desc->len));
			pktlen += mp->m_len;
		}

		/* Chain received mbufs. */
		if (sc->age_cdata.age_rxhead == NULL) {
			sc->age_cdata.age_rxhead = mp;
			sc->age_cdata.age_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->age_cdata.age_rxprev_tail =
			    sc->age_cdata.age_rxtail;
			sc->age_cdata.age_rxtail->m_next = mp;
			sc->age_cdata.age_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/*
			 * It seems that the L1 controller has no way
			 * to tell the hardware to strip CRC bytes.
			 */
			sc->age_cdata.age_rxlen -= ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Remove the CRC bytes in chained mbufs. */
				pktlen -= ETHER_CRC_LEN;
				if (mp->m_len <= ETHER_CRC_LEN) {
					sc->age_cdata.age_rxtail =
					    sc->age_cdata.age_rxprev_tail;
					sc->age_cdata.age_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->age_cdata.age_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			}

			m = sc->age_cdata.age_rxhead;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = sc->age_cdata.age_rxlen;
			/* Set the first mbuf length. */
			m->m_len = sc->age_cdata.age_rxlen - pktlen;

			/*
			 * Set checksum information.
			 * It seems that the L1 controller can compute
			 * partial checksums. The partial checksum value
			 * could be used to accelerate checksum
			 * computation for fragmented TCP/UDP packets.
			 * The upper network stack already takes
			 * advantage of the partial checksum value in
			 * the IP reassembly stage. But I'm not sure
			 * about the correctness of the partial hardware
			 * checksum assistance due to the lack of a data
			 * sheet. If it is proven to work on the L1 I'll
			 * enable it.
			 */
			if (status & AGE_RRD_IPV4) {
				if (status & AGE_RRD_IPCSUM_NOK)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
				if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
				    (status & AGE_RRD_TCP_UDPCSUM_NOK)) {
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
				}
				/*
				 * Don't mark bad checksums for TCP/UDP
				 * frames, as fragmented frames may
				 * always have the bad-checksum bit set
				 * in the descriptor status.
				 */
			}
#if NVLAN > 0
			/* Check for VLAN tagged frames. */
			if (status & AGE_RRD_VLAN) {
				uint32_t vtag = AGE_RX_VLAN(le32toh(rxrd->vtags));
				VLAN_INPUT_TAG(ifp, m, AGE_RX_VLAN_TAG(vtag),
					continue);
			}
#endif

			bpf_mtap(ifp, m);
			/* Pass it on. */
			ether_input(ifp, m);

			/* Reset mbuf chains. */
			AGE_RXCHAIN_RESET(sc);
		}
	}

	if (count != nsegs) {
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
	} else
		sc->age_cdata.age_rx_cons = rx_cons;
}

static void
age_rxintr(struct age_softc *sc, int rr_prod)
{
	struct rx_rdesc *rxrd;
	int rr_cons, nsegs, pktlen, prog;

	rr_cons = sc->age_cdata.age_rr_cons;
	if (rr_cons == rr_prod)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
	    sc->age_cdata.age_rr_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	for (prog = 0; rr_cons != rr_prod; prog++) {
		rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
		nsegs = AGE_RX_NSEGS(le32toh(rxrd->index));
		if (nsegs == 0)
			break;
		/*
		 * Check the number of segments against the received
		 * bytes. A mismatch would indicate that the hardware
		 * is still trying to update the Rx return descriptors.
		 * I'm not sure whether this check is really needed.
		 */
		pktlen = AGE_RX_BYTES(le32toh(rxrd->len));
		if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) /
		    (MCLBYTES - ETHER_ALIGN)))
			break;

		/* Received a frame. */
		age_rxeof(sc, rxrd);

		/* Clear return ring. */
		rxrd->index = 0;
		AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->age_cdata.age_rr_cons = rr_cons;

		/* Sync descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
		    sc->age_cdata.age_rr_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Notify hardware availability of new Rx buffers. */
		AGE_COMMIT_MBOX(sc);
	}
}

static void
age_tick(void *xsc)
{
	struct age_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

static void
age_reset(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
	CSR_READ_4(sc, AGE_MASTER_CFG);
	DELAY(1000);
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		printf("%s: reset timeout(0x%08x)!\n", device_xname(sc->sc_dev),
		    reg);

	/* Initialize PCIe module. From Linux. */
	CSR_WRITE_4(sc, 0x12FC, 0x6500);
	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
}

static int
age_init(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, fsize;
	uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	age_stop(ifp, 0);

	/*
	 * Reset the chip to a known state.
	 */
	age_reset(sc);

	/* Initialize descriptors. */
	error = age_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n",
		    device_xname(sc->sc_dev));
		age_stop(ifp, 0);
		return error;
	}
	age_init_rr_ring(sc);
	age_init_tx_ring(sc);
	age_init_cmb_block(sc);
	age_init_smb_block(sc);

	/* Reprogram the station address. */
	memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
	CSR_WRITE_4(sc, AGE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);

	/* Set descriptor base addresses. */
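	/*
	 * Only a single high-address register (AGE_DESC_ADDR_HI)
	 * exists, so all rings and message blocks apparently must
	 * reside within the same 4GB segment; each ring then gets only
	 * its low 32 bits programmed individually.
	 */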
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
	paddr = sc->age_rdata.age_rx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_rr_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_cmb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_smb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));

	/* Set Rx/Rx return descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
	    DESC_RRD_CNT_MASK) |
	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);

	/* Tell hardware that we're ready to load descriptors. */
	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/*
	 * Initialize the mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register. However, the Tx producer and
	 * the Rx return consumer/Rx producer all share it, which makes
	 * it hard to separate the Tx and Rx code paths without locking.
	 * If the L1 hardware had separate mailbox registers for Tx and
	 * Rx consumer/producer management, we could have independent
	 * Tx/Rx handlers, and the Rx handler in turn could run without
	 * any locking.
	 */
	AGE_COMMIT_MBOX(sc);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, AGE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	     HDPX_CFG_JAMIPG_MASK));

	/* Configure interrupt moderation timer. */
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
	reg &= ~MASTER_MTIMER_ENB;
	if (AGE_USECS(sc->age_int_mod) == 0)
		reg &= ~MASTER_ITIMER_ENB;
	else
		reg |= MASTER_ITIMER_ENB;
	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
	if (agedebug)
		printf("%s: interrupt moderation is %d us.\n",
		    device_xname(sc->sc_dev), sc->age_int_mod);
	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));

	/*
	 * Set the maximum frame size, but don't let the MTU be less
	 * than ETHERMTU.
	 */
	if (ifp->if_mtu < ETHERMTU)
		sc->age_max_frame_size = ETHERMTU;
	else
		sc->age_max_frame_size = ifp->if_mtu;
	sc->age_max_frame_size += ETHER_HDR_LEN +
	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);

	/* Configure jumbo frame. */
	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
	    (((fsize / sizeof(uint64_t)) <<
	    RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
	    RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));

	/* Configure flow-control parameters. From Linux. */
	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
		/*
		 * Magic workaround for old-L1.
		 * Don't know which hw revision requires this magic.
		 */
		CSR_WRITE_4(sc, 0x12FC, 0x6500);
		/*
		 * Another magic workaround for flow-control mode
		 * change. From Linux.
		 */
		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
	}
	/*
	 * TODO
	 *  Should understand pause parameter relationships between FIFO
	 *  size and number of Rx descriptors and Rx return descriptors.
	 *
	 *  Magic parameters came from Linux.
	 */
	switch (sc->age_chip_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		rxf_hi = AGE_RX_RING_CNT / 16;
		rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
		rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
		rrd_lo = AGE_RR_RING_CNT / 16;
		break;
	default:
		reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
		rxf_lo = reg / 16;
		if (rxf_lo < 192)
			rxf_lo = 192;
		rxf_hi = (reg * 7) / 8;
		if (rxf_hi < rxf_lo)
			rxf_hi = rxf_lo + 16;
		reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
		rrd_lo = reg / 8;
		rrd_hi = (reg * 7) / 8;
		if (rrd_lo < 2)
			rrd_lo = 2;
		if (rrd_hi < rrd_lo)
			rrd_hi = rrd_lo + 3;
		break;
	}
	CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
	    ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
	    ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_HI_MASK));
	CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
	    ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_LO_MASK) |
	    ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_HI_MASK));

	/* Configure RxQ. */
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK) |
	    ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
	    RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
	    ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
	    RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
	    RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);

	/* Configure TxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK) |
	    ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
	    TXQ_CFG_TX_FIFO_BURST_MASK) |
	    ((TXQ_CFG_TPD_FETCH_DEFAULT <<
	    TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
	    TXQ_CFG_ENB);

	/* Configure DMA parameters. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);

	/* Configure CMB DMA write threshold. */
	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
	    CMB_WR_THRESH_RRD_MASK) |
	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
	    CMB_WR_THRESH_TPD_MASK));

	/* Set CMB/SMB timer and enable them. */
	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));
1814 
1815 	/* Request SMB updates every second. */
1816 	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
1817 	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);
1818 
1819 	/*
1820 	 * Disable all WOL bits, as WOL can interfere with normal Rx
1821 	 * operation.
1822 	 */
1823 	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
1824 
1825 	/*
1826 	 * Configure Tx/Rx MACs.
1827 	 *  - Auto-padding for short frames.
1828 	 *  - Enable CRC generation.
1829 	 *  Start with full-duplex/1000Mbps media; the MAC is reconfigured
1830 	 *  once the link has been established.
1831 	 */
1832 	CSR_WRITE_4(sc, AGE_MAC_CFG,
1833 	    MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
1834 	    MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
1835 	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
1836 	    MAC_CFG_PREAMBLE_MASK));
1837 
1838 	/* Set up the receive filter. */
1839 	age_rxfilter(sc);
1840 	age_rxvlan(sc);
1841 
1842 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
1843 	reg |= MAC_CFG_RXCSUM_ENB;
1844 
1845 	/* Ack all pending interrupts and enable interrupts. */
1846 	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
1847 	CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);
1848 
1849 	/* Finally enable Tx/Rx MAC. */
1850 	CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
1851 
1852 	sc->age_flags &= ~AGE_FLAG_LINK;
1853 
1854 	/* Switch to the current media. */
1855 	mii = &sc->sc_miibus;
1856 	mii_mediachg(mii);
1857 
1858 	callout_schedule(&sc->sc_tick_ch, hz);
1859 
1860 	ifp->if_flags |= IFF_RUNNING;
1861 	ifp->if_flags &= ~IFF_OACTIVE;
1862 
1863 	return 0;
1864 }
1865 
1866 static void
1867 age_stop(struct ifnet *ifp, int disable)
1868 {
1869 	struct age_softc *sc = ifp->if_softc;
1870 	struct age_txdesc *txd;
1871 	struct age_rxdesc *rxd;
1872 	uint32_t reg;
1873 	int i;
1874 
1875 	callout_stop(&sc->sc_tick_ch);
1876 
1877 	/*
1878 	 * Mark the interface down and cancel the watchdog timer.
1879 	 */
1880 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1881 	ifp->if_timer = 0;
1882 
1883 	sc->age_flags &= ~AGE_FLAG_LINK;
1884 
1885 	mii_down(&sc->sc_miibus);
1886 
1887 	/*
1888 	 * Disable interrupts.
1889 	 */
1890 	CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
1891 	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);
1892 
1893 	/* Stop CMB/SMB updates. */
1894 	CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);
1895 
1896 	/* Stop Rx/Tx MAC. */
1897 	age_stop_rxmac(sc);
1898 	age_stop_txmac(sc);
1899 
1900 	/* Stop DMA. */
1901 	CSR_WRITE_4(sc, AGE_DMA_CFG,
1902 	    CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));
1903 
1904 	/* Stop TxQ/RxQ. */
1905 	CSR_WRITE_4(sc, AGE_TXQ_CFG,
1906 	    CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
1907 	CSR_WRITE_4(sc, AGE_RXQ_CFG,
1908 	    CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
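	/* Wait for everything to go idle. */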
1909 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
1910 		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
1911 			break;
1912 		DELAY(10);
1913 	}
1914 	if (i == 0)
1915 		printf("%s: stopping Rx/Tx MACs timed out (0x%08x)!\n",
1916 		    device_xname(sc->sc_dev), reg);
1917 
1918 	/* Discard any partially assembled Rx mbuf chain. */
1919 	if (sc->age_cdata.age_rxhead != NULL)
1920 		m_freem(sc->age_cdata.age_rxhead);
1921 	AGE_RXCHAIN_RESET(sc);
1922 
1923 	/*
1924 	 * Free RX and TX mbufs still in the queues.
1925 	 */
1926 	for (i = 0; i < AGE_RX_RING_CNT; i++) {
1927 		rxd = &sc->age_cdata.age_rxdesc[i];
1928 		if (rxd->rx_m != NULL) {
1929 			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
1930 			m_freem(rxd->rx_m);
1931 			rxd->rx_m = NULL;
1932 		}
1933 	}
1934 	for (i = 0; i < AGE_TX_RING_CNT; i++) {
1935 		txd = &sc->age_cdata.age_txdesc[i];
1936 		if (txd->tx_m != NULL) {
1937 			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1938 			m_freem(txd->tx_m);
1939 			txd->tx_m = NULL;
1940 		}
1941 	}
1942 }
1943 
1944 static void
1945 age_stats_update(struct age_softc *sc)
1946 {
1947 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1948 	struct age_stats *stat;
1949 	struct smb *smb;
1950 
1951 	stat = &sc->age_stat;
1952 
1953 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
1954 	    sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1955 
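	/*
	 * The hardware sets 'updated' once it has posted a fresh
	 * statistics block; nothing to accumulate until then.
	 */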
1956 	smb = sc->age_rdata.age_smb_block;
1957 	if (smb->updated == 0)
1958 		return;
1959 
1960 	/* Rx stats. */
1961 	stat->rx_frames += smb->rx_frames;
1962 	stat->rx_bcast_frames += smb->rx_bcast_frames;
1963 	stat->rx_mcast_frames += smb->rx_mcast_frames;
1964 	stat->rx_pause_frames += smb->rx_pause_frames;
1965 	stat->rx_control_frames += smb->rx_control_frames;
1966 	stat->rx_crcerrs += smb->rx_crcerrs;
1967 	stat->rx_lenerrs += smb->rx_lenerrs;
1968 	stat->rx_bytes += smb->rx_bytes;
1969 	stat->rx_runts += smb->rx_runts;
1970 	stat->rx_fragments += smb->rx_fragments;
1971 	stat->rx_pkts_64 += smb->rx_pkts_64;
1972 	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
1973 	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
1974 	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
1975 	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
1976 	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
1977 	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
1978 	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
1979 	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
1980 	stat->rx_desc_oflows += smb->rx_desc_oflows;
1981 	stat->rx_alignerrs += smb->rx_alignerrs;
1982 	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
1983 	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
1984 	stat->rx_pkts_filtered += smb->rx_pkts_filtered;
1985 
1986 	/* Tx stats. */
1987 	stat->tx_frames += smb->tx_frames;
1988 	stat->tx_bcast_frames += smb->tx_bcast_frames;
1989 	stat->tx_mcast_frames += smb->tx_mcast_frames;
1990 	stat->tx_pause_frames += smb->tx_pause_frames;
1991 	stat->tx_excess_defer += smb->tx_excess_defer;
1992 	stat->tx_control_frames += smb->tx_control_frames;
1993 	stat->tx_deferred += smb->tx_deferred;
1994 	stat->tx_bytes += smb->tx_bytes;
1995 	stat->tx_pkts_64 += smb->tx_pkts_64;
1996 	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
1997 	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
1998 	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
1999 	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2000 	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2001 	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2002 	stat->tx_single_colls += smb->tx_single_colls;
2003 	stat->tx_multi_colls += smb->tx_multi_colls;
2004 	stat->tx_late_colls += smb->tx_late_colls;
2005 	stat->tx_excess_colls += smb->tx_excess_colls;
2006 	stat->tx_underrun += smb->tx_underrun;
2007 	stat->tx_desc_underrun += smb->tx_desc_underrun;
2008 	stat->tx_lenerrs += smb->tx_lenerrs;
2009 	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2010 	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2011 	stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2012 
2013 	/* Update counters in ifnet. */
2014 	ifp->if_opackets += smb->tx_frames;
2015 
2016 	ifp->if_collisions += smb->tx_single_colls +
2017 	    smb->tx_multi_colls + smb->tx_late_colls +
2018 	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;
2019 
2020 	ifp->if_oerrors += smb->tx_excess_colls +
2021 	    smb->tx_late_colls + smb->tx_underrun +
2022 	    smb->tx_pkts_truncated;
2023 
2024 	ifp->if_ipackets += smb->rx_frames;
2025 
2026 	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
2027 	    smb->rx_runts + smb->rx_pkts_truncated +
2028 	    smb->rx_fifo_oflows + smb->rx_desc_oflows +
2029 	    smb->rx_alignerrs;
2030 
2031 	/* Update done, clear. */
2032 	smb->updated = 0;
2033 
2034 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2035 	    sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2036 }
2037 
2038 static void
2039 age_stop_txmac(struct age_softc *sc)
2040 {
2041 	uint32_t reg;
2042 	int i;
2043 
2044 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
2045 	if ((reg & MAC_CFG_TX_ENB) != 0) {
2046 		reg &= ~MAC_CFG_TX_ENB;
2047 		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2048 	}
2049 	/* Stop Tx DMA engine. */
2050 	reg = CSR_READ_4(sc, AGE_DMA_CFG);
2051 	if ((reg & DMA_CFG_RD_ENB) != 0) {
2052 		reg &= ~DMA_CFG_RD_ENB;
2053 		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2054 	}
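	/* Wait for the Tx MAC and the read-DMA engine to go idle. */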
2055 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2056 		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2057 		    (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
2058 			break;
2059 		DELAY(10);
2060 	}
2061 	if (i == 0)
2062 		printf("%s: stopping TxMAC timed out!\n", device_xname(sc->sc_dev));
2063 }
2064 
2065 static void
2066 age_stop_rxmac(struct age_softc *sc)
2067 {
2068 	uint32_t reg;
2069 	int i;
2070 
2071 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
2072 	if ((reg & MAC_CFG_RX_ENB) != 0) {
2073 		reg &= ~MAC_CFG_RX_ENB;
2074 		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2075 	}
2076 	/* Stop Rx DMA engine. */
2077 	reg = CSR_READ_4(sc, AGE_DMA_CFG);
2078 	if ((reg & DMA_CFG_WR_ENB) != 0) {
2079 		reg &= ~DMA_CFG_WR_ENB;
2080 		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2081 	}
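	/* Wait for the Rx MAC and the write-DMA engine to go idle. */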
2082 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2083 		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2084 		    (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
2085 			break;
2086 		DELAY(10);
2087 	}
2088 	if (i == 0)
2089 		printf("%s: stopping RxMAC timed out!\n", device_xname(sc->sc_dev));
2090 }
2091 
2092 static void
2093 age_init_tx_ring(struct age_softc *sc)
2094 {
2095 	struct age_ring_data *rd;
2096 	struct age_txdesc *txd;
2097 	int i;
2098 
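	/* Start with an empty Tx ring. */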
2099 	sc->age_cdata.age_tx_prod = 0;
2100 	sc->age_cdata.age_tx_cons = 0;
2101 	sc->age_cdata.age_tx_cnt = 0;
2102 
2103 	rd = &sc->age_rdata;
2104 	memset(rd->age_tx_ring, 0, AGE_TX_RING_SZ);
2105 	for (i = 0; i < AGE_TX_RING_CNT; i++) {
2106 		txd = &sc->age_cdata.age_txdesc[i];
2107 		txd->tx_desc = &rd->age_tx_ring[i];
2108 		txd->tx_m = NULL;
2109 	}
2110 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
2111 	    sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2112 }
2113 
2114 static int
2115 age_init_rx_ring(struct age_softc *sc)
2116 {
2117 	struct age_ring_data *rd;
2118 	struct age_rxdesc *rxd;
2119 	int i;
2120 
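	/*
	 * Every Rx descriptor is pre-loaded with a buffer below, so the
	 * software consumer index starts at the last slot of the ring.
	 */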
2121 	sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
2122 	rd = &sc->age_rdata;
2123 	memset(rd->age_rx_ring, 0, AGE_RX_RING_SZ);
2124 	for (i = 0; i < AGE_RX_RING_CNT; i++) {
2125 		rxd = &sc->age_cdata.age_rxdesc[i];
2126 		rxd->rx_m = NULL;
2127 		rxd->rx_desc = &rd->age_rx_ring[i];
2128 		if (age_newbuf(sc, rxd, 1) != 0)
2129 			return ENOBUFS;
2130 	}
2131 
2132 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
2133 	    sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2134 
2135 	return 0;
2136 }
2137 
2138 static void
2139 age_init_rr_ring(struct age_softc *sc)
2140 {
2141 	struct age_ring_data *rd;
2142 
2143 	sc->age_cdata.age_rr_cons = 0;
2144 	AGE_RXCHAIN_RESET(sc);
2145 
2146 	rd = &sc->age_rdata;
2147 	memset(rd->age_rr_ring, 0, AGE_RR_RING_SZ);
2148 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
2149 	    sc->age_cdata.age_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2150 }
2151 
2152 static void
2153 age_init_cmb_block(struct age_softc *sc)
2154 {
2155 	struct age_ring_data *rd;
2156 
2157 	rd = &sc->age_rdata;
2158 	memset(rd->age_cmb_block, 0, AGE_CMB_BLOCK_SZ);
2159 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
2160 	    sc->age_cdata.age_cmb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2161 }
2162 
2163 static void
2164 age_init_smb_block(struct age_softc *sc)
2165 {
2166 	struct age_ring_data *rd;
2167 
2168 	rd = &sc->age_rdata;
2169 	memset(rd->age_smb_block, 0, AGE_SMB_BLOCK_SZ);
2170 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2171 	    sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2172 }
2173 
2174 static int
2175 age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init)
2176 {
2177 	struct rx_desc *desc;
2178 	struct mbuf *m;
2179 	bus_dmamap_t map;
2180 	int error;
2181 
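	/* Get an mbuf header and attach a cluster to hold the Rx buffer. */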
2182 	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
2183 	if (m == NULL)
2184 		return ENOBUFS;
2185 	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
2186 	if ((m->m_flags & M_EXT) == 0) {
2187 		m_freem(m);
2188 		return ENOBUFS;
2189 	}
2190 
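	/*
	 * Use the whole cluster, then shift the start of the payload by
	 * ETHER_ALIGN bytes so the IP header is 32-bit aligned.
	 */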
2191 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2192 	m_adj(m, ETHER_ALIGN);
2193 
2194 	error = bus_dmamap_load_mbuf(sc->sc_dmat,
2195 	    sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);
2196 
2197 	/* The Rx descriptor can hold only a single DMA segment. */
2198 	if (error == 0 && sc->age_cdata.age_rx_sparemap->dm_nsegs != 1) {
2199 		bus_dmamap_unload(sc->sc_dmat,
2200 		    sc->age_cdata.age_rx_sparemap);
2201 		error = EFBIG;
2202 		printf("%s: too many segments?!\n",
2203 		    device_xname(sc->sc_dev));
2204 	}
2205 	if (error != 0) {
2206 		m_freem(m);
2207 		if (init)
2208 			printf("%s: can't load RX mbuf\n", device_xname(sc->sc_dev));
2209 		return error;
2210 	}
2211 
2212 	if (rxd->rx_m != NULL) {
2213 		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2214 		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2215 		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2216 	}
2217 	map = rxd->rx_dmamap;
2218 	rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
2219 	sc->age_cdata.age_rx_sparemap = map;
2220 	rxd->rx_m = m;
2221 
2222 	desc = rxd->rx_desc;
2223 	desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
2224 	desc->len =
2225 	    htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
2226 	    AGE_RD_LEN_SHIFT);
2227 
2228 	return 0;
2229 }
2230 
2231 static void
2232 age_rxvlan(struct age_softc *sc)
2233 {
2234 	uint32_t reg;
2235 
2236 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
2237 	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
2238 	if (sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2239 		reg |= MAC_CFG_VLAN_TAG_STRIP;
2240 	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2241 }
2242 
2243 static void
2244 age_rxfilter(struct age_softc *sc)
2245 {
2246 	struct ethercom *ec = &sc->sc_ec;
2247 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2248 	struct ether_multi *enm;
2249 	struct ether_multistep step;
2250 	uint32_t crc;
2251 	uint32_t mchash[2];
2252 	uint32_t rxcfg;
2253 
2254 	rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
2255 	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
2256 	ifp->if_flags &= ~IFF_ALLMULTI;
2257 
2258 	/*
2259 	 * Always accept broadcast frames.
2260 	 */
2261 	rxcfg |= MAC_CFG_BCAST;
2262 
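	/*
	 * Promiscuous mode, or membership in any multicast group,
	 * opens the multicast filter completely.
	 */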
2263 	if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) {
2264 		ifp->if_flags |= IFF_ALLMULTI;
2265 		if (ifp->if_flags & IFF_PROMISC)
2266 			rxcfg |= MAC_CFG_PROMISC;
2267 		else
2268 			rxcfg |= MAC_CFG_ALLMULTI;
2269 		mchash[0] = mchash[1] = 0xFFFFFFFF;
2270 	} else {
2271 		/* Program new filter. */
2272 		memset(mchash, 0, sizeof(mchash));
2273 
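		/*
		 * The top bit of the little-endian CRC selects the MAR
		 * register; bits 30-26 select the bit within it.
		 */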
2274 		ETHER_FIRST_MULTI(step, ec, enm);
2275 		while (enm != NULL) {
2276 			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
2277 			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2278 			ETHER_NEXT_MULTI(step, enm);
2279 		}
2280 	}
2281 
2282 	CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
2283 	CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
2284 	CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
2285 }
2286