/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/age/if_age.c,v 1.6 2008/11/07 07:02:28 yongari Exp $
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include "pcidevs.h"

#include <dev/netif/age/if_agereg.h>
#include <dev/netif/age/if_agevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define	AGE_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

struct age_dmamap_ctx {
	int			nsegs;
	bus_dma_segment_t	*segs;
};

static int	age_probe(device_t);
static int	age_attach(device_t);
static int	age_detach(device_t);
static int	age_shutdown(device_t);
static int	age_suspend(device_t);
static int	age_resume(device_t);

static int	age_miibus_readreg(device_t, int, int);
static int	age_miibus_writereg(device_t, int, int, int);
static void	age_miibus_statchg(device_t);

static void	age_init(void *);
static int	age_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	age_start(struct ifnet *, struct ifaltq_subque *);
static void	age_watchdog(struct ifnet *);
static void	age_mediastatus(struct ifnet *, struct ifmediareq *);
static int	age_mediachange(struct ifnet *);

static void	age_intr(void *);
static void	age_txintr(struct age_softc *, int);
static void	age_rxintr(struct age_softc *, int);
static void	age_rxeof(struct age_softc *sc, struct rx_rdesc *);

static int	age_dma_alloc(struct age_softc *);
static void	age_dma_free(struct age_softc *);
static void	age_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void	age_dmamap_buf_cb(void *, bus_dma_segment_t *, int,
		    bus_size_t, int);
static int	age_check_boundary(struct age_softc *);
static int	age_newbuf(struct age_softc *, struct age_rxdesc *, int);
static int	age_encap(struct age_softc *, struct mbuf **);
static void	age_init_tx_ring(struct age_softc *);
static int	age_init_rx_ring(struct age_softc *);
static void	age_init_rr_ring(struct age_softc *);
static void	age_init_cmb_block(struct age_softc *);
static void	age_init_smb_block(struct age_softc *);

static void	age_tick(void *);
static void	age_stop(struct age_softc *);
static void	age_reset(struct age_softc *);
static int	age_read_vpd_word(struct age_softc *, uint32_t, uint32_t,
		    uint32_t *);
static void	age_get_macaddr(struct age_softc *);
static void	age_phy_reset(struct age_softc *);
static void	age_mac_config(struct age_softc *);
static void	age_stats_update(struct age_softc *);
static void	age_stop_txmac(struct age_softc *);
static void	age_stop_rxmac(struct age_softc *);
static void	age_rxvlan(struct age_softc *);
static void	age_rxfilter(struct age_softc *);
#ifdef wol_notyet
static void age_setwol(struct age_softc *);
#endif

static void	age_sysctl_node(struct age_softc *);
static int	sysctl_age_stats(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS);

/*
 * Devices supported by this driver.
 */
static struct age_dev {
	uint16_t	age_vendorid;
	uint16_t	age_deviceid;
	const char	*age_name;
} age_devs[] = {
	{ VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L1,
	    "Attansic Technology Corp, L1 Gigabit Ethernet" },
};

static device_method_t age_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		age_probe),
	DEVMETHOD(device_attach,	age_attach),
	DEVMETHOD(device_detach,	age_detach),
	DEVMETHOD(device_shutdown,	age_shutdown),
	DEVMETHOD(device_suspend,	age_suspend),
	DEVMETHOD(device_resume,	age_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	age_miibus_readreg),
	DEVMETHOD(miibus_writereg,	age_miibus_writereg),
	DEVMETHOD(miibus_statchg,	age_miibus_statchg),

	{ NULL, NULL }
};

static driver_t age_driver = {
	"age",
	age_methods,
	sizeof(struct age_softc)
};

static devclass_t age_devclass;

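/*
 * Module glue: age(4) attaches below pci(4), and a miibus(4) instance
 * attaches below age(4) to manage the PHY.
 */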
DECLARE_DUMMY_MODULE(if_age);
MODULE_DEPEND(if_age, miibus, 1, 1, 1);
DRIVER_MODULE(if_age, pci, age_driver, age_devclass, NULL, NULL);
DRIVER_MODULE(miibus, age, miibus_driver, miibus_devclass, NULL, NULL);

/*
 *	Read a PHY register on the MII of the L1.
 */
static int
age_miibus_readreg(device_t dev, int phy, int reg)
{
	struct age_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);
	if (phy != sc->age_phyaddr)
		return (0);

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
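	/*
	 * The MDIO engine clears MDIO_OP_EXECUTE/MDIO_OP_BUSY once the
	 * transaction completes, so this is a bounded busy-wait of at
	 * most AGE_PHY_TIMEOUT microseconds.
	 */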
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->age_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

/*
 *	Write a PHY register on the MII of the L1.
 */
static int
age_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct age_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);
	if (phy != sc->age_phyaddr)
		return (0);

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->age_dev, "phy write timeout : %d\n", reg);

	return (0);
}

/*
 *	Callback from MII layer when media changes.
 */
static void
age_miibus_statchg(device_t dev)
{
	struct age_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->age_miibus);

	sc->age_flags &= ~AGE_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->age_flags |= AGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
		uint32_t reg;

		age_mac_config(sc);

		reg = CSR_READ_4(sc, AGE_MAC_CFG);
		/* Restart DMA engine and Tx/Rx MAC. */
		CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
		    DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
}

/*
 *	Get the current interface media status.
 */
static void
age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->age_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 *	Set hardware to newly-selected media.
 */
static int
age_mediachange(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->age_miibus);
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static int
age_read_vpd_word(struct age_softc *sc, uint32_t vpdc, uint32_t offset,
    uint32_t *word)
{
	int i;

	pci_write_config(sc->age_dev, vpdc + PCIR_VPD_ADDR, offset, 2);
	for (i = AGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((pci_read_config(sc->age_dev, vpdc + PCIR_VPD_ADDR, 2) &
		    0x8000) == 0x8000)
			break;
	}
	if (i == 0) {
		device_printf(sc->age_dev, "VPD read timeout!\n");
		*word = 0;
		return (ETIMEDOUT);
	}

	*word = pci_read_config(sc->age_dev, vpdc + PCIR_VPD_DATA, 4);
	return (0);
}

static int
age_probe(device_t dev)
{
	struct age_dev *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = age_devs;
	for (i = 0; i < NELEM(age_devs); i++, sp++) {
		if (vendor == sp->age_vendorid &&
		    devid == sp->age_deviceid) {
			device_set_desc(dev, sp->age_name);
			return (0);
		}
	}
	return (ENXIO);
}

static void
age_get_macaddr(struct age_softc *sc)
{
	uint32_t ea[2], off, reg, word;
	int vpd_error, match, vpdc;

	reg = CSR_READ_4(sc, AGE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		/* Get VPD stored in TWSI EEPROM. */
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
	}

	ea[0] = ea[1] = 0;
	vpdc = pci_get_vpdcap_ptr(sc->age_dev);
	if (vpdc) {
		vpd_error = 0;

		/*
		 * The PCI VPD capability exists, but it does not seem
		 * to be in the standard form described by the PCI VPD
		 * specification, so the driver cannot use
		 * pci_get_vpd_readonly(9) with the keyword 'NA'.
		 * Search the VPD data starting at address 0x0100.  The
		 * data should be used as initializers to set the
		 * AGE_PAR0 and AGE_PAR1 registers, as well as other PCI
		 * configuration registers.
		 */
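		/*
		 * Each record in that area is a pair of 32-bit words:
		 * the first carries the AGE_VPD_REG_CONF_SIG signature
		 * in its low byte and the target register offset in its
		 * upper 16 bits, and the second holds the value for
		 * that register.  The scan stops at the first word that
		 * matches neither pattern.
		 */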
		word = 0;
		match = 0;
		reg = 0;
		for (off = AGE_VPD_REG_CONF_START; off < AGE_VPD_REG_CONF_END;
		    off += sizeof(uint32_t)) {
			vpd_error = age_read_vpd_word(sc, vpdc, off, &word);
			if (vpd_error != 0)
				break;
			if (match != 0) {
				switch (reg) {
				case AGE_PAR0:
					ea[0] = word;
					break;
				case AGE_PAR1:
					ea[1] = word;
					break;
				default:
					break;
				}
				match = 0;
			} else if ((word & 0xFF) == AGE_VPD_REG_CONF_SIG) {
				match = 1;
				reg = word >> 16;
			} else
				break;
		}
		if (off >= AGE_VPD_REG_CONF_END)
			vpd_error = ENOENT;
		if (vpd_error == 0) {
			/*
			 * Don't blindly trust the ethernet address
			 * obtained from VPD; check whether it is a
			 * valid one.  Otherwise fall back to reading
			 * the PAR registers.
			 */
			ea[1] &= 0xFFFF;
			if ((ea[0] == 0 && ea[1] == 0) ||
			    (ea[0] == 0xFFFFFFFF && ea[1] == 0xFFFF)) {
				if (bootverbose)
					device_printf(sc->age_dev,
					    "invalid ethernet address "
					    "returned from VPD.\n");
				vpd_error = EINVAL;
			}
		}
		if (vpd_error != 0 && bootverbose)
			device_printf(sc->age_dev, "VPD access failure!\n");
	} else {
		vpd_error = ENOENT;
		if (bootverbose)
			device_printf(sc->age_dev,
			    "PCI VPD capability not found!\n");
	}

	/*
	 * It seems that the L1 also provides a way to extract the
	 * ethernet address via the SPI flash interface.  Because SPI
	 * flash memory devices from different vendors vary in the
	 * instruction codes they use for the read ID instruction, it
	 * is very hard to issue those instructions without detailed
	 * information on the flash memory device used on the ethernet
	 * controller.  To simplify the code, just read the
	 * AGE_PAR0/AGE_PAR1 registers to get the ethernet address,
	 * which is supposed to be set by hardware during power-on
	 * reset.
	 */
	if (vpd_error != 0) {
		/*
		 * VPD is mapped to SPI flash memory, or the BIOS
		 * set it.
		 */
		ea[0] = CSR_READ_4(sc, AGE_PAR0);
		ea[1] = CSR_READ_4(sc, AGE_PAR1);
	}

	ea[1] &= 0xFFFF;
	if ((ea[0] == 0 && ea[1] == 0) ||
	    (ea[0] == 0xFFFFFFFF && ea[1] == 0xFFFF)) {
		device_printf(sc->age_dev,
		    "generating fake ethernet address.\n");
		ea[0] = karc4random();
		/* Set OUI to ASUSTek COMPUTER INC. */
		sc->age_eaddr[0] = 0x00;
		sc->age_eaddr[1] = 0x1B;
		sc->age_eaddr[2] = 0xFC;
		sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF;
		sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF;
		sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF;
	} else {
		sc->age_eaddr[0] = (ea[1] >> 8) & 0xFF;
		sc->age_eaddr[1] = (ea[1] >> 0) & 0xFF;
		sc->age_eaddr[2] = (ea[0] >> 24) & 0xFF;
		sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF;
		sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF;
		sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF;
	}
}

static void
age_phy_reset(struct age_softc *sc)
{
	/* Reset PHY. */
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
	DELAY(1000);
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
	DELAY(1000);
}

static int
age_attach(device_t dev)
{
	struct age_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t pcie_ptr;
	int error;

	error = 0;
	sc->age_dev = dev;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->age_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, AGE_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, AGE_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate memory mapped IO
	 */
	sc->age_mem_rid = AGE_PCIR_BAR;
	sc->age_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						 &sc->age_mem_rid, RF_ACTIVE);
	if (sc->age_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->age_mem_bt = rman_get_bustag(sc->age_mem_res);
	sc->age_mem_bh = rman_get_bushandle(sc->age_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->age_irq_rid = 0;
	sc->age_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						 &sc->age_irq_rid,
						 RF_SHAREABLE | RF_ACTIVE);
	if (sc->age_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/* Set PHY address. */
	sc->age_phyaddr = AGE_PHY_ADDR;

	/* Reset PHY. */
	age_phy_reset(sc);

	/* Reset the ethernet controller. */
	age_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->age_rev = pci_get_revid(dev);
	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->age_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->age_chip_rev);
	}

	/*
	 * XXX
	 * Uninitialized hardware returns an invalid chip id/revision
	 * as well as 0xFFFFFFFF for the Tx/Rx FIFO length.  It seems
	 * that an unplugged cable puts the hardware into an automatic
	 * power down mode, which in turn returns an invalid chip
	 * revision.
	 */
	if (sc->age_chip_rev == 0xFFFF) {
		device_printf(dev, "invalid chip revision : 0x%04x -- "
		    "not initialized?\n", sc->age_chip_rev);
		error = ENXIO;
		goto fail;
	}
	device_printf(dev, "%d Tx FIFO, %d Rx FIFO\n",
	    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
	    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));

	/* Get DMA parameters from PCIe device control register. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr) {
		uint16_t devctl;

		sc->age_flags |= AGE_FLAG_PCIE;
		devctl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		/* Max read request size. */
		sc->age_dma_rd_burst = ((devctl >> 12) & 0x07) <<
		    DMA_CFG_RD_BURST_SHIFT;
		/* Max payload size. */
		sc->age_dma_wr_burst = ((devctl >> 5) & 0x07) <<
		    DMA_CFG_WR_BURST_SHIFT;
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((devctl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((devctl >> 5) & 0x07));
		}
	} else {
		sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
		sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;
	}

	/* Create device sysctl node. */
	age_sysctl_node(sc);

	if ((error = age_dma_alloc(sc)) != 0)
		goto fail;

	/* Load station address. */
	age_get_macaddr(sc);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = age_ioctl;
	ifp->if_start = age_start;
	ifp->if_init = age_init;
	ifp->if_watchdog = age_watchdog;
	ifq_set_maxlen(&ifp->if_snd, AGE_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_HWCSUM |
			       IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING;
	ifp->if_hwassist = AGE_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	if ((error = mii_phy_probe(dev, &sc->age_miibus, age_mediachange,
	    age_mediastatus)) != 0) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->age_eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->age_irq_res));

	error = bus_setup_intr(dev, sc->age_irq_res, INTR_MPSAFE, age_intr, sc,
			       &sc->age_irq_handle, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	age_detach(dev);
	return (error);
}

static int
age_detach(device_t dev)
{
	struct age_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		sc->age_flags |= AGE_FLAG_DETACH;
		age_stop(sc);
		bus_teardown_intr(dev, sc->age_irq_res, sc->age_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->age_miibus != NULL)
		device_delete_child(dev, sc->age_miibus);
	bus_generic_detach(dev);

	if (sc->age_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->age_irq_rid,
				     sc->age_irq_res);
	}
	if (sc->age_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->age_mem_rid,
				     sc->age_mem_res);
	}

	age_dma_free(sc);

	return (0);
}

static void
age_sysctl_node(struct age_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->age_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->age_dev);
	int error;

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_age_stats,
	    "I", "Statistics");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->age_int_mod, 0,
	    sysctl_hw_age_int_mod, "I", "age interrupt moderation");
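
	/*
	 * Both nodes hang off the device's sysctl tree, so they are
	 * typically reachable as e.g. dev.age.0.stats and
	 * dev.age.0.int_mod (assuming unit 0) via sysctl(8).
	 */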

	/* Pull in device tunables. */
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->age_dev),
	    device_get_unit(sc->age_dev), "int_mod", &sc->age_int_mod);
	if (error == 0) {
		if (sc->age_int_mod < AGE_IM_TIMER_MIN ||
		    sc->age_int_mod > AGE_IM_TIMER_MAX) {
			device_printf(sc->age_dev,
			    "int_mod value out of range; using default: %d\n",
			    AGE_IM_TIMER_DEFAULT);
			sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
		}
	}
}

struct age_dmamap_arg {
	bus_addr_t	age_busaddr;
};

static void
age_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct age_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct age_dmamap_arg *)arg;
	ctx->age_busaddr = segs[0].ds_addr;
}

/*
 * The Attansic L1 controller has a single register to specify the high
 * address part of DMA blocks, so all descriptor structures and DMA
 * memory blocks should share the same high part of a given 4GB address
 * space (i.e. crossing a 4GB boundary is not allowed).
 */
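/*
 * For example, a Tx ring loaded at physical address 0xFFFFF000 would
 * extend past the 4GB mark and need two different high 32-bit address
 * parts, which the single high-address register cannot express;
 * age_dma_alloc() falls back to 32-bit addressing when that happens.
 */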
static int
age_check_boundary(struct age_softc *sc)
{
	bus_addr_t rx_ring_end, rr_ring_end, tx_ring_end;
	bus_addr_t cmb_block_end, smb_block_end;

	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
	tx_ring_end = sc->age_rdata.age_tx_ring_paddr + AGE_TX_RING_SZ;
	rx_ring_end = sc->age_rdata.age_rx_ring_paddr + AGE_RX_RING_SZ;
	rr_ring_end = sc->age_rdata.age_rr_ring_paddr + AGE_RR_RING_SZ;
	cmb_block_end = sc->age_rdata.age_cmb_block_paddr + AGE_CMB_BLOCK_SZ;
	smb_block_end = sc->age_rdata.age_smb_block_paddr + AGE_SMB_BLOCK_SZ;

	if ((AGE_ADDR_HI(tx_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr)) ||
	    (AGE_ADDR_HI(rx_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_rx_ring_paddr)) ||
	    (AGE_ADDR_HI(rr_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_rr_ring_paddr)) ||
	    (AGE_ADDR_HI(cmb_block_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_cmb_block_paddr)) ||
	    (AGE_ADDR_HI(smb_block_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_smb_block_paddr)))
		return (EFBIG);

	if ((AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rx_ring_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rr_ring_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(cmb_block_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(smb_block_end)))
		return (EFBIG);

	return (0);
}

static int
age_dma_alloc(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	bus_addr_t lowaddr;
	struct age_dmamap_arg ctx;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;
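	/*
	 * First try the full 64-bit address space.  If any ring or DMA
	 * block turns out to cross a 4GB boundary (checked by
	 * age_check_boundary() after loading), everything is freed and
	 * we come back here with lowaddr clamped to
	 * BUS_SPACE_MAXADDR_32BIT.
	 */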
again:
	/* Create parent ring/DMA block tag. */
	error = bus_dma_tag_create(
	    NULL,			/* parent */
	    1, 0,			/* alignment, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->age_cdata.age_parent_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->age_cdata.age_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->age_cdata.age_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Rx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx return ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_RR_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_RR_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_RR_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->age_cdata.age_rr_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Rx return ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for coalescing message block. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_CMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_CMB_BLOCK_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_CMB_BLOCK_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->age_cdata.age_cmb_block_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create CMB DMA tag.\n");
		goto fail;
	}

	/* Create tag for statistics message block. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_SMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_SMB_BLOCK_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_SMB_BLOCK_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->age_cdata.age_smb_block_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create SMB DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map. */
	error = bus_dmamem_alloc(sc->age_cdata.age_tx_ring_tag,
	    (void **)&sc->age_rdata.age_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->age_cdata.age_tx_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_tx_ring_tag,
	    sc->age_cdata.age_tx_ring_map, sc->age_rdata.age_tx_ring,
	    AGE_TX_RING_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->age_rdata.age_tx_ring_paddr = ctx.age_busaddr;
	/* Rx ring */
	error = bus_dmamem_alloc(sc->age_cdata.age_rx_ring_tag,
	    (void **)&sc->age_rdata.age_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->age_cdata.age_rx_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_rx_ring_tag,
	    sc->age_cdata.age_rx_ring_map, sc->age_rdata.age_rx_ring,
	    AGE_RX_RING_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->age_rdata.age_rx_ring_paddr = ctx.age_busaddr;
	/* Rx return ring */
	error = bus_dmamem_alloc(sc->age_cdata.age_rr_ring_tag,
	    (void **)&sc->age_rdata.age_rr_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->age_cdata.age_rr_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_rr_ring_tag,
	    sc->age_cdata.age_rr_ring_map, sc->age_rdata.age_rr_ring,
	    AGE_RR_RING_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	sc->age_rdata.age_rr_ring_paddr = ctx.age_busaddr;
	/* CMB block */
	error = bus_dmamem_alloc(sc->age_cdata.age_cmb_block_tag,
	    (void **)&sc->age_rdata.age_cmb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->age_cdata.age_cmb_block_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for CMB block.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_cmb_block_tag,
	    sc->age_cdata.age_cmb_block_map, sc->age_rdata.age_cmb_block,
	    AGE_CMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for CMB block.\n");
		goto fail;
	}
	sc->age_rdata.age_cmb_block_paddr = ctx.age_busaddr;
	/* SMB block */
	error = bus_dmamem_alloc(sc->age_cdata.age_smb_block_tag,
	    (void **)&sc->age_rdata.age_smb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->age_cdata.age_smb_block_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for SMB block.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_smb_block_tag,
	    sc->age_cdata.age_smb_block_map, sc->age_rdata.age_smb_block,
	    AGE_SMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for SMB block.\n");
		goto fail;
	}
	sc->age_rdata.age_smb_block_paddr = ctx.age_busaddr;

	/*
	 * All ring buffers and DMA blocks should share the same high
	 * address part of the 64-bit DMA address space.
	 */
	if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
	    (error = age_check_boundary(sc)) != 0) {
		device_printf(sc->age_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA addressing mode.\n");
		age_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/*
	 * Create the Tx/Rx buffer parent tag.
	 * The L1 supports full 64-bit DMA addressing for Tx/Rx buffers,
	 * so they need a separate parent DMA tag.
	 */
	error = bus_dma_tag_create(
	    NULL,			/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->age_cdata.age_buffer_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_TSO_MAXSIZE,		/* maxsize */
	    AGE_MAXTXSEGS,		/* nsegments */
	    AGE_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    &sc->age_cdata.age_tx_tag);
	if (error != 0) {
		device_printf(sc->age_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    &sc->age_cdata.age_rx_tag);
	if (error != 0) {
		device_printf(sc->age_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->age_cdata.age_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->age_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
	    &sc->age_cdata.age_rx_sparemap)) != 0) {
		device_printf(sc->age_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->age_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}
fail:
	return (error);
}

static void
age_dma_free(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int i;

	/* Tx buffers */
	if (sc->age_cdata.age_tx_tag != NULL) {
		for (i = 0; i < AGE_TX_RING_CNT; i++) {
			txd = &sc->age_cdata.age_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->age_cdata.age_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->age_cdata.age_tx_tag);
		sc->age_cdata.age_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->age_cdata.age_rx_tag != NULL) {
		for (i = 0; i < AGE_RX_RING_CNT; i++) {
			rxd = &sc->age_cdata.age_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->age_cdata.age_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
			    sc->age_cdata.age_rx_sparemap);
			sc->age_cdata.age_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->age_cdata.age_rx_tag);
		sc->age_cdata.age_rx_tag = NULL;
	}
	/* Tx ring. */
	if (sc->age_cdata.age_tx_ring_tag != NULL) {
		if (sc->age_cdata.age_tx_ring_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_tx_ring_tag,
			    sc->age_cdata.age_tx_ring_map);
		if (sc->age_cdata.age_tx_ring_map != NULL &&
		    sc->age_rdata.age_tx_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_tx_ring_tag,
			    sc->age_rdata.age_tx_ring,
			    sc->age_cdata.age_tx_ring_map);
		sc->age_rdata.age_tx_ring = NULL;
		sc->age_cdata.age_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_tx_ring_tag);
		sc->age_cdata.age_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->age_cdata.age_rx_ring_tag != NULL) {
		if (sc->age_cdata.age_rx_ring_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_rx_ring_tag,
			    sc->age_cdata.age_rx_ring_map);
		if (sc->age_cdata.age_rx_ring_map != NULL &&
		    sc->age_rdata.age_rx_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_rx_ring_tag,
			    sc->age_rdata.age_rx_ring,
			    sc->age_cdata.age_rx_ring_map);
		sc->age_rdata.age_rx_ring = NULL;
		sc->age_cdata.age_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_rx_ring_tag);
		sc->age_cdata.age_rx_ring_tag = NULL;
	}
	/* Rx return ring. */
	if (sc->age_cdata.age_rr_ring_tag != NULL) {
		if (sc->age_cdata.age_rr_ring_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_rr_ring_tag,
			    sc->age_cdata.age_rr_ring_map);
		if (sc->age_cdata.age_rr_ring_map != NULL &&
		    sc->age_rdata.age_rr_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_rr_ring_tag,
			    sc->age_rdata.age_rr_ring,
			    sc->age_cdata.age_rr_ring_map);
		sc->age_rdata.age_rr_ring = NULL;
		sc->age_cdata.age_rr_ring_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_rr_ring_tag);
		sc->age_cdata.age_rr_ring_tag = NULL;
	}
	/* CMB block */
	if (sc->age_cdata.age_cmb_block_tag != NULL) {
		if (sc->age_cdata.age_cmb_block_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_cmb_block_tag,
			    sc->age_cdata.age_cmb_block_map);
		if (sc->age_cdata.age_cmb_block_map != NULL &&
		    sc->age_rdata.age_cmb_block != NULL)
			bus_dmamem_free(sc->age_cdata.age_cmb_block_tag,
			    sc->age_rdata.age_cmb_block,
			    sc->age_cdata.age_cmb_block_map);
		sc->age_rdata.age_cmb_block = NULL;
		sc->age_cdata.age_cmb_block_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_cmb_block_tag);
		sc->age_cdata.age_cmb_block_tag = NULL;
	}
	/* SMB block */
	if (sc->age_cdata.age_smb_block_tag != NULL) {
		if (sc->age_cdata.age_smb_block_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_smb_block_tag,
			    sc->age_cdata.age_smb_block_map);
		if (sc->age_cdata.age_smb_block_map != NULL &&
		    sc->age_rdata.age_smb_block != NULL)
			bus_dmamem_free(sc->age_cdata.age_smb_block_tag,
			    sc->age_rdata.age_smb_block,
			    sc->age_cdata.age_smb_block_map);
		sc->age_rdata.age_smb_block = NULL;
		sc->age_cdata.age_smb_block_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_smb_block_tag);
		sc->age_cdata.age_smb_block_tag = NULL;
	}

	if (sc->age_cdata.age_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->age_cdata.age_buffer_tag);
		sc->age_cdata.age_buffer_tag = NULL;
	}
	if (sc->age_cdata.age_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->age_cdata.age_parent_tag);
		sc->age_cdata.age_parent_tag = NULL;
	}
}

/*
 *	Make sure the interface is stopped at reboot time.
 */
static int
age_shutdown(device_t dev)
{
	return age_suspend(dev);
}

#ifdef wol_notyet

static void
age_setwol(struct age_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint32_t reg, pmcs;
	uint16_t pmstat;
	int aneg, i, pmc;

	AGE_LOCK_ASSERT(sc);

	if (pci_find_extcap(sc->age_dev, PCIY_PMG, &pmc) == 0) {
		CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
		/*
		 * No PME capability, PHY power down.
		 * XXX
		 * For an unknown reason, powering down the PHY resulted
		 * in unexpected behavior such as inaccessibility of the
		 * hardware on a freshly rebooted system.  Disable
		 * powering down the PHY until I get more information on
		 * Attansic/Atheros PHY hardware.
		 */
#ifdef notyet
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
#endif
		return;
	}

	ifp = sc->age_ifp;
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		/*
		 * Note, this driver resets the link speed to 10/100Mbps
		 * with auto-negotiation, but we don't know whether that
		 * operation will succeed, as we have no control after
		 * powering off.  If the renegotiation fails, WOL may
		 * not work.  Running at 1Gbps would draw more power
		 * than the 375mA at 3.3V specified in the PCI
		 * specification, and that would result in power to the
		 * ethernet controller being shut down completely.
		 *
		 * TODO
		 *  Save the currently negotiated media
		 *  speed/duplex/flow-control to the softc and restore
		 *  the same link again after resuming.  PHY handling
		 *  such as powering down or resetting to 100Mbps may
		 *  be better handled in the suspend method of the phy
		 *  driver.
		 */
		mii = device_get_softc(sc->age_miibus);
		mii_pollstat(mii);
		aneg = 0;
		if ((mii->mii_media_status & IFM_AVALID) != 0) {
			switch (IFM_SUBTYPE(mii->mii_media_active)) {
			case IFM_10_T:
			case IFM_100_TX:
				goto got_link;
			case IFM_1000_T:
				aneg++;
				/* FALLTHROUGH */
			default:
				break;
			}
		}
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_100T2CR, 0);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD |
		    ANAR_10 | ANAR_CSMA);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
		DELAY(1000);
		if (aneg != 0) {
			/* Poll the link state until age(4) gets a 10/100 link. */
			for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
				mii_pollstat(mii);
				if ((mii->mii_media_status & IFM_AVALID) != 0) {
					switch (IFM_SUBTYPE(
					    mii->mii_media_active)) {
					case IFM_10_T:
					case IFM_100_TX:
						age_mac_config(sc);
						goto got_link;
					default:
						break;
					}
				}
				AGE_UNLOCK(sc);
				pause("agelnk", hz);
				AGE_LOCK(sc);
			}
			if (i == MII_ANEGTICKS_GIGE)
				device_printf(sc->age_dev,
				    "establishing link failed, "
				    "WOL may not work!");
		}
		/*
		 * No link; force the MAC to a 100Mbps, full-duplex link.
		 * This is the last resort and may or may not work.
		 */
		mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
		mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
		age_mac_config(sc);
	}

got_link:
	pmcs = 0;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
	CSR_WRITE_4(sc, AGE_WOL_CFG, pmcs);
	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC);
	reg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST);
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		reg |= MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}

	/* Request PME. */
	pmstat = pci_read_config(sc->age_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->age_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
#ifdef notyet
	/* See above for powering down PHY issues. */
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
#endif
}

#endif	/* wol_notyet */

static int
age_suspend(device_t dev)
{
	struct age_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	age_stop(sc);
#ifdef wol_notyet
	age_setwol(sc);
#endif
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

static int
age_resume(device_t dev)
{
	struct age_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t cmd;

	lwkt_serialize_enter(ifp->if_serializer);

	/*
	 * Clear the INTx emulation disable bit, which some hardware
	 * sets across a resume event.  From Linux.
	 */
	cmd = pci_read_config(sc->age_dev, PCIR_COMMAND, 2);
	if ((cmd & 0x0400) != 0) {
		cmd &= ~0x0400;
		pci_write_config(sc->age_dev, PCIR_COMMAND, cmd, 2);
	}
	if ((ifp->if_flags & IFF_UP) != 0)
		age_init(sc);

	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

static int
age_encap(struct age_softc *sc, struct mbuf **m_head)
{
	struct age_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	struct age_dmamap_ctx ctx;
	bus_dma_segment_t txsegs[AGE_MAXTXSEGS];
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, i, nsegs, prod;

	M_ASSERTPKTHDR((*m_head));

	m = *m_head;
	cflags = vtag = 0;
	poff = 0;

	prod = sc->age_cdata.age_tx_prod;
	txd = &sc->age_cdata.age_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	ctx.nsegs = AGE_MAXTXSEGS;
	ctx.segs = txsegs;
	error = bus_dmamap_load_mbuf(sc->age_cdata.age_tx_tag, map,
				     *m_head, age_dmamap_buf_cb, &ctx,
				     BUS_DMA_NOWAIT);
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(sc->age_cdata.age_tx_tag, map);
		error = EFBIG;
	}
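	/*
	 * EFBIG means the mbuf chain needed more than AGE_MAXTXSEGS
	 * segments; linearize it with m_defrag() and retry the load
	 * once.  If it still does not fit, the packet is dropped.
	 */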
	if (error == EFBIG) {
		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;

		ctx.nsegs = AGE_MAXTXSEGS;
		ctx.segs = txsegs;
		error = bus_dmamap_load_mbuf(sc->age_cdata.age_tx_tag, map,
					     *m_head, age_dmamap_buf_cb, &ctx,
					     BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			if (!error) {
				bus_dmamap_unload(sc->age_cdata.age_tx_tag,
						  map);
				error = EFBIG;
			}
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0) {
		return (error);
	}
	nsegs = ctx.nsegs;

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check descriptor overrun. */
	if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
		bus_dmamap_unload(sc->age_cdata.age_tx_tag, map);
		return (ENOBUFS);
	}

	m = *m_head;
	/* Configure Tx IP/TCP/UDP checksum offload. */
	if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
		cflags |= AGE_TD_CSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			cflags |= AGE_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			cflags |= AGE_TD_UDPCSUM;
		/* Set checksum start offset. */
		cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
		/* Set checksum insertion position of TCP/UDP. */
		cflags |= ((poff + m->m_pkthdr.csum_data) <<
		    AGE_TD_CSUM_XSUMOFFSET_SHIFT);
	}

	/* Configure VLAN hardware tag insertion. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		vtag = AGE_TX_VLAN_TAG(m->m_pkthdr.ether_vlantag);
		vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
		cflags |= AGE_TD_INSERT_VLAN_TAG;
	}

	desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->age_rdata.age_tx_ring[prod];
		desc->addr = htole64(txsegs[i].ds_addr);
		desc->len = htole32(AGE_TX_BYTES(txsegs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->age_cdata.age_tx_cnt++;
		AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->age_cdata.age_tx_prod = prod;

	/* Set EOP on the last descriptor. */
	prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT;
	desc = &sc->age_rdata.age_tx_ring[prod];
	desc->flags |= htole32(AGE_TD_EOP);

	/*
	 * Swap the dmamaps of the first and the last descriptor.  The
	 * map that was loaded above belongs to the first slot, but the
	 * mbuf is recorded on the last; swapping keeps the loaded map
	 * with the descriptor that owns the mbuf.
	 */
	txd = &sc->age_cdata.age_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->age_cdata.age_tx_tag, map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
	    sc->age_cdata.age_tx_ring_map, BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
age_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct age_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	enq = 0;
	while (!ifq_is_empty(&ifp->if_snd)) {
		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (age_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			ifq_prepend(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		enq = 1;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq) {
		/* Update mbox. */
		AGE_COMMIT_MBOX(sc);
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = AGE_TX_TIMEOUT;
	}
}

static void
age_watchdog(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		IFNET_STAT_INC(ifp, oerrors, 1);
		age_init(sc);
		return;
	}

	if (sc->age_cdata.age_tx_cnt == 0) {
		if_printf(ifp,
		    "watchdog timeout (missed Tx interrupts) -- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	IFNET_STAT_INC(ifp, oerrors, 1);
	age_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static int
age_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct age_softc *sc = ifp->if_softc;
	struct ifreq *ifr;
	struct mii_data *mii;
	uint32_t reg;
	int error, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > AGE_JUMBO_MTU) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				age_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_flags & IFF_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->age_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					age_rxfilter(sc);
			} else {
				if ((sc->age_flags & AGE_FLAG_DETACH) == 0)
					age_init(sc);
			}
		} else {
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				age_stop(sc);
		}
		sc->age_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			age_rxfilter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->age_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= AGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~AGE_CSUM_FEATURES;
		}

		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, AGE_MAC_CFG);
			reg &= ~MAC_CFG_RXCSUM_ENB;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				reg |= MAC_CFG_RXCSUM_ENB;
			CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
		}

		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			age_rxvlan(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
age_mac_config(struct age_softc *sc)
{
	struct mii_data *mii = device_get_softc(sc->age_miibus);
	uint32_t reg;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_FULL_DUPLEX;
	reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
	reg &= ~MAC_CFG_SPEED_MASK;

	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
#endif
	}
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

static void
age_stats_update(struct age_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct age_stats *stat;
	struct smb *smb;

	stat = &sc->age_stat;

	bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
	    sc->age_cdata.age_smb_block_map, BUS_DMASYNC_POSTREAD);

	smb = sc->age_rdata.age_smb_block;
	if (smb->updated == 0)
		return;

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_desc_oflows += smb->rx_desc_oflows;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
1837 	/* Update counters in ifnet. */
1838 	IFNET_STAT_INC(ifp, opackets, smb->tx_frames);
1839 
1840 	IFNET_STAT_INC(ifp, collisions, smb->tx_single_colls +
1841 	    smb->tx_multi_colls + smb->tx_late_colls +
1842 	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT);
1843 
1844 	IFNET_STAT_INC(ifp, oerrors, smb->tx_excess_colls +
1845 	    smb->tx_late_colls + smb->tx_underrun +
1846 	    smb->tx_pkts_truncated);
1847 
1848 	IFNET_STAT_INC(ifp, ipackets, smb->rx_frames);
1849 
1850 	IFNET_STAT_INC(ifp, ierrors, smb->rx_crcerrs + smb->rx_lenerrs +
1851 	    smb->rx_runts + smb->rx_pkts_truncated +
1852 	    smb->rx_fifo_oflows + smb->rx_desc_oflows +
1853 	    smb->rx_alignerrs);
1854 
1855 	/* Update done, clear. */
1856 	smb->updated = 0;
1857 
1858 	bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
1859 	    sc->age_cdata.age_smb_block_map, BUS_DMASYNC_PREWRITE);
1860 }
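
/*
 * Editor's note: the SMB consumption above follows the usual busdma
 * bracketing for a block the device writes and the CPU hands back.
 * A minimal sketch of the pattern (tag/map/field names as used above):
 *
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
 *	if (smb->updated) {			(device wrote new stats)
 *		... accumulate counters ...
 *		smb->updated = 0;		(hand the block back)
 *		bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
 *	}
 *
 * POSTREAD makes the device's DMA writes visible to the CPU before the
 * counters are read; PREWRITE flushes the CPU's clearing of the
 * `updated' flag so the device observes it before the next update.
 */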
1861 
1862 static void
1863 age_intr(void *xsc)
1864 {
1865 	struct age_softc *sc = xsc;
1866 	struct ifnet *ifp = &sc->arpcom.ac_if;
1867 	struct cmb *cmb;
1868 	uint32_t status;
1869 
1870 	ASSERT_SERIALIZED(ifp->if_serializer);
1871 
1872 	status = CSR_READ_4(sc, AGE_INTR_STATUS);
1873 	if (status == 0 || (status & AGE_INTRS) == 0)
1874 		return;
1875 
1876 	/* Disable and acknowledge interrupts. */
1877 	CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);
1878 
1879 	cmb = sc->age_rdata.age_cmb_block;
1880 
1881 	bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
1882 	    sc->age_cdata.age_cmb_block_map, BUS_DMASYNC_POSTREAD);
1883 	status = le32toh(cmb->intr_status);
1884 	if ((status & AGE_INTRS) == 0)
1885 		goto done;
1886 again:
1887 	sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
1888 	    TPD_CONS_SHIFT;
1889 	sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
1890 	    RRD_PROD_SHIFT;
1891 
1892 	/* Let hardware know CMB was served. */
1893 	cmb->intr_status = 0;
1894 	bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
1895 	    sc->age_cdata.age_cmb_block_map, BUS_DMASYNC_PREWRITE);
1896 
1897 #if 0
1898 	kprintf("INTR: 0x%08x\n", status);
1899 	status &= ~INTR_DIS_DMA;
1900 	CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);
1901 #endif
1902 
1903 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
1904 		if ((status & INTR_CMB_RX) != 0)
1905 			age_rxintr(sc, sc->age_rr_prod);
1906 
1907 		if ((status & INTR_CMB_TX) != 0)
1908 			age_txintr(sc, sc->age_tpd_cons);
1909 
1910 		if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) {
1911 			if ((status & INTR_DMA_RD_TO_RST) != 0)
1912 				device_printf(sc->age_dev,
1913 				    "DMA read error! -- resetting\n");
1914 			if ((status & INTR_DMA_WR_TO_RST) != 0)
1915 				device_printf(sc->age_dev,
1916 				    "DMA write error! -- resetting\n");
1917 			age_init(sc);
1918 			/* XXX return? */
1919 		}
1920 
1921 		if (!ifq_is_empty(&ifp->if_snd))
1922 			if_devstart(ifp);
1923 
1924 		if ((status & INTR_SMB) != 0)
1925 			age_stats_update(sc);
1926 	}
1927 
1928 	/* Check whether CMB was updated while serving Tx/Rx/SMB handler. */
1929 	bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
1930 	    sc->age_cdata.age_cmb_block_map, BUS_DMASYNC_POSTREAD);
1931 	status = le32toh(cmb->intr_status);
1932 	if ((status & AGE_INTRS) != 0)
1933 		goto again;
1934 done:
1935 	/* Re-enable interrupts. */
1936 	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
1937 }
1938 
1939 static void
1940 age_txintr(struct age_softc *sc, int tpd_cons)
1941 {
1942 	struct ifnet *ifp = &sc->arpcom.ac_if;
1943 	struct age_txdesc *txd;
1944 	int cons, prog;
1945 
1946 	bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
1947 	    sc->age_cdata.age_tx_ring_map, BUS_DMASYNC_POSTREAD);
1948 
1949 	/*
1950 	 * Go through our Tx list and free mbufs for those
1951 	 * frames which have been transmitted.
1952 	 */
1953 	cons = sc->age_cdata.age_tx_cons;
1954 	for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
1955 		if (sc->age_cdata.age_tx_cnt <= 0)
1956 			break;
1957 		prog++;
1958 		ifq_clr_oactive(&ifp->if_snd);
1959 		sc->age_cdata.age_tx_cnt--;
1960 		txd = &sc->age_cdata.age_txdesc[cons];
1961 		/*
1962 		 * Clear the Tx descriptors; not strictly required, but
1963 		 * it helps debugging in case of Tx issues.
1964 		 */
1965 		txd->tx_desc->addr = 0;
1966 		txd->tx_desc->len = 0;
1967 		txd->tx_desc->flags = 0;
1968 
1969 		if (txd->tx_m == NULL)
1970 			continue;
1971 		/* Reclaim transmitted mbufs. */
1972 		bus_dmamap_unload(sc->age_cdata.age_tx_tag, txd->tx_dmamap);
1973 		m_freem(txd->tx_m);
1974 		txd->tx_m = NULL;
1975 	}
1976 
1977 	if (prog > 0) {
1978 		sc->age_cdata.age_tx_cons = cons;
1979 
1980 		/*
1981 		 * Unarm watchdog timer only when there are no pending
1982 		 * Tx descriptors in queue.
1983 		 */
1984 		if (sc->age_cdata.age_tx_cnt == 0)
1985 			ifp->if_timer = 0;
1986 		bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
1987 		    sc->age_cdata.age_tx_ring_map, BUS_DMASYNC_PREWRITE);
1988 	}
1989 }
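
/*
 * Editor's note: the consumer walk above relies on AGE_DESC_INC() for
 * the ring wrap-around.  Assuming the usual modular-increment
 * definition from if_agereg.h, i.e. something equivalent to
 *
 *	#define AGE_DESC_INC(x, y)	((x) = ((x) + 1) % (y))
 *
 * a consumer at index AGE_TX_RING_CNT - 1 advances back to 0, so the
 * loop terminates exactly when `cons' catches up with the hardware's
 * `tpd_cons' snapshot taken from the CMB, regardless of wrap.
 */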
1990 
1991 /* Receive a frame. */
1992 static void
1993 age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
1994 {
1995 	struct ifnet *ifp = &sc->arpcom.ac_if;
1996 	struct age_rxdesc *rxd;
1997 	struct rx_desc *desc;
1998 	struct mbuf *mp, *m;
1999 	uint32_t status, index, vtag;
2000 	int count, nsegs, pktlen;
2001 	int rx_cons;
2002 
2003 	status = le32toh(rxrd->flags);
2004 	index = le32toh(rxrd->index);
2005 	rx_cons = AGE_RX_CONS(index);
2006 	nsegs = AGE_RX_NSEGS(index);
2007 
2008 	sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len));
2009 	if ((status & AGE_RRD_ERROR) != 0 &&
2010 	    (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
2011 	    AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) {
2012 		/*
2013 		 * We want to pass the following frames to the upper
2014 		 * layer regardless of the error status of the Rx
2015 		 * return ring:
2016 		 *
2017 		 *  o IP/TCP/UDP checksum is bad.
2018 		 *  o frame length and protocol specific length
2019 		 *     do not match.
2020 		 */
2021 		sc->age_cdata.age_rx_cons += nsegs;
2022 		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
2023 		return;
2024 	}
2025 
2026 	pktlen = 0;
2027 	for (count = 0; count < nsegs; count++,
2028 	    AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
2029 		rxd = &sc->age_cdata.age_rxdesc[rx_cons];
2030 		mp = rxd->rx_m;
2031 		desc = rxd->rx_desc;
2032 		/* Add a new receive buffer to the ring. */
2033 		if (age_newbuf(sc, rxd, 0) != 0) {
2034 			IFNET_STAT_INC(ifp, iqdrops, 1);
2035 			/* Reuse Rx buffers. */
2036 			if (sc->age_cdata.age_rxhead != NULL) {
2037 				m_freem(sc->age_cdata.age_rxhead);
2038 				AGE_RXCHAIN_RESET(sc);
2039 			}
2040 			break;
2041 		}
2042 
2043 		/* The length of the first mbuf is computed last. */
2044 		if (count != 0) {
2045 			mp->m_len = AGE_RX_BYTES(le32toh(desc->len));
2046 			pktlen += mp->m_len;
2047 		}
2048 
2049 		/* Chain received mbufs. */
2050 		if (sc->age_cdata.age_rxhead == NULL) {
2051 			sc->age_cdata.age_rxhead = mp;
2052 			sc->age_cdata.age_rxtail = mp;
2053 		} else {
2054 			mp->m_flags &= ~M_PKTHDR;
2055 			sc->age_cdata.age_rxprev_tail =
2056 			    sc->age_cdata.age_rxtail;
2057 			sc->age_cdata.age_rxtail->m_next = mp;
2058 			sc->age_cdata.age_rxtail = mp;
2059 		}
2060 
2061 		if (count == nsegs - 1) {
2062 			/*
2063 			 * It seems that the L1 controller has no way
2064 			 * to tell the hardware to strip the CRC bytes.
2065 			 */
2066 			sc->age_cdata.age_rxlen -= ETHER_CRC_LEN;
2067 			if (nsegs > 1) {
2068 				/* Remove the CRC bytes in chained mbufs. */
2069 				pktlen -= ETHER_CRC_LEN;
2070 				if (mp->m_len <= ETHER_CRC_LEN) {
2071 					sc->age_cdata.age_rxtail =
2072 					    sc->age_cdata.age_rxprev_tail;
2073 					sc->age_cdata.age_rxtail->m_len -=
2074 					    (ETHER_CRC_LEN - mp->m_len);
2075 					sc->age_cdata.age_rxtail->m_next = NULL;
2076 					m_freem(mp);
2077 				} else {
2078 					mp->m_len -= ETHER_CRC_LEN;
2079 				}
2080 			}
2081 
2082 			m = sc->age_cdata.age_rxhead;
2083 			m->m_flags |= M_PKTHDR;
2084 			m->m_pkthdr.rcvif = ifp;
2085 			m->m_pkthdr.len = sc->age_cdata.age_rxlen;
2086 			/* Set the first mbuf length. */
2087 			m->m_len = sc->age_cdata.age_rxlen - pktlen;
2088 
2089 			/*
2090 			 * Set checksum information.
2091 			 * It seems that the L1 controller can compute a
2092 			 * partial checksum. The partial checksum value can
2093 			 * be used to accelerate checksum computation for
2094 			 * fragmented TCP/UDP packets, and the upper network
2095 			 * stack already takes advantage of it in the IP
2096 			 * reassembly stage. But the correctness of the
2097 			 * partial hardware checksum assistance is unverified
2098 			 * for lack of a data sheet. If it is proven to work
2099 			 * on the L1 I'll enable it.
2100 			 */
2101 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
2102 			    (status & AGE_RRD_IPV4) != 0) {
2103 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2104 				if ((status & AGE_RRD_IPCSUM_NOK) == 0)
2105 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2106 				if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
2107 				    (status & AGE_RRD_TCP_UDPCSUM_NOK) == 0) {
2108 					m->m_pkthdr.csum_flags |=
2109 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2110 					m->m_pkthdr.csum_data = 0xffff;
2111 				}
2112 				/*
2113 				 * Don't mark a bad checksum for TCP/UDP frames,
2114 				 * as fragmented frames may always have the bad-
2115 				 * checksum bit set in the descriptor status.
2116 				 */
2117 			}
2118 
2119 			/* Check for VLAN tagged frames. */
2120 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
2121 			    (status & AGE_RRD_VLAN) != 0) {
2122 				vtag = AGE_RX_VLAN(le32toh(rxrd->vtags));
2123 				m->m_pkthdr.ether_vlantag =
2124 				    AGE_RX_VLAN_TAG(vtag);
2125 				m->m_flags |= M_VLANTAG;
2126 			}
2127 
2128 			/* Pass it on. */
2129 			ifp->if_input(ifp, m, NULL, -1);
2130 
2131 			/* Reset mbuf chains. */
2132 			AGE_RXCHAIN_RESET(sc);
2133 		}
2134 	}
2135 
2136 	if (count != nsegs) {
2137 		sc->age_cdata.age_rx_cons += nsegs;
2138 		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
2139 	} else {
2140 		sc->age_cdata.age_rx_cons = rx_cons;
2141 	}
2142 }
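
/*
 * Editor's worked example (hypothetical numbers): suppose a frame of
 * age_rxlen = 2050 bytes (CRC included) arrives in nsegs = 2 clusters
 * of MCLBYTES - ETHER_ALIGN = 2046 usable bytes each.  The last mbuf
 * then carries only 4 bytes -- exactly ETHER_CRC_LEN -- so the
 * mp->m_len <= ETHER_CRC_LEN path above drops the trailing mbuf
 * entirely and trims ETHER_CRC_LEN - mp->m_len = 0 bytes off the
 * previous tail.  Had the last mbuf carried, say, 10 bytes, the else
 * branch would simply shorten it to 6.  Either way the first mbuf's
 * length falls out as age_rxlen - pktlen once the tail lengths are
 * known, which is why it is computed last.
 */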
2143 
2144 static void
2145 age_rxintr(struct age_softc *sc, int rr_prod)
2146 {
2147 	struct rx_rdesc *rxrd;
2148 	int rr_cons, nsegs, pktlen, prog;
2149 
2150 	rr_cons = sc->age_cdata.age_rr_cons;
2151 	if (rr_cons == rr_prod)
2152 		return;
2153 
2154 	bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
2155 	    sc->age_cdata.age_rr_ring_map, BUS_DMASYNC_POSTREAD);
2156 
2157 	for (prog = 0; rr_cons != rr_prod; prog++) {
2158 		rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
2159 		nsegs = AGE_RX_NSEGS(le32toh(rxrd->index));
2160 		if (nsegs == 0)
2161 			break;
2162 
2163 		/*
2164 		 * Check the number of segments against the received bytes.
2165 		 * A non-matching value would indicate that the hardware
2166 		 * is still trying to update the Rx return descriptors.
2167 		 * I'm not sure whether this check is really needed.
2168 		 */
2169 		pktlen = AGE_RX_BYTES(le32toh(rxrd->len));
2170 		if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) /
2171 		    (MCLBYTES - ETHER_ALIGN)))
2172 			break;
2173 
2174 		/* Received a frame. */
2175 		age_rxeof(sc, rxrd);
2176 
2177 		/* Clear return ring. */
2178 		rxrd->index = 0;
2179 		AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
2180 	}
2181 
2182 	if (prog > 0) {
2183 		/* Update the consumer index. */
2184 		sc->age_cdata.age_rr_cons = rr_cons;
2185 
2186 		/* Sync descriptors. */
2187 		bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
2188 		    sc->age_cdata.age_rr_ring_map, BUS_DMASYNC_PREWRITE);
2189 
2190 		/* Notify hardware availability of new Rx buffers. */
2191 		AGE_COMMIT_MBOX(sc);
2192 	}
2193 }
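
/*
 * Editor's note on the segment-count sanity check above: it is a
 * ceiling division, nsegs == howmany(pktlen, MCLBYTES - ETHER_ALIGN),
 * written out by hand.  With MCLBYTES = 2048 and ETHER_ALIGN = 2 each
 * Rx buffer holds 2046 bytes, so for example a 3000-byte frame must
 * occupy (3000 + 2045) / 2046 = 2 segments; seeing any other value in
 * the return descriptor means the hardware has not finished writing
 * it, and the loop bails out until the next interrupt.
 */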
2194 
2195 static void
2196 age_tick(void *xsc)
2197 {
2198 	struct age_softc *sc = xsc;
2199 	struct ifnet *ifp = &sc->arpcom.ac_if;
2200 	struct mii_data *mii = device_get_softc(sc->age_miibus);
2201 
2202 	lwkt_serialize_enter(ifp->if_serializer);
2203 
2204 	mii_tick(mii);
2205 	callout_reset(&sc->age_tick_ch, hz, age_tick, sc);
2206 
2207 	lwkt_serialize_exit(ifp->if_serializer);
2208 }
2209 
2210 static void
2211 age_reset(struct age_softc *sc)
2212 {
2213 	uint32_t reg;
2214 	int i;
2215 
2216 	CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
2217 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2218 		DELAY(1);
2219 		if ((CSR_READ_4(sc, AGE_MASTER_CFG) & MASTER_RESET) == 0)
2220 			break;
2221 	}
2222 	if (i == 0)
2223 		device_printf(sc->age_dev, "master reset timeout!\n");
2224 
2225 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2226 		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
2227 			break;
2228 		DELAY(10);
2229 	}
2230 	if (i == 0)
2231 		device_printf(sc->age_dev, "reset timeout (0x%08x)!\n", reg);
2232 
2233 	/* Initialize PCIe module. From Linux. */
2234 	CSR_WRITE_4(sc, 0x12FC, 0x6500);
2235 	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
2236 }
2237 
2238 static void
2239 age_init(void *xsc)
2240 {
2241 	struct age_softc *sc = xsc;
2242 	struct ifnet *ifp = &sc->arpcom.ac_if;
2243 	struct mii_data *mii;
2244 	uint8_t eaddr[ETHER_ADDR_LEN];
2245 	bus_addr_t paddr;
2246 	uint32_t reg, fsize;
2247 	uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
2248 	int error;
2249 
2250 	ASSERT_SERIALIZED(ifp->if_serializer);
2251 
2252 	mii = device_get_softc(sc->age_miibus);
2253 
2254 	/*
2255 	 * Cancel any pending I/O.
2256 	 */
2257 	age_stop(sc);
2258 
2259 	/*
2260 	 * Reset the chip to a known state.
2261 	 */
2262 	age_reset(sc);
2263 
2264 	/* Initialize descriptors. */
2265 	error = age_init_rx_ring(sc);
2266 	if (error != 0) {
2267 		device_printf(sc->age_dev, "no memory for Rx buffers.\n");
2268 		age_stop(sc);
2269 		return;
2270 	}
2271 	age_init_rr_ring(sc);
2272 	age_init_tx_ring(sc);
2273 	age_init_cmb_block(sc);
2274 	age_init_smb_block(sc);
2275 
2276 	/* Reprogram the station address. */
2277 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2278 	CSR_WRITE_4(sc, AGE_PAR0,
2279 	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2280 	CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);
2281 
2282 	/* Set descriptor base addresses. */
2283 	paddr = sc->age_rdata.age_tx_ring_paddr;
2284 	CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
2285 	paddr = sc->age_rdata.age_rx_ring_paddr;
2286 	CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
2287 	paddr = sc->age_rdata.age_rr_ring_paddr;
2288 	CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
2289 	paddr = sc->age_rdata.age_tx_ring_paddr;
2290 	CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
2291 	paddr = sc->age_rdata.age_cmb_block_paddr;
2292 	CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
2293 	paddr = sc->age_rdata.age_smb_block_paddr;
2294 	CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));
2295 
2296 	/* Set Rx/Rx return descriptor counter. */
2297 	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
2298 	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
2299 	    DESC_RRD_CNT_MASK) |
2300 	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));
2301 
2302 	/* Set Tx descriptor counter. */
2303 	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
2304 	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);
2305 
2306 	/* Tell hardware that we're ready to load descriptors. */
2307 	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);
2308 
2309 	/*
2310 	 * Initialize mailbox register.
2311 	 * Updated producer/consumer index information is exchanged
2312 	 * through this mailbox register. However the Tx producer and
2313 	 * the Rx return consumer/Rx producer all share it, so it is
2314 	 * hard to separate the Tx and Rx code paths without locking.
2315 	 * If the L1 hardware had separate mailbox registers for Tx
2316 	 * and Rx consumer/producer management we could have
2317 	 * independent Tx/Rx handlers, and in turn the Rx handler
2318 	 * could have run without any locking.
2319 	 */
2320 	AGE_COMMIT_MBOX(sc);
2321 
2322 	/* Configure IPG/IFG parameters. */
2323 	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
2324 	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
2325 	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
2326 	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
2327 	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));
2328 
2329 	/* Set parameters for half-duplex media. */
2330 	CSR_WRITE_4(sc, AGE_HDPX_CFG,
2331 	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
2332 	    HDPX_CFG_LCOL_MASK) |
2333 	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
2334 	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
2335 	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
2336 	    HDPX_CFG_ABEBT_MASK) |
2337 	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
2338 	    HDPX_CFG_JAMIPG_MASK));
2339 
2340 	/* Configure interrupt moderation timer. */
2341 	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
2342 	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
2343 	reg &= ~MASTER_MTIMER_ENB;
2344 	if (AGE_USECS(sc->age_int_mod) == 0)
2345 		reg &= ~MASTER_ITIMER_ENB;
2346 	else
2347 		reg |= MASTER_ITIMER_ENB;
2348 	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
2349 	if (bootverbose)
2350 		device_printf(sc->age_dev, "interrupt moderation is %d us.\n",
2351 		    sc->age_int_mod);
2352 	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));
2353 
2354 	/* Set the max frame size but don't let the MTU be less than ETHERMTU. */
2355 	if (ifp->if_mtu < ETHERMTU)
2356 		sc->age_max_frame_size = ETHERMTU;
2357 	else
2358 		sc->age_max_frame_size = ifp->if_mtu;
2359 	sc->age_max_frame_size += ETHER_HDR_LEN +
2360 	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
2361 	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);
2362 
2363 	/* Configure jumbo frame. */
2364 	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
2365 	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
2366 	    (((fsize / sizeof(uint64_t)) <<
2367 	    RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
2368 	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
2369 	    RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
2370 	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
2371 	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));
2372 
2373 	/* Configure flow-control parameters. From Linux. */
2374 	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
2375 		/*
2376 		 * Magic workaround for old-L1.
2377 		 * Magic workaround for the old L1.
2378 		 * It is not known which hw revision requires this magic.
2379 		CSR_WRITE_4(sc, 0x12FC, 0x6500);
2380 		/*
2381 		 * Another magic workaround for flow-control mode
2382 		 * change. From Linux.
2383 		 */
2384 		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
2385 	}
2386 	/*
2387 	 * TODO
2388 	 *  Should understand how the pause parameters relate to the FIFO
2389 	 *  size and the number of Rx/Rx return descriptors.
2390 	 *
2391 	 *  Magic parameters came from Linux.
2392 	 */
2393 	switch (sc->age_chip_rev) {
2394 	case 0x8001:
2395 	case 0x9001:
2396 	case 0x9002:
2397 	case 0x9003:
2398 		rxf_hi = AGE_RX_RING_CNT / 16;
2399 		rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
2400 		rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
2401 		rrd_lo = AGE_RR_RING_CNT / 16;
2402 		break;
2403 	default:
2404 		reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
2405 		rxf_lo = reg / 16;
2406 		if (rxf_lo < 192)
2407 			rxf_lo = 192;
2408 		rxf_hi = (reg * 7) / 8;
2409 		if (rxf_hi < rxf_lo)
2410 			rxf_hi = rxf_lo + 16;
2411 		reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
2412 		rrd_lo = reg / 8;
2413 		rrd_hi = (reg * 7) / 8;
2414 		if (rrd_lo < 2)
2415 			rrd_lo = 2;
2416 		if (rrd_hi < rrd_lo)
2417 			rrd_hi = rrd_lo + 3;
2418 		break;
2419 	}
2420 	CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
2421 	    ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
2422 	    RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
2423 	    ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
2424 	    RXQ_FIFO_PAUSE_THRESH_HI_MASK));
2425 	CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
2426 	    ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
2427 	    RXQ_RRD_PAUSE_THRESH_LO_MASK) |
2428 	    ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
2429 	    RXQ_RRD_PAUSE_THRESH_HI_MASK));
2430 
2431 	/* Configure RxQ. */
2432 	CSR_WRITE_4(sc, AGE_RXQ_CFG,
2433 	    ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
2434 	    RXQ_CFG_RD_BURST_MASK) |
2435 	    ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
2436 	    RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
2437 	    ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
2438 	    RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
2439 	    RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);
2440 
2441 	/* Configure TxQ. */
2442 	CSR_WRITE_4(sc, AGE_TXQ_CFG,
2443 	    ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
2444 	    TXQ_CFG_TPD_BURST_MASK) |
2445 	    ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
2446 	    TXQ_CFG_TX_FIFO_BURST_MASK) |
2447 	    ((TXQ_CFG_TPD_FETCH_DEFAULT <<
2448 	    TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
2449 	    TXQ_CFG_ENB);
2450 
2451 	CSR_WRITE_4(sc, AGE_TX_JUMBO_TPD_TH_IPG,
2452 	    (((fsize / sizeof(uint64_t) << TX_JUMBO_TPD_TH_SHIFT)) &
2453 	    TX_JUMBO_TPD_TH_MASK) |
2454 	    ((TX_JUMBO_TPD_IPG_DEFAULT << TX_JUMBO_TPD_IPG_SHIFT) &
2455 	    TX_JUMBO_TPD_IPG_MASK));
2456 
2457 	/* Configure DMA parameters. */
2458 	CSR_WRITE_4(sc, AGE_DMA_CFG,
2459 	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
2460 	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
2461 	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);
2462 
2463 	/* Configure CMB DMA write threshold. */
2464 	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
2465 	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
2466 	    CMB_WR_THRESH_RRD_MASK) |
2467 	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
2468 	    CMB_WR_THRESH_TPD_MASK));
2469 
2470 	/* Set CMB/SMB timer and enable them. */
2471 	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
2472 	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
2473 	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));
2474 
2475 	/* Request SMB updates every second. */
2476 	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
2477 	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);
2478 
2479 	/*
2480 	 * Disable all WOL bits as WOL can interfere with normal Rx
2481 	 * operation.
2482 	 */
2483 	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
2484 
2485 	/*
2486 	 * Configure Tx/Rx MACs.
2487 	 *  - Auto-padding for short frames.
2488 	 *  - Enable CRC generation.
2489 	 *  Start with full-duplex/1000Mbps media. The MAC is actually
2490 	 *  reconfigured once the link has been established.
2491 	 */
2492 	CSR_WRITE_4(sc, AGE_MAC_CFG,
2493 	    MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
2494 	    MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
2495 	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
2496 	    MAC_CFG_PREAMBLE_MASK));
2497 
2498 	/* Set up the receive filter. */
2499 	age_rxfilter(sc);
2500 	age_rxvlan(sc);
2501 
2502 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
2503 	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2504 		reg |= MAC_CFG_RXCSUM_ENB;
2505 
2506 	/* Ack and clear all pending interrupts. */
2507 	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
2508 	CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);
2509 
2510 	/* Finally enable Tx/Rx MAC. */
2511 	CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
2512 
2513 	sc->age_flags &= ~AGE_FLAG_LINK;
2514 	/* Switch to the current media. */
2515 	mii_mediachg(mii);
2516 
2517 	callout_reset(&sc->age_tick_ch, hz, age_tick, sc);
2518 
2519 	ifp->if_flags |= IFF_RUNNING;
2520 	ifq_clr_oactive(&ifp->if_snd);
2521 }
2522 
2523 static void
2524 age_stop(struct age_softc *sc)
2525 {
2526 	struct ifnet *ifp = &sc->arpcom.ac_if;
2527 	struct age_txdesc *txd;
2528 	struct age_rxdesc *rxd;
2529 	uint32_t reg;
2530 	int i;
2531 
2532 	ASSERT_SERIALIZED(ifp->if_serializer);
2533 
2534 	/*
2535 	 * Mark the interface down and cancel the watchdog timer.
2536 	 */
2537 	ifp->if_flags &= ~IFF_RUNNING;
2538 	ifq_clr_oactive(&ifp->if_snd);
2539 	ifp->if_timer = 0;
2540 
2541 	sc->age_flags &= ~AGE_FLAG_LINK;
2542 	callout_stop(&sc->age_tick_ch);
2543 
2544 	/*
2545 	 * Disable interrupts.
2546 	 */
2547 	CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
2548 	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);
2549 
2550 	/* Stop CMB/SMB updates. */
2551 	CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);
2552 
2553 	/* Stop Rx/Tx MAC. */
2554 	age_stop_rxmac(sc);
2555 	age_stop_txmac(sc);
2556 
2557 	/* Stop DMA. */
2558 	CSR_WRITE_4(sc, AGE_DMA_CFG,
2559 	    CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));
2560 
2561 	/* Stop TxQ/RxQ. */
2562 	CSR_WRITE_4(sc, AGE_TXQ_CFG,
2563 	    CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
2564 	CSR_WRITE_4(sc, AGE_RXQ_CFG,
2565 	    CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
2566 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2567 		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
2568 			break;
2569 		DELAY(10);
2570 	}
2571 	if (i == 0)
2572 		device_printf(sc->age_dev,
2573 		    "stopping Rx/Tx MACs timed out (0x%08x)!\n", reg);
2574 
2575 	/* Reclaim Rx buffers that have been processed. */
2576 	if (sc->age_cdata.age_rxhead != NULL)
2577 		m_freem(sc->age_cdata.age_rxhead);
2578 	AGE_RXCHAIN_RESET(sc);
2579 
2580 	/*
2581 	 * Free RX and TX mbufs still in the queues.
2582 	 */
2583 	for (i = 0; i < AGE_RX_RING_CNT; i++) {
2584 		rxd = &sc->age_cdata.age_rxdesc[i];
2585 		if (rxd->rx_m != NULL) {
2586 			bus_dmamap_unload(sc->age_cdata.age_rx_tag,
2587 			    rxd->rx_dmamap);
2588 			m_freem(rxd->rx_m);
2589 			rxd->rx_m = NULL;
2590 		}
2591 	}
2592 	for (i = 0; i < AGE_TX_RING_CNT; i++) {
2593 		txd = &sc->age_cdata.age_txdesc[i];
2594 		if (txd->tx_m != NULL) {
2595 			bus_dmamap_unload(sc->age_cdata.age_tx_tag,
2596 			    txd->tx_dmamap);
2597 			m_freem(txd->tx_m);
2598 			txd->tx_m = NULL;
2599 		}
2600 	}
2601 }
2602 
2603 static void
2604 age_stop_txmac(struct age_softc *sc)
2605 {
2606 	uint32_t reg;
2607 	int i;
2608 
2609 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
2610 	if ((reg & MAC_CFG_TX_ENB) != 0) {
2611 		reg &= ~MAC_CFG_TX_ENB;
2612 		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2613 	}
2614 	/* Stop Tx DMA engine. */
2615 	reg = CSR_READ_4(sc, AGE_DMA_CFG);
2616 	if ((reg & DMA_CFG_RD_ENB) != 0) {
2617 		reg &= ~DMA_CFG_RD_ENB;
2618 		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2619 	}
2620 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2621 		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2622 		    (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
2623 			break;
2624 		DELAY(10);
2625 	}
2626 	if (i == 0)
2627 		device_printf(sc->age_dev, "stopping TxMAC timeout!\n");
2628 }
2629 
2630 static void
2631 age_stop_rxmac(struct age_softc *sc)
2632 {
2633 	uint32_t reg;
2634 	int i;
2635 
2636 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
2637 	if ((reg & MAC_CFG_RX_ENB) != 0) {
2638 		reg &= ~MAC_CFG_RX_ENB;
2639 		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2640 	}
2641 	/* Stop Rx DMA engine. */
2642 	reg = CSR_READ_4(sc, AGE_DMA_CFG);
2643 	if ((reg & DMA_CFG_WR_ENB) != 0) {
2644 		reg &= ~DMA_CFG_WR_ENB;
2645 		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2646 	}
2647 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2648 		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2649 		    (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
2650 			break;
2651 		DELAY(10);
2652 	}
2653 	if (i == 0)
2654 		device_printf(sc->age_dev, "stopping RxMAC timeout!\n");
2655 }
2656 
2657 static void
2658 age_init_tx_ring(struct age_softc *sc)
2659 {
2660 	struct age_ring_data *rd;
2661 	struct age_txdesc *txd;
2662 	int i;
2663 
2664 	sc->age_cdata.age_tx_prod = 0;
2665 	sc->age_cdata.age_tx_cons = 0;
2666 	sc->age_cdata.age_tx_cnt = 0;
2667 
2668 	rd = &sc->age_rdata;
2669 	bzero(rd->age_tx_ring, AGE_TX_RING_SZ);
2670 	for (i = 0; i < AGE_TX_RING_CNT; i++) {
2671 		txd = &sc->age_cdata.age_txdesc[i];
2672 		txd->tx_desc = &rd->age_tx_ring[i];
2673 		txd->tx_m = NULL;
2674 	}
2675 
2676 	bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
2677 	    sc->age_cdata.age_tx_ring_map, BUS_DMASYNC_PREWRITE);
2678 }
2679 
2680 static int
2681 age_init_rx_ring(struct age_softc *sc)
2682 {
2683 	struct age_ring_data *rd;
2684 	struct age_rxdesc *rxd;
2685 	int i;
2686 
2687 	sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
2688 	rd = &sc->age_rdata;
2689 	bzero(rd->age_rx_ring, AGE_RX_RING_SZ);
2690 	for (i = 0; i < AGE_RX_RING_CNT; i++) {
2691 		rxd = &sc->age_cdata.age_rxdesc[i];
2692 		rxd->rx_m = NULL;
2693 		rxd->rx_desc = &rd->age_rx_ring[i];
2694 		if (age_newbuf(sc, rxd, 1) != 0)
2695 			return (ENOBUFS);
2696 	}
2697 
2698 	bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
2699 	    sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_PREWRITE);
2700 
2701 	return (0);
2702 }
2703 
2704 static void
2705 age_init_rr_ring(struct age_softc *sc)
2706 {
2707 	struct age_ring_data *rd;
2708 
2709 	sc->age_cdata.age_rr_cons = 0;
2710 	AGE_RXCHAIN_RESET(sc);
2711 
2712 	rd = &sc->age_rdata;
2713 	bzero(rd->age_rr_ring, AGE_RR_RING_SZ);
2714 	bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
2715 	    sc->age_cdata.age_rr_ring_map, BUS_DMASYNC_PREWRITE);
2716 }
2717 
2718 static void
2719 age_init_cmb_block(struct age_softc *sc)
2720 {
2721 	struct age_ring_data *rd;
2722 
2723 	rd = &sc->age_rdata;
2724 	bzero(rd->age_cmb_block, AGE_CMB_BLOCK_SZ);
2725 	bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
2726 	    sc->age_cdata.age_cmb_block_map, BUS_DMASYNC_PREWRITE);
2727 }
2728 
2729 static void
2730 age_init_smb_block(struct age_softc *sc)
2731 {
2732 	struct age_ring_data *rd;
2733 
2734 	rd = &sc->age_rdata;
2735 	bzero(rd->age_smb_block, AGE_SMB_BLOCK_SZ);
2736 	bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
2737 	    sc->age_cdata.age_smb_block_map, BUS_DMASYNC_PREWRITE);
2738 }
2739 
2740 static int
2741 age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init)
2742 {
2743 	struct rx_desc *desc;
2744 	struct mbuf *m;
2745 	struct age_dmamap_ctx ctx;
2746 	bus_dma_segment_t segs[1];
2747 	bus_dmamap_t map;
2748 	int error;
2749 
2750 	m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
2751 	if (m == NULL)
2752 		return (ENOBUFS);
2753 
2754 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2755 	m_adj(m, ETHER_ALIGN);
2756 
2757 	ctx.nsegs = 1;
2758 	ctx.segs = segs;
2759 	error = bus_dmamap_load_mbuf(sc->age_cdata.age_rx_tag,
2760 				     sc->age_cdata.age_rx_sparemap,
2761 				     m, age_dmamap_buf_cb, &ctx,
2762 				     BUS_DMA_NOWAIT);
2763 	if (error || ctx.nsegs == 0) {
2764 		if (!error) {
2765 			bus_dmamap_unload(sc->age_cdata.age_rx_tag,
2766 					  sc->age_cdata.age_rx_sparemap);
2767 			error = EFBIG;
2768 			if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
2769 		}
2770 		m_freem(m);
2771 
2772 		if (init)
2773 			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
2774 		return (error);
2775 	}
2776 	KASSERT(ctx.nsegs == 1,
2777 		("%s: %d segments returned!", __func__, ctx.nsegs));
2778 
2779 	if (rxd->rx_m != NULL) {
2780 		bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap,
2781 		    BUS_DMASYNC_POSTREAD);
2782 		bus_dmamap_unload(sc->age_cdata.age_rx_tag, rxd->rx_dmamap);
2783 	}
2784 	map = rxd->rx_dmamap;
2785 	rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
2786 	sc->age_cdata.age_rx_sparemap = map;
2787 	rxd->rx_m = m;
2788 
2789 	desc = rxd->rx_desc;
2790 	desc->addr = htole64(segs[0].ds_addr);
2791 	desc->len = htole32((segs[0].ds_len & AGE_RD_LEN_MASK) <<
2792 	    AGE_RD_LEN_SHIFT);
2793 	return (0);
2794 }
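
/*
 * Editor's note: age_newbuf() uses the common spare-map trick.  The
 * new mbuf is loaded into age_rx_sparemap first; only if that load
 * succeeds are the descriptor's map and the spare swapped:
 *
 *	map = rxd->rx_dmamap;			(old, now-unloaded map)
 *	rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
 *	sc->age_cdata.age_rx_sparemap = map;	(becomes the next spare)
 *
 * On any load failure the ring still holds its previous, valid mbuf
 * and mapping, so the Rx ring never ends up with a hole -- the frame
 * is simply dropped and counted in iqdrops by the caller.
 */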
2795 
2796 static void
2797 age_rxvlan(struct age_softc *sc)
2798 {
2799 	struct ifnet *ifp = &sc->arpcom.ac_if;
2800 	uint32_t reg;
2801 
2802 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
2803 	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
2804 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2805 		reg |= MAC_CFG_VLAN_TAG_STRIP;
2806 	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2807 }
2808 
2809 static void
2810 age_rxfilter(struct age_softc *sc)
2811 {
2812 	struct ifnet *ifp = &sc->arpcom.ac_if;
2813 	struct ifmultiaddr *ifma;
2814 	uint32_t crc;
2815 	uint32_t mchash[2];
2816 	uint32_t rxcfg;
2817 
2818 	rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
2819 	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
2820 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
2821 		rxcfg |= MAC_CFG_BCAST;
2822 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2823 		if ((ifp->if_flags & IFF_PROMISC) != 0)
2824 			rxcfg |= MAC_CFG_PROMISC;
2825 		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2826 			rxcfg |= MAC_CFG_ALLMULTI;
2827 		CSR_WRITE_4(sc, AGE_MAR0, 0xFFFFFFFF);
2828 		CSR_WRITE_4(sc, AGE_MAR1, 0xFFFFFFFF);
2829 		CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
2830 		return;
2831 	}
2832 
2833 	/* Program new filter. */
2834 	bzero(mchash, sizeof(mchash));
2835 
2836 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2837 		if (ifma->ifma_addr->sa_family != AF_LINK)
2838 			continue;
2839 		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
2840 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2841 		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2842 	}
2843 
2844 	CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
2845 	CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
2846 	CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
2847 }
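
/*
 * Editor's worked example (hypothetical address): the 64-bit multicast
 * hash above keys off the top bits of the little-endian CRC32 of the
 * MAC address.  If ether_crc32_le() returned crc = 0xD3A49F6B:
 *
 *	crc >> 31		== 1	-> word mchash[1] (AGE_MAR1)
 *	(crc >> 26) & 0x1f	== 0x14	-> bit 20 of that word
 *
 * so the filter sets bit 20 of AGE_MAR1.  Promiscuous/allmulti mode
 * instead writes 0xFFFFFFFF to both MAR registers, accepting every
 * multicast hash bucket.
 */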
2848 
2849 static int
2850 sysctl_age_stats(SYSCTL_HANDLER_ARGS)
2851 {
2852 	struct age_softc *sc;
2853 	struct age_stats *stats;
2854 	int error, result;
2855 
2856 	result = -1;
2857 	error = sysctl_handle_int(oidp, &result, 0, req);
2858 
2859 	if (error != 0 || req->newptr == NULL)
2860 		return (error);
2861 
2862 	if (result != 1)
2863 		return (error);
2864 
2865 	sc = (struct age_softc *)arg1;
2866 	stats = &sc->age_stat;
2867 	kprintf("%s statistics:\n", device_get_nameunit(sc->age_dev));
2868 	kprintf("Transmit good frames : %ju\n",
2869 	    (uintmax_t)stats->tx_frames);
2870 	kprintf("Transmit good broadcast frames : %ju\n",
2871 	    (uintmax_t)stats->tx_bcast_frames);
2872 	kprintf("Transmit good multicast frames : %ju\n",
2873 	    (uintmax_t)stats->tx_mcast_frames);
2874 	kprintf("Transmit pause control frames : %u\n",
2875 	    stats->tx_pause_frames);
2876 	kprintf("Transmit control frames : %u\n",
2877 	    stats->tx_control_frames);
2878 	kprintf("Transmit frames with excessive deferrals : %u\n",
2879 	    stats->tx_excess_defer);
2880 	kprintf("Transmit deferrals : %u\n",
2881 	    stats->tx_deferred);
2882 	kprintf("Transmit good octets : %ju\n",
2883 	    (uintmax_t)stats->tx_bytes);
2884 	kprintf("Transmit good broadcast octets : %ju\n",
2885 	    (uintmax_t)stats->tx_bcast_bytes);
2886 	kprintf("Transmit good multicast octets : %ju\n",
2887 	    (uintmax_t)stats->tx_mcast_bytes);
2888 	kprintf("Transmit frames 64 bytes : %ju\n",
2889 	    (uintmax_t)stats->tx_pkts_64);
2890 	kprintf("Transmit frames 65 to 127 bytes : %ju\n",
2891 	    (uintmax_t)stats->tx_pkts_65_127);
2892 	kprintf("Transmit frames 128 to 255 bytes : %ju\n",
2893 	    (uintmax_t)stats->tx_pkts_128_255);
2894 	kprintf("Transmit frames 256 to 511 bytes : %ju\n",
2895 	    (uintmax_t)stats->tx_pkts_256_511);
2896 	kprintf("Transmit frames 512 to 1023 bytes : %ju\n",
2897 	    (uintmax_t)stats->tx_pkts_512_1023);
2898 	kprintf("Transmit frames 1024 to 1518 bytes : %ju\n",
2899 	    (uintmax_t)stats->tx_pkts_1024_1518);
2900 	kprintf("Transmit frames 1519 to MTU bytes : %ju\n",
2901 	    (uintmax_t)stats->tx_pkts_1519_max);
2902 	kprintf("Transmit single collisions : %u\n",
2903 	    stats->tx_single_colls);
2904 	kprintf("Transmit multiple collisions : %u\n",
2905 	    stats->tx_multi_colls);
2906 	kprintf("Transmit late collisions : %u\n",
2907 	    stats->tx_late_colls);
2908 	kprintf("Transmit abort due to excessive collisions : %u\n",
2909 	    stats->tx_excess_colls);
2910 	kprintf("Transmit underruns due to FIFO underruns : %u\n",
2911 	    stats->tx_underrun);
2912 	kprintf("Transmit descriptor write-back errors : %u\n",
2913 	    stats->tx_desc_underrun);
2914 	kprintf("Transmit frames with mismatched frame length : %u\n",
2915 	    stats->tx_lenerrs);
2916 	kprintf("Transmit frames truncated due to MTU size : %u\n",
2917 	    stats->tx_pkts_truncated);
2918 
2919 	kprintf("Receive good frames : %ju\n",
2920 	    (uintmax_t)stats->rx_frames);
2921 	kprintf("Receive good broadcast frames : %ju\n",
2922 	    (uintmax_t)stats->rx_bcast_frames);
2923 	kprintf("Receive good multicast frames : %ju\n",
2924 	    (uintmax_t)stats->rx_mcast_frames);
2925 	kprintf("Receive pause control frames : %u\n",
2926 	    stats->rx_pause_frames);
2927 	kprintf("Receive control frames : %u\n",
2928 	    stats->rx_control_frames);
2929 	kprintf("Receive CRC errors : %u\n",
2930 	    stats->rx_crcerrs);
2931 	kprintf("Receive frames with length errors : %u\n",
2932 	    stats->rx_lenerrs);
2933 	kprintf("Receive good octets : %ju\n",
2934 	    (uintmax_t)stats->rx_bytes);
2935 	kprintf("Receive good broadcast octets : %ju\n",
2936 	    (uintmax_t)stats->rx_bcast_bytes);
2937 	kprintf("Receive good multicast octets : %ju\n",
2938 	    (uintmax_t)stats->rx_mcast_bytes);
2939 	kprintf("Receive frames too short : %u\n",
2940 	    stats->rx_runts);
2941 	kprintf("Receive fragmented frames : %ju\n",
2942 	    (uintmax_t)stats->rx_fragments);
2943 	kprintf("Receive frames 64 bytes : %ju\n",
2944 	    (uintmax_t)stats->rx_pkts_64);
2945 	kprintf("Receive frames 65 to 127 bytes : %ju\n",
2946 	    (uintmax_t)stats->rx_pkts_65_127);
2947 	kprintf("Receive frames 128 to 255 bytes : %ju\n",
2948 	    (uintmax_t)stats->rx_pkts_128_255);
2949 	kprintf("Receive frames 256 to 511 bytes : %ju\n",
2950 	    (uintmax_t)stats->rx_pkts_256_511);
2951 	kprintf("Receive frames 512 to 1023 bytes : %ju\n",
2952 	    (uintmax_t)stats->rx_pkts_512_1023);
2953 	kprintf("Receive frames 1024 to 1518 bytes : %ju\n",
2954 	    (uintmax_t)stats->rx_pkts_1024_1518);
2955 	kprintf("Receive frames 1519 to MTU bytes : %ju\n",
2956 	    (uintmax_t)stats->rx_pkts_1519_max);
2957 	kprintf("Receive frames too long : %ju\n",
2958 	    (uintmax_t)stats->rx_pkts_truncated);
2959 	kprintf("Receive frames with FIFO overflow : %u\n",
2960 	    stats->rx_fifo_oflows);
2961 	kprintf("Receive frames with return descriptor overflow : %u\n",
2962 	    stats->rx_desc_oflows);
2963 	kprintf("Receive frames with alignment errors : %u\n",
2964 	    stats->rx_alignerrs);
2965 	kprintf("Receive frames dropped due to address filtering : %ju\n",
2966 	    (uintmax_t)stats->rx_pkts_filtered);
2967 
2968 	return (error);
2969 }
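
/*
 * Editor's usage note: this handler follows the write-to-trigger
 * sysctl convention -- sysctl_handle_int() copies in the new value
 * and the accumulated counters are dumped to the console only when
 * that value is exactly 1, e.g. (node name hypothetical):
 *
 *	sysctl dev.age.0.stats=1
 *
 * Plain reads, and writes of any other value, return without side
 * effects.
 */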
2970 
2971 static int
2972 sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS)
2973 {
2974 
2975 	return (sysctl_int_range(oidp, arg1, arg2, req, AGE_IM_TIMER_MIN,
2976 	    AGE_IM_TIMER_MAX));
2977 }
2978 
2979 static void
2980 age_dmamap_buf_cb(void *xctx, bus_dma_segment_t *segs, int nsegs,
2981 		  bus_size_t mapsz __unused, int error)
2982 {
2983 	struct age_dmamap_ctx *ctx = xctx;
2984 	int i;
2985 
2986 	if (error)
2987 		return;
2988 
2989 	if (nsegs > ctx->nsegs) {
2990 		ctx->nsegs = 0;
2991 		return;
2992 	}
2993 
2994 	ctx->nsegs = nsegs;
2995 	for (i = 0; i < nsegs; ++i)
2996 		ctx->segs[i] = segs[i];
2997 }
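
/*
 * Editor's note: age_dmamap_buf_cb() is the busdma callback half of
 * the loads issued in age_newbuf().  The caller advertises its
 * segment capacity in ctx->nsegs before the load (generic tag/map
 * names stand in for the age_rx_tag/age_rx_sparemap pair used above):
 *
 *	ctx.nsegs = 1;
 *	ctx.segs = segs;
 *	error = bus_dmamap_load_mbuf(tag, map, m, age_dmamap_buf_cb,
 *	    &ctx, BUS_DMA_NOWAIT);
 *
 * If the mapping needs more segments than that, the callback zeroes
 * ctx->nsegs instead of overflowing the caller's array, which is what
 * the `error || ctx.nsegs == 0' test in age_newbuf() turns into EFBIG.
 */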
2998