xref: /dragonfly/sys/dev/netif/ae/if_ae.c (revision 7d84b73d)
1 /*-
2  * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  *
25  * Driver for Attansic Technology Corp. L2 FastEthernet adapter.
26  *
27  * This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
28  *
29  * $FreeBSD: src/sys/dev/ae/if_ae.c,v 1.1.2.3.2.1 2009/04/15 03:14:26 kensmith Exp $
30  */
31 
32 #include <sys/param.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
35 #include <sys/bus.h>
36 #include <sys/interrupt.h>
37 #include <sys/malloc.h>
38 #include <sys/proc.h>
39 #include <sys/rman.h>
40 #include <sys/serialize.h>
41 #include <sys/socket.h>
42 #include <sys/sockio.h>
43 #include <sys/sysctl.h>
44 
45 #include <net/ethernet.h>
46 #include <net/if.h>
47 #include <net/bpf.h>
48 #include <net/if_arp.h>
49 #include <net/if_dl.h>
50 #include <net/if_media.h>
51 #include <net/ifq_var.h>
52 #include <net/vlan/if_vlan_var.h>
53 #include <net/vlan/if_vlan_ether.h>
54 
55 #include <bus/pci/pcireg.h>
56 #include <bus/pci/pcivar.h>
57 #include "pcidevs.h"
58 
59 #include <dev/netif/mii_layer/miivar.h>
60 
61 #include <dev/netif/ae/if_aereg.h>
62 #include <dev/netif/ae/if_aevar.h>
63 
64 /* "device miibus" required.  See GENERIC if you get errors here. */
65 #include "miibus_if.h"
66 
67 /*
68  * Devices supported by this driver.
69  */
70 static const struct ae_dev {
71 	uint16_t	ae_vendorid;
72 	uint16_t	ae_deviceid;
73 	const char	*ae_name;
74 } ae_devs[] = {
75         { VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
76             "Attansic Technology Corp, L2 Fast Ethernet" },
77 	/* Required last entry */
78 	{ 0, 0, NULL }
79 };
80 
81 
82 static int	ae_probe(device_t);
83 static int	ae_attach(device_t);
84 static int	ae_detach(device_t);
85 static int	ae_shutdown(device_t);
86 static int	ae_suspend(device_t);
87 static int	ae_resume(device_t);
88 static int	ae_miibus_readreg(device_t, int, int);
89 static int	ae_miibus_writereg(device_t, int, int, int);
90 static void	ae_miibus_statchg(device_t);
91 
92 static int	ae_mediachange(struct ifnet *);
93 static void	ae_mediastatus(struct ifnet *, struct ifmediareq *);
94 static void	ae_init(void *);
95 static void	ae_start(struct ifnet *, struct ifaltq_subque *);
96 static int	ae_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
97 static void	ae_watchdog(struct ifnet *);
98 static void	ae_stop(struct ae_softc *);
99 static void	ae_tick(void *);
100 
101 static void	ae_intr(void *);
102 static void	ae_tx_intr(struct ae_softc *);
103 static void	ae_rx_intr(struct ae_softc *);
104 static int	ae_rxeof(struct ae_softc *, struct ae_rxd *);
105 
106 static int	ae_encap(struct ae_softc *, struct mbuf **);
107 static void	ae_sysctl_node(struct ae_softc *);
108 static void	ae_phy_reset(struct ae_softc *);
109 static int	ae_reset(struct ae_softc *);
110 static void	ae_pcie_init(struct ae_softc *);
111 static void	ae_get_eaddr(struct ae_softc *);
112 static void	ae_dma_free(struct ae_softc *);
113 static int	ae_dma_alloc(struct ae_softc *);
114 static void	ae_mac_config(struct ae_softc *);
115 static void	ae_stop_rxmac(struct ae_softc *);
116 static void	ae_stop_txmac(struct ae_softc *);
117 static void	ae_rxfilter(struct ae_softc *);
118 static void	ae_rxvlan(struct ae_softc *);
119 static void	ae_update_stats_rx(uint16_t, struct ae_stats *);
120 static void	ae_update_stats_tx(uint16_t, struct ae_stats *);
121 static void	ae_powersave_disable(struct ae_softc *);
122 static void	ae_powersave_enable(struct ae_softc *);
123 
/* newbus method dispatch table for the ae(4) driver. */
static device_method_t ae_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		ae_probe),
	DEVMETHOD(device_attach,	ae_attach),
	DEVMETHOD(device_detach,	ae_detach),
	DEVMETHOD(device_shutdown,	ae_shutdown),
	DEVMETHOD(device_suspend,	ae_suspend),
	DEVMETHOD(device_resume,	ae_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	ae_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ae_miibus_writereg),
	DEVMETHOD(miibus_statchg,	ae_miibus_statchg),
	{ NULL, NULL }
};

/* Driver glue: name, method table, per-device softc size. */
static driver_t ae_driver = {
	"ae",
	ae_methods,
	sizeof(struct ae_softc)
};

static devclass_t ae_devclass;
DECLARE_DUMMY_MODULE(if_ae);
/* PHY management is delegated to miibus. */
MODULE_DEPEND(if_ae, miibus, 1, 1, 1);
DRIVER_MODULE(if_ae, pci, ae_driver, ae_devclass, NULL, NULL);
/* miibus attaches as a child of each ae device. */
DRIVER_MODULE(miibus, ae, miibus_driver, miibus_devclass, NULL, NULL);
155 
/* Register access macros (memory-mapped BAR via bus_space). */
#define AE_WRITE_4(_sc, reg, val)	\
	bus_space_write_4((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
#define AE_WRITE_2(_sc, reg, val)	\
	bus_space_write_2((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
#define AE_WRITE_1(_sc, reg, val)	\
	bus_space_write_1((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
#define AE_READ_4(_sc, reg)		\
	bus_space_read_4((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))
#define AE_READ_2(_sc, reg)		\
	bus_space_read_2((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))
#define AE_READ_1(_sc, reg)		\
	bus_space_read_1((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))

/* PHY access shortcuts; PHY address 0 is used unconditionally. */
#define AE_PHY_READ(sc, reg)		\
	ae_miibus_readreg(sc->ae_dev, 0, reg)
#define AE_PHY_WRITE(sc, reg, val)	\
	ae_miibus_writereg(sc->ae_dev, 0, reg, val)
/*
 * A station address is considered invalid when all-zero or all-ones;
 * eaddr[0] carries 32 significant bits and eaddr[1] the remaining 16.
 */
#define AE_CHECK_EADDR_VALID(eaddr)	\
	((eaddr[0] == 0 && eaddr[1] == 0) || \
	 (eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
/*
 * Bit-layout conversion of the VLAN tag between the chip's Rx/Tx
 * descriptor format and the host 802.1Q representation.
 */
#define AE_RXD_VLAN(vtag) \
	(((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
#define AE_TXD_VLAN(vtag) \
	(((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))
181 
182 /*
183  * ae statistics.
184  */
185 #define STATS_ENTRY(node, desc, field) \
186 	{ node, desc, offsetof(struct ae_stats, field) }
187 struct {
188 	const char	*node;
189 	const char	*desc;
190 	intptr_t	offset;
191 } ae_stats_tx[] = {
192 	STATS_ENTRY("bcast", "broadcast frames", tx_bcast),
193 	STATS_ENTRY("mcast", "multicast frames", tx_mcast),
194 	STATS_ENTRY("pause", "PAUSE frames", tx_pause),
195 	STATS_ENTRY("control", "control frames", tx_ctrl),
196 	STATS_ENTRY("defers", "deferrals occuried", tx_defer),
197 	STATS_ENTRY("exc_defers", "excessive deferrals occuried", tx_excdefer),
198 	STATS_ENTRY("singlecols", "single collisions occuried", tx_singlecol),
199 	STATS_ENTRY("multicols", "multiple collisions occuried", tx_multicol),
200 	STATS_ENTRY("latecols", "late collisions occuried", tx_latecol),
201 	STATS_ENTRY("aborts", "transmit aborts due collisions", tx_abortcol),
202 	STATS_ENTRY("underruns", "Tx FIFO underruns", tx_underrun)
203 }, ae_stats_rx[] = {
204 	STATS_ENTRY("bcast", "broadcast frames", rx_bcast),
205 	STATS_ENTRY("mcast", "multicast frames", rx_mcast),
206 	STATS_ENTRY("pause", "PAUSE frames", rx_pause),
207 	STATS_ENTRY("control", "control frames", rx_ctrl),
208 	STATS_ENTRY("crc_errors", "frames with CRC errors", rx_crcerr),
209 	STATS_ENTRY("code_errors", "frames with invalid opcode", rx_codeerr),
210 	STATS_ENTRY("runt", "runt frames", rx_runt),
211 	STATS_ENTRY("frag", "fragmented frames", rx_frag),
212 	STATS_ENTRY("align_errors", "frames with alignment errors", rx_align),
213 	STATS_ENTRY("truncated", "frames truncated due to Rx FIFO inderrun",
214 	    rx_trunc)
215 };
216 #define AE_STATS_RX_LEN NELEM(ae_stats_rx)
217 #define AE_STATS_TX_LEN NELEM(ae_stats_tx)
218 
/*
 * Bring the interface down: mark it not-running, cancel the tick
 * callout, mask interrupts, halt the Rx/Tx MACs and DMA engines, and
 * wait (bounded by AE_IDLE_TIMEOUT polls) for the chip to go idle.
 * Caller must hold the interface serializer (asserted below).
 */
static void
ae_stop(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;	/* Disarm the watchdog. */

	sc->ae_flags &= ~AE_FLAG_LINK;
	callout_stop(&sc->ae_tick_ch);

	/*
	 * Clear and disable interrupts.
	 */
	AE_WRITE_4(sc, AE_IMR_REG, 0);
	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_txmac(sc);
	ae_stop_rxmac(sc);

	/*
	 * Stop DMA engines.
	 */
	AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
	AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);

	/*
	 * Wait for everything to enter idle state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		if_printf(ifp, "could not enter idle state in stop.\n");
}
263 
264 static void
265 ae_stop_rxmac(struct ae_softc *sc)
266 {
267 	uint32_t val;
268 	int i;
269 
270 	/*
271 	 * Stop Rx MAC engine.
272 	 */
273 	val = AE_READ_4(sc, AE_MAC_REG);
274 	if ((val & AE_MAC_RX_EN) != 0) {
275 		val &= ~AE_MAC_RX_EN;
276 		AE_WRITE_4(sc, AE_MAC_REG, val);
277 	}
278 
279 	/*
280 	 * Stop Rx DMA engine.
281 	 */
282 	if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
283 		AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);
284 
285 	/*
286 	 * Wait for IDLE state.
287 	 */
288 	for (i = 0; i < AE_IDLE_TIMEOUT; i--) {
289 		val = AE_READ_4(sc, AE_IDLE_REG);
290 		if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
291 			break;
292 		DELAY(100);
293 	}
294 	if (i == AE_IDLE_TIMEOUT) {
295 		if_printf(&sc->arpcom.ac_if,
296 			  "timed out while stopping Rx MAC.\n");
297 	}
298 }
299 
300 static void
301 ae_stop_txmac(struct ae_softc *sc)
302 {
303 	uint32_t val;
304 	int i;
305 
306 	/*
307 	 * Stop Tx MAC engine.
308 	 */
309 	val = AE_READ_4(sc, AE_MAC_REG);
310 	if ((val & AE_MAC_TX_EN) != 0) {
311 		val &= ~AE_MAC_TX_EN;
312 		AE_WRITE_4(sc, AE_MAC_REG, val);
313 	}
314 
315 	/*
316 	 * Stop Tx DMA engine.
317 	 */
318 	if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
319 		AE_WRITE_1(sc, AE_DMAREAD_REG, 0);
320 
321 	/*
322 	 * Wait for IDLE state.
323 	 */
324 	for (i = 0; i < AE_IDLE_TIMEOUT; i--) {
325 		val = AE_READ_4(sc, AE_IDLE_REG);
326 		if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
327 			break;
328 		DELAY(100);
329 	}
330 	if (i == AE_IDLE_TIMEOUT) {
331 		if_printf(&sc->arpcom.ac_if,
332 			  "timed out while stopping Tx MAC.\n");
333 	}
334 }
335 
/*
 * Callback from MII layer when media changes.
 *
 * Tracks link state in sc->ae_flags (link counts as up only for
 * 10baseT/100baseTX), stops the Rx/Tx MACs, and when a link is
 * present reprograms the MAC duplex setting and restarts the DMA
 * engines and MACs.  Runs under the interface serializer.
 */
static void
ae_miibus_statchg(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint32_t val;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Nothing to do while the interface is down. */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->ae_miibus);
	sc->ae_flags &= ~AE_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->ae_flags |= AE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	ae_stop_rxmac(sc);
	ae_stop_txmac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->ae_flags & AE_FLAG_LINK) != 0) {
		ae_mac_config(sc);

		/*
		 * Restart DMA engines.
		 */
		AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
		AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

		/*
		 * Enable Rx and Tx MACs.
		 */
		val = AE_READ_4(sc, AE_MAC_REG);
		val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}
}
387 
/*
 * Create the "stats", "stats.rx" and "stats.tx" sysctl subtrees under
 * the device's sysctl node, exporting each counter of sc->stats as a
 * read-only unsigned int (driven by the ae_stats_rx/ae_stats_tx
 * descriptor tables above).  Failure to create a node is logged and
 * tolerated.
 */
static void
ae_sysctl_node(struct ae_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
	struct ae_stats *ae_stats;
	unsigned int i;

	ae_stats = &sc->stats;

	ctx = device_get_sysctl_ctx(sc->ae_dev);
	root = device_get_sysctl_tree(sc->ae_dev);
	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
	    CTLFLAG_RD, NULL, "ae statistics");
	if (stats == NULL) {
		device_printf(sc->ae_dev, "can't add stats sysctl node\n");
		return;
	}

	/*
	 * Receiver statistics.
	 */
	stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
	    CTLFLAG_RD, NULL, "Rx MAC statistics");
	if (stats_rx != NULL) {
		for (i = 0; i < AE_STATS_RX_LEN; i++) {
			SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_rx),
			    OID_AUTO, ae_stats_rx[i].node, CTLFLAG_RD,
			    (char *)ae_stats + ae_stats_rx[i].offset, 0,
			    ae_stats_rx[i].desc);
		}
	}

	/*
	 * Transmitter statistics.
	 */
	stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
	    CTLFLAG_RD, NULL, "Tx MAC statistics");
	if (stats_tx != NULL) {
		for (i = 0; i < AE_STATS_TX_LEN; i++) {
			SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_tx),
			    OID_AUTO, ae_stats_tx[i].node, CTLFLAG_RD,
			    (char *)ae_stats + ae_stats_tx[i].offset, 0,
			    ae_stats_tx[i].desc);
		}
	}
}
435 
/*
 * MII bus read method: issue an MDIO read command for PHY register
 * 'reg' and poll (up to AE_MDIO_TIMEOUT iterations, 2us apart) until
 * the controller clears the START/BUSY bits.  Returns the register
 * data field, or 0 on timeout or when 'phy' is not the attached PHY.
 */
static int
ae_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ae_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/*
	 * Locking is done in upper layers.
	 */
	if (phy != sc->ae_phyaddr)
		return (0);
	/* Compose the MDIO read command word. */
	val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
	    AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
	AE_WRITE_4(sc, AE_MDIO_REG, val);

	/*
	 * Wait for operation to complete.
	 */
	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
		DELAY(2);
		val = AE_READ_4(sc, AE_MDIO_REG);
		if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
			break;
	}
	if (i == AE_MDIO_TIMEOUT) {
		device_printf(sc->ae_dev, "phy read timeout: %d.\n", reg);
		return (0);
	}
	/* Extract the data field from the last MDIO register snapshot. */
	return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
}
468 
469 static int
470 ae_miibus_writereg(device_t dev, int phy, int reg, int val)
471 {
472 	struct ae_softc *sc = device_get_softc(dev);
473 	uint32_t aereg;
474 	int i;
475 
476 	/*
477 	 * Locking is done in upper layers.
478 	 */
479 	if (phy != sc->ae_phyaddr)
480 		return (0);
481 	aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
482 	    AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
483 	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
484 	    ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
485 	AE_WRITE_4(sc, AE_MDIO_REG, aereg);
486 
487 	/*
488 	 * Wait for operation to complete.
489 	 */
490 	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
491 		DELAY(2);
492 		aereg = AE_READ_4(sc, AE_MDIO_REG);
493 		if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
494 			break;
495 	}
496 	if (i == AE_MDIO_TIMEOUT)
497 		device_printf(sc->ae_dev, "phy write timeout: %d.\n", reg);
498 	return (0);
499 }
500 
501 static int
502 ae_probe(device_t dev)
503 {
504 	uint16_t vendor, devid;
505 	const struct ae_dev *sp;
506 
507 	vendor = pci_get_vendor(dev);
508 	devid = pci_get_device(dev);
509 	for (sp = ae_devs; sp->ae_name != NULL; sp++) {
510 		if (vendor == sp->ae_vendorid &&
511 		    devid == sp->ae_deviceid) {
512 			device_set_desc(dev, sp->ae_name);
513 			return (0);
514 		}
515 	}
516 	return (ENXIO);
517 }
518 
519 static int
520 ae_dma_alloc(struct ae_softc *sc)
521 {
522 	bus_addr_t busaddr;
523 	int error;
524 
525 	/*
526 	 * Create parent DMA tag.
527 	 */
528 	error = bus_dma_tag_create(NULL, 1, 0,
529 				   BUS_SPACE_MAXADDR_32BIT,
530 				   BUS_SPACE_MAXADDR,
531 				   BUS_SPACE_MAXSIZE_32BIT,
532 				   0,
533 				   BUS_SPACE_MAXSIZE_32BIT,
534 				   0, &sc->dma_parent_tag);
535 	if (error) {
536 		device_printf(sc->ae_dev, "could not creare parent DMA tag.\n");
537 		return (error);
538 	}
539 
540 	/*
541 	 * Create DMA stuffs for TxD.
542 	 */
543 	sc->txd_base = bus_dmamem_coherent_any(sc->dma_parent_tag, 4,
544 			AE_TXD_BUFSIZE_DEFAULT, BUS_DMA_WAITOK | BUS_DMA_ZERO,
545 			&sc->dma_txd_tag, &sc->dma_txd_map,
546 			&sc->dma_txd_busaddr);
547 	if (sc->txd_base == NULL) {
548 		device_printf(sc->ae_dev, "could not creare TxD DMA stuffs.\n");
549 		return ENOMEM;
550 	}
551 
552 	/*
553 	 * Create DMA stuffs for TxS.
554 	 */
555 	sc->txs_base = bus_dmamem_coherent_any(sc->dma_parent_tag, 4,
556 			AE_TXS_COUNT_DEFAULT * 4, BUS_DMA_WAITOK | BUS_DMA_ZERO,
557 			&sc->dma_txs_tag, &sc->dma_txs_map,
558 			&sc->dma_txs_busaddr);
559 	if (sc->txs_base == NULL) {
560 		device_printf(sc->ae_dev, "could not creare TxS DMA stuffs.\n");
561 		return ENOMEM;
562 	}
563 
564 	/*
565 	 * Create DMA stuffs for RxD.
566 	 */
567 	sc->rxd_base_dma = bus_dmamem_coherent_any(sc->dma_parent_tag, 128,
568 				AE_RXD_COUNT_DEFAULT * 1536 + 120,
569 				BUS_DMA_WAITOK | BUS_DMA_ZERO,
570 				&sc->dma_rxd_tag, &sc->dma_rxd_map,
571 				&busaddr);
572 	if (sc->rxd_base_dma == NULL) {
573 		device_printf(sc->ae_dev, "could not creare RxD DMA stuffs.\n");
574 		return ENOMEM;
575 	}
576 	sc->dma_rxd_busaddr = busaddr + 120;
577 	sc->rxd_base = (struct ae_rxd *)(sc->rxd_base_dma + 120);
578 
579 	return (0);
580 }
581 
582 static void
583 ae_mac_config(struct ae_softc *sc)
584 {
585 	struct mii_data *mii;
586 	uint32_t val;
587 
588 	mii = device_get_softc(sc->ae_miibus);
589 	val = AE_READ_4(sc, AE_MAC_REG);
590 	val &= ~AE_MAC_FULL_DUPLEX;
591 	/* XXX disable AE_MAC_TX_FLOW_EN? */
592 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
593 		val |= AE_MAC_FULL_DUPLEX;
594 	AE_WRITE_4(sc, AE_MAC_REG, val);
595 }
596 
597 static int
598 ae_rxeof(struct ae_softc *sc, struct ae_rxd *rxd)
599 {
600 	struct ifnet *ifp = &sc->arpcom.ac_if;
601 	struct mbuf *m;
602 	unsigned int size;
603 	uint16_t flags;
604 
605 	flags = le16toh(rxd->flags);
606 #ifdef AE_DEBUG
607 	if_printf(ifp, "Rx interrupt occuried.\n");
608 #endif
609 	size = le16toh(rxd->len) - ETHER_CRC_LEN;
610 	if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN -
611 		    sizeof(struct ether_vlan_header))) {
612 		if_printf(ifp, "Runt frame received.");
613 		return (EIO);
614 	}
615 
616 	m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp);
617 	if (m == NULL)
618 		return (ENOBUFS);
619 
620 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
621 	    (flags & AE_RXD_HAS_VLAN)) {
622 		m->m_pkthdr.ether_vlantag = AE_RXD_VLAN(le16toh(rxd->vlan));
623 		m->m_flags |= M_VLANTAG;
624 	}
625 	ifp->if_input(ifp, m, NULL, -1);
626 
627 	return (0);
628 }
629 
/*
 * Rx interrupt service: walk the descriptor ring consuming every
 * entry the hardware marked AE_RXD_UPDATE, hand good frames to
 * ae_rxeof(), count errors, and report the new ring position to the
 * chip through the Rx mailbox register.
 */
static void
ae_rx_intr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ae_rxd *rxd;
	uint16_t flags;
	int error;

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
			BUS_DMASYNC_POSTREAD);
	for (;;) {
		rxd = (struct ae_rxd *)(sc->rxd_base + sc->rxd_cur);

		flags = le16toh(rxd->flags);
		if ((flags & AE_RXD_UPDATE) == 0)
			break;
		/* Clear UPDATE: hand the descriptor back to the chip. */
		rxd->flags = htole16(flags & ~AE_RXD_UPDATE);

		/* Update stats. */
		ae_update_stats_rx(flags, &sc->stats);

		/*
		 * Update position index.
		 */
		sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;
		if ((flags & AE_RXD_SUCCESS) == 0) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			continue;
		}

		error = ae_rxeof(sc, rxd);
		if (error)
			IFNET_STAT_INC(ifp, ierrors, 1);
		else
			IFNET_STAT_INC(ifp, ipackets, 1);
	}

	/* Update Rx index. */
	AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
}
673 
/*
 * Tx completion service: consume TxS entries flagged AE_TXS_UPDATE,
 * advance the TxD ack offset past each completed frame (header plus
 * 4-byte-aligned data), maintain counters and the watchdog, and kick
 * the transmit queue again once ring space has been freed.
 */
static void
ae_tx_intr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ae_txd *txd;
	struct ae_txs *txs;
	uint16_t flags;

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_POSTREAD);

	for (;;) {
		txs = sc->txs_base + sc->txs_ack;

		flags = le16toh(txs->flags);
		if ((flags & AE_TXS_UPDATE) == 0)
			break;
		txs->flags = htole16(flags & ~AE_TXS_UPDATE);

		/* Update stats. */
		ae_update_stats_tx(flags, &sc->stats);

		/*
		 * Update TxS position.
		 */
		sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
		sc->ae_flags |= AE_FLAG_TXAVAIL;
		txd = (struct ae_txd *)(sc->txd_base + sc->txd_ack);
		/* TxS and TxD describe the same frame; lengths must agree. */
		if (txs->len != txd->len) {
			device_printf(sc->ae_dev, "Size mismatch: "
				"TxS:%d TxD:%d\n",
				le16toh(txs->len), le16toh(txd->len));
		}

		/*
		 * Move txd ack and align on 4-byte boundary.
		 */
		sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) + 4 + 3) & ~3) %
		    AE_TXD_BUFSIZE_DEFAULT;
		if ((flags & AE_TXS_SUCCESS) != 0)
			IFNET_STAT_INC(ifp, opackets, 1);
		else
			IFNET_STAT_INC(ifp, oerrors, 1);
		sc->tx_inproc--;
	}

	if (sc->tx_inproc < 0) {
		/* XXX assert? */
		if_printf(ifp, "Received stray Tx interrupt(s).\n");
		sc->tx_inproc = 0;
	}
	if (sc->tx_inproc == 0)
		ifp->if_timer = 0;	/* Unarm watchdog. */
	if (sc->ae_flags & AE_FLAG_TXAVAIL) {
		ifq_clr_oactive(&ifp->if_snd);
		if (!ifq_is_empty(&ifp->if_snd))
#ifdef foo
			ae_intr(sc);
#else
			if_devstart(ifp);
#endif
	}

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_PREWRITE);
}
746 
/*
 * Main interrupt handler; runs with the interface serializer held
 * (asserted below).  Acknowledges and masks pending interrupts,
 * reinitializes the chip on DMA-timeout/link-down conditions,
 * services Tx/Rx events, then unmasks by clearing the ISR register.
 */
static void
ae_intr(void *xsc)
{
	struct ae_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Quick check: return if none of our interrupt sources fired. */
	val = AE_READ_4(sc, AE_ISR_REG);
	if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
		return;

#ifdef foo
	AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);
#endif

	/* Read interrupt status. */
	val = AE_READ_4(sc, AE_ISR_REG);

	/* Clear interrupts and disable them. */
	AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);

	if (ifp->if_flags & IFF_RUNNING) {
		/* Fatal conditions: bring the whole chip back up. */
		if (val & (AE_ISR_DMAR_TIMEOUT |
			   AE_ISR_DMAW_TIMEOUT |
			   AE_ISR_PHY_LINKDOWN)) {
			ae_init(sc);
		}
		if (val & AE_ISR_TX_EVENT)
			ae_tx_intr(sc);
		if (val & AE_ISR_RX_EVENT)
			ae_rx_intr(sc);
	}

	/* Re-enable interrupts. */
	AE_WRITE_4(sc, AE_ISR_REG, 0);
}
785 
/*
 * (Re)initialize the hardware and bring the interface up: reset the
 * chip, program the station address and ring addresses/sizes, timing
 * and threshold registers, start DMA, unmask interrupts, configure
 * the MAC and Rx filter, enable Tx/Rx and kick off the MII tick.
 * The register programming below is order-sensitive.  Runs under the
 * interface serializer (asserted).
 */
static void
ae_init(void *xsc)
{
	struct ae_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint32_t val;
	bus_addr_t addr;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc->ae_miibus);
	ae_stop(sc);
	ae_reset(sc);
	ae_pcie_init(sc);
	ae_powersave_disable(sc);

	/*
	 * Clear and disable interrupts.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

	/*
	 * Set the MAC address.
	 */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
	AE_WRITE_4(sc, AE_EADDR0_REG, val);
	val = eaddr[0] << 8 | eaddr[1];
	AE_WRITE_4(sc, AE_EADDR1_REG, val);

	/*
	 * Set ring buffers base addresses.  All rings share the same
	 * high 32 address bits (single AE_DESC_ADDR_HI_REG register).
	 */
	addr = sc->dma_rxd_busaddr;
	AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
	AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
	addr = sc->dma_txd_busaddr;
	AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
	addr = sc->dma_txs_busaddr;
	AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));

	/*
	 * Configure ring buffers sizes.  TxD size is programmed in
	 * 4-byte units.
	 */
	AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
	AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
	AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);

	/*
	 * Configure interframe gap parameters.
	 */
	val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
	    AE_IFG_TXIPG_MASK) |
	    ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
	    AE_IFG_RXIPG_MASK) |
	    ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
	    AE_IFG_IPGR1_MASK) |
	    ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
	    AE_IFG_IPGR2_MASK);
	AE_WRITE_4(sc, AE_IFG_REG, val);

	/*
	 * Configure half-duplex operation.
	 */
	val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
	    AE_HDPX_LCOL_MASK) |
	    ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
	    AE_HDPX_RETRY_MASK) |
	    ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
	    AE_HDPX_ABEBT_MASK) |
	    ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
	    AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
	AE_WRITE_4(sc, AE_HDPX_REG, val);

	/*
	 * Configure interrupt moderate timer.
	 */
	AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
	val = AE_READ_4(sc, AE_MASTER_REG);
	val |= AE_MASTER_IMT_EN;
	AE_WRITE_4(sc, AE_MASTER_REG, val);

	/*
	 * Configure interrupt clearing timer.
	 */
	AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);

	/*
	 * Configure MTU.
	 */
	val = ifp->if_mtu + ETHER_HDR_LEN + sizeof(struct ether_vlan_header) +
	    ETHER_CRC_LEN;
	AE_WRITE_2(sc, AE_MTU_REG, val);

	/*
	 * Configure cut-through threshold.
	 */
	AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);

	/*
	 * Configure flow control.
	 */
	AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
	AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
	    (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
	    (AE_RXD_COUNT_DEFAULT / 12));

	/*
	 * Init mailboxes.  (NOTE: rxd_cur is zeroed twice; the second
	 * assignment is redundant but harmless.)
	 */
	sc->txd_cur = sc->rxd_cur = 0;
	sc->txs_ack = sc->txd_ack = 0;
	sc->rxd_cur = 0;
	AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
	AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
	sc->tx_inproc = 0;
	sc->ae_flags |= AE_FLAG_TXAVAIL; /* Free Tx's available. */

	/*
	 * Enable DMA.
	 */
	AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
	AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

	/*
	 * Check if everything is OK.
	 */
	val = AE_READ_4(sc, AE_ISR_REG);
	if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
		device_printf(sc->ae_dev, "Initialization failed.\n");
		return;
	}

	/*
	 * Clear interrupt status.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
	AE_WRITE_4(sc, AE_ISR_REG, 0x0);

	/*
	 * Enable interrupts.
	 */
	val = AE_READ_4(sc, AE_MASTER_REG);
	AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
	AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);

	/*
	 * Disable WOL.
	 */
	AE_WRITE_4(sc, AE_WOL_REG, 0);

	/*
	 * Configure MAC.
	 */
	val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
	    AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
	    AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
	    ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
	    ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
	    AE_MAC_PREAMBLE_MASK);
	AE_WRITE_4(sc, AE_MAC_REG, val);

	/*
	 * Configure Rx MAC.
	 */
	ae_rxfilter(sc);
	ae_rxvlan(sc);

	/*
	 * Enable Tx/Rx.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);

	sc->ae_flags &= ~AE_FLAG_LINK;
	mii_mediachg(mii);	/* Switch to the current media. */

	callout_reset(&sc->ae_tick_ch, hz, ae_tick, sc);
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}
969 
970 static void
971 ae_watchdog(struct ifnet *ifp)
972 {
973 	struct ae_softc *sc = ifp->if_softc;
974 
975 	ASSERT_SERIALIZED(ifp->if_serializer);
976 
977 	if ((sc->ae_flags & AE_FLAG_LINK) == 0)
978 		if_printf(ifp, "watchdog timeout (missed link).\n");
979 	else
980 		if_printf(ifp, "watchdog timeout - resetting.\n");
981 	IFNET_STAT_INC(ifp, oerrors, 1);
982 
983 	ae_init(sc);
984 	if (!ifq_is_empty(&ifp->if_snd))
985 		if_devstart(ifp);
986 }
987 
988 static void
989 ae_tick(void *xsc)
990 {
991 	struct ae_softc *sc = xsc;
992 	struct ifnet *ifp = &sc->arpcom.ac_if;
993 	struct mii_data *mii = device_get_softc(sc->ae_miibus);
994 
995 	lwkt_serialize_enter(ifp->if_serializer);
996 	mii_tick(mii);
997 	callout_reset(&sc->ae_tick_ch, hz, ae_tick, sc);
998 	lwkt_serialize_exit(ifp->if_serializer);
999 }
1000 
1001 static void
1002 ae_rxvlan(struct ae_softc *sc)
1003 {
1004 	struct ifnet *ifp = &sc->arpcom.ac_if;
1005 	uint32_t val;
1006 
1007 	val = AE_READ_4(sc, AE_MAC_REG);
1008 	val &= ~AE_MAC_RMVLAN_EN;
1009 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1010 		val |= AE_MAC_RMVLAN_EN;
1011 	AE_WRITE_4(sc, AE_MAC_REG, val);
1012 }
1013 
1014 static void
1015 ae_rxfilter(struct ae_softc *sc)
1016 {
1017 	struct ifnet *ifp = &sc->arpcom.ac_if;
1018 	struct ifmultiaddr *ifma;
1019 	uint32_t crc;
1020 	uint32_t mchash[2];
1021 	uint32_t rxcfg;
1022 
1023 	rxcfg = AE_READ_4(sc, AE_MAC_REG);
1024 	rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);
1025 	rxcfg |= AE_MAC_BCAST_EN;
1026 	if (ifp->if_flags & IFF_PROMISC)
1027 		rxcfg |= AE_MAC_PROMISC_EN;
1028 	if (ifp->if_flags & IFF_ALLMULTI)
1029 		rxcfg |= AE_MAC_MCAST_EN;
1030 
1031 	/*
1032 	 * Wipe old settings.
1033 	 */
1034 	AE_WRITE_4(sc, AE_REG_MHT0, 0);
1035 	AE_WRITE_4(sc, AE_REG_MHT1, 0);
1036 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1037 		AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
1038 		AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
1039 		AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
1040 		return;
1041 	}
1042 
1043 	/*
1044 	 * Load multicast tables.
1045 	 */
1046 	bzero(mchash, sizeof(mchash));
1047 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1048 		if (ifma->ifma_addr->sa_family != AF_LINK)
1049 			continue;
1050 		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1051 			ifma->ifma_addr), ETHER_ADDR_LEN);
1052 		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
1053 	}
1054 	AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
1055 	AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
1056 	AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
1057 }
1058 
1059 static unsigned int
1060 ae_tx_avail_size(struct ae_softc *sc)
1061 {
1062 	unsigned int avail;
1063 
1064 	if (sc->txd_cur >= sc->txd_ack)
1065 		avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
1066 	else
1067 		avail = sc->txd_ack - sc->txd_cur;
1068 	return (avail - 4);     /* 4-byte header. */
1069 }
1070 
/*
 * Copy an outgoing frame into the Tx bounce ring.
 *
 * The ring is a contiguous byte buffer: each frame is laid out as a
 * 4-byte struct ae_txd header followed by the frame data (padded to a
 * 4-byte boundary), wrapping at AE_TXD_BUFSIZE_DEFAULT.  Returns
 * ENOBUFS when the ring (or the TxS status ring) has no room; the
 * mbuf is left untouched in that case so the caller can re-queue it.
 */
static int
ae_encap(struct ae_softc *sc, struct mbuf **m_head)
{
	struct mbuf *m0;
	struct ae_txd *hdr;
	unsigned int to_end;
	uint16_t len;

	M_ASSERTPKTHDR((*m_head));
	m0 = *m_head;
	len = m0->m_pkthdr.len;
	if ((sc->ae_flags & AE_FLAG_TXAVAIL) == 0 ||
	    ae_tx_avail_size(sc) < len) {
#ifdef AE_DEBUG
		if_printf(sc->ifp, "No free Tx available.\n");
#endif
		return ENOBUFS;
	}

	/* Frame header lives at the current producer position. */
	hdr = (struct ae_txd *)(sc->txd_base + sc->txd_cur);
	bzero(hdr, sizeof(*hdr));

	/* Header size. */
	sc->txd_cur = (sc->txd_cur + 4) % AE_TXD_BUFSIZE_DEFAULT;

	/* Space available to the end of the ring */
	to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;

	/* Copy the frame data, splitting it if it wraps past the ring end. */
	if (to_end >= len) {
		m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
	} else {
		m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
		    sc->txd_cur));
		m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);
	}

	/*
	 * Set TxD flags and parameters.
	 */
	if ((m0->m_flags & M_VLANTAG) != 0) {
		/* Ask the hardware to insert the VLAN tag on transmit. */
		hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vlantag));
		hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
	} else {
		hdr->len = htole16(len);
	}

	/*
	 * Set current TxD position and round up to a 4-byte boundary.
	 */
	sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
	if (sc->txd_cur == sc->txd_ack)
		sc->ae_flags &= ~AE_FLAG_TXAVAIL;
#ifdef AE_DEBUG
	if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur);
#endif

	/*
	 * Update TxS position and check if there are empty TxS available.
	 */
	sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
	sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
	if (sc->txs_cur == sc->txs_ack)
		sc->ae_flags &= ~AE_FLAG_TXAVAIL;

	/*
	 * Synchronize DMA memory.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_PREWRITE);

	return (0);
}
1143 
1144 static void
1145 ae_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
1146 {
1147 	struct ae_softc *sc = ifp->if_softc;
1148 	int error, trans;
1149 
1150 	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
1151 	ASSERT_SERIALIZED(ifp->if_serializer);
1152 
1153 #ifdef AE_DEBUG
1154 	if_printf(ifp, "Start called.\n");
1155 #endif
1156 	if ((sc->ae_flags & AE_FLAG_LINK) == 0) {
1157 		ifq_purge(&ifp->if_snd);
1158 		return;
1159 	}
1160 	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
1161 		return;
1162 
1163 	trans = 0;
1164 	while (!ifq_is_empty(&ifp->if_snd)) {
1165 		struct mbuf *m0;
1166 
1167 		m0 = ifq_dequeue(&ifp->if_snd);
1168 		if (m0 == NULL)
1169 			break;  /* Nothing to do. */
1170 
1171 		error = ae_encap(sc, &m0);
1172 		if (error != 0) {
1173 			if (m0 != NULL) {
1174 				ifq_prepend(&ifp->if_snd, m0);
1175 				ifq_set_oactive(&ifp->if_snd);
1176 #ifdef AE_DEBUG
1177 				if_printf(ifp, "Setting OACTIVE.\n");
1178 #endif
1179 			}
1180 			break;
1181 		}
1182 		trans = 1;
1183 		sc->tx_inproc++;
1184 
1185 		/* Bounce a copy of the frame to BPF. */
1186 		ETHER_BPF_MTAP(ifp, m0);
1187 		m_freem(m0);
1188 	}
1189 	if (trans) {	/* Something was dequeued. */
1190 		AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
1191 		ifp->if_timer = AE_TX_TIMEOUT; /* Load watchdog. */
1192 #ifdef AE_DEBUG
1193 		if_printf(ifp, "%d packets dequeued.\n", count);
1194 		if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
1195 #endif
1196 	}
1197 }
1198 
1199 static int
1200 ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1201 {
1202         struct ae_softc *sc = ifp->if_softc;
1203         struct ifreq *ifr;
1204         struct mii_data *mii;
1205         int error = 0, mask;
1206 
1207 	ASSERT_SERIALIZED(ifp->if_serializer);
1208 
1209 	ifr = (struct ifreq *)data;
1210 	switch (cmd) {
1211 	case SIOCSIFFLAGS:
1212 		if (ifp->if_flags & IFF_UP) {
1213 			if (ifp->if_flags & IFF_RUNNING) {
1214 				if (((ifp->if_flags ^ sc->ae_if_flags)
1215 				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1216 					ae_rxfilter(sc);
1217 			} else {
1218 				ae_init(sc);
1219 			}
1220 		} else {
1221 			if (ifp->if_flags & IFF_RUNNING)
1222 				ae_stop(sc);
1223 		}
1224 		sc->ae_if_flags = ifp->if_flags;
1225 		break;
1226 
1227 	case SIOCADDMULTI:
1228 	case SIOCDELMULTI:
1229 		if (ifp->if_flags & IFF_RUNNING)
1230 			ae_rxfilter(sc);
1231 		break;
1232 
1233 	case SIOCSIFMEDIA:
1234 	case SIOCGIFMEDIA:
1235 		mii = device_get_softc(sc->ae_miibus);
1236 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1237 		break;
1238 
1239 	case SIOCSIFCAP:
1240 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1241 		if (mask & IFCAP_VLAN_HWTAGGING) {
1242 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1243 			ae_rxvlan(sc);
1244 		}
1245 		break;
1246 
1247 	default:
1248 		error = ether_ioctl(ifp, cmd, data);
1249 		break;
1250 	}
1251 	return (error);
1252 }
1253 
/*
 * Attach the adapter: map the register BAR, allocate the IRQ, reset
 * the PHY and MAC, allocate DMA rings, determine the station address,
 * and register the network interface and MII bus.  Any failure after
 * the BAR mapping is unwound through ae_detach() via the fail label.
 */
static int
ae_attach(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0;

	sc->ae_dev = dev;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	callout_init(&sc->ae_tick_ch);

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate memory mapped IO
	 */
	sc->ae_mem_rid = PCIR_BAR(0);
	sc->ae_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->ae_mem_rid, RF_ACTIVE);
	if (sc->ae_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->ae_mem_bt = rman_get_bustag(sc->ae_mem_res);
	sc->ae_mem_bh = rman_get_bushandle(sc->ae_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->ae_irq_rid = 0;
	sc->ae_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->ae_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->ae_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/* Set PHY address. */
	sc->ae_phyaddr = AE_PHYADDR_DEFAULT;

	/* Create sysctl tree */
	ae_sysctl_node(sc);

	/* Reset PHY. */
	ae_phy_reset(sc);

	/*
	 * Reset the ethernet controller.
	 * NOTE(review): the ae_reset() return value is ignored here;
	 * a reset timeout is only caught later via the chip-rev check.
	 */
	ae_reset(sc);
	ae_pcie_init(sc);

	/*
	 * Get PCI and chip id/revision.
	 */
	sc->ae_rev = pci_get_revid(dev);
	sc->ae_chip_rev =
	(AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
	AE_MASTER_REVNUM_MASK;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n", sc->ae_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->ae_chip_rev);
	}

	/*
	 * XXX
	 * Unintialized hardware returns an invalid chip id/revision
	 * as well as 0xFFFFFFFF for Tx/Rx fifo length. It seems that
	 * unplugged cable results in putting hardware into automatic
	 * power down mode which in turn returns invalld chip revision.
	 */
	if (sc->ae_chip_rev == 0xFFFF) {
		device_printf(dev,"invalid chip revision : 0x%04x -- "
		    "not initialized?\n", sc->ae_chip_rev);
		error = ENXIO;
		goto fail;
	}
#if 0
	/* Get DMA parameters from PCIe device control register. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr) {
		uint16_t devctl;
		sc->ae_flags |= AE_FLAG_PCIE;
		devctl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		/* Max read request size. */
		sc->ae_dma_rd_burst = ((devctl >> 12) & 0x07) <<
		    DMA_CFG_RD_BURST_SHIFT;
		/* Max payload size. */
		sc->ae_dma_wr_burst = ((devctl >> 5) & 0x07) <<
		    DMA_CFG_WR_BURST_SHIFT;
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((devctl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((devctl >> 5) & 0x07));
		}
	} else {
		sc->ae_dma_rd_burst = DMA_CFG_RD_BURST_128;
		sc->ae_dma_wr_burst = DMA_CFG_WR_BURST_128;
	}
#endif

	/* Create DMA stuffs */
	error = ae_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	ae_get_eaddr(sc);

	/* Hook up the ifnet methods and capabilities. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_init = ae_init;
	ifp->if_watchdog = ae_watchdog;
	ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN - 1);
	ifq_set_ready(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING;
	ifp->if_hwassist = 0;
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->ae_miibus,
			      ae_mediachange, ae_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}
	ether_ifattach(ifp, sc->ae_eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->ae_irq_res));

	error = bus_setup_intr(dev, sc->ae_irq_res, INTR_MPSAFE, ae_intr, sc,
			       &sc->ae_irq_handle, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}
	return 0;
fail:
	ae_detach(dev);
	return (error);
}
1407 
/*
 * Detach the device: stop the hardware and tear down the interrupt
 * under the serializer, unregister the interface, then release bus
 * resources and DMA memory.  Also used as the error-unwind path from
 * ae_attach(), so every release is guarded against partially created
 * state.
 */
static int
ae_detach(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		/* Mark detach in progress so other code paths can bail. */
		sc->ae_flags |= AE_FLAG_DETACH;
		ae_stop(sc);
		bus_teardown_intr(dev, sc->ae_irq_res, sc->ae_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->ae_miibus != NULL)
		device_delete_child(dev, sc->ae_miibus);
	bus_generic_detach(dev);

	if (sc->ae_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->ae_irq_rid,
				     sc->ae_irq_res);
	}
	if (sc->ae_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->ae_mem_rid,
				     sc->ae_mem_res);
	}
	ae_dma_free(sc);

	return (0);
}
1441 
/*
 * Release all DMA resources: for each ring (TxD bounce buffer, TxS
 * status ring, RxD ring) unload the map, free the memory and destroy
 * the tag, then destroy the parent tag.  Each tag is NULL-checked so
 * this is safe to call with partially allocated state.
 */
static void
ae_dma_free(struct ae_softc *sc)
{
	/* Tx descriptor (bounce) ring. */
	if (sc->dma_txd_tag != NULL) {
		bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
		bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
		    sc->dma_txd_map);
		bus_dma_tag_destroy(sc->dma_txd_tag);
	}
	/* Tx status ring. */
	if (sc->dma_txs_tag != NULL) {
		bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
		bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
		    sc->dma_txs_map);
		bus_dma_tag_destroy(sc->dma_txs_tag);
	}
	/* Rx descriptor ring. */
	if (sc->dma_rxd_tag != NULL) {
		bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
		bus_dmamem_free(sc->dma_rxd_tag,
		    sc->rxd_base_dma, sc->dma_rxd_map);
		bus_dma_tag_destroy(sc->dma_rxd_tag);
	}
	if (sc->dma_parent_tag != NULL)
		bus_dma_tag_destroy(sc->dma_parent_tag);
}
1466 
1467 static void
1468 ae_pcie_init(struct ae_softc *sc)
1469 {
1470 	AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG,
1471 		   AE_PCIE_LTSSM_TESTMODE_DEFAULT);
1472 	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG,
1473 		   AE_PCIE_DLL_TX_CTRL_DEFAULT);
1474 }
1475 
/*
 * Enable/reset the PHY and give it time to come up.
 */
static void
ae_phy_reset(struct ae_softc *sc)
{
	AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
	DELAY(1000);    /* XXX: pause(9) ? */
}
1482 
1483 static int
1484 ae_reset(struct ae_softc *sc)
1485 {
1486 	int i;
1487 
1488 	/*
1489 	 * Issue a soft reset.
1490 	 */
1491 	AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
1492 	bus_space_barrier(sc->ae_mem_bt, sc->ae_mem_bh, AE_MASTER_REG, 4,
1493 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1494 
1495 	/*
1496 	 * Wait for reset to complete.
1497 	 */
1498 	for (i = 0; i < AE_RESET_TIMEOUT; i++) {
1499 		if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
1500 			break;
1501 		DELAY(10);
1502 	}
1503 	if (i == AE_RESET_TIMEOUT) {
1504 		device_printf(sc->ae_dev, "reset timeout.\n");
1505 		return (ENXIO);
1506 	}
1507 
1508 	/*
1509 	 * Wait for everything to enter idle state.
1510 	 */
1511 	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
1512 		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
1513 			break;
1514 		DELAY(100);
1515 	}
1516 	if (i == AE_IDLE_TIMEOUT) {
1517 		device_printf(sc->ae_dev, "could not enter idle state.\n");
1518 		return (ENXIO);
1519 	}
1520 	return (0);
1521 }
1522 
1523 static int
1524 ae_check_eeprom_present(struct ae_softc *sc, int *vpdc)
1525 {
1526 	int error;
1527 	uint32_t val;
1528 
1529 	/*
1530 	 * Not sure why, but Linux does this.
1531 	 */
1532 	val = AE_READ_4(sc, AE_SPICTL_REG);
1533 	if ((val & AE_SPICTL_VPD_EN) != 0) {
1534 		val &= ~AE_SPICTL_VPD_EN;
1535 		AE_WRITE_4(sc, AE_SPICTL_REG, val);
1536 	}
1537 	error = pci_find_extcap(sc->ae_dev, PCIY_VPD, vpdc);
1538 	return (error);
1539 }
1540 
1541 static int
1542 ae_vpd_read_word(struct ae_softc *sc, int reg, uint32_t *word)
1543 {
1544 	uint32_t val;
1545 	int i;
1546 
1547 	AE_WRITE_4(sc, AE_VPD_DATA_REG, 0);	/* Clear register value. */
1548 
1549 	/*
1550 	 * VPD registers start at offset 0x100. Read them.
1551 	 */
1552 	val = 0x100 + reg * 4;
1553 	AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
1554 	    AE_VPD_CAP_ADDR_MASK);
1555 	for (i = 0; i < AE_VPD_TIMEOUT; i++) {
1556 		DELAY(2000);
1557 		val = AE_READ_4(sc, AE_VPD_CAP_REG);
1558 		if ((val & AE_VPD_CAP_DONE) != 0)
1559 			break;
1560 	}
1561 	if (i == AE_VPD_TIMEOUT) {
1562 		device_printf(sc->ae_dev, "timeout reading VPD register %d.\n",
1563 		    reg);
1564 		return (ETIMEDOUT);
1565 	}
1566 	*word = AE_READ_4(sc, AE_VPD_DATA_REG);
1567 	return (0);
1568 }
1569 
/*
 * Read the station address from the VPD (EEPROM) configuration space.
 *
 * Each data word in VPD space is preceded by a signature word naming
 * the target register; only AE_EADDR0_REG / AE_EADDR1_REG are of
 * interest.  Fills eaddr[0..1] and returns 0 on success, ENOENT if
 * both address words were not found, or another errno on read error.
 */
static int
ae_get_vpd_eaddr(struct ae_softc *sc, uint32_t *eaddr)
{
	uint32_t word, reg, val;
	int error;
	int found;
	int vpdc;
	int i;

	/*
	 * Check for EEPROM.
	 */
	error = ae_check_eeprom_present(sc, &vpdc);
	if (error != 0)
		return (error);

	/*
	 * Read the VPD configuration space.
	 * Each register is prefixed with signature,
	 * so we can check if it is valid.
	 */
	for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
		error = ae_vpd_read_word(sc, i, &word);
		if (error != 0)
			break;

		/*
		 * Check signature.
		 */
		if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
			break;
		reg = word >> AE_VPD_REG_SHIFT;
		i++;	/* Move to the next word. */
		if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
			continue;

		/* Data word follows the signature word. */
		error = ae_vpd_read_word(sc, i, &val);
		if (error != 0)
			break;
		if (reg == AE_EADDR0_REG)
			eaddr[0] = val;
		else
			eaddr[1] = val;
		found++;
	}
	/* Need both halves of the address. */
	if (found < 2)
		return (ENOENT);

	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */
	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->ae_dev,
			    "VPD ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}
1627 
1628 static int
1629 ae_get_reg_eaddr(struct ae_softc *sc, uint32_t *eaddr)
1630 {
1631 	/*
1632 	 * BIOS is supposed to set this.
1633 	 */
1634 	eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
1635 	eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
1636 	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */
1637 	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
1638 		if (bootverbose)
1639 			device_printf(sc->ae_dev,
1640 			    "Ethetnet address registers are invalid.\n");
1641 		return (EINVAL);
1642 	}
1643 	return (0);
1644 }
1645 
/*
 * Determine the station address: try the VPD (EEPROM) first, then the
 * BIOS-programmed EADDR registers, and finally fall back to a random
 * locally-administered address with an ASUSTek OUI.
 */
static void
ae_get_eaddr(struct ae_softc *sc)
{
	uint32_t eaddr[2] = {0, 0};
	int error;

	/*
	 * Check for EEPROM (VPD) first, then the registers.
	 */
	error = ae_get_vpd_eaddr(sc, eaddr);
	if (error)
		error = ae_get_reg_eaddr(sc, eaddr);
	if (error) {
		if (bootverbose)
			device_printf(sc->ae_dev,
			    "Generating random ethernet address.\n");
		eaddr[0] = karc4random();
		/*
		 * Set OUI to ASUSTek COMPUTER INC.
		 */
		sc->ae_eaddr[0] = 0x02;	/* U/L bit set. */
		sc->ae_eaddr[1] = 0x1f;
		sc->ae_eaddr[2] = 0xc6;
		sc->ae_eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->ae_eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->ae_eaddr[5] = (eaddr[0] >> 0) & 0xff;
	} else {
		/* eaddr[1] holds address bytes 0-1, eaddr[0] bytes 2-5. */
		sc->ae_eaddr[0] = (eaddr[1] >> 8) & 0xff;
		sc->ae_eaddr[1] = (eaddr[1] >> 0) & 0xff;
		sc->ae_eaddr[2] = (eaddr[0] >> 24) & 0xff;
		sc->ae_eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->ae_eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->ae_eaddr[5] = (eaddr[0] >> 0) & 0xff;
	}
}
1681 
1682 static int
1683 ae_mediachange(struct ifnet *ifp)
1684 {
1685 	struct ae_softc *sc = ifp->if_softc;
1686 	struct mii_data *mii = device_get_softc(sc->ae_miibus);
1687 	int error;
1688 
1689 	ASSERT_SERIALIZED(ifp->if_serializer);
1690 	if (mii->mii_instance != 0) {
1691 		struct mii_softc *miisc;
1692 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1693 			mii_phy_reset(miisc);
1694 	}
1695 	error = mii_mediachg(mii);
1696 	return (error);
1697 }
1698 
/*
 * ifmedia status callback: poll the PHY and report the current media
 * status and active media.  Called with the serializer held.
 */
static void
ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ae_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->ae_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);
	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
1710 
1711 static void
1712 ae_update_stats_tx(uint16_t flags, struct ae_stats *stats)
1713 {
1714 	if ((flags & AE_TXS_BCAST) != 0)
1715 		stats->tx_bcast++;
1716 	if ((flags & AE_TXS_MCAST) != 0)
1717 		stats->tx_mcast++;
1718 	if ((flags & AE_TXS_PAUSE) != 0)
1719 		stats->tx_pause++;
1720 	if ((flags & AE_TXS_CTRL) != 0)
1721 		stats->tx_ctrl++;
1722 	if ((flags & AE_TXS_DEFER) != 0)
1723 		stats->tx_defer++;
1724 	if ((flags & AE_TXS_EXCDEFER) != 0)
1725 		stats->tx_excdefer++;
1726 	if ((flags & AE_TXS_SINGLECOL) != 0)
1727 		stats->tx_singlecol++;
1728 	if ((flags & AE_TXS_MULTICOL) != 0)
1729 		stats->tx_multicol++;
1730 	if ((flags & AE_TXS_LATECOL) != 0)
1731 		stats->tx_latecol++;
1732 	if ((flags & AE_TXS_ABORTCOL) != 0)
1733 		stats->tx_abortcol++;
1734 	if ((flags & AE_TXS_UNDERRUN) != 0)
1735 		stats->tx_underrun++;
1736 }
1737 
1738 static void
1739 ae_update_stats_rx(uint16_t flags, struct ae_stats *stats)
1740 {
1741 	if ((flags & AE_RXD_BCAST) != 0)
1742 		stats->rx_bcast++;
1743 	if ((flags & AE_RXD_MCAST) != 0)
1744 		stats->rx_mcast++;
1745 	if ((flags & AE_RXD_PAUSE) != 0)
1746 		stats->rx_pause++;
1747 	if ((flags & AE_RXD_CTRL) != 0)
1748 		stats->rx_ctrl++;
1749 	if ((flags & AE_RXD_CRCERR) != 0)
1750 		stats->rx_crcerr++;
1751 	if ((flags & AE_RXD_CODEERR) != 0)
1752 		stats->rx_codeerr++;
1753 	if ((flags & AE_RXD_RUNT) != 0)
1754 		stats->rx_runt++;
1755 	if ((flags & AE_RXD_FRAG) != 0)
1756 		stats->rx_frag++;
1757 	if ((flags & AE_RXD_TRUNC) != 0)
1758 		stats->rx_trunc++;
1759 	if ((flags & AE_RXD_ALIGN) != 0)
1760 		stats->rx_align++;
1761 }
1762 
/*
 * Power-management resume: reset the PHY and, if the interface was
 * configured up, reinitialize the chip.  All done under the
 * interface serializer.
 */
static int
ae_resume(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
#if 0
	AE_READ_4(sc, AE_WOL_REG);	/* Clear WOL status. */
#endif
	ae_phy_reset(sc);
	if ((ifp->if_flags & IFF_UP) != 0)
		ae_init(sc);
	lwkt_serialize_exit(ifp->if_serializer);
	return (0);
}
1779 
/*
 * Power-management suspend: stop the chip under the serializer.
 * Wake-on-LAN programming is deliberately skipped (see the disabled
 * ae_pm_init() call below).
 */
static int
ae_suspend(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	ae_stop(sc);
#if 0
	/* we don't use ae_pm_init because we don't want WOL */
	ae_pm_init(sc);
#endif
	lwkt_serialize_exit(ifp->if_serializer);
	return (0);
}
1795 
/*
 * System shutdown: suspend the device, then put the PHY into its
 * power-save mode.
 */
static int
ae_shutdown(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ae_suspend(dev);

	lwkt_serialize_enter(ifp->if_serializer);
	ae_powersave_enable(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}
1810 
/*
 * Clear the PHY power-save bit via the indirect debug-port registers
 * so the PHY stays fully powered.
 */
static void
ae_powersave_disable(struct ae_softc *sc)
{
	uint32_t val;

	/* Select debug register 0 and read it back. */
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
	if (val & AE_PHY_DBG_POWERSAVE) {
		val &= ~AE_PHY_DBG_POWERSAVE;
		AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
		DELAY(1000);	/* Give the PHY time to settle. */
	}
}
1824 
/*
 * Put the PHY into power-save mode via the indirect debug-port
 * registers.  The register/value pairs are undocumented (see XXX
 * below); the write order is presumably significant — do not reorder.
 */
static void
ae_powersave_enable(struct ae_softc *sc)
{
	uint32_t val;

	/*
	 * XXX magic numbers.
	 */
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);
}
1841