/*	$OpenBSD: if_ale.c,v 1.48 2020/07/10 13:26:37 patrick Exp $	*/
/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/ale/if_ale.c,v 1.3 2008/12/03 09:01:12 yongari Exp $
 */

/* Driver for Atheros AR8121/AR8113/AR8114 PCIe Ethernet. */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_alereg.h>

int	ale_match(struct device *, void *, void *);
void	ale_attach(struct device *, struct device *, void *);
int	ale_detach(struct device *, int);
int	ale_activate(struct device *, int);

int	ale_miibus_readreg(struct device *, int, int);
void	ale_miibus_writereg(struct device *, int, int, int);
void	ale_miibus_statchg(struct device *);

int	ale_init(struct ifnet *);
void	ale_start(struct ifnet *);
int	ale_ioctl(struct ifnet *, u_long, caddr_t);
void	ale_watchdog(struct ifnet *);
int	ale_mediachange(struct ifnet *);
void	ale_mediastatus(struct ifnet *, struct ifmediareq *);

int	ale_intr(void *);
int	ale_rxeof(struct ale_softc *sc);
void	ale_rx_update_page(struct ale_softc *, struct ale_rx_page **,
	    uint32_t, uint32_t *);
void	ale_rxcsum(struct ale_softc *, struct mbuf *, uint32_t);
void	ale_txeof(struct ale_softc *);

int	ale_dma_alloc(struct ale_softc *);
void	ale_dma_free(struct ale_softc *);
int	ale_encap(struct ale_softc *, struct mbuf *);
void	ale_init_rx_pages(struct ale_softc *);
void	ale_init_tx_ring(struct ale_softc *);

void	ale_stop(struct ale_softc *);
void	ale_tick(void *);
void	ale_get_macaddr(struct ale_softc *);
void	ale_mac_config(struct ale_softc *);
void	ale_phy_reset(struct ale_softc *);
void	ale_reset(struct ale_softc *);
void	ale_iff(struct ale_softc *);
void	ale_rxvlan(struct ale_softc *);
void	ale_stats_clear(struct ale_softc *);
void	ale_stats_update(struct ale_softc *);
void	ale_stop_mac(struct ale_softc *);

const struct pci_matchid ale_devices[] = {
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1E }
};

struct cfattach ale_ca = {
	sizeof (struct ale_softc), ale_match, ale_attach, NULL,
	ale_activate
};

struct cfdriver ale_cd = {
	NULL, "ale", DV_IFNET
};

int aledebug = 0;
#define DPRINTF(x)	do { if (aledebug) printf x; } while (0)

#define ALE_CSUM_FEATURES	(M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)

int
ale_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct ale_softc *sc = (struct ale_softc *)dev;
	uint32_t v;
	int i;

	if (phy != sc->ale_phyaddr)
		return (0);

	if ((sc->ale_flags & ALE_FLAG_FASTETHER) != 0 &&
	    reg == MII_EXTSR)
		return (0);

	CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

void
ale_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct ale_softc *sc = (struct ale_softc *)dev;
	uint32_t v;
	int i;

	if (phy != sc->ale_phyaddr)
		return;

	CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}
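
/*
 * Note: both MDIO accessors above busy-wait for the controller to
 * clear MDIO_OP_BUSY, so a single PHY register access is bounded by
 * roughly ALE_PHY_TIMEOUT * 5 microseconds of polling.
 */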

void
ale_miibus_statchg(struct device *dev)
{
	struct ale_softc *sc = (struct ale_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t reg;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->ale_flags &= ~ALE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->ale_flags |= ALE_FLAG_LINK;
			break;

		case IFM_1000_T:
			if ((sc->ale_flags & ALE_FLAG_FASTETHER) == 0)
				sc->ale_flags |= ALE_FLAG_LINK;
			break;

		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	ale_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->ale_flags & ALE_FLAG_LINK) != 0) {
		ale_mac_config(sc);
		/* Reenable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALE_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
	}
}

void
ale_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

int
ale_mediachange(struct ifnet *ifp)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
ale_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, ale_devices,
	    sizeof (ale_devices) / sizeof (ale_devices[0]));
}

void
ale_get_macaddr(struct ale_softc *sc)
{
	uint32_t ea[2], reg;
	int i, vpdc;

	reg = CSR_READ_4(sc, ALE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, ALE_SPI_CTRL, reg);
	}

	if (pci_get_capability(sc->sc_pct, sc->sc_pcitag, PCI_CAP_VPD,
	    &vpdc, NULL)) {
		/*
		 * PCI VPD capability found; let TWSI reload the
		 * EEPROM.  This sets the ethernet address of the
		 * controller.
		 */
		CSR_WRITE_4(sc, ALE_TWSI_CTRL, CSR_READ_4(sc, ALE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, ALE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    sc->sc_dev.dv_xname);
	} else {
		if (aledebug)
			printf("%s: PCI VPD capability not found!\n",
			    sc->sc_dev.dv_xname);
	}

	ea[0] = CSR_READ_4(sc, ALE_PAR0);
	ea[1] = CSR_READ_4(sc, ALE_PAR1);
	sc->ale_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->ale_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->ale_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->ale_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->ale_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->ale_eaddr[5] = (ea[0] >> 0) & 0xFF;
}
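
/*
 * The station address is spread over two registers: PAR1 holds the two
 * most significant octets and PAR0 the remaining four, i.e. for the
 * address aa:bb:cc:dd:ee:ff the chip reports PAR1 = 0x0000aabb and
 * PAR0 = 0xccddeeff.  ale_init() programs the address back in the same
 * layout.
 */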

void
ale_phy_reset(struct ale_softc *sc)
{
	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALE_GPHY_CTRL,
	    GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET |
	    GPHY_CTRL_PHY_PLL_ON);
	DELAY(1000);
	CSR_WRITE_2(sc, ALE_GPHY_CTRL,
	    GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE |
	    GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_PLL_ON);
	DELAY(1000);

#define	ATPHY_DBG_ADDR		0x1D
#define	ATPHY_DBG_DATA		0x1E

	/* Enable hibernation mode. */
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x0B);
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0xBC00);
	/* Set Class A/B for all modes. */
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x00);
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x02EF);
	/* Enable 10BT power saving. */
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x12);
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x4C04);
	/* Adjust 1000T power. */
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x04);
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x8BBB);
	/* 10BT center tap voltage. */
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x05);
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x2C46);

#undef	ATPHY_DBG_ADDR
#undef	ATPHY_DBG_DATA
	DELAY(1000);
}
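
/*
 * Each PHY fixup above is a two-step access through the PHY debug
 * port: the debug register index is written to ATPHY_DBG_ADDR and the
 * value to ATPHY_DBG_DATA, which is why the writes always come in
 * ADDR/DATA pairs (the last two pairs previously wrote both values to
 * ATPHY_DBG_ADDR, apparently a copy/paste slip).
 */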

void
ale_attach(struct device *parent, struct device *self, void *aux)
{
	struct ale_softc *sc = (struct ale_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	int mii_flags, error = 0;
	uint32_t rxf_len, txf_len;
	const char *chipname;

	/*
	 * Allocate IO memory
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ALE_PCIR_BAR);
	if (pci_mapreg_map(pa, ALE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, ale_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Set PHY address. */
	sc->ale_phyaddr = ALE_PHY_ADDR;

	/* Reset PHY. */
	ale_phy_reset(sc);

	/* Reset the ethernet controller. */
	ale_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->ale_rev = PCI_REVISION(pa->pa_class);
	if (sc->ale_rev >= 0xF0) {
		/* L2E Rev. B. AR8114 */
		sc->ale_flags |= ALE_FLAG_FASTETHER;
		chipname = "AR8114";
	} else {
		if ((CSR_READ_4(sc, ALE_PHY_STATUS) & PHY_STATUS_100M) != 0) {
			/* L1E AR8121 */
			sc->ale_flags |= ALE_FLAG_JUMBO;
			chipname = "AR8121";
		} else {
			/* L2E Rev. A. AR8113 */
			sc->ale_flags |= ALE_FLAG_FASTETHER;
			chipname = "AR8113";
		}
	}

	printf(": %s, %s", chipname, intrstr);

	/*
	 * All known controllers seem to require 4 byte aligned Tx
	 * buffers to make Tx checksum offload with the custom
	 * checksum generation method work.
	 */
	sc->ale_flags |= ALE_FLAG_TXCSUM_BUG;

	/*
	 * All known controllers seem to have issues with Rx checksum
	 * offload for fragmented IP datagrams.
	 */
	sc->ale_flags |= ALE_FLAG_RXCSUM_BUG;

	/*
	 * Don't use the Tx CMB.  It is known to cause RRS update
	 * failures under certain circumstances; a typical symptom is
	 * an unexpected sequence number in the Rx handler.
	 */
	sc->ale_flags |= ALE_FLAG_TXCMB_BUG;
	sc->ale_chip_rev = CSR_READ_4(sc, ALE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (aledebug) {
		printf("%s: PCI device revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->ale_rev);
		printf("%s: Chip id/revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->ale_chip_rev);
	}

	/*
	 * Uninitialized hardware returns an invalid chip id/revision
	 * as well as 0xFFFFFFFF for Tx/Rx fifo length.
	 */
	txf_len = CSR_READ_4(sc, ALE_SRAM_TX_FIFO_LEN);
	rxf_len = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);
	if (sc->ale_chip_rev == 0xFFFF || txf_len == 0xFFFFFFFF ||
	    rxf_len == 0xFFFFFFFF) {
		printf("%s: chip revision : 0x%04x, %u Tx FIFO "
		    "%u Rx FIFO -- not initialized?\n", sc->sc_dev.dv_xname,
		    sc->ale_chip_rev, txf_len, rxf_len);
		goto fail;
	}

	if (aledebug) {
		printf("%s: %u Tx FIFO, %u Rx FIFO\n", sc->sc_dev.dv_xname,
		    txf_len, rxf_len);
	}

	/* Set max allowable DMA size. */
	sc->ale_dma_rd_burst = DMA_CFG_RD_BURST_128;
	sc->ale_dma_wr_burst = DMA_CFG_WR_BURST_128;

	error = ale_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	ale_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ale_ioctl;
	ifp->if_start = ale_start;
	ifp->if_watchdog = ale_watchdog;
	ifq_set_maxlen(&ifp->if_snd, ALE_TX_RING_CNT - 1);
	bcopy(sc->ale_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef ALE_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = ale_miibus_readreg;
	sc->sc_miibus.mii_writereg = ale_miibus_writereg;
	sc->sc_miibus.mii_statchg = ale_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, ale_mediachange,
	    ale_mediastatus);
	mii_flags = 0;
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0)
		mii_flags |= MIIF_DOPAUSE;
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, mii_flags);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->ale_tick_ch, ale_tick, sc);

	return;
fail:
	ale_dma_free(sc);
	if (sc->sc_irq_handle != NULL)
		pci_intr_disestablish(pc, sc->sc_irq_handle);
	if (sc->sc_mem_size)
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
}

int
ale_detach(struct device *self, int flags)
{
	struct ale_softc *sc = (struct ale_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	ale_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	ale_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

int
ale_activate(struct device *self, int act)
{
	struct ale_softc *sc = (struct ale_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			ale_stop(sc);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			ale_init(ifp);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

int
ale_dma_alloc(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	int nsegs, error, guard_size, i;

	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0)
		guard_size = ALE_JUMBO_FRAMELEN;
	else
		guard_size = ALE_MAX_FRAMELEN;
	sc->ale_pagesize = roundup(guard_size + ALE_RX_PAGE_SZ,
	    ALE_RX_PAGE_ALIGN);

	/*
	 * Create DMA map for Tx ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALE_TX_RING_SZ, 1,
	    ALE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->ale_cdata.ale_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for Tx ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, ALE_TX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->ale_cdata.ale_tx_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->ale_cdata.ale_tx_ring_seg,
	    nsegs, ALE_TX_RING_SZ, (caddr_t *)&sc->ale_cdata.ale_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map,
	    sc->ale_cdata.ale_tx_ring, ALE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->ale_cdata.ale_tx_ring_seg, 1);
		return error;
	}
	sc->ale_cdata.ale_tx_ring_paddr =
	    sc->ale_cdata.ale_tx_ring_map->dm_segs[0].ds_addr;

	for (i = 0; i < ALE_RX_PAGES; i++) {
		/*
		 * Create DMA map for Rx page.
		 */
		error = bus_dmamap_create(sc->sc_dmat, sc->ale_pagesize, 1,
		    sc->ale_pagesize, 0, BUS_DMA_NOWAIT,
		    &sc->ale_cdata.ale_rx_page[i].page_map);
		if (error)
			return (ENOBUFS);

		/* Allocate DMA'able memory for Rx page. */
		error = bus_dmamem_alloc(sc->sc_dmat, sc->ale_pagesize,
		    ETHER_ALIGN, 0, &sc->ale_cdata.ale_rx_page[i].page_seg,
		    1, &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
		if (error) {
			printf("%s: could not allocate DMA'able memory for "
			    "Rx page.\n", sc->sc_dev.dv_xname);
			return error;
		}
		error = bus_dmamem_map(sc->sc_dmat,
		    &sc->ale_cdata.ale_rx_page[i].page_seg, nsegs,
		    sc->ale_pagesize,
		    (caddr_t *)&sc->ale_cdata.ale_rx_page[i].page_addr,
		    BUS_DMA_NOWAIT);
		if (error)
			return (ENOBUFS);

		/* Load the DMA map for Rx page. */
		error = bus_dmamap_load(sc->sc_dmat,
		    sc->ale_cdata.ale_rx_page[i].page_map,
		    sc->ale_cdata.ale_rx_page[i].page_addr,
		    sc->ale_pagesize, NULL, BUS_DMA_WAITOK);
		if (error) {
			printf("%s: could not load DMA'able memory for "
			    "Rx page.\n", sc->sc_dev.dv_xname);
			bus_dmamem_free(sc->sc_dmat,
			    &sc->ale_cdata.ale_rx_page[i].page_seg, 1);
			return error;
		}
		sc->ale_cdata.ale_rx_page[i].page_paddr =
		    sc->ale_cdata.ale_rx_page[i].page_map->dm_segs[0].ds_addr;
	}

	/*
	 * Create DMA map for Tx CMB.
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALE_TX_CMB_SZ, 1,
	    ALE_TX_CMB_SZ, 0, BUS_DMA_NOWAIT, &sc->ale_cdata.ale_tx_cmb_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for Tx CMB. */
	error = bus_dmamem_alloc(sc->sc_dmat, ALE_TX_CMB_SZ, ETHER_ALIGN, 0,
	    &sc->ale_cdata.ale_tx_cmb_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx CMB.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->ale_cdata.ale_tx_cmb_seg,
	    nsegs, ALE_TX_CMB_SZ, (caddr_t *)&sc->ale_cdata.ale_tx_cmb,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx CMB. */
	error = bus_dmamap_load(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map,
	    sc->ale_cdata.ale_tx_cmb, ALE_TX_CMB_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx CMB.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->ale_cdata.ale_tx_cmb_seg, 1);
		return error;
	}

	sc->ale_cdata.ale_tx_cmb_paddr =
	    sc->ale_cdata.ale_tx_cmb_map->dm_segs[0].ds_addr;

	for (i = 0; i < ALE_RX_PAGES; i++) {
		/*
		 * Create DMA map for Rx CMB.
		 */
		error = bus_dmamap_create(sc->sc_dmat, ALE_RX_CMB_SZ, 1,
		    ALE_RX_CMB_SZ, 0, BUS_DMA_NOWAIT,
		    &sc->ale_cdata.ale_rx_page[i].cmb_map);
		if (error)
			return (ENOBUFS);

		/* Allocate DMA'able memory for Rx CMB. */
		error = bus_dmamem_alloc(sc->sc_dmat, ALE_RX_CMB_SZ,
		    ETHER_ALIGN, 0, &sc->ale_cdata.ale_rx_page[i].cmb_seg, 1,
		    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
		if (error) {
			printf("%s: could not allocate DMA'able memory for "
			    "Rx CMB.\n", sc->sc_dev.dv_xname);
			return error;
		}
		error = bus_dmamem_map(sc->sc_dmat,
		    &sc->ale_cdata.ale_rx_page[i].cmb_seg, nsegs,
		    ALE_RX_CMB_SZ,
		    (caddr_t *)&sc->ale_cdata.ale_rx_page[i].cmb_addr,
		    BUS_DMA_NOWAIT);
		if (error)
			return (ENOBUFS);

		/* Load the DMA map for Rx CMB. */
		error = bus_dmamap_load(sc->sc_dmat,
		    sc->ale_cdata.ale_rx_page[i].cmb_map,
		    sc->ale_cdata.ale_rx_page[i].cmb_addr,
		    ALE_RX_CMB_SZ, NULL, BUS_DMA_WAITOK);
		if (error) {
			printf("%s: could not load DMA'able memory for "
			    "Rx CMB.\n", sc->sc_dev.dv_xname);
			bus_dmamem_free(sc->sc_dmat,
			    &sc->ale_cdata.ale_rx_page[i].cmb_seg, 1);
			return error;
		}
		sc->ale_cdata.ale_rx_page[i].cmb_paddr =
		    sc->ale_cdata.ale_rx_page[i].cmb_map->dm_segs[0].ds_addr;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, ALE_TSO_MAXSIZE,
		    ALE_MAXTXSEGS, ALE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			printf("%s: could not create Tx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return error;
		}
	}

	return (0);
}
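
/*
 * Summary of the DMA resources set up in ale_dma_alloc(): one Tx
 * descriptor ring of ALE_TX_RING_SZ bytes, ALE_RX_PAGES Rx pages of
 * sc->ale_pagesize bytes each, one Tx CMB and one Rx CMB per Rx page.
 * Every block is a single-segment mapping, so the hardware can be
 * programmed with dm_segs[0].ds_addr directly.
 */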

void
ale_dma_free(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	int i;

	/* Tx buffers. */
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}

	/* Tx descriptor ring. */
	if (sc->ale_cdata.ale_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map);
	if (sc->ale_cdata.ale_tx_ring_map != NULL &&
	    sc->ale_cdata.ale_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->ale_cdata.ale_tx_ring_seg, 1);
	sc->ale_cdata.ale_tx_ring = NULL;
	sc->ale_cdata.ale_tx_ring_map = NULL;

	/* Rx page blocks. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		if (sc->ale_cdata.ale_rx_page[i].page_map != NULL)
			bus_dmamap_unload(sc->sc_dmat,
			    sc->ale_cdata.ale_rx_page[i].page_map);
		if (sc->ale_cdata.ale_rx_page[i].page_map != NULL &&
		    sc->ale_cdata.ale_rx_page[i].page_addr != NULL)
			bus_dmamem_free(sc->sc_dmat,
			    &sc->ale_cdata.ale_rx_page[i].page_seg, 1);
		sc->ale_cdata.ale_rx_page[i].page_addr = NULL;
		sc->ale_cdata.ale_rx_page[i].page_map = NULL;
	}

	/* Rx CMB. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL)
			bus_dmamap_unload(sc->sc_dmat,
			    sc->ale_cdata.ale_rx_page[i].cmb_map);
		if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL &&
		    sc->ale_cdata.ale_rx_page[i].cmb_addr != NULL)
			bus_dmamem_free(sc->sc_dmat,
			    &sc->ale_cdata.ale_rx_page[i].cmb_seg, 1);
		sc->ale_cdata.ale_rx_page[i].cmb_addr = NULL;
		sc->ale_cdata.ale_rx_page[i].cmb_map = NULL;
	}

	/* Tx CMB. */
	if (sc->ale_cdata.ale_tx_cmb_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map);
	if (sc->ale_cdata.ale_tx_cmb_map != NULL &&
	    sc->ale_cdata.ale_tx_cmb != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->ale_cdata.ale_tx_cmb_seg, 1);
	sc->ale_cdata.ale_tx_cmb = NULL;
	sc->ale_cdata.ale_tx_cmb_map = NULL;
}

int
ale_encap(struct ale_softc *sc, struct mbuf *m)
{
	struct ale_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, i, prod;

	cflags = vtag = 0;
	poff = 0;

	prod = sc->ale_cdata.ale_tx_prod;
	txd = &sc->ale_cdata.ale_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG)
		goto drop;
	if (error != 0) {
		if (m_defrag(m, M_DONTWAIT)) {
			error = ENOBUFS;
			goto drop;
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0)
			goto drop;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Configure Tx checksum offload. */
	if ((m->m_pkthdr.csum_flags & ALE_CSUM_FEATURES) != 0) {
		/*
		 * The AR81xx supports a custom Tx checksum offload
		 * feature that offloads a single 16 bit checksum
		 * computation, so one of IP, TCP or UDP can be chosen.
		 * Normally the driver sets the checksum start and
		 * insertion positions from the TCP/UDP header, as the
		 * TCP/UDP checksum takes more time to compute than the
		 * IP one.  However, the custom checksum offload seems
		 * to require 4 byte aligned Tx buffers due to a
		 * hardware bug.
		 * The AR81xx also supports explicit Tx checksum
		 * computation if it is told the sizes of the IP and
		 * TCP headers (for UDP the header size does not matter
		 * because it is fixed length).  With that scheme TSO
		 * does not work, so either TSO or explicit Tx checksum
		 * offload has to be chosen.  I chose TSO plus custom
		 * checksum offload with a work-around, which covers
		 * the most common usage of this consumer ethernet
		 * controller.  The work-around burns a lot of CPU
		 * cycles if the Tx buffer is not aligned on a 4 byte
		 * boundary, though.
		 */
		cflags |= ALE_TD_CXSUM;
		/* Set checksum start offset. */
		cflags |= (poff << ALE_TD_CSUM_PLOADOFFSET_SHIFT);
	}

#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if (m->m_flags & M_VLANTAG) {
		vtag = ALE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag);
		vtag = ((vtag << ALE_TD_VLAN_SHIFT) & ALE_TD_VLAN_MASK);
		cflags |= ALE_TD_INSERT_VLAN_TAG;
	}
#endif

	desc = NULL;
	for (i = 0; i < map->dm_nsegs; i++) {
		desc = &sc->ale_cdata.ale_tx_ring[prod];
		desc->addr = htole64(map->dm_segs[i].ds_addr);
		desc->len =
		    htole32(ALE_TX_BYTES(map->dm_segs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->ale_cdata.ale_tx_cnt++;
		ALE_DESC_INC(prod, ALE_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->ale_cdata.ale_tx_prod = prod;

	/* Finally set EOP on the last descriptor. */
	prod = (prod + ALE_TX_RING_CNT - 1) % ALE_TX_RING_CNT;
	desc = &sc->ale_cdata.ale_tx_ring[prod];
	desc->flags |= htole32(ALE_TD_EOP);

	/* Swap dmamaps of the first and the last descriptor. */
	txd = &sc->ale_cdata.ale_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map, 0,
	    sc->ale_cdata.ale_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

 drop:
	m_freem(m);
	return (error);
}
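
/*
 * The dmamap swap at the end of ale_encap() keeps the loaded map
 * attached to the slot that stores the mbuf: ale_txeof() unloads
 * txd->tx_dmamap for the descriptor where txd->tx_m is set, so the map
 * that actually holds the mapping has to travel with the last
 * descriptor of the frame.
 */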

void
ale_start(struct ifnet *ifp)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int enq;

	/* Reclaim transmitted frames. */
	if (sc->ale_cdata.ale_tx_cnt >= ALE_TX_DESC_HIWAT)
		ale_txeof(sc);

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;
	if ((sc->ale_flags & ALE_FLAG_LINK) == 0)
		return;
	if (ifq_empty(&ifp->if_snd))
		return;

	enq = 0;
	for (;;) {
		/* Check descriptor overrun. */
		if (sc->ale_cdata.ale_tx_cnt + ALE_MAXTXSEGS >=
		    ALE_TX_RING_CNT - 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (ale_encap(sc, m) != 0) {
			ifp->if_oerrors++;
			continue;
		}

		enq = 1;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (enq) {
		/* Kick. */
		CSR_WRITE_4(sc, ALE_MBOX_TPD_PROD_IDX,
		    sc->ale_cdata.ale_tx_prod);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = ALE_TX_TIMEOUT;
	}
}

void
ale_watchdog(struct ifnet *ifp)
{
	struct ale_softc *sc = ifp->if_softc;

	if ((sc->ale_flags & ALE_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		ale_init(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	ale_init(ifp);
	ale_start(ifp);
}

int
ale_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			ale_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				ale_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ale_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			ale_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
ale_mac_config(struct ale_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	mii = &sc->sc_miibus;
	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
	    MAC_CFG_SPEED_MASK);
	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
}

void
ale_stats_clear(struct ale_softc *sc)
{
	struct smb sb;
	uint32_t *reg;
	int i;

	/* Clear Rx statistics (reading the MIB counters clears them). */
	for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
		CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
	/* Clear Tx statistics. */
	for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
		CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
}

void
ale_stats_update(struct ale_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ale_hw_stats *stat;
	struct smb sb, *smb;
	uint32_t *reg;
	int i;

	stat = &sc->ale_stats;
	smb = &sb;

	/* Read Rx statistics. */
	for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
		*reg = CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
	/* Read Tx statistics. */
	for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
		*reg = CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_rrs_errs += smb->rx_rrs_errs;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	ifp->if_collisions += smb->tx_single_colls +
	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;

	ifp->if_oerrors += smb->tx_late_colls + smb->tx_excess_colls +
	    smb->tx_underrun + smb->tx_pkts_truncated;

	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
	    smb->rx_alignerrs;
}
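
/*
 * The hardware MIB counters are cleared by reading them (which is all
 * ale_stats_clear() does), so each ale_stats_update() call reads
 * per-interval deltas and accumulates them into the persistent
 * ale_stats totals above.
 */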

int
ale_intr(void *xsc)
{
	struct ale_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;

	status = CSR_READ_4(sc, ALE_INTR_STATUS);
	if ((status & ALE_INTRS) == 0)
		return (0);

	/* Acknowledge and disable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, status | INTR_DIS_INT);

	if (ifp->if_flags & IFF_RUNNING) {
		int error;

		error = ale_rxeof(sc);
		if (error) {
			sc->ale_stats.reset_brk_seq++;
			ale_init(ifp);
			return (0);
		}

		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
			if (status & INTR_DMA_RD_TO_RST)
				printf("%s: DMA read error! -- resetting\n",
				    sc->sc_dev.dv_xname);
			if (status & INTR_DMA_WR_TO_RST)
				printf("%s: DMA write error! -- resetting\n",
				    sc->sc_dev.dv_xname);
			ale_init(ifp);
			return (0);
		}

		ale_txeof(sc);
		ale_start(ifp);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0x7FFFFFFF);
	return (1);
}

void
ale_txeof(struct ale_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ale_txdesc *txd;
	uint32_t cons, prod;
	int prog;

	if (sc->ale_cdata.ale_tx_cnt == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map, 0,
	    sc->ale_cdata.ale_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map, 0,
		    sc->ale_cdata.ale_tx_cmb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		prod = *sc->ale_cdata.ale_tx_cmb & TPD_CNT_MASK;
	} else
		prod = CSR_READ_2(sc, ALE_TPD_CONS_IDX);
	cons = sc->ale_cdata.ale_tx_cons;
	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; cons != prod; prog++,
	     ALE_DESC_INC(cons, ALE_TX_RING_CNT)) {
		if (sc->ale_cdata.ale_tx_cnt <= 0)
			break;
		ifq_clr_oactive(&ifp->if_snd);
		sc->ale_cdata.ale_tx_cnt--;
		txd = &sc->ale_cdata.ale_txdesc[cons];
		if (txd->tx_m != NULL) {
			/* Reclaim transmitted mbufs. */
			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	if (prog > 0) {
		sc->ale_cdata.ale_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending Tx descriptors in the queue.
		 */
		if (sc->ale_cdata.ale_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}
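
/*
 * Note that the Tx consumer index comes from the Tx CMB only when the
 * CMB is usable; with ALE_FLAG_TXCMB_BUG set (which ale_attach()
 * always does for the known chips) the ALE_TPD_CONS_IDX register is
 * read instead, trading an extra register read for correctness.
 */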

void
ale_rx_update_page(struct ale_softc *sc, struct ale_rx_page **page,
    uint32_t length, uint32_t *prod)
{
	struct ale_rx_page *rx_page;

	rx_page = *page;
	/* Update consumer position. */
	rx_page->cons += roundup(length + sizeof(struct rx_rs),
	    ALE_RX_PAGE_ALIGN);
	if (rx_page->cons >= ALE_RX_PAGE_SZ) {
		/*
		 * End of Rx page reached, let hardware reuse
		 * this page.
		 */
		rx_page->cons = 0;
		*rx_page->cmb_addr = 0;
		bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0,
		    rx_page->cmb_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		CSR_WRITE_1(sc, ALE_RXF0_PAGE0 + sc->ale_cdata.ale_rx_curp,
		    RXF_VALID);
		/* Switch to alternate Rx page. */
		sc->ale_cdata.ale_rx_curp ^= 1;
		rx_page = *page =
		    &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
		/* Page flipped, sync CMB and Rx page. */
		bus_dmamap_sync(sc->sc_dmat, rx_page->page_map, 0,
		    rx_page->page_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0,
		    rx_page->cmb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* Sync completed, cache updated producer index. */
		*prod = *rx_page->cmb_addr;
	}
}
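
/*
 * Rx uses the two pages in a ping-pong scheme: the hardware fills one
 * page while the driver drains the other.  Handing a fully consumed
 * page back via RXF_VALID and flipping ale_rx_curp is what keeps the
 * receive path running without per-packet descriptor refills.
 */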

/*
 * It seems that the AR81xx controller can compute partial checksums.
 * The partial checksum value can be used to accelerate checksum
 * computation for fragmented TCP/UDP packets.  The upper network
 * stack already takes advantage of the partial checksum value in the
 * IP reassembly stage, but I'm not sure about the correctness of the
 * partial hardware checksum assistance due to the lack of a data
 * sheet.  In addition, the Rx feature of the controller that requires
 * copying for every frame effectively nullifies one of the nicest
 * offload capabilities of the controller.
 */
void
ale_rxcsum(struct ale_softc *sc, struct mbuf *m, uint32_t status)
{
	struct ip *ip;
	char *p;

	if ((status & ALE_RD_IPCSUM_NOK) == 0)
		m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

	if ((sc->ale_flags & ALE_FLAG_RXCSUM_BUG) == 0) {
		if (((status & ALE_RD_IPV4_FRAG) == 0) &&
		    ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0) &&
		    ((status & ALE_RD_TCP_UDPCSUM_NOK) == 0)) {
			m->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
		}
	} else {
		if ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0 &&
		    (status & ALE_RD_TCP_UDPCSUM_NOK) == 0) {
			p = mtod(m, char *);
			p += ETHER_HDR_LEN;
			if ((status & ALE_RD_802_3) != 0)
				p += LLC_SNAPFRAMELEN;
#if NVLAN > 0
			if (status & ALE_RD_VLAN)
				p += EVL_ENCAPLEN;
#endif
			ip = (struct ip *)p;
			if (ip->ip_off != 0 && (status & ALE_RD_IPV4_DF) == 0)
				return;
			m->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
		}
	}
	/*
	 * Don't mark a bad checksum for TCP/UDP frames, as fragmented
	 * frames may always have the bad-checksum bit of the frame
	 * status set.
	 */
}

/* Process received frames. */
int
ale_rxeof(struct ale_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct ale_rx_page *rx_page;
	struct rx_rs *rs;
	struct mbuf *m;
	uint32_t length, prod, seqno, status;
	int prog;

	rx_page = &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
	bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0,
	    rx_page->cmb_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, rx_page->page_map, 0,
	    rx_page->page_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * Don't access the producer index directly, as the hardware
	 * may update it while the Rx handler is in progress.  It would
	 * be even better if there were a way to let the hardware know
	 * how far the driver has processed received frames.
	 * Alternatively, the hardware could provide a way to disable
	 * CMB updates until the driver acknowledges the end of CMB
	 * access.
	 */
	prod = *rx_page->cmb_addr;
	for (prog = 0; ; prog++) {
		if (rx_page->cons >= prod)
			break;
		rs = (struct rx_rs *)(rx_page->page_addr + rx_page->cons);
		seqno = ALE_RX_SEQNO(letoh32(rs->seqno));
		if (sc->ale_cdata.ale_rx_seqno != seqno) {
			/*
			 * Normally this should not happen unless there
			 * is a severe driver bug or corrupted memory.
			 * However it seems to happen under certain
			 * conditions triggered by abrupt Rx events,
			 * such as a remote host initiating a bulk
			 * transfer.  It's not easy to reproduce and I
			 * doubt it is related to a FIFO overflow in
			 * the hardware or to Tx CMB update activity.
			 * Similar behaviour was seen on the Realtek
			 * 8139, which uses a resembling Rx scheme.
			 */
			if (aledebug)
				printf("%s: garbled seq: %u, expected: %u -- "
				    "resetting!\n", sc->sc_dev.dv_xname,
				    seqno, sc->ale_cdata.ale_rx_seqno);
			return (EIO);
		}
		/* Frame received. */
		sc->ale_cdata.ale_rx_seqno++;
		length = ALE_RX_BYTES(letoh32(rs->length));
		status = letoh32(rs->flags);
		if (status & ALE_RD_ERROR) {
			/*
			 * We want to pass the following frames to the
			 * upper layer regardless of the error status
			 * of the Rx return status:
			 *
			 *  o IP/TCP/UDP checksum is bad.
			 *  o frame length and protocol specific length
			 *    do not match.
			 */
			if (status & (ALE_RD_CRC | ALE_RD_CODE |
			    ALE_RD_DRIBBLE | ALE_RD_RUNT | ALE_RD_OFLOW |
			    ALE_RD_TRUNC)) {
				ale_rx_update_page(sc, &rx_page, length, &prod);
				continue;
			}
		}
		/*
		 * m_devget(9) is the major bottleneck of ale(4); it
		 * comes from a hardware limitation.  For jumbo frames
		 * we could get slightly better performance if the
		 * driver used m_getjcl(9) with a proper buffer size
		 * argument, but that would make the code more
		 * complicated and I don't think users expect great Rx
		 * performance numbers from these low-end consumer
		 * ethernet controllers.
		 */
		m = m_devget((char *)(rs + 1), length - ETHER_CRC_LEN,
		    ETHER_ALIGN);
		if (m == NULL) {
			ifp->if_iqdrops++;
			ale_rx_update_page(sc, &rx_page, length, &prod);
			continue;
		}
		if (status & ALE_RD_IPV4)
			ale_rxcsum(sc, m, status);
#if NVLAN > 0
		if (status & ALE_RD_VLAN) {
			uint32_t vtags = ALE_RX_VLAN(letoh32(rs->vtags));
			m->m_pkthdr.ether_vtag = ALE_RX_VLAN_TAG(vtags);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);

		ale_rx_update_page(sc, &rx_page, length, &prod);
	}

	if_input(ifp, &ml);

	return 0;
}

void
ale_tick(void *xsc)
{
	struct ale_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	ale_stats_update(sc);

	timeout_add_sec(&sc->ale_tick_ch, 1);
	splx(s);
}

void
ale_reset(struct ale_softc *sc)
{
	uint32_t reg;
	int i;

	/* Initialize PCIe module. From Linux. */
	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);

	CSR_WRITE_4(sc, ALE_MASTER_CFG, MASTER_RESET);
	for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_4(sc, ALE_MASTER_CFG) & MASTER_RESET) == 0)
			break;
	}
	if (i == 0)
		printf("%s: master reset timeout!\n", sc->sc_dev.dv_xname);

	for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, ALE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		printf("%s: reset timeout(0x%08x)!\n", sc->sc_dev.dv_xname,
		    reg);
}
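
/*
 * Reset is two-phase: first wait for the MASTER_RESET bit to
 * self-clear, then wait for ALE_IDLE_STATUS to report all units idle
 * before touching the chip again.
 */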

int
ale_init(struct ifnet *ifp)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, rxf_hi, rxf_lo;

	/*
	 * Cancel any pending I/O.
	 */
	ale_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	ale_reset(sc);

	/* Initialize Rx pages and Tx descriptors (DMA memory blocks). */
	ale_init_rx_pages(sc);
	ale_init_tx_ring(sc);

	/* Reprogram the station address. */
	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, ALE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, ALE_PAR1, eaddr[0] << 8 | eaddr[1]);

	/*
	 * Clear the WOL status and disable all WOL features, as WOL
	 * would interfere with Rx operation under normal environments.
	 */
	CSR_READ_4(sc, ALE_WOL_CFG);
	CSR_WRITE_4(sc, ALE_WOL_CFG, 0);

	/*
	 * Set Tx descriptor/RXF0/CMB base addresses. They share
	 * the same high address part of the DMAable region.
	 */
	paddr = sc->ale_cdata.ale_tx_ring_paddr;
	CSR_WRITE_4(sc, ALE_TPD_ADDR_HI, ALE_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALE_TPD_ADDR_LO, ALE_ADDR_LO(paddr));
	CSR_WRITE_4(sc, ALE_TPD_CNT,
	    (ALE_TX_RING_CNT << TPD_CNT_SHIFT) & TPD_CNT_MASK);

	/* Set Rx page base addresses; note we use a single queue. */
	paddr = sc->ale_cdata.ale_rx_page[0].page_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_PAGE0_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[1].page_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_PAGE1_ADDR_LO, ALE_ADDR_LO(paddr));

	/* Set Tx/Rx CMB addresses. */
	paddr = sc->ale_cdata.ale_tx_cmb_paddr;
	CSR_WRITE_4(sc, ALE_TX_CMB_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[0].cmb_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_CMB0_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[1].cmb_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_CMB1_ADDR_LO, ALE_ADDR_LO(paddr));

	/* Mark RXF0 as valid. */
	CSR_WRITE_1(sc, ALE_RXF0_PAGE0, RXF_VALID);
	CSR_WRITE_1(sc, ALE_RXF0_PAGE1, RXF_VALID);
	/*
	 * No need to initialize RXF1/RXF2/RXF3.  We don't use
	 * multi-queue yet.
	 */

	/* Set Rx page size, excluding guard frame size. */
	CSR_WRITE_4(sc, ALE_RXF_PAGE_SIZE, ALE_RX_PAGE_SZ);

	/* Tell hardware that we're ready to load DMA blocks. */
	CSR_WRITE_4(sc, ALE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/* Set Rx/Tx interrupt trigger threshold. */
	CSR_WRITE_4(sc, ALE_INT_TRIG_THRESH, (1 << INT_TRIG_RX_THRESH_SHIFT) |
	    (4 << INT_TRIG_TX_THRESH_SHIFT));
	/*
	 * XXX
	 * Set the interrupt trigger timer; its purpose and relation
	 * to the interrupt moderation mechanism are not yet clear.
	 */
	CSR_WRITE_4(sc, ALE_INT_TRIG_TIMER,
	    ((ALE_USECS(10) << INT_TRIG_RX_TIMER_SHIFT) |
	    (ALE_USECS(1000) << INT_TRIG_TX_TIMER_SHIFT)));
1670 
1671 	/* Configure interrupt moderation timer. */
1672 	sc->ale_int_rx_mod = ALE_IM_RX_TIMER_DEFAULT;
1673 	sc->ale_int_tx_mod = ALE_IM_TX_TIMER_DEFAULT;
1674 	reg = ALE_USECS(sc->ale_int_rx_mod) << IM_TIMER_RX_SHIFT;
1675 	reg |= ALE_USECS(sc->ale_int_tx_mod) << IM_TIMER_TX_SHIFT;
1676 	CSR_WRITE_4(sc, ALE_IM_TIMER, reg);
1677 	reg = CSR_READ_4(sc, ALE_MASTER_CFG);
1678 	reg &= ~(MASTER_CHIP_REV_MASK | MASTER_CHIP_ID_MASK);
1679 	reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
1680 	if (ALE_USECS(sc->ale_int_rx_mod) != 0)
1681 		reg |= MASTER_IM_RX_TIMER_ENB;
1682 	if (ALE_USECS(sc->ale_int_tx_mod) != 0)
1683 		reg |= MASTER_IM_TX_TIMER_ENB;
1684 	CSR_WRITE_4(sc, ALE_MASTER_CFG, reg);
1685 	CSR_WRITE_2(sc, ALE_INTR_CLR_TIMER, ALE_USECS(1000));
1686 
1687 	/* Set Maximum frame size of controller. */
1688 	if (ifp->if_mtu < ETHERMTU)
1689 		sc->ale_max_frame_size = ETHERMTU;
1690 	else
1691 		sc->ale_max_frame_size = ifp->if_mtu;
1692 	sc->ale_max_frame_size += ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN;
1693 	CSR_WRITE_4(sc, ALE_FRAME_SIZE, sc->ale_max_frame_size);
1694 
1695 	/* Configure IPG/IFG parameters. */
1696 	CSR_WRITE_4(sc, ALE_IPG_IFG_CFG,
1697 	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
1698 	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
1699 	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
1700 	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
1701 
1702 	/* Set parameters for half-duplex media. */
1703 	CSR_WRITE_4(sc, ALE_HDPX_CFG,
1704 	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
1705 	    HDPX_CFG_LCOL_MASK) |
1706 	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
1707 	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
1708 	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
1709 	    HDPX_CFG_ABEBT_MASK) |
1710 	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
1711 	    HDPX_CFG_JAMIPG_MASK));
1712 
1713 	/* Configure Tx jumbo frame parameters. */
1714 	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
1715 		if (ifp->if_mtu < ETHERMTU)
1716 			reg = sc->ale_max_frame_size;
1717 		else if (ifp->if_mtu < 6 * 1024)
1718 			reg = (sc->ale_max_frame_size * 2) / 3;
1719 		else
1720 			reg = sc->ale_max_frame_size / 2;
1721 		CSR_WRITE_4(sc, ALE_TX_JUMBO_THRESH,
1722 		    roundup(reg, TX_JUMBO_THRESH_UNIT) >>
1723 		    TX_JUMBO_THRESH_UNIT_SHIFT);
1724 	}
1725 
1726 	/* Configure TxQ. */
1727 	reg = (128 << (sc->ale_dma_rd_burst >> DMA_CFG_RD_BURST_SHIFT))
1728 	    << TXQ_CFG_TX_FIFO_BURST_SHIFT;
1729 	reg |= (TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
1730 	    TXQ_CFG_TPD_BURST_MASK;
1731 	CSR_WRITE_4(sc, ALE_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE | TXQ_CFG_ENB);
1732 
1733 	/* Configure Rx jumbo frame & flow control parameters. */
1734 	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
1735 		reg = roundup(sc->ale_max_frame_size, RX_JUMBO_THRESH_UNIT);
1736 		CSR_WRITE_4(sc, ALE_RX_JUMBO_THRESH,
1737 		    (((reg >> RX_JUMBO_THRESH_UNIT_SHIFT) <<
1738 		    RX_JUMBO_THRESH_MASK_SHIFT) & RX_JUMBO_THRESH_MASK) |
1739 		    ((RX_JUMBO_LKAH_DEFAULT << RX_JUMBO_LKAH_SHIFT) &
1740 		    RX_JUMBO_LKAH_MASK));
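		/*
		 * Set the Rx FIFO pause watermarks at 70% (high) and
		 * 30% (low) of the on-chip Rx FIFO length.
		 */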
1741 		reg = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);
1742 		rxf_hi = (reg * 7) / 10;
1743 		rxf_lo = (reg * 3) / 10;
1744 		CSR_WRITE_4(sc, ALE_RX_FIFO_PAUSE_THRESH,
1745 		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
1746 		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
1747 		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
1748 		    RX_FIFO_PAUSE_THRESH_HI_MASK));
1749 	}
1750 
1751 	/* Disable RSS. */
1752 	CSR_WRITE_4(sc, ALE_RSS_IDT_TABLE0, 0);
1753 	CSR_WRITE_4(sc, ALE_RSS_CPU, 0);
1754 
1755 	/* Configure RxQ. */
1756 	CSR_WRITE_4(sc, ALE_RXQ_CFG,
1757 	    RXQ_CFG_ALIGN_32 | RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);
1758 
1759 	/* Configure DMA parameters. */
1760 	reg = 0;
1761 	if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0)
1762 		reg |= DMA_CFG_TXCMB_ENB;
1763 	CSR_WRITE_4(sc, ALE_DMA_CFG,
1764 	    DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI | DMA_CFG_RCB_64 |
1765 	    sc->ale_dma_rd_burst | reg |
1766 	    sc->ale_dma_wr_burst | DMA_CFG_RXCMB_ENB |
1767 	    ((DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
1768 	    DMA_CFG_RD_DELAY_CNT_MASK) |
1769 	    ((DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
1770 	    DMA_CFG_WR_DELAY_CNT_MASK));
1771 
1772 	/*
1773 	 * The hardware can be configured to issue an SMB interrupt
1774 	 * at a programmed interval. Since the driver already runs a
1775 	 * callout once per second, use that instead of relying on
1776 	 * the periodic SMB interrupt.
1777 	 */
1778 	CSR_WRITE_4(sc, ALE_SMB_STAT_TIMER, ALE_USECS(0));
1779 
1780 	/* Clear MAC statistics. */
1781 	ale_stats_clear(sc);
1782 
1783 	/*
1784 	 * Configure Tx/Rx MACs.
1785 	 *  - Auto-padding for short frames.
1786 	 *  - Enable CRC generation.
1787 	 *  The MAC is reconfigured for the resolved speed/duplex
1788 	 *  once link establishment is detected.
1789 	 *  The AR81xx always computes Rx checksums regardless of
1790 	 *  the MAC_CFG_RXCSUM_ENB bit; in fact, setting the bit
1791 	 *  causes Rx handling issues for fragmented IP datagrams
1792 	 *  due to a silicon bug.
1793 	 */
1794 	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
1795 	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
1796 	    MAC_CFG_PREAMBLE_MASK);
1797 	if ((sc->ale_flags & ALE_FLAG_FASTETHER) != 0)
1798 		reg |= MAC_CFG_SPEED_10_100;
1799 	else
1800 		reg |= MAC_CFG_SPEED_1000;
1801 	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
1802 
1803 	/* Set up the receive filter. */
1804 	ale_iff(sc);
1805 
1806 	ale_rxvlan(sc);
1807 
1808 	/* Enable interrupts, then acknowledge and clear any pending status. */
1809 	CSR_WRITE_4(sc, ALE_INTR_MASK, ALE_INTRS);
1810 	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);
1811 	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0);
1812 
1813 	sc->ale_flags &= ~ALE_FLAG_LINK;
1814 
1815 	/* Switch to the current media. */
1816 	mii = &sc->sc_miibus;
1817 	mii_mediachg(mii);
1818 
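	/* Start the driver's one-second tick callout. */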
1819 	timeout_add_sec(&sc->ale_tick_ch, 1);
1820 
1821 	ifp->if_flags |= IFF_RUNNING;
1822 	ifq_clr_oactive(&ifp->if_snd);
1823 
1824 	return 0;
1825 }
1826 
1827 void
1828 ale_stop(struct ale_softc *sc)
1829 {
1830 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1831 	struct ale_txdesc *txd;
1832 	uint32_t reg;
1833 	int i;
1834 
1835 	/*
1836 	 * Mark the interface down and cancel the watchdog timer.
1837 	 */
1838 	ifp->if_flags &= ~IFF_RUNNING;
1839 	ifq_clr_oactive(&ifp->if_snd);
1840 	ifp->if_timer = 0;
1841 
1842 	timeout_del(&sc->ale_tick_ch);
1843 	sc->ale_flags &= ~ALE_FLAG_LINK;
1844 
1845 	ale_stats_update(sc);
1846 
1847 	/* Disable interrupts. */
1848 	CSR_WRITE_4(sc, ALE_INTR_MASK, 0);
1849 	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);
1850 
1851 	/* Disable queue processing and DMA. */
1852 	reg = CSR_READ_4(sc, ALE_TXQ_CFG);
1853 	reg &= ~TXQ_CFG_ENB;
1854 	CSR_WRITE_4(sc, ALE_TXQ_CFG, reg);
1855 	reg = CSR_READ_4(sc, ALE_RXQ_CFG);
1856 	reg &= ~RXQ_CFG_ENB;
1857 	CSR_WRITE_4(sc, ALE_RXQ_CFG, reg);
1858 	reg = CSR_READ_4(sc, ALE_DMA_CFG);
1859 	reg &= ~(DMA_CFG_TXCMB_ENB | DMA_CFG_RXCMB_ENB);
1860 	CSR_WRITE_4(sc, ALE_DMA_CFG, reg);
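	/* Give in-flight DMA a moment to settle. */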
1861 	DELAY(1000);
1862 
1863 	/* Stop Rx/Tx MACs. */
1864 	ale_stop_mac(sc);
1865 
1866 	/* XXX Clear any interrupt status raised while the MACs were stopping. */
1867 	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);
1868 
1869 	/*
1870 	 * Free any Tx mbufs still held in the ring.
1871 	 */
1872 	for (i = 0; i < ALE_TX_RING_CNT; i++) {
1873 		txd = &sc->ale_cdata.ale_txdesc[i];
1874 		if (txd->tx_m != NULL) {
1875 			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
1876 			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1877 			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1878 			m_freem(txd->tx_m);
1879 			txd->tx_m = NULL;
1880 		}
1881 	}
1882 }
1883 
1884 void
1885 ale_stop_mac(struct ale_softc *sc)
1886 {
1887 	uint32_t reg;
1888 	int i;
1889 
1890 	reg = CSR_READ_4(sc, ALE_MAC_CFG);
1891 	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
1892 		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
1893 		CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
1894 	}
1895 
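	/* Poll until the controller reports idle. */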
1896 	for (i = ALE_TIMEOUT; i > 0; i--) {
1897 		reg = CSR_READ_4(sc, ALE_IDLE_STATUS);
1898 		if (reg == 0)
1899 			break;
1900 		DELAY(10);
1901 	}
1902 	if (i == 0)
1903 		printf("%s: could not disable Tx/Rx MAC(0x%08x)!\n",
1904 		    sc->sc_dev.dv_xname, reg);
1905 }
1906 
1907 void
1908 ale_init_tx_ring(struct ale_softc *sc)
1909 {
1910 	struct ale_txdesc *txd;
1911 	int i;
1912 
1913 	sc->ale_cdata.ale_tx_prod = 0;
1914 	sc->ale_cdata.ale_tx_cons = 0;
1915 	sc->ale_cdata.ale_tx_cnt = 0;
1916 
1917 	bzero(sc->ale_cdata.ale_tx_ring, ALE_TX_RING_SZ);
1918 	bzero(sc->ale_cdata.ale_tx_cmb, ALE_TX_CMB_SZ);
1919 	for (i = 0; i < ALE_TX_RING_CNT; i++) {
1920 		txd = &sc->ale_cdata.ale_txdesc[i];
1921 		txd->tx_m = NULL;
1922 	}
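	/*
	 * Clear the Tx CMB, where the hardware writes back Tx
	 * descriptor completion state.
	 */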
1923 	*sc->ale_cdata.ale_tx_cmb = 0;
1924 	bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map, 0,
1925 	    sc->ale_cdata.ale_tx_cmb_map->dm_mapsize,
1926 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1927 	bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map, 0,
1928 	    sc->ale_cdata.ale_tx_ring_map->dm_mapsize,
1929 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1930 }
1931 
1932 void
1933 ale_init_rx_pages(struct ale_softc *sc)
1934 {
1935 	struct ale_rx_page *rx_page;
1936 	int i;
1937 
1938 	sc->ale_cdata.ale_rx_seqno = 0;
1939 	sc->ale_cdata.ale_rx_curp = 0;
1940 
1941 	for (i = 0; i < ALE_RX_PAGES; i++) {
1942 		rx_page = &sc->ale_cdata.ale_rx_page[i];
1943 		bzero(rx_page->page_addr, sc->ale_pagesize);
1944 		bzero(rx_page->cmb_addr, ALE_RX_CMB_SZ);
1945 		rx_page->cons = 0;
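		/*
		 * Clear the page CMB, where the hardware reports the
		 * current producer position within the page.
		 */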
1946 		*rx_page->cmb_addr = 0;
1947 		bus_dmamap_sync(sc->sc_dmat, rx_page->page_map, 0,
1948 		    rx_page->page_map->dm_mapsize,
1949 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1950 		bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0,
1951 		    rx_page->cmb_map->dm_mapsize,
1952 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1953 	}
1954 }
1955 
1956 void
1957 ale_rxvlan(struct ale_softc *sc)
1958 {
1959 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1960 	uint32_t reg;
1961 
1962 	reg = CSR_READ_4(sc, ALE_MAC_CFG);
1963 	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
1964 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1965 		reg |= MAC_CFG_VLAN_TAG_STRIP;
1966 	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
1967 }
1968 
1969 void
1970 ale_iff(struct ale_softc *sc)
1971 {
1972 	struct arpcom *ac = &sc->sc_arpcom;
1973 	struct ifnet *ifp = &ac->ac_if;
1974 	struct ether_multi *enm;
1975 	struct ether_multistep step;
1976 	uint32_t crc;
1977 	uint32_t mchash[2];
1978 	uint32_t rxcfg;
1979 
1980 	rxcfg = CSR_READ_4(sc, ALE_MAC_CFG);
1981 	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
1982 	ifp->if_flags &= ~IFF_ALLMULTI;
1983 
1984 	/*
1985 	 * Always accept broadcast frames.
1986 	 */
1987 	rxcfg |= MAC_CFG_BCAST;
1988 
1989 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1990 		ifp->if_flags |= IFF_ALLMULTI;
1991 		if (ifp->if_flags & IFF_PROMISC)
1992 			rxcfg |= MAC_CFG_PROMISC;
1993 		else
1994 			rxcfg |= MAC_CFG_ALLMULTI;
1995 		mchash[0] = mchash[1] = 0xFFFFFFFF;
1996 	} else {
1997 		/* Program new filter. */
1998 		bzero(mchash, sizeof(mchash));
1999 
2000 		ETHER_FIRST_MULTI(step, ac, enm);
2001 		while (enm != NULL) {
2002 			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2003 
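			/*
			 * Bit 31 of the big-endian CRC selects MAR0 or
			 * MAR1; bits 30:26 select the bit within the
			 * chosen register.
			 */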
2004 			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2005 
2006 			ETHER_NEXT_MULTI(step, enm);
2007 		}
2008 	}
2009 
2010 	CSR_WRITE_4(sc, ALE_MAR0, mchash[0]);
2011 	CSR_WRITE_4(sc, ALE_MAR1, mchash[1]);
2012 	CSR_WRITE_4(sc, ALE_MAC_CFG, rxcfg);
2013 }
2014