/*-
 * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Driver for the Attansic Technology Corp. L2 Fast Ethernet adapter.
 *
 * This driver is heavily based on the age(4) Attansic L1 driver by
 * Pyun YongHyeon.
 *
 * $FreeBSD: src/sys/dev/ae/if_ae.c,v 1.1.2.3.2.1 2009/04/15 03:14:26 kensmith Exp $
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include "pcidevs.h"

#include <dev/netif/mii_layer/miivar.h>

#include <dev/netif/ae/if_aereg.h>
#include <dev/netif/ae/if_aevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
/*
 * Devices supported by this driver.
 */
static const struct ae_dev {
	uint16_t	ae_vendorid;
	uint16_t	ae_deviceid;
	const char	*ae_name;
} ae_devs[] = {
	{ VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
	    "Attansic Technology Corp, L2 Fast Ethernet" },
	/* Required last entry */
	{ 0, 0, NULL }
};

static int	ae_probe(device_t);
static int	ae_attach(device_t);
static int	ae_detach(device_t);
static int	ae_shutdown(device_t);
static int	ae_suspend(device_t);
static int	ae_resume(device_t);
static int	ae_miibus_readreg(device_t, int, int);
static int	ae_miibus_writereg(device_t, int, int, int);
static void	ae_miibus_statchg(device_t);

static int	ae_mediachange(struct ifnet *);
static void	ae_mediastatus(struct ifnet *, struct ifmediareq *);
static void	ae_init(void *);
static void	ae_start(struct ifnet *, struct ifaltq_subque *);
static int	ae_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	ae_watchdog(struct ifnet *);
static void	ae_stop(struct ae_softc *);
static void	ae_tick(void *);

static void	ae_intr(void *);
static void	ae_tx_intr(struct ae_softc *);
static void	ae_rx_intr(struct ae_softc *);
static int	ae_rxeof(struct ae_softc *, struct ae_rxd *);

static int	ae_encap(struct ae_softc *, struct mbuf **);
static void	ae_sysctl_node(struct ae_softc *);
static void	ae_phy_reset(struct ae_softc *);
static int	ae_reset(struct ae_softc *);
static void	ae_pcie_init(struct ae_softc *);
static void	ae_get_eaddr(struct ae_softc *);
static void	ae_dma_free(struct ae_softc *);
static int	ae_dma_alloc(struct ae_softc *);
static void	ae_mac_config(struct ae_softc *);
static void	ae_stop_rxmac(struct ae_softc *);
static void	ae_stop_txmac(struct ae_softc *);
static void	ae_rxfilter(struct ae_softc *);
static void	ae_rxvlan(struct ae_softc *);
static void	ae_update_stats_rx(uint16_t, struct ae_stats *);
static void	ae_update_stats_tx(uint16_t, struct ae_stats *);
static void	ae_powersave_disable(struct ae_softc *);
static void	ae_powersave_enable(struct ae_softc *);

static device_method_t ae_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		ae_probe),
	DEVMETHOD(device_attach,	ae_attach),
	DEVMETHOD(device_detach,	ae_detach),
	DEVMETHOD(device_shutdown,	ae_shutdown),
	DEVMETHOD(device_suspend,	ae_suspend),
	DEVMETHOD(device_resume,	ae_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	ae_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ae_miibus_writereg),
	DEVMETHOD(miibus_statchg,	ae_miibus_statchg),
	{ NULL, NULL }
};

static driver_t ae_driver = {
	"ae",
	ae_methods,
	sizeof(struct ae_softc)
};

static devclass_t ae_devclass;

DECLARE_DUMMY_MODULE(if_ae);
MODULE_DEPEND(if_ae, miibus, 1, 1, 1);
DRIVER_MODULE(if_ae, pci, ae_driver, ae_devclass, NULL, NULL);
DRIVER_MODULE(miibus, ae, miibus_driver, miibus_devclass, NULL, NULL);
/*
 * Register access macros.
 */
#define AE_WRITE_4(_sc, reg, val) \
	bus_space_write_4((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
#define AE_WRITE_2(_sc, reg, val) \
	bus_space_write_2((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
#define AE_WRITE_1(_sc, reg, val) \
	bus_space_write_1((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
#define AE_READ_4(_sc, reg) \
	bus_space_read_4((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))
#define AE_READ_2(_sc, reg) \
	bus_space_read_2((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))
#define AE_READ_1(_sc, reg) \
	bus_space_read_1((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))

#define AE_PHY_READ(sc, reg) \
	ae_miibus_readreg(sc->ae_dev, 0, reg)
#define AE_PHY_WRITE(sc, reg, val) \
	ae_miibus_writereg(sc->ae_dev, 0, reg, val)
#define AE_CHECK_EADDR_VALID(eaddr) \
	((eaddr[0] == 0 && eaddr[1] == 0) || \
	 (eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
/* Remap VLAN tag bits between the 802.1Q TCI layout and the descriptor layout. */
#define AE_RXD_VLAN(vtag) \
	(((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
#define AE_TXD_VLAN(vtag) \
	(((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))

/*
 * ae statistics.
 */
#define STATS_ENTRY(node, desc, field) \
	{ node, desc, offsetof(struct ae_stats, field) }
static struct {
	const char	*node;
	const char	*desc;
	intptr_t	offset;
} ae_stats_tx[] = {
	STATS_ENTRY("bcast", "broadcast frames", tx_bcast),
	STATS_ENTRY("mcast", "multicast frames", tx_mcast),
	STATS_ENTRY("pause", "PAUSE frames", tx_pause),
	STATS_ENTRY("control", "control frames", tx_ctrl),
	STATS_ENTRY("defers", "deferrals occurred", tx_defer),
	STATS_ENTRY("exc_defers", "excessive deferrals occurred", tx_excdefer),
	STATS_ENTRY("singlecols", "single collisions occurred", tx_singlecol),
	STATS_ENTRY("multicols", "multiple collisions occurred", tx_multicol),
	STATS_ENTRY("latecols", "late collisions occurred", tx_latecol),
	STATS_ENTRY("aborts", "transmit aborts due to collisions", tx_abortcol),
	STATS_ENTRY("underruns", "Tx FIFO underruns", tx_underrun)
}, ae_stats_rx[] = {
	STATS_ENTRY("bcast", "broadcast frames", rx_bcast),
	STATS_ENTRY("mcast", "multicast frames", rx_mcast),
	STATS_ENTRY("pause", "PAUSE frames", rx_pause),
	STATS_ENTRY("control", "control frames", rx_ctrl),
	STATS_ENTRY("crc_errors", "frames with CRC errors", rx_crcerr),
	STATS_ENTRY("code_errors", "frames with invalid opcode", rx_codeerr),
	STATS_ENTRY("runt", "runt frames", rx_runt),
	STATS_ENTRY("frag", "fragmented frames", rx_frag),
	STATS_ENTRY("align_errors", "frames with alignment errors", rx_align),
	STATS_ENTRY("truncated", "frames truncated due to Rx FIFO underrun",
	    rx_trunc)
};
#define AE_STATS_RX_LEN	NELEM(ae_stats_rx)
#define AE_STATS_TX_LEN	NELEM(ae_stats_tx)
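
/*
 * The tables above drive ae_sysctl_node(): each entry maps a sysctl leaf
 * name and description to an offset into struct ae_stats via offsetof(),
 * and the counters themselves are bumped from descriptor status flags in
 * ae_update_stats_rx()/ae_update_stats_tx() below.
 */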
static void
ae_stop(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	sc->ae_flags &= ~AE_FLAG_LINK;
	callout_stop(&sc->ae_tick_ch);

	/*
	 * Clear and disable interrupts.
	 */
	AE_WRITE_4(sc, AE_IMR_REG, 0);
	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_txmac(sc);
	ae_stop_rxmac(sc);

	/*
	 * Stop DMA engines.
	 */
	AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
	AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);

	/*
	 * Wait for everything to enter idle state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		if_printf(ifp, "could not enter idle state in stop.\n");
}

static void
ae_stop_rxmac(struct ae_softc *sc)
{
	uint32_t val;
	int i;

	/*
	 * Stop Rx MAC engine.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	if ((val & AE_MAC_RX_EN) != 0) {
		val &= ~AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}

	/*
	 * Stop Rx DMA engine.
	 */
	if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
		AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);

	/*
	 * Wait for IDLE state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		val = AE_READ_4(sc, AE_IDLE_REG);
		if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "timed out while stopping Rx MAC.\n");
	}
}

static void
ae_stop_txmac(struct ae_softc *sc)
{
	uint32_t val;
	int i;

	/*
	 * Stop Tx MAC engine.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	if ((val & AE_MAC_TX_EN) != 0) {
		val &= ~AE_MAC_TX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}

	/*
	 * Stop Tx DMA engine.
	 */
	if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
		AE_WRITE_1(sc, AE_DMAREAD_REG, 0);

	/*
	 * Wait for IDLE state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		val = AE_READ_4(sc, AE_IDLE_REG);
		if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "timed out while stopping Tx MAC.\n");
	}
}

/*
 * Callback from MII layer when media changes.
 */
static void
ae_miibus_statchg(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint32_t val;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->ae_miibus);
	sc->ae_flags &= ~AE_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->ae_flags |= AE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	ae_stop_rxmac(sc);
	ae_stop_txmac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->ae_flags & AE_FLAG_LINK) != 0) {
		ae_mac_config(sc);

		/*
		 * Restart DMA engines.
		 */
		AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
		AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

		/*
		 * Enable Rx and Tx MACs.
		 */
		val = AE_READ_4(sc, AE_MAC_REG);
		val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}
}

static void
ae_sysctl_node(struct ae_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
	struct ae_stats *ae_stats;
	unsigned int i;

	ae_stats = &sc->stats;

	ctx = device_get_sysctl_ctx(sc->ae_dev);
	root = device_get_sysctl_tree(sc->ae_dev);
	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
	    CTLFLAG_RD, NULL, "ae statistics");
	if (stats == NULL) {
		device_printf(sc->ae_dev, "can't add stats sysctl node\n");
		return;
	}

	/*
	 * Receiver statistics.
	 */
	stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
	    CTLFLAG_RD, NULL, "Rx MAC statistics");
	if (stats_rx != NULL) {
		for (i = 0; i < AE_STATS_RX_LEN; i++) {
			SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_rx),
			    OID_AUTO, ae_stats_rx[i].node, CTLFLAG_RD,
			    (char *)ae_stats + ae_stats_rx[i].offset, 0,
			    ae_stats_rx[i].desc);
		}
	}

	/*
	 * Transmitter statistics.
	 */
	stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
	    CTLFLAG_RD, NULL, "Tx MAC statistics");
	if (stats_tx != NULL) {
		for (i = 0; i < AE_STATS_TX_LEN; i++) {
			SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_tx),
			    OID_AUTO, ae_stats_tx[i].node, CTLFLAG_RD,
			    (char *)ae_stats + ae_stats_tx[i].offset, 0,
			    ae_stats_tx[i].desc);
		}
	}
}

static int
ae_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ae_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/*
	 * Locking is done in upper layers.
	 */
	if (phy != sc->ae_phyaddr)
		return (0);

	val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
	    AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
	AE_WRITE_4(sc, AE_MDIO_REG, val);

	/*
	 * Wait for operation to complete.
	 */
	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
		DELAY(2);
		val = AE_READ_4(sc, AE_MDIO_REG);
		if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
			break;
	}
	if (i == AE_MDIO_TIMEOUT) {
		device_printf(sc->ae_dev, "phy read timeout: %d.\n", reg);
		return (0);
	}
	return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
}

static int
ae_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct ae_softc *sc = device_get_softc(dev);
	uint32_t aereg;
	int i;

	/*
	 * Locking is done in upper layers.
	 */
	if (phy != sc->ae_phyaddr)
		return (0);

	aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
	    AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
	    ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
	AE_WRITE_4(sc, AE_MDIO_REG, aereg);

	/*
	 * Wait for operation to complete.
	 */
	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
		DELAY(2);
		aereg = AE_READ_4(sc, AE_MDIO_REG);
		if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
			break;
	}
	if (i == AE_MDIO_TIMEOUT)
		device_printf(sc->ae_dev, "phy write timeout: %d.\n", reg);
	return (0);
}

static int
ae_probe(device_t dev)
{
	uint16_t vendor, devid;
	const struct ae_dev *sp;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (sp = ae_devs; sp->ae_name != NULL; sp++) {
		if (vendor == sp->ae_vendorid &&
		    devid == sp->ae_deviceid) {
			device_set_desc(dev, sp->ae_name);
			return (0);
		}
	}
	return (ENXIO);
}

static int
ae_dma_alloc(struct ae_softc *sc)
{
	bus_addr_t busaddr;
	int error;

	/*
	 * Create parent DMA tag.
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT,
	    0,
	    BUS_SPACE_MAXSIZE_32BIT,
	    0, &sc->dma_parent_tag);
	if (error) {
		device_printf(sc->ae_dev,
		    "could not create parent DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA resources for TxD.
	 */
	sc->txd_base = bus_dmamem_coherent_any(sc->dma_parent_tag, 4,
	    AE_TXD_BUFSIZE_DEFAULT, BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->dma_txd_tag, &sc->dma_txd_map,
	    &sc->dma_txd_busaddr);
	if (sc->txd_base == NULL) {
		device_printf(sc->ae_dev,
		    "could not create TxD DMA resources.\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA resources for TxS.
	 */
	sc->txs_base = bus_dmamem_coherent_any(sc->dma_parent_tag, 4,
	    AE_TXS_COUNT_DEFAULT * 4, BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->dma_txs_tag, &sc->dma_txs_map,
	    &sc->dma_txs_busaddr);
	if (sc->txs_base == NULL) {
		device_printf(sc->ae_dev,
		    "could not create TxS DMA resources.\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA resources for RxD.
	 */
	sc->rxd_base_dma = bus_dmamem_coherent_any(sc->dma_parent_tag, 128,
	    AE_RXD_COUNT_DEFAULT * 1536 + 120,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->dma_rxd_tag, &sc->dma_rxd_map,
	    &busaddr);
	if (sc->rxd_base_dma == NULL) {
		device_printf(sc->ae_dev,
		    "could not create RxD DMA resources.\n");
		return (ENOMEM);
	}
	sc->dma_rxd_busaddr = busaddr + 120;
	sc->rxd_base = (struct ae_rxd *)(sc->rxd_base_dma + 120);

	return (0);
}
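
/*
 * A note on the DMA layout created above: TxD is a byte-addressed ring of
 * AE_TXD_BUFSIZE_DEFAULT bytes into which outgoing frames are copied, TxS
 * is an array of 4-byte transmit status words, and RxD is an array of
 * fixed-size (1536-byte) receive descriptors.  Both the host pointer and
 * the bus address of the RxD block are advanced by 120 bytes; the extra
 * leading space appears to be reserved for the hardware, though the exact
 * reason is not documented here.
 */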
static void
ae_mac_config(struct ae_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;

	mii = device_get_softc(sc->ae_miibus);
	val = AE_READ_4(sc, AE_MAC_REG);
	val &= ~AE_MAC_FULL_DUPLEX;
	/* XXX disable AE_MAC_TX_FLOW_EN? */
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= AE_MAC_FULL_DUPLEX;
	AE_WRITE_4(sc, AE_MAC_REG, val);
}

static int
ae_rxeof(struct ae_softc *sc, struct ae_rxd *rxd)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m;
	unsigned int size;
	uint16_t flags;

	flags = le16toh(rxd->flags);
#ifdef AE_DEBUG
	if_printf(ifp, "Rx interrupt occurred.\n");
#endif
	size = le16toh(rxd->len) - ETHER_CRC_LEN;
	if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN -
	    sizeof(struct ether_vlan_header))) {
		if_printf(ifp, "Runt frame received.\n");
		return (EIO);
	}

	m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
	if (m == NULL)
		return (ENOBUFS);

	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
	    (flags & AE_RXD_HAS_VLAN)) {
		m->m_pkthdr.ether_vlantag = AE_RXD_VLAN(le16toh(rxd->vlan));
		m->m_flags |= M_VLANTAG;
	}
	ifp->if_input(ifp, m, NULL, -1);

	return (0);
}

static void
ae_rx_intr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ae_rxd *rxd;
	uint16_t flags;
	int error;

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rxd = (struct ae_rxd *)(sc->rxd_base + sc->rxd_cur);

		flags = le16toh(rxd->flags);
		if ((flags & AE_RXD_UPDATE) == 0)
			break;
		rxd->flags = htole16(flags & ~AE_RXD_UPDATE);

		/* Update stats. */
		ae_update_stats_rx(flags, &sc->stats);

		/*
		 * Update position index.
		 */
		sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;

		if ((flags & AE_RXD_SUCCESS) == 0) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			continue;
		}

		error = ae_rxeof(sc, rxd);
		if (error)
			IFNET_STAT_INC(ifp, ierrors, 1);
		else
			IFNET_STAT_INC(ifp, ipackets, 1);
	}

	/* Update Rx index. */
	AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
}
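
/*
 * Transmit completion: the hardware sets AE_TXS_UPDATE in the next TxS
 * entry for every frame it finishes.  We acknowledge by clearing the bit,
 * advancing txs_ack, and advancing txd_ack past the corresponding TxD
 * header plus its (4-byte aligned) payload.
 */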
static void
ae_tx_intr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ae_txd *txd;
	struct ae_txs *txs;
	uint16_t flags;

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_POSTREAD);

	for (;;) {
		txs = sc->txs_base + sc->txs_ack;

		flags = le16toh(txs->flags);
		if ((flags & AE_TXS_UPDATE) == 0)
			break;
		txs->flags = htole16(flags & ~AE_TXS_UPDATE);

		/* Update stats. */
		ae_update_stats_tx(flags, &sc->stats);

		/*
		 * Update TxS position.
		 */
		sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
		sc->ae_flags |= AE_FLAG_TXAVAIL;

		txd = (struct ae_txd *)(sc->txd_base + sc->txd_ack);
		if (txs->len != txd->len) {
			device_printf(sc->ae_dev, "Size mismatch: "
			    "TxS:%d TxD:%d\n",
			    le16toh(txs->len), le16toh(txd->len));
		}

		/*
		 * Move txd ack and align on 4-byte boundary.
		 */
		sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) + 4 + 3) & ~3) %
		    AE_TXD_BUFSIZE_DEFAULT;

		if ((flags & AE_TXS_SUCCESS) != 0)
			IFNET_STAT_INC(ifp, opackets, 1);
		else
			IFNET_STAT_INC(ifp, oerrors, 1);
		sc->tx_inproc--;
	}

	if (sc->tx_inproc < 0) {
		/* XXX assert? */
		if_printf(ifp, "Received stray Tx interrupt(s).\n");
		sc->tx_inproc = 0;
	}
	if (sc->tx_inproc == 0)
		ifp->if_timer = 0;	/* Unarm watchdog. */

	if (sc->ae_flags & AE_FLAG_TXAVAIL) {
		ifq_clr_oactive(&ifp->if_snd);
		if (!ifq_is_empty(&ifp->if_snd))
#ifdef foo
			ae_intr(sc);
#else
			if_devstart(ifp);
#endif
	}

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_PREWRITE);
}

static void
ae_intr(void *xsc)
{
	struct ae_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;

	ASSERT_SERIALIZED(ifp->if_serializer);

	val = AE_READ_4(sc, AE_ISR_REG);
	if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
		return;

#ifdef foo
	AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);
#endif

	/* Read interrupt status. */
	val = AE_READ_4(sc, AE_ISR_REG);

	/* Clear interrupts and disable them. */
	AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);

	if (ifp->if_flags & IFF_RUNNING) {
		if (val & (AE_ISR_DMAR_TIMEOUT |
		    AE_ISR_DMAW_TIMEOUT |
		    AE_ISR_PHY_LINKDOWN)) {
			ae_init(sc);
		}
		if (val & AE_ISR_TX_EVENT)
			ae_tx_intr(sc);
		if (val & AE_ISR_RX_EVENT)
			ae_rx_intr(sc);
	}

	/* Re-enable interrupts. */
	AE_WRITE_4(sc, AE_ISR_REG, 0);
}

static void
ae_init(void *xsc)
{
	struct ae_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint32_t val;
	bus_addr_t addr;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc->ae_miibus);

	ae_stop(sc);
	ae_reset(sc);
	ae_pcie_init(sc);
	ae_powersave_disable(sc);

	/*
	 * Clear and disable interrupts.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

	/*
	 * Set the MAC address.
	 */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
	AE_WRITE_4(sc, AE_EADDR0_REG, val);
	val = eaddr[0] << 8 | eaddr[1];
	AE_WRITE_4(sc, AE_EADDR1_REG, val);

	/*
	 * Set ring buffers base addresses.
	 */
	addr = sc->dma_rxd_busaddr;
	AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
	AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
	addr = sc->dma_txd_busaddr;
	AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
	addr = sc->dma_txs_busaddr;
	AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));

	/*
	 * Configure ring buffers sizes.
	 */
	AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
	AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
	AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);

	/*
	 * Configure interframe gap parameters.
	 */
	val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
	    AE_IFG_TXIPG_MASK) |
	    ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
	    AE_IFG_RXIPG_MASK) |
	    ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
	    AE_IFG_IPGR1_MASK) |
	    ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
	    AE_IFG_IPGR2_MASK);
	AE_WRITE_4(sc, AE_IFG_REG, val);

	/*
	 * Configure half-duplex operation.
	 */
	val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
	    AE_HDPX_LCOL_MASK) |
	    ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
	    AE_HDPX_RETRY_MASK) |
	    ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
	    AE_HDPX_ABEBT_MASK) |
	    ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
	    AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
	AE_WRITE_4(sc, AE_HDPX_REG, val);

	/*
	 * Configure interrupt moderate timer.
	 */
	AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
	val = AE_READ_4(sc, AE_MASTER_REG);
	val |= AE_MASTER_IMT_EN;
	AE_WRITE_4(sc, AE_MASTER_REG, val);

	/*
	 * Configure interrupt clearing timer.
	 */
	AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);

	/*
	 * Configure MTU.
	 */
	val = ifp->if_mtu + ETHER_HDR_LEN + sizeof(struct ether_vlan_header) +
	    ETHER_CRC_LEN;
	AE_WRITE_2(sc, AE_MTU_REG, val);

	/*
	 * Configure cut-through threshold.
	 */
	AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);

	/*
	 * Configure flow control.
	 */
	AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
	AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
	    (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
	    (AE_RXD_COUNT_DEFAULT / 12));

	/*
	 * Init mailboxes.
	 */
	sc->txd_cur = sc->rxd_cur = 0;
	sc->txs_ack = sc->txd_ack = 0;
	sc->rxd_cur = 0;
	AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
	AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);

	sc->tx_inproc = 0;
	sc->ae_flags |= AE_FLAG_TXAVAIL;	/* Free Tx's available. */

	/*
	 * Enable DMA.
	 */
	AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
	AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

	/*
	 * Check if everything is OK.
	 */
	val = AE_READ_4(sc, AE_ISR_REG);
	if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
		device_printf(sc->ae_dev, "Initialization failed.\n");
		return;
	}

	/*
	 * Clear interrupt status.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
	AE_WRITE_4(sc, AE_ISR_REG, 0x0);

	/*
	 * Enable interrupts.
	 */
	val = AE_READ_4(sc, AE_MASTER_REG);
	AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
	AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);

	/*
	 * Disable WOL.
	 */
	AE_WRITE_4(sc, AE_WOL_REG, 0);

	/*
	 * Configure MAC.
	 */
	val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
	    AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
	    AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
	    ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
	    ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
	    AE_MAC_PREAMBLE_MASK);
	AE_WRITE_4(sc, AE_MAC_REG, val);

	/*
	 * Configure Rx MAC.
	 */
	ae_rxfilter(sc);
	ae_rxvlan(sc);

	/*
	 * Enable Tx/Rx.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);

	sc->ae_flags &= ~AE_FLAG_LINK;
	mii_mediachg(mii);	/* Switch to the current media. */

	callout_reset(&sc->ae_tick_ch, hz, ae_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}
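
/*
 * The transmit watchdog pairs with ae_start() and ae_tx_intr():
 * ae_start() arms ifp->if_timer whenever it queues frames and
 * ae_tx_intr() disarms it once all in-flight frames are acknowledged,
 * so a timeout here means Tx completions stopped arriving and the
 * chip is reinitialized.
 */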
static void
ae_watchdog(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->ae_flags & AE_FLAG_LINK) == 0)
		if_printf(ifp, "watchdog timeout (missed link).\n");
	else
		if_printf(ifp, "watchdog timeout - resetting.\n");
	IFNET_STAT_INC(ifp, oerrors, 1);

	ae_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static void
ae_tick(void *xsc)
{
	struct ae_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->ae_miibus);

	lwkt_serialize_enter(ifp->if_serializer);
	mii_tick(mii);
	callout_reset(&sc->ae_tick_ch, hz, ae_tick, sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static void
ae_rxvlan(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;

	val = AE_READ_4(sc, AE_MAC_REG);
	val &= ~AE_MAC_RMVLAN_EN;
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		val |= AE_MAC_RMVLAN_EN;
	AE_WRITE_4(sc, AE_MAC_REG, val);
}

static void
ae_rxfilter(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = AE_READ_4(sc, AE_MAC_REG);
	rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);
	rxcfg |= AE_MAC_BCAST_EN;
	if (ifp->if_flags & IFF_PROMISC)
		rxcfg |= AE_MAC_PROMISC_EN;
	if (ifp->if_flags & IFF_ALLMULTI)
		rxcfg |= AE_MAC_MCAST_EN;

	/*
	 * Wipe old settings.
	 */
	AE_WRITE_4(sc, AE_REG_MHT0, 0);
	AE_WRITE_4(sc, AE_REG_MHT1, 0);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
		AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
		AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
		return;
	}

	/*
	 * Load multicast tables.
	 */
	bzero(mchash, sizeof(mchash));
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
	}
	AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
	AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
	AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
}

static unsigned int
ae_tx_avail_size(struct ae_softc *sc)
{
	unsigned int avail;

	if (sc->txd_cur >= sc->txd_ack)
		avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
	else
		avail = sc->txd_ack - sc->txd_cur;
	return (avail - 4);	/* 4-byte header. */
}
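
/*
 * The copy-based transmit path: each frame is prefixed with a 4-byte TxD
 * header (length and VLAN tag) and copied into the TxD byte ring, with a
 * second m_copydata() handling the wrap at the end of the ring.  The
 * write position is then rounded up to a 4-byte boundary and a TxS slot
 * is reserved for the completion status.
 */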
static int
ae_encap(struct ae_softc *sc, struct mbuf **m_head)
{
	struct mbuf *m0;
	struct ae_txd *hdr;
	unsigned int to_end;
	uint16_t len;

	M_ASSERTPKTHDR((*m_head));

	m0 = *m_head;
	len = m0->m_pkthdr.len;
	if ((sc->ae_flags & AE_FLAG_TXAVAIL) == 0 ||
	    ae_tx_avail_size(sc) < len) {
#ifdef AE_DEBUG
		if_printf(&sc->arpcom.ac_if, "No free Tx available.\n");
#endif
		return (ENOBUFS);
	}

	hdr = (struct ae_txd *)(sc->txd_base + sc->txd_cur);
	bzero(hdr, sizeof(*hdr));

	/* Header size. */
	sc->txd_cur = (sc->txd_cur + 4) % AE_TXD_BUFSIZE_DEFAULT;

	/* Space available to the end of the ring. */
	to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;

	if (to_end >= len) {
		m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
	} else {
		m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
		    sc->txd_cur));
		m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);
	}

	/*
	 * Set TxD flags and parameters.
	 */
	if ((m0->m_flags & M_VLANTAG) != 0) {
		hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vlantag));
		hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
	} else {
		hdr->len = htole16(len);
	}

	/*
	 * Set current TxD position and round up to a 4-byte boundary.
	 */
	sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
	if (sc->txd_cur == sc->txd_ack)
		sc->ae_flags &= ~AE_FLAG_TXAVAIL;
#ifdef AE_DEBUG
	if_printf(&sc->arpcom.ac_if, "New txd_cur = %d.\n", sc->txd_cur);
#endif

	/*
	 * Update TxS position and check if there are empty TxS available.
	 */
	sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
	sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
	if (sc->txs_cur == sc->txs_ack)
		sc->ae_flags &= ~AE_FLAG_TXAVAIL;

	/*
	 * Synchronize DMA memory.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
ae_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct ae_softc *sc = ifp->if_softc;
	int error, trans;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

#ifdef AE_DEBUG
	if_printf(ifp, "Start called.\n");
#endif
	if ((sc->ae_flags & AE_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}
	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	trans = 0;
	while (!ifq_is_empty(&ifp->if_snd)) {
		struct mbuf *m0;

		m0 = ifq_dequeue(&ifp->if_snd);
		if (m0 == NULL)
			break;	/* Nothing to do. */

		error = ae_encap(sc, &m0);
		if (error != 0) {
			if (m0 != NULL) {
				ifq_prepend(&ifp->if_snd, m0);
				ifq_set_oactive(&ifp->if_snd);
#ifdef AE_DEBUG
				if_printf(ifp, "Setting OACTIVE.\n");
#endif
			}
			break;
		}
		trans++;
		sc->tx_inproc++;

		/* Bounce a copy of the frame to BPF. */
		ETHER_BPF_MTAP(ifp, m0);
		m_freem(m0);
	}

	if (trans) {	/* Something was dequeued. */
		AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
		ifp->if_timer = AE_TX_TIMEOUT;	/* Load watchdog. */
#ifdef AE_DEBUG
		if_printf(ifp, "%d packets dequeued.\n", trans);
		if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
#endif
	}
}
static int
ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct ae_softc *sc = ifp->if_softc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error = 0, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	ifr = (struct ifreq *)data;
	switch (cmd) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if (((ifp->if_flags ^ sc->ae_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ae_rxfilter(sc);
			} else {
				ae_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ae_stop(sc);
		}
		sc->ae_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			ae_rxfilter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->ae_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			ae_rxvlan(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static int
ae_attach(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0;

	sc->ae_dev = dev;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	callout_init(&sc->ae_tick_ch);

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate memory mapped IO
	 */
	sc->ae_mem_rid = PCIR_BAR(0);
	sc->ae_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->ae_mem_rid, RF_ACTIVE);
	if (sc->ae_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return (ENXIO);
	}
	sc->ae_mem_bt = rman_get_bustag(sc->ae_mem_res);
	sc->ae_mem_bh = rman_get_bushandle(sc->ae_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->ae_irq_rid = 0;
	sc->ae_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->ae_irq_rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->ae_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/* Set PHY address. */
	sc->ae_phyaddr = AE_PHYADDR_DEFAULT;

	/* Create sysctl tree */
	ae_sysctl_node(sc);

	/* Reset PHY. */
	ae_phy_reset(sc);

	/*
	 * Reset the ethernet controller.
	 */
	ae_reset(sc);
	ae_pcie_init(sc);

	/*
	 * Get PCI and chip id/revision.
	 */
	sc->ae_rev = pci_get_revid(dev);
	sc->ae_chip_rev =
	    (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
	    AE_MASTER_REVNUM_MASK;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->ae_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->ae_chip_rev);
	}

	/*
	 * XXX
	 * Uninitialized hardware returns an invalid chip id/revision
	 * as well as 0xFFFFFFFF for Tx/Rx fifo length.  It seems that
	 * an unplugged cable puts the hardware into automatic
	 * power-down mode, which in turn returns an invalid chip revision.
	 */
	if (sc->ae_chip_rev == 0xFFFF) {
		device_printf(dev, "invalid chip revision : 0x%04x -- "
		    "not initialized?\n", sc->ae_chip_rev);
		error = ENXIO;
		goto fail;
	}

#if 0
	/* Get DMA parameters from PCIe device control register. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr) {
		uint16_t devctl;

		sc->ae_flags |= AE_FLAG_PCIE;
		devctl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		/* Max read request size. */
		sc->ae_dma_rd_burst = ((devctl >> 12) & 0x07) <<
		    DMA_CFG_RD_BURST_SHIFT;
		/* Max payload size. */
		sc->ae_dma_wr_burst = ((devctl >> 5) & 0x07) <<
		    DMA_CFG_WR_BURST_SHIFT;
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((devctl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((devctl >> 5) & 0x07));
		}
	} else {
		sc->ae_dma_rd_burst = DMA_CFG_RD_BURST_128;
		sc->ae_dma_wr_burst = DMA_CFG_WR_BURST_128;
	}
#endif

	/* Create DMA resources */
	error = ae_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	ae_get_eaddr(sc);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_init = ae_init;
	ifp->if_watchdog = ae_watchdog;
	ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN - 1);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	ifp->if_hwassist = 0;
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->ae_miibus,
	    ae_mediachange, ae_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->ae_eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->ae_irq_res));

	error = bus_setup_intr(dev, sc->ae_irq_res, INTR_MPSAFE, ae_intr, sc,
	    &sc->ae_irq_handle, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}
	return (0);
fail:
	ae_detach(dev);
	return (error);
}

static int
ae_detach(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		sc->ae_flags |= AE_FLAG_DETACH;
		ae_stop(sc);
		bus_teardown_intr(dev, sc->ae_irq_res, sc->ae_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->ae_miibus != NULL)
		device_delete_child(dev, sc->ae_miibus);
	bus_generic_detach(dev);

	if (sc->ae_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->ae_irq_rid,
		    sc->ae_irq_res);
	}
	if (sc->ae_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->ae_mem_rid,
		    sc->ae_mem_res);
	}

	ae_dma_free(sc);

	return (0);
}

static void
ae_dma_free(struct ae_softc *sc)
{
	if (sc->dma_txd_tag != NULL) {
		bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
		bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
		    sc->dma_txd_map);
		bus_dma_tag_destroy(sc->dma_txd_tag);
	}
	if (sc->dma_txs_tag != NULL) {
		bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
		bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
		    sc->dma_txs_map);
		bus_dma_tag_destroy(sc->dma_txs_tag);
	}
	if (sc->dma_rxd_tag != NULL) {
		bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
		bus_dmamem_free(sc->dma_rxd_tag,
		    sc->rxd_base_dma, sc->dma_rxd_map);
		bus_dma_tag_destroy(sc->dma_rxd_tag);
	}
	if (sc->dma_parent_tag != NULL)
		bus_dma_tag_destroy(sc->dma_parent_tag);
}

static void
ae_pcie_init(struct ae_softc *sc)
{
	AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG,
	    AE_PCIE_LTSSM_TESTMODE_DEFAULT);
	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG,
	    AE_PCIE_DLL_TX_CTRL_DEFAULT);
}

static void
ae_phy_reset(struct ae_softc *sc)
{
	AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
	DELAY(1000);	/* XXX: pause(9) ? */
}
static int
ae_reset(struct ae_softc *sc)
{
	int i;

	/*
	 * Issue a soft reset.
	 */
	AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
	bus_space_barrier(sc->ae_mem_bt, sc->ae_mem_bh, AE_MASTER_REG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	/*
	 * Wait for reset to complete.
	 */
	for (i = 0; i < AE_RESET_TIMEOUT; i++) {
		if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
			break;
		DELAY(10);
	}
	if (i == AE_RESET_TIMEOUT) {
		device_printf(sc->ae_dev, "reset timeout.\n");
		return (ENXIO);
	}

	/*
	 * Wait for everything to enter idle state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT) {
		device_printf(sc->ae_dev, "could not enter idle state.\n");
		return (ENXIO);
	}
	return (0);
}

static int
ae_check_eeprom_present(struct ae_softc *sc, int *vpdc)
{
	int error;
	uint32_t val;

	/*
	 * Not sure why, but Linux does this.
	 */
	val = AE_READ_4(sc, AE_SPICTL_REG);
	if ((val & AE_SPICTL_VPD_EN) != 0) {
		val &= ~AE_SPICTL_VPD_EN;
		AE_WRITE_4(sc, AE_SPICTL_REG, val);
	}
	error = pci_find_extcap(sc->ae_dev, PCIY_VPD, vpdc);
	return (error);
}

static int
ae_vpd_read_word(struct ae_softc *sc, int reg, uint32_t *word)
{
	uint32_t val;
	int i;

	AE_WRITE_4(sc, AE_VPD_DATA_REG, 0);	/* Clear register value. */

	/*
	 * VPD registers start at offset 0x100.  Read them.
	 */
	val = 0x100 + reg * 4;
	AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
	    AE_VPD_CAP_ADDR_MASK);
	for (i = 0; i < AE_VPD_TIMEOUT; i++) {
		DELAY(2000);
		val = AE_READ_4(sc, AE_VPD_CAP_REG);
		if ((val & AE_VPD_CAP_DONE) != 0)
			break;
	}
	if (i == AE_VPD_TIMEOUT) {
		device_printf(sc->ae_dev, "timeout reading VPD register %d.\n",
		    reg);
		return (ETIMEDOUT);
	}
	*word = AE_READ_4(sc, AE_VPD_DATA_REG);
	return (0);
}

static int
ae_get_vpd_eaddr(struct ae_softc *sc, uint32_t *eaddr)
{
	uint32_t word, reg, val;
	int error;
	int found;
	int vpdc;
	int i;

	/*
	 * Check for EEPROM.
	 */
	error = ae_check_eeprom_present(sc, &vpdc);
	if (error != 0)
		return (error);

	/*
	 * Read the VPD configuration space.  Each register is prefixed
	 * with a signature, so we can check if it is valid.
	 */
	for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
		error = ae_vpd_read_word(sc, i, &word);
		if (error != 0)
			break;

		/*
		 * Check signature.
		 */
		if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
			break;
		reg = word >> AE_VPD_REG_SHIFT;
		i++;	/* Move to the next word. */
		if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
			continue;

		error = ae_vpd_read_word(sc, i, &val);
		if (error != 0)
			break;
		if (reg == AE_EADDR0_REG)
			eaddr[0] = val;
		else
			eaddr[1] = val;
		found++;
	}
	if (found < 2)
		return (ENOENT);

	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */
	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->ae_dev,
			    "VPD ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}

static int
ae_get_reg_eaddr(struct ae_softc *sc, uint32_t *eaddr)
{
	/*
	 * BIOS is supposed to set this.
	 */
	eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
	eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */
	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->ae_dev,
			    "Ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}

static void
ae_get_eaddr(struct ae_softc *sc)
{
	uint32_t eaddr[2] = {0, 0};
	int error;

	/*
	 * Check for an EEPROM/VPD address first, then fall back to the
	 * address registers.
	 */
	error = ae_get_vpd_eaddr(sc, eaddr);
	if (error)
		error = ae_get_reg_eaddr(sc, eaddr);
	if (error) {
		if (bootverbose)
			device_printf(sc->ae_dev,
			    "Generating random ethernet address.\n");
		eaddr[0] = karc4random();

		/*
		 * Set OUI to ASUSTek COMPUTER INC.
		 */
		sc->ae_eaddr[0] = 0x02;	/* U/L bit set. */
		sc->ae_eaddr[1] = 0x1f;
		sc->ae_eaddr[2] = 0xc6;
		sc->ae_eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->ae_eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->ae_eaddr[5] = (eaddr[0] >> 0) & 0xff;
	} else {
		sc->ae_eaddr[0] = (eaddr[1] >> 8) & 0xff;
		sc->ae_eaddr[1] = (eaddr[1] >> 0) & 0xff;
		sc->ae_eaddr[2] = (eaddr[0] >> 24) & 0xff;
		sc->ae_eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->ae_eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->ae_eaddr[5] = (eaddr[0] >> 0) & 0xff;
	}
}

static int
ae_mediachange(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->ae_miibus);
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	return (error);
}

static void
ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ae_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->ae_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static void
ae_update_stats_tx(uint16_t flags, struct ae_stats *stats)
{
	if ((flags & AE_TXS_BCAST) != 0)
		stats->tx_bcast++;
	if ((flags & AE_TXS_MCAST) != 0)
		stats->tx_mcast++;
	if ((flags & AE_TXS_PAUSE) != 0)
		stats->tx_pause++;
	if ((flags & AE_TXS_CTRL) != 0)
		stats->tx_ctrl++;
	if ((flags & AE_TXS_DEFER) != 0)
		stats->tx_defer++;
	if ((flags & AE_TXS_EXCDEFER) != 0)
		stats->tx_excdefer++;
	if ((flags & AE_TXS_SINGLECOL) != 0)
		stats->tx_singlecol++;
	if ((flags & AE_TXS_MULTICOL) != 0)
		stats->tx_multicol++;
	if ((flags & AE_TXS_LATECOL) != 0)
		stats->tx_latecol++;
	if ((flags & AE_TXS_ABORTCOL) != 0)
		stats->tx_abortcol++;
	if ((flags & AE_TXS_UNDERRUN) != 0)
		stats->tx_underrun++;
}

static void
ae_update_stats_rx(uint16_t flags, struct ae_stats *stats)
{
	if ((flags & AE_RXD_BCAST) != 0)
		stats->rx_bcast++;
	if ((flags & AE_RXD_MCAST) != 0)
		stats->rx_mcast++;
	if ((flags & AE_RXD_PAUSE) != 0)
		stats->rx_pause++;
	if ((flags & AE_RXD_CTRL) != 0)
		stats->rx_ctrl++;
	if ((flags & AE_RXD_CRCERR) != 0)
		stats->rx_crcerr++;
	if ((flags & AE_RXD_CODEERR) != 0)
		stats->rx_codeerr++;
	if ((flags & AE_RXD_RUNT) != 0)
		stats->rx_runt++;
	if ((flags & AE_RXD_FRAG) != 0)
		stats->rx_frag++;
	if ((flags & AE_RXD_TRUNC) != 0)
		stats->rx_trunc++;
	if ((flags & AE_RXD_ALIGN) != 0)
		stats->rx_align++;
}
static int
ae_resume(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
#if 0
	AE_READ_4(sc, AE_WOL_REG);	/* Clear WOL status. */
#endif
	ae_phy_reset(sc);
	if ((ifp->if_flags & IFF_UP) != 0)
		ae_init(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

static int
ae_suspend(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	ae_stop(sc);
#if 0
	/* We don't use ae_pm_init because we don't want WOL. */
	ae_pm_init(sc);
#endif
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

static int
ae_shutdown(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ae_suspend(dev);

	lwkt_serialize_enter(ifp->if_serializer);
	ae_powersave_enable(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

static void
ae_powersave_disable(struct ae_softc *sc)
{
	uint32_t val;

	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
	if (val & AE_PHY_DBG_POWERSAVE) {
		val &= ~AE_PHY_DBG_POWERSAVE;
		AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
		DELAY(1000);
	}
}

static void
ae_powersave_enable(struct ae_softc *sc)
{
	uint32_t val;

	/*
	 * XXX magic numbers.
	 */
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);
}