/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/ale/if_ale.c,v 1.3 2008/12/03 09:01:12 yongari Exp $
 */

/* Driver for Atheros AR8121/AR8113/AR8114 PCIe Ethernet. */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/ip.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include "pcidevs.h"

#include <dev/netif/ale/if_alereg.h>
#include <dev/netif/ale/if_alevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/* For more information about Tx checksum offload issues see ale_encap(). */
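/*
 * Protocols the hardware can checksum on transmit.  This is the mask
 * assigned to if_hwassist when IFCAP_TXCSUM is enabled (see
 * ale_ioctl()).
 */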
#define	ALE_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

struct ale_dmamap_ctx {
	int			nsegs;
	bus_dma_segment_t	*segs;
};

static int	ale_probe(device_t);
static int	ale_attach(device_t);
static int	ale_detach(device_t);
static int	ale_shutdown(device_t);
static int	ale_suspend(device_t);
static int	ale_resume(device_t);

static int	ale_miibus_readreg(device_t, int, int);
static int	ale_miibus_writereg(device_t, int, int, int);
static void	ale_miibus_statchg(device_t);

static void	ale_init(void *);
static void	ale_start(struct ifnet *, struct ifaltq_subque *);
static int	ale_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	ale_watchdog(struct ifnet *);
static int	ale_mediachange(struct ifnet *);
static void	ale_mediastatus(struct ifnet *, struct ifmediareq *);

static void	ale_intr(void *);
static int	ale_rxeof(struct ale_softc *sc);
static void	ale_rx_update_page(struct ale_softc *, struct ale_rx_page **,
		    uint32_t, uint32_t *);
static void	ale_rxcsum(struct ale_softc *, struct mbuf *, uint32_t);
static void	ale_txeof(struct ale_softc *);

static int	ale_dma_alloc(struct ale_softc *);
static void	ale_dma_free(struct ale_softc *);
static int	ale_check_boundary(struct ale_softc *);
static void	ale_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void	ale_dmamap_buf_cb(void *, bus_dma_segment_t *, int,
		    bus_size_t, int);
static int	ale_encap(struct ale_softc *, struct mbuf **);
static void	ale_init_rx_pages(struct ale_softc *);
static void	ale_init_tx_ring(struct ale_softc *);

static void	ale_stop(struct ale_softc *);
static void	ale_tick(void *);
static void	ale_get_macaddr(struct ale_softc *);
static void	ale_mac_config(struct ale_softc *);
static void	ale_phy_reset(struct ale_softc *);
static void	ale_reset(struct ale_softc *);
static void	ale_rxfilter(struct ale_softc *);
static void	ale_rxvlan(struct ale_softc *);
static void	ale_stats_clear(struct ale_softc *);
static void	ale_stats_update(struct ale_softc *);
static void	ale_stop_mac(struct ale_softc *);
#ifdef notyet
static void	ale_setlinkspeed(struct ale_softc *);
static void	ale_setwol(struct ale_softc *);
#endif

static void	ale_sysctl_node(struct ale_softc *);
static int	sysctl_hw_ale_int_mod(SYSCTL_HANDLER_ARGS);

/*
 * Devices supported by this driver.
 */
static struct ale_dev {
	uint16_t	ale_vendorid;
	uint16_t	ale_deviceid;
	const char	*ale_name;
} ale_devs[] = {
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR81XX,
	  "Atheros AR8121/AR8113/AR8114 PCIe Ethernet" },
};

static device_method_t ale_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		ale_probe),
	DEVMETHOD(device_attach,	ale_attach),
	DEVMETHOD(device_detach,	ale_detach),
	DEVMETHOD(device_shutdown,	ale_shutdown),
	DEVMETHOD(device_suspend,	ale_suspend),
	DEVMETHOD(device_resume,	ale_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	ale_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ale_miibus_writereg),
	DEVMETHOD(miibus_statchg,	ale_miibus_statchg),

	{ NULL, NULL }
};

static driver_t ale_driver = {
	"ale",
	ale_methods,
	sizeof(struct ale_softc)
};

static devclass_t ale_devclass;

DECLARE_DUMMY_MODULE(if_ale);
MODULE_VERSION(if_ale, 1);
MODULE_DEPEND(if_ale, miibus, 1, 1, 1);
DRIVER_MODULE(if_ale, pci, ale_driver, ale_devclass, NULL, NULL);
DRIVER_MODULE(miibus, ale, miibus_driver, miibus_devclass, NULL, NULL);

static int
ale_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ale_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);

	if (phy != sc->ale_phyaddr)
		return (0);

	if (sc->ale_flags & ALE_FLAG_FASTETHER) {
		if (reg == MII_100T2CR || reg == MII_100T2SR ||
		    reg == MII_EXTSR)
			return (0);
	}

	CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->ale_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

static int
ale_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct ale_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);

	if (phy != sc->ale_phyaddr)
		return (0);

	if (sc->ale_flags & ALE_FLAG_FASTETHER) {
		if (reg == MII_100T2CR || reg == MII_100T2SR ||
		    reg == MII_EXTSR)
			return (0);
	}

	CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->ale_dev, "phy write timeout : %d\n", reg);

	return (0);
}

static void
ale_miibus_statchg(device_t dev)
{
	struct ale_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint32_t reg;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->ale_miibus);

	sc->ale_flags &= ~ALE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->ale_flags |= ALE_FLAG_LINK;
			break;

		case IFM_1000_T:
			if ((sc->ale_flags & ALE_FLAG_FASTETHER) == 0)
				sc->ale_flags |= ALE_FLAG_LINK;
			break;

		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	ale_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->ale_flags & ALE_FLAG_LINK) != 0) {
		ale_mac_config(sc);
		/* Reenable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALE_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
	}
}

static void
ale_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->ale_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static int
ale_mediachange(struct ifnet *ifp)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->ale_miibus);
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static int
ale_probe(device_t dev)
{
	struct ale_dev *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = ale_devs;
	for (i = 0; i < NELEM(ale_devs); i++) {
		if (vendor == sp->ale_vendorid &&
		    devid == sp->ale_deviceid) {
			device_set_desc(dev, sp->ale_name);
			return (0);
		}
		sp++;
	}

	return (ENXIO);
}

static void
ale_get_macaddr(struct ale_softc *sc)
{
	uint32_t ea[2], reg;
	int i, vpdc;

	reg = CSR_READ_4(sc, ALE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, ALE_SPI_CTRL, reg);
	}

	vpdc = pci_get_vpdcap_ptr(sc->ale_dev);
	if (vpdc) {
		/*
		 * PCI VPD capability found, let TWSI reload EEPROM.
		 * This will set ethernet address of controller.
		 */
		CSR_WRITE_4(sc, ALE_TWSI_CTRL, CSR_READ_4(sc, ALE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, ALE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			device_printf(sc->ale_dev,
			    "reloading EEPROM timeout!\n");
	} else {
		if (bootverbose)
			device_printf(sc->ale_dev,
			    "PCI VPD capability not found!\n");
	}

	ea[0] = CSR_READ_4(sc, ALE_PAR0);
	ea[1] = CSR_READ_4(sc, ALE_PAR1);
	sc->ale_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->ale_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->ale_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->ale_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->ale_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->ale_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

static void
ale_phy_reset(struct ale_softc *sc)
{
	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALE_GPHY_CTRL,
	    GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET |
	    GPHY_CTRL_PHY_PLL_ON);
	DELAY(1000);
	CSR_WRITE_2(sc, ALE_GPHY_CTRL,
	    GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE |
	    GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_PLL_ON);
	DELAY(1000);
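
	/*
	 * The PHY exposes vendor-specific debug registers through an
	 * indirect window: the debug register index is written through
	 * PHY register 0x1D and its value is then accessed through PHY
	 * register 0x1E.  The index/value pairs below are the reset
	 * magic taken from the Linux driver.
	 */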

#define	ATPHY_DBG_ADDR		0x1D
#define	ATPHY_DBG_DATA		0x1E

	/* Enable hibernation mode. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x0B);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0xBC00);
	/* Set Class A/B for all modes. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x00);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x02EF);
	/* Enable 10BT power saving. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x12);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x4C04);
	/* Adjust 1000T power. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x04);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x8BBB);
	/* 10BT center tap voltage. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x05);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x2C46);

#undef	ATPHY_DBG_ADDR
#undef	ATPHY_DBG_DATA
	DELAY(1000);
}

static int
ale_attach(device_t dev)
{
	struct ale_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0;
	uint32_t rxf_len, txf_len;
	uint8_t pcie_ptr;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	sc->ale_dev = dev;

	callout_init(&sc->ale_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, ALE_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, ALE_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate memory mapped IO
	 */
	sc->ale_mem_rid = ALE_PCIR_BAR;
	sc->ale_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->ale_mem_rid, RF_ACTIVE);
	if (sc->ale_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->ale_mem_bt = rman_get_bustag(sc->ale_mem_res);
	sc->ale_mem_bh = rman_get_bushandle(sc->ale_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->ale_irq_rid = 0;
	sc->ale_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->ale_irq_rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->ale_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/* Set PHY address. */
	sc->ale_phyaddr = ALE_PHY_ADDR;

	/* Reset PHY. */
	ale_phy_reset(sc);

	/* Reset the ethernet controller. */
	ale_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->ale_rev = pci_get_revid(dev);
	if (sc->ale_rev >= 0xF0) {
		/* L2E Rev. B. AR8114 */
		sc->ale_flags |= ALE_FLAG_FASTETHER;
	} else {
		if ((CSR_READ_4(sc, ALE_PHY_STATUS) & PHY_STATUS_100M) != 0) {
			/* L1E AR8121 */
			sc->ale_flags |= ALE_FLAG_JUMBO;
		} else {
			/* L2E Rev. A. AR8113 */
			sc->ale_flags |= ALE_FLAG_FASTETHER;
		}
	}

	/*
	 * All known controllers seem to require 4 bytes alignment
	 * of Tx buffers to make Tx checksum offload with custom
	 * checksum generation method work.
	 */
	sc->ale_flags |= ALE_FLAG_TXCSUM_BUG;

	/*
	 * All known controllers seem to have issues on Rx checksum
	 * offload for fragmented IP datagrams.
	 */
	sc->ale_flags |= ALE_FLAG_RXCSUM_BUG;

	/*
	 * Don't use Tx CMB. It is known to cause RRS update failure
	 * under certain circumstances. Typical phenomenon of the
	 * issue would be unexpected sequence number encountered in
	 * Rx handler.
	 */
	sc->ale_flags |= ALE_FLAG_TXCMB_BUG;
	sc->ale_chip_rev = CSR_READ_4(sc, ALE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->ale_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->ale_chip_rev);
	}

	/*
	 * Uninitialized hardware returns an invalid chip id/revision
	 * as well as 0xFFFFFFFF for Tx/Rx fifo length.
	 */
	txf_len = CSR_READ_4(sc, ALE_SRAM_TX_FIFO_LEN);
	rxf_len = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);
	if (sc->ale_chip_rev == 0xFFFF || txf_len == 0xFFFFFFFF ||
	    rxf_len == 0xFFFFFFFF) {
		device_printf(dev, "chip revision : 0x%04x, %u Tx FIFO "
		    "%u Rx FIFO -- not initialized?\n", sc->ale_chip_rev,
		    txf_len, rxf_len);
		error = ENXIO;
		goto fail;
	}
	device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n", txf_len, rxf_len);

	/* Get DMA parameters from PCIe device control register. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr) {
		uint16_t devctl;

		sc->ale_flags |= ALE_FLAG_PCIE;
		devctl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		/* Max read request size. */
		sc->ale_dma_rd_burst = ((devctl >> 12) & 0x07) <<
		    DMA_CFG_RD_BURST_SHIFT;
		/* Max payload size. */
		sc->ale_dma_wr_burst = ((devctl >> 5) & 0x07) <<
		    DMA_CFG_WR_BURST_SHIFT;
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((devctl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((devctl >> 5) & 0x07));
		}
	} else {
		sc->ale_dma_rd_burst = DMA_CFG_RD_BURST_128;
		sc->ale_dma_wr_burst = DMA_CFG_WR_BURST_128;
	}

	/* Create device sysctl node. */
	ale_sysctl_node(sc);

	if ((error = ale_dma_alloc(sc)) != 0)
		goto fail;

	/* Load station address. */
	ale_get_macaddr(sc);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ale_ioctl;
	ifp->if_start = ale_start;
	ifp->if_init = ale_init;
	ifp->if_watchdog = ale_watchdog;
	ifq_set_maxlen(&ifp->if_snd, ALE_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_RXCSUM |
	    IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING;
#ifdef notyet
	ifp->if_capabilities |= IFCAP_TXCSUM;
	ifp->if_hwassist = ALE_CSUM_FEATURES;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	if ((error = mii_phy_probe(dev, &sc->ale_miibus, ale_mediachange,
	    ale_mediastatus)) != 0) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->ale_eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->ale_irq_res));

	error = bus_setup_intr(dev, sc->ale_irq_res, INTR_MPSAFE, ale_intr, sc,
	    &sc->ale_irq_handle, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	ale_detach(dev);
	return (error);
}

static int
ale_detach(device_t dev)
{
	struct ale_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		sc->ale_flags |= ALE_FLAG_DETACH;
		ale_stop(sc);
		bus_teardown_intr(dev, sc->ale_irq_res, sc->ale_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->ale_miibus != NULL)
		device_delete_child(dev, sc->ale_miibus);
	bus_generic_detach(dev);

	if (sc->ale_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->ale_irq_rid,
		    sc->ale_irq_res);
	}
	if (sc->ale_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->ale_mem_rid,
		    sc->ale_mem_res);
	}

	ale_dma_free(sc);

	return (0);
}

#define	ALE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	ALE_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
ale_sysctl_node(struct ale_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct ale_hw_stats *stats;
	int error;

	stats = &sc->ale_stats;
	ctx = device_get_sysctl_ctx(sc->ale_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ale_dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->ale_int_rx_mod, 0,
	    sysctl_hw_ale_int_mod, "I", "ale Rx interrupt moderation");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->ale_int_tx_mod, 0,
	    sysctl_hw_ale_int_mod, "I", "ale Tx interrupt moderation");

	/*
	 * Pull in device tunables.
	 */
	sc->ale_int_rx_mod = ALE_IM_RX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->ale_dev),
	    device_get_unit(sc->ale_dev), "int_rx_mod", &sc->ale_int_rx_mod);
	if (error == 0) {
		if (sc->ale_int_rx_mod < ALE_IM_TIMER_MIN ||
		    sc->ale_int_rx_mod > ALE_IM_TIMER_MAX) {
			device_printf(sc->ale_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    ALE_IM_RX_TIMER_DEFAULT);
			sc->ale_int_rx_mod = ALE_IM_RX_TIMER_DEFAULT;
		}
	}

	sc->ale_int_tx_mod = ALE_IM_TX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->ale_dev),
	    device_get_unit(sc->ale_dev), "int_tx_mod", &sc->ale_int_tx_mod);
	if (error == 0) {
		if (sc->ale_int_tx_mod < ALE_IM_TIMER_MIN ||
		    sc->ale_int_tx_mod > ALE_IM_TIMER_MAX) {
			device_printf(sc->ale_dev, "int_tx_mod value out of "
			    "range; using default: %d\n",
			    ALE_IM_TX_TIMER_DEFAULT);
			sc->ale_int_tx_mod = ALE_IM_TX_TIMER_DEFAULT;
		}
	}
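
	/*
	 * The moderation timers can also be preset per device through
	 * hints (e.g. hint.ale.0.int_rx_mod in loader.conf) and changed
	 * at run time through the sysctl nodes registered above; values
	 * outside ALE_IM_TIMER_MIN..ALE_IM_TIMER_MAX fall back to the
	 * defaults.
	 */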

	/* Misc statistics. */
	ALE_SYSCTL_STAT_ADD32(ctx, child, "reset_brk_seq",
	    &stats->reset_brk_seq,
	    "Controller resets due to broken Rx sequence number");

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "ALE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause control frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->rx_control_frames, "Control frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->rx_lenerrs, "Frames with length mismatched");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->rx_bytes, "Good octets");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->rx_bcast_bytes, "Good broadcast octets");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->rx_mcast_bytes, "Good multicast octets");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Too short frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "fragments",
	    &stats->rx_fragments, "Fragmented frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->rx_pkts_64, "64 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->rx_pkts_1519_max, "1519 to max frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->rx_pkts_truncated, "Truncated frames due to MTU size");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs",
	    &stats->rx_rrs_errs, "Return status write-back errors");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "filtered",
	    &stats->rx_pkts_filtered,
	    "Frames dropped due to address filtering");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->tx_bcast_frames, "Good broadcast frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->tx_mcast_frames, "Good multicast frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->tx_control_frames, "Control frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
	    &stats->tx_excess_defer, "Frames with excessive deferrals");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "defers",
	    &stats->tx_deferred, "Frames with deferrals");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->tx_bytes, "Good octets");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->tx_bcast_bytes, "Good broadcast octets");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->tx_mcast_bytes, "Good multicast octets");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->tx_pkts_64, "64 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->tx_pkts_1519_max, "1519 to max frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
	    &stats->tx_excess_colls, "Excessive collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "abort",
	    &stats->tx_abort, "Aborted frames due to excessive collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underrun, "FIFO underruns");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
	    &stats->tx_desc_underrun, "Descriptor write-back errors");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->tx_lenerrs, "Frames with length mismatched");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->tx_pkts_truncated, "Truncated frames due to MTU size");
}

#undef ALE_SYSCTL_STAT_ADD32
#undef ALE_SYSCTL_STAT_ADD64

struct ale_dmamap_arg {
	bus_addr_t	ale_busaddr;
};

static void
ale_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ale_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct ale_dmamap_arg *)arg;
	ctx->ale_busaddr = segs[0].ds_addr;
}

/*
 * Tx descriptors/RXF0/CMB DMA blocks share ALE_DESC_ADDR_HI register
 * which specifies high address region of DMA blocks. Therefore these
 * blocks should have the same high address of given 4GB address
 * space (i.e. crossing 4GB boundary is not allowed).
 */
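/*
 * Example: an 8KB block starting at bus address 0xFFFFF000 has end
 * address 0xFFFFF000 + 0x2000 = 0x100001000; ALE_ADDR_HI() of the
 * start is 0 but of the end is 1, so the block crosses a 4GB boundary
 * and cannot be programmed into the chip.  ale_dma_alloc() detects
 * this case and retries with DMA addressing limited to 32 bits.
 */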
static int
ale_check_boundary(struct ale_softc *sc)
{
	bus_addr_t rx_cmb_end[ALE_RX_PAGES], tx_cmb_end;
	bus_addr_t rx_page_end[ALE_RX_PAGES], tx_ring_end;

	rx_page_end[0] = sc->ale_cdata.ale_rx_page[0].page_paddr +
	    sc->ale_pagesize;
	rx_page_end[1] = sc->ale_cdata.ale_rx_page[1].page_paddr +
	    sc->ale_pagesize;
	tx_ring_end = sc->ale_cdata.ale_tx_ring_paddr + ALE_TX_RING_SZ;
	tx_cmb_end = sc->ale_cdata.ale_tx_cmb_paddr + ALE_TX_CMB_SZ;
	rx_cmb_end[0] = sc->ale_cdata.ale_rx_page[0].cmb_paddr + ALE_RX_CMB_SZ;
	rx_cmb_end[1] = sc->ale_cdata.ale_rx_page[1].cmb_paddr + ALE_RX_CMB_SZ;

	if ((ALE_ADDR_HI(tx_ring_end) !=
	     ALE_ADDR_HI(sc->ale_cdata.ale_tx_ring_paddr)) ||
	    (ALE_ADDR_HI(rx_page_end[0]) !=
	     ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[0].page_paddr)) ||
	    (ALE_ADDR_HI(rx_page_end[1]) !=
	     ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[1].page_paddr)) ||
	    (ALE_ADDR_HI(tx_cmb_end) !=
	     ALE_ADDR_HI(sc->ale_cdata.ale_tx_cmb_paddr)) ||
	    (ALE_ADDR_HI(rx_cmb_end[0]) !=
	     ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[0].cmb_paddr)) ||
	    (ALE_ADDR_HI(rx_cmb_end[1]) !=
	     ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[1].cmb_paddr)))
		return (EFBIG);

	if ((ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_page_end[0])) ||
	    (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_page_end[1])) ||
	    (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_cmb_end[0])) ||
	    (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_cmb_end[1])) ||
	    (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(tx_cmb_end)))
		return (EFBIG);

	return (0);
}

static int
ale_dma_alloc(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	bus_addr_t lowaddr;
	struct ale_dmamap_arg ctx;
	int error, guard_size, i;

	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0)
		guard_size = ALE_JUMBO_FRAMELEN;
	else
		guard_size = ALE_MAX_FRAMELEN;
	sc->ale_pagesize = roundup(guard_size + ALE_RX_PAGE_SZ,
	    ALE_RX_PAGE_ALIGN);
	lowaddr = BUS_SPACE_MAXADDR;
again:
	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    NULL,			/* parent */
	    1, 0,			/* alignment, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->ale_cdata.ale_parent_tag);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx descriptor ring. */
	error = bus_dma_tag_create(
	    sc->ale_cdata.ale_parent_tag, /* parent */
	    ALE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->ale_cdata.ale_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}
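
	/*
	 * Note that the AR81xx does not use per-buffer Rx descriptors.
	 * The controller writes received frames back to back into two
	 * large Rx pages and ale_rxeof() copies them out into mbufs,
	 * flipping to the other page once the current one has been
	 * consumed (see ale_rx_update_page()).  The guard size above
	 * keeps a maximum-sized frame that starts near the end of a
	 * page from running past the allocation.
	 */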
	/* Create DMA tag for Rx pages. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		error = bus_dma_tag_create(
		    sc->ale_cdata.ale_parent_tag, /* parent */
		    ALE_RX_PAGE_ALIGN, 0,	/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    sc->ale_pagesize,		/* maxsize */
		    1,				/* nsegments */
		    sc->ale_pagesize,		/* maxsegsize */
		    0,				/* flags */
		    &sc->ale_cdata.ale_rx_page[i].page_tag);
		if (error != 0) {
			device_printf(sc->ale_dev,
			    "could not create Rx page %d DMA tag.\n", i);
			goto fail;
		}
	}

	/* Create DMA tag for Tx coalescing message block. */
	error = bus_dma_tag_create(
	    sc->ale_cdata.ale_parent_tag, /* parent */
	    ALE_CMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALE_TX_CMB_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALE_TX_CMB_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->ale_cdata.ale_tx_cmb_tag);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not create Tx CMB DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx coalescing message block. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		error = bus_dma_tag_create(
		    sc->ale_cdata.ale_parent_tag, /* parent */
		    ALE_CMB_ALIGN, 0,		/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    ALE_RX_CMB_SZ,		/* maxsize */
		    1,				/* nsegments */
		    ALE_RX_CMB_SZ,		/* maxsegsize */
		    0,				/* flags */
		    &sc->ale_cdata.ale_rx_page[i].cmb_tag);
		if (error != 0) {
			device_printf(sc->ale_dev,
			    "could not create Rx page %d CMB DMA tag.\n", i);
			goto fail;
		}
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->ale_cdata.ale_tx_ring_tag,
	    (void **)&sc->ale_cdata.ale_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->ale_cdata.ale_tx_ring_map);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.ale_busaddr = 0;
	error = bus_dmamap_load(sc->ale_cdata.ale_tx_ring_tag,
	    sc->ale_cdata.ale_tx_ring_map, sc->ale_cdata.ale_tx_ring,
	    ALE_TX_RING_SZ, ale_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.ale_busaddr == 0) {
		device_printf(sc->ale_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->ale_cdata.ale_tx_ring_paddr = ctx.ale_busaddr;

	/* Rx pages. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		error = bus_dmamem_alloc(sc->ale_cdata.ale_rx_page[i].page_tag,
		    (void **)&sc->ale_cdata.ale_rx_page[i].page_addr,
		    BUS_DMA_WAITOK | BUS_DMA_ZERO,
		    &sc->ale_cdata.ale_rx_page[i].page_map);
		if (error != 0) {
			device_printf(sc->ale_dev,
			    "could not allocate DMA'able memory for "
			    "Rx page %d.\n", i);
			goto fail;
		}
		ctx.ale_busaddr = 0;
		error = bus_dmamap_load(sc->ale_cdata.ale_rx_page[i].page_tag,
		    sc->ale_cdata.ale_rx_page[i].page_map,
		    sc->ale_cdata.ale_rx_page[i].page_addr,
		    sc->ale_pagesize, ale_dmamap_cb, &ctx, 0);
		if (error != 0 || ctx.ale_busaddr == 0) {
			device_printf(sc->ale_dev,
			    "could not load DMA'able memory for "
			    "Rx page %d.\n", i);
			goto fail;
		}
		sc->ale_cdata.ale_rx_page[i].page_paddr = ctx.ale_busaddr;
	}
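
	/*
	 * The coalescing message blocks (CMBs) are small regions the
	 * controller DMAs its current ring indexes into, letting the
	 * driver read Tx consumer and Rx page producer state from host
	 * memory instead of doing register reads.  The Tx CMB is
	 * allocated but not used at run time because ALE_FLAG_TXCMB_BUG
	 * is always set (see ale_attach()).
	 */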

	/* Tx CMB. */
	error = bus_dmamem_alloc(sc->ale_cdata.ale_tx_cmb_tag,
	    (void **)&sc->ale_cdata.ale_tx_cmb,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->ale_cdata.ale_tx_cmb_map);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not allocate DMA'able memory for Tx CMB.\n");
		goto fail;
	}
	ctx.ale_busaddr = 0;
	error = bus_dmamap_load(sc->ale_cdata.ale_tx_cmb_tag,
	    sc->ale_cdata.ale_tx_cmb_map, sc->ale_cdata.ale_tx_cmb,
	    ALE_TX_CMB_SZ, ale_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.ale_busaddr == 0) {
		device_printf(sc->ale_dev,
		    "could not load DMA'able memory for Tx CMB.\n");
		goto fail;
	}
	sc->ale_cdata.ale_tx_cmb_paddr = ctx.ale_busaddr;

	/* Rx CMB. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		error = bus_dmamem_alloc(sc->ale_cdata.ale_rx_page[i].cmb_tag,
		    (void **)&sc->ale_cdata.ale_rx_page[i].cmb_addr,
		    BUS_DMA_WAITOK | BUS_DMA_ZERO,
		    &sc->ale_cdata.ale_rx_page[i].cmb_map);
		if (error != 0) {
			device_printf(sc->ale_dev, "could not allocate "
			    "DMA'able memory for Rx page %d CMB.\n", i);
			goto fail;
		}
		ctx.ale_busaddr = 0;
		error = bus_dmamap_load(sc->ale_cdata.ale_rx_page[i].cmb_tag,
		    sc->ale_cdata.ale_rx_page[i].cmb_map,
		    sc->ale_cdata.ale_rx_page[i].cmb_addr,
		    ALE_RX_CMB_SZ, ale_dmamap_cb, &ctx, 0);
		if (error != 0 || ctx.ale_busaddr == 0) {
			device_printf(sc->ale_dev, "could not load DMA'able "
			    "memory for Rx page %d CMB.\n", i);
			goto fail;
		}
		sc->ale_cdata.ale_rx_page[i].cmb_paddr = ctx.ale_busaddr;
	}

	/*
	 * Tx descriptors/RXF0/CMB DMA blocks share the same
	 * high address region of 64bit DMA address space.
	 */
	if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
	    (error = ale_check_boundary(sc)) != 0) {
		device_printf(sc->ale_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA addressing mode.\n");
		ale_dma_free(sc);
		/*
		 * Limit max allowable DMA address space to 32bit
		 * and try again.
		 */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/*
	 * Create Tx buffer parent tag.
	 * AR81xx allows 64bit DMA addressing of Tx buffers, so it
	 * needs a separate parent DMA tag, as the parent DMA address
	 * space could have been restricted to the 32bit address space
	 * by a 4GB boundary crossing above.
	 */
	error = bus_dma_tag_create(
	    NULL,			/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->ale_cdata.ale_buffer_tag);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->ale_cdata.ale_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALE_TSO_MAXSIZE,		/* maxsize */
	    ALE_MAXTXSEGS,		/* nsegments */
	    ALE_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    &sc->ale_cdata.ale_tx_tag);
	if (error != 0) {
		device_printf(sc->ale_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->ale_cdata.ale_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->ale_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
fail:
	return (error);
}

static void
ale_dma_free(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	int i;

	/* Tx buffers. */
	if (sc->ale_cdata.ale_tx_tag != NULL) {
		for (i = 0; i < ALE_TX_RING_CNT; i++) {
			txd = &sc->ale_cdata.ale_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->ale_cdata.ale_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->ale_cdata.ale_tx_tag);
		sc->ale_cdata.ale_tx_tag = NULL;
	}
	/* Tx descriptor ring. */
	if (sc->ale_cdata.ale_tx_ring_tag != NULL) {
		if (sc->ale_cdata.ale_tx_ring_map != NULL)
			bus_dmamap_unload(sc->ale_cdata.ale_tx_ring_tag,
			    sc->ale_cdata.ale_tx_ring_map);
		if (sc->ale_cdata.ale_tx_ring_map != NULL &&
		    sc->ale_cdata.ale_tx_ring != NULL)
			bus_dmamem_free(sc->ale_cdata.ale_tx_ring_tag,
			    sc->ale_cdata.ale_tx_ring,
			    sc->ale_cdata.ale_tx_ring_map);
		sc->ale_cdata.ale_tx_ring = NULL;
		sc->ale_cdata.ale_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->ale_cdata.ale_tx_ring_tag);
		sc->ale_cdata.ale_tx_ring_tag = NULL;
	}
	/* Rx page block. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		if (sc->ale_cdata.ale_rx_page[i].page_tag != NULL) {
			if (sc->ale_cdata.ale_rx_page[i].page_map != NULL)
				bus_dmamap_unload(
				    sc->ale_cdata.ale_rx_page[i].page_tag,
				    sc->ale_cdata.ale_rx_page[i].page_map);
			if (sc->ale_cdata.ale_rx_page[i].page_map != NULL &&
			    sc->ale_cdata.ale_rx_page[i].page_addr != NULL)
				bus_dmamem_free(
				    sc->ale_cdata.ale_rx_page[i].page_tag,
				    sc->ale_cdata.ale_rx_page[i].page_addr,
				    sc->ale_cdata.ale_rx_page[i].page_map);
			sc->ale_cdata.ale_rx_page[i].page_addr = NULL;
			sc->ale_cdata.ale_rx_page[i].page_map = NULL;
			bus_dma_tag_destroy(
			    sc->ale_cdata.ale_rx_page[i].page_tag);
			sc->ale_cdata.ale_rx_page[i].page_tag = NULL;
		}
	}
	/* Rx CMB. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		if (sc->ale_cdata.ale_rx_page[i].cmb_tag != NULL) {
			if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL)
				bus_dmamap_unload(
				    sc->ale_cdata.ale_rx_page[i].cmb_tag,
				    sc->ale_cdata.ale_rx_page[i].cmb_map);
			if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL &&
			    sc->ale_cdata.ale_rx_page[i].cmb_addr != NULL)
				bus_dmamem_free(
				    sc->ale_cdata.ale_rx_page[i].cmb_tag,
				    sc->ale_cdata.ale_rx_page[i].cmb_addr,
				    sc->ale_cdata.ale_rx_page[i].cmb_map);
			sc->ale_cdata.ale_rx_page[i].cmb_addr = NULL;
			sc->ale_cdata.ale_rx_page[i].cmb_map = NULL;
			bus_dma_tag_destroy(
			    sc->ale_cdata.ale_rx_page[i].cmb_tag);
			sc->ale_cdata.ale_rx_page[i].cmb_tag = NULL;
		}
	}
	/* Tx CMB. */
	if (sc->ale_cdata.ale_tx_cmb_tag != NULL) {
		if (sc->ale_cdata.ale_tx_cmb_map != NULL)
			bus_dmamap_unload(sc->ale_cdata.ale_tx_cmb_tag,
			    sc->ale_cdata.ale_tx_cmb_map);
		if (sc->ale_cdata.ale_tx_cmb_map != NULL &&
		    sc->ale_cdata.ale_tx_cmb != NULL)
			bus_dmamem_free(sc->ale_cdata.ale_tx_cmb_tag,
			    sc->ale_cdata.ale_tx_cmb,
			    sc->ale_cdata.ale_tx_cmb_map);
		sc->ale_cdata.ale_tx_cmb = NULL;
		sc->ale_cdata.ale_tx_cmb_map = NULL;
		bus_dma_tag_destroy(sc->ale_cdata.ale_tx_cmb_tag);
		sc->ale_cdata.ale_tx_cmb_tag = NULL;
	}
	if (sc->ale_cdata.ale_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->ale_cdata.ale_buffer_tag);
		sc->ale_cdata.ale_buffer_tag = NULL;
	}
	if (sc->ale_cdata.ale_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->ale_cdata.ale_parent_tag);
		sc->ale_cdata.ale_parent_tag = NULL;
	}
}

static int
ale_shutdown(device_t dev)
{
	return (ale_suspend(dev));
}

#ifdef notyet

/*
 * Note, this driver resets the link speed to 10/100Mbps by
 * restarting auto-negotiation in the suspend/shutdown phase, but we
 * don't know whether that auto-negotiation will succeed, as the
 * driver has no control after the power-off/suspend operation.
 * If the renegotiation fails, WOL may not work.  Running at 1Gbps
 * would draw more power than the 375mA at 3.3V specified in the PCI
 * specification, and that could result in power to the ethernet
 * controller being shut down completely.
 *
 * TODO
 * Save current negotiated media speed/duplex/flow-control to
 * softc and restore the same link again after resuming.  PHY
 * handling such as power down/resetting to 100Mbps may be better
 * handled in the suspend method in the phy driver.
 */
static void
ale_setlinkspeed(struct ale_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	mii = device_get_softc(sc->ale_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			break;
		default:
			break;
		}
	}
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, MII_100T2CR, 0);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/*
		 * Poll link state until ale(4) gets a 10/100Mbps link.
		 */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
			    == (IFM_ACTIVE | IFM_AVALID)) {
				switch (IFM_SUBTYPE(
				    mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					ale_mac_config(sc);
					return;
				default:
					break;
				}
			}
			ALE_UNLOCK(sc);
			pause("alelnk", hz);
			ALE_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->ale_dev,
			    "establishing a link failed, WOL may not work!");
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	ale_mac_config(sc);
}

static void
ale_setwol(struct ale_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg, pmcs;
	uint16_t pmstat;
	int pmc;

	ALE_LOCK_ASSERT(sc);

	if (pci_find_extcap(sc->ale_dev, PCIY_PMG, &pmc) != 0) {
		/* Disable WOL. */
		CSR_WRITE_4(sc, ALE_WOL_CFG, 0);
		reg = CSR_READ_4(sc, ALE_PCIE_PHYMISC);
		reg |= PCIE_PHYMISC_FORCE_RCV_DET;
		CSR_WRITE_4(sc, ALE_PCIE_PHYMISC, reg);
		/* Force PHY power down. */
		CSR_WRITE_2(sc, ALE_GPHY_CTRL,
		    GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN |
		    GPHY_CTRL_HIB_PULSE | GPHY_CTRL_PHY_PLL_ON |
		    GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_IDDQ |
		    GPHY_CTRL_PCLK_SEL_DIS | GPHY_CTRL_PWDOWN_HW);
		return;
	}

	ifp = sc->ale_ifp;
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		if ((sc->ale_flags & ALE_FLAG_FASTETHER) == 0)
			ale_setlinkspeed(sc);
	}

	pmcs = 0;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
	CSR_WRITE_4(sc, ALE_WOL_CFG, pmcs);
	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
	    MAC_CFG_BCAST);
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		reg |= MAC_CFG_RX_ENB;
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);

	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* WOL disabled, PHY power down. */
		reg = CSR_READ_4(sc, ALE_PCIE_PHYMISC);
		reg |= PCIE_PHYMISC_FORCE_RCV_DET;
		CSR_WRITE_4(sc, ALE_PCIE_PHYMISC, reg);
		CSR_WRITE_2(sc, ALE_GPHY_CTRL,
		    GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN |
		    GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET |
		    GPHY_CTRL_PHY_IDDQ | GPHY_CTRL_PCLK_SEL_DIS |
		    GPHY_CTRL_PWDOWN_HW);
	}
	/* Request PME. */
	pmstat = pci_read_config(sc->ale_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->ale_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

#endif	/* notyet */

static int
ale_suspend(device_t dev)
{
	struct ale_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	ale_stop(sc);
#ifdef notyet
	ale_setwol(sc);
#endif
	lwkt_serialize_exit(ifp->if_serializer);
	return (0);
}

static int
ale_resume(device_t dev)
{
	struct ale_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t cmd;

	lwkt_serialize_enter(ifp->if_serializer);

	/*
	 * Clear the INTx emulation disable bit that some hardware
	 * leaves set after a resume event.  From Linux.
	 */
	cmd = pci_read_config(sc->ale_dev, PCIR_COMMAND, 2);
	if ((cmd & 0x0400) != 0) {
		cmd &= ~0x0400;
		pci_write_config(sc->ale_dev, PCIR_COMMAND, cmd, 2);
	}

#ifdef notyet
	{
		uint16_t pmstat;
		int pmc;

		if (pci_find_extcap(sc->ale_dev, PCIY_PMG, &pmc) == 0) {
			/* Disable PME and clear PME status. */
			pmstat = pci_read_config(sc->ale_dev,
			    pmc + PCIR_POWER_STATUS, 2);
			if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
				pmstat &= ~PCIM_PSTAT_PMEENABLE;
				pci_write_config(sc->ale_dev,
				    pmc + PCIR_POWER_STATUS, pmstat, 2);
			}
		}
	}
#endif

	/* Reset PHY. */
	ale_phy_reset(sc);
	if ((ifp->if_flags & IFF_UP) != 0)
		ale_init(sc);

	lwkt_serialize_exit(ifp->if_serializer);
	return (0);
}

static int
ale_encap(struct ale_softc *sc, struct mbuf **m_head)
{
	struct ale_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[ALE_MAXTXSEGS];
	struct ale_dmamap_ctx ctx;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, i, nsegs, prod;

	M_ASSERTPKTHDR((*m_head));

	m = *m_head;
	cflags = vtag = 0;
	poff = 0;

	prod = sc->ale_cdata.ale_tx_prod;
	txd = &sc->ale_cdata.ale_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	ctx.nsegs = ALE_MAXTXSEGS;
	ctx.segs = txsegs;
	error = bus_dmamap_load_mbuf(sc->ale_cdata.ale_tx_tag, map,
	    *m_head, ale_dmamap_buf_cb, &ctx, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;

		ctx.nsegs = ALE_MAXTXSEGS;
		ctx.segs = txsegs;
		error = bus_dmamap_load_mbuf(sc->ale_cdata.ale_tx_tag, map,
		    *m_head, ale_dmamap_buf_cb, &ctx, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0) {
		return (error);
	}
	nsegs = ctx.nsegs;

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check descriptor overrun. */
	if (sc->ale_cdata.ale_tx_cnt + nsegs >= ALE_TX_RING_CNT - 2) {
		bus_dmamap_unload(sc->ale_cdata.ale_tx_tag, map);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->ale_cdata.ale_tx_tag, map, BUS_DMASYNC_PREWRITE);

	m = *m_head;
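
	/*
	 * Note that Tx checksum offload is currently disabled in
	 * ale_attach() (the IFCAP_TXCSUM block is under "notyet"), so
	 * poff stays 0 here.  If offloading is enabled, poff has to be
	 * set to the start of the TCP/UDP header (Ethernet plus IP
	 * header length, obtained by parsing the frame) before the
	 * offsets below are programmed.
	 */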
	/* Configure Tx checksum offload. */
	if ((m->m_pkthdr.csum_flags & ALE_CSUM_FEATURES) != 0) {
		/*
		 * AR81xx supports Tx custom checksum offload feature
		 * that offloads single 16bit checksum computation.
		 * So you can choose one among IP, TCP and UDP.
		 * Normally the driver sets checksum start/insertion
		 * position from the information of the TCP/UDP frame,
		 * as the TCP/UDP checksum takes more time than that of
		 * IP.  However it seems that custom checksum offload
		 * requires 4 bytes aligned Tx buffers due to a hardware
		 * bug.
		 * AR81xx also supports explicit Tx checksum computation
		 * if it is told the sizes of the IP header and the TCP
		 * header (for UDP, the header size does not matter
		 * because it's fixed length).  However with this scheme
		 * TSO does not work, so you have to choose either TSO
		 * or explicit Tx checksum offload.  I chose TSO plus
		 * custom checksum offload with a work-around, which
		 * will cover the most common usage for this consumer
		 * ethernet controller.  The work-around takes a lot of
		 * CPU cycles if the Tx buffer is not aligned on a
		 * 4 byte boundary, though.
		 */
		cflags |= ALE_TD_CXSUM;
		/* Set checksum start offset. */
		cflags |= (poff << ALE_TD_CSUM_PLOADOFFSET_SHIFT);
		/* Set checksum insertion position of TCP/UDP. */
		cflags |= ((poff + m->m_pkthdr.csum_data) <<
		    ALE_TD_CSUM_XSUMOFFSET_SHIFT);
	}

	/* Configure VLAN hardware tag insertion. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		vtag = ALE_TX_VLAN_TAG(m->m_pkthdr.ether_vlantag);
		vtag = ((vtag << ALE_TD_VLAN_SHIFT) & ALE_TD_VLAN_MASK);
		cflags |= ALE_TD_INSERT_VLAN_TAG;
	}

	desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->ale_cdata.ale_tx_ring[prod];
		desc->addr = htole64(txsegs[i].ds_addr);
		desc->len = htole32(ALE_TX_BYTES(txsegs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->ale_cdata.ale_tx_cnt++;
		ALE_DESC_INC(prod, ALE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->ale_cdata.ale_tx_prod = prod;

	/* Finally set EOP on the last descriptor. */
	prod = (prod + ALE_TX_RING_CNT - 1) % ALE_TX_RING_CNT;
	desc = &sc->ale_cdata.ale_tx_ring[prod];
	desc->flags |= htole32(ALE_TD_EOP);

	/* Swap dmamap of the first and the last. */
	txd = &sc->ale_cdata.ale_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag,
	    sc->ale_cdata.ale_tx_ring_map, BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
ale_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->ale_flags & ALE_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	/* Reclaim transmitted frames. */
	if (sc->ale_cdata.ale_tx_cnt >= ALE_TX_DESC_HIWAT)
		ale_txeof(sc);

	enq = 0;
	while (!ifq_is_empty(&ifp->if_snd)) {
		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (ale_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			ifq_prepend(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		enq = 1;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq) {
		/* Kick. */
		CSR_WRITE_4(sc, ALE_MBOX_TPD_PROD_IDX,
		    sc->ale_cdata.ale_tx_prod);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = ALE_TX_TIMEOUT;
	}
}

static void
ale_watchdog(struct ifnet *ifp)
{
	struct ale_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->ale_flags & ALE_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (lost link)\n");
		IFNET_STAT_INC(ifp, oerrors, 1);
		ale_init(sc);
		return;
	}

	if_printf(ifp, "watchdog timeout -- resetting\n");
	IFNET_STAT_INC(ifp, oerrors, 1);
	ale_init(sc);

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static int
ale_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct ale_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ALE_JUMBO_MTU ||
		    ((sc->ale_flags & ALE_FLAG_JUMBO) == 0 &&
		     ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ale_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_flags & IFF_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->ale_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ale_rxfilter(sc);
			} else {
				if ((sc->ale_flags & ALE_FLAG_DETACH) == 0)
					ale_init(sc);
			}
		} else {
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ale_stop(sc);
		}
		sc->ale_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			ale_rxfilter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->ale_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= ALE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~ALE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			ale_rxvlan(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
ale_mac_config(struct ale_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	mii = device_get_softc(sc->ale_miibus);
	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
	    MAC_CFG_SPEED_MASK);
	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
#endif
	}
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
}
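
/*
 * The MAC statistics (MIB) counters are cleared by reading them:
 * ale_stats_clear() walks the whole Rx and Tx register files and
 * discards the values, which zeroes every counter.  The smb structure
 * is used only to size the walk.
 */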
static void
ale_mac_config(struct ale_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	mii = device_get_softc(sc->ale_miibus);
	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
	    MAC_CFG_SPEED_MASK);

	/* Reprogram the MAC with the resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
#endif
	}
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
}

static void
ale_stats_clear(struct ale_softc *sc)
{
	struct smb sb;
	uint32_t *reg;
	int i;

	/* Read and discard Rx statistics to clear the counters. */
	for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
		CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
	/* Read and discard Tx statistics. */
	for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
		CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
}
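
/*
 * Editorial sketch (not part of the driver): ale_stats_clear() and
 * ale_stats_update() walk a uint32_t pointer across consecutive members
 * of struct smb while advancing a register offset in lockstep.  This
 * only works because the counters are contiguous uint32_t fields
 * declared in hardware order.  A standalone illustration with a
 * hypothetical three-counter block (all "demo_" names are invented):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

struct demo_mib {
	uint32_t rx_frames;	/* offset 0x0 in the counter block */
	uint32_t rx_bytes;	/* offset 0x4 */
	uint32_t rx_crcerrs;	/* offset 0x8 */
};

static uint32_t
demo_csr_read(int off)
{
	return (0x100 + off);	/* fake register read */
}

int
main(void)
{
	struct demo_mib sb;
	uint32_t *reg;
	int i;

	/* Mirror the hardware counter block into the struct. */
	for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_crcerrs; reg++) {
		*reg = demo_csr_read(i);
		i += sizeof(uint32_t);
	}
	printf("%u %u %u\n", sb.rx_frames, sb.rx_bytes, sb.rx_crcerrs);
	return (0);
}
#endif
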
static void
ale_stats_update(struct ale_softc *sc)
{
	struct ale_hw_stats *stat;
	struct smb sb, *smb;
	struct ifnet *ifp;
	uint32_t *reg;
	int i;

	ifp = &sc->arpcom.ac_if;
	stat = &sc->ale_stats;
	smb = &sb;

	/* Read Rx statistics. */
	for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
		*reg = CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
	/* Read Tx statistics. */
	for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
		*reg = CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_rrs_errs += smb->rx_rrs_errs;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_abort += smb->tx_abort;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	IFNET_STAT_INC(ifp, opackets, smb->tx_frames);

	IFNET_STAT_INC(ifp, collisions, smb->tx_single_colls +
	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
	    smb->tx_abort * HDPX_CFG_RETRY_DEFAULT);

	/*
	 * XXX
	 * The tx_pkts_truncated counter looks suspicious.  It constantly
	 * increments with no sign of Tx errors, which may indicate that
	 * the counter is misnamed, so it is not included in the output
	 * error count.
	 */
	IFNET_STAT_INC(ifp, oerrors, smb->tx_abort + smb->tx_late_colls +
	    smb->tx_underrun);

	IFNET_STAT_INC(ifp, ipackets, smb->rx_frames);

	IFNET_STAT_INC(ifp, ierrors, smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
	    smb->rx_alignerrs);
}

static void
ale_intr(void *xsc)
{
	struct ale_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	ASSERT_SERIALIZED(ifp->if_serializer);

	status = CSR_READ_4(sc, ALE_INTR_STATUS);
	if ((status & ALE_INTRS) == 0)
		return;

	/* Acknowledge and disable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, status | INTR_DIS_INT);

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		int error;

		error = ale_rxeof(sc);
		if (error) {
			sc->ale_stats.reset_brk_seq++;
			ale_init(sc);
			return;
		}

		if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) {
			if ((status & INTR_DMA_RD_TO_RST) != 0)
				device_printf(sc->ale_dev,
				    "DMA read error! -- resetting\n");
			if ((status & INTR_DMA_WR_TO_RST) != 0)
				device_printf(sc->ale_dev,
				    "DMA write error! -- resetting\n");
			ale_init(sc);
			return;
		}

		ale_txeof(sc);
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0x7FFFFFFF);
}
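
/*
 * Editorial sketch (not part of the driver): ale_intr() follows the
 * usual "acknowledge and disable, process, re-enable" discipline.
 * Writing the status back with INTR_DIS_INT set acks the pending causes
 * and masks further interrupts while the handler runs; the final write
 * of 0x7FFFFFFF presumably clears all cause bits while leaving the top
 * (disable) bit deasserted, re-arming the line.  A schematic rendering
 * of the same write sequence (register access faked; the real status
 * register semantics are not documented here):
 */
#if 0
#include <stdint.h>

#define DEMO_DIS_INT	0x80000000u	/* stand-in for INTR_DIS_INT */

static uint32_t demo_intr_status;	/* fake ALE_INTR_STATUS register */

static void
demo_isr(void)
{
	uint32_t status = demo_intr_status;

	/* Ack the causes we saw and mask the line in one write. */
	demo_intr_status = status | DEMO_DIS_INT;

	/* ... process Rx/Tx completions here ... */

	/* Clear every cause bit; bit 31 stays zero, so the line re-arms. */
	demo_intr_status = 0x7FFFFFFF;
}
#endif
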
static void
ale_txeof(struct ale_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ale_txdesc *txd;
	uint32_t cons, prod;
	int prog;

	if (sc->ale_cdata.ale_tx_cnt == 0)
		return;

	bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag,
	    sc->ale_cdata.ale_tx_ring_map, BUS_DMASYNC_POSTREAD);
	if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0) {
		bus_dmamap_sync(sc->ale_cdata.ale_tx_cmb_tag,
		    sc->ale_cdata.ale_tx_cmb_map, BUS_DMASYNC_POSTREAD);
		prod = *sc->ale_cdata.ale_tx_cmb & TPD_CNT_MASK;
	} else
		prod = CSR_READ_2(sc, ALE_TPD_CONS_IDX);
	cons = sc->ale_cdata.ale_tx_cons;
	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; cons != prod; prog++,
	     ALE_DESC_INC(cons, ALE_TX_RING_CNT)) {
		if (sc->ale_cdata.ale_tx_cnt <= 0)
			break;
		prog++;
		ifq_clr_oactive(&ifp->if_snd);
		sc->ale_cdata.ale_tx_cnt--;
		txd = &sc->ale_cdata.ale_txdesc[cons];
		if (txd->tx_m != NULL) {
			/* Reclaim transmitted mbufs. */
			bus_dmamap_unload(sc->ale_cdata.ale_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	if (prog > 0) {
		sc->ale_cdata.ale_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending Tx descriptors in the queue.
		 */
		if (sc->ale_cdata.ale_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}

static void
ale_rx_update_page(struct ale_softc *sc, struct ale_rx_page **page,
    uint32_t length, uint32_t *prod)
{
	struct ale_rx_page *rx_page;

	rx_page = *page;
	/* Update consumer position. */
	rx_page->cons += roundup(length + sizeof(struct rx_rs),
	    ALE_RX_PAGE_ALIGN);
	if (rx_page->cons >= ALE_RX_PAGE_SZ) {
		/*
		 * End of Rx page reached, let hardware reuse
		 * this page.
		 */
		rx_page->cons = 0;
		*rx_page->cmb_addr = 0;
		bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_1(sc, ALE_RXF0_PAGE0 + sc->ale_cdata.ale_rx_curp,
		    RXF_VALID);
		/* Switch to the alternate Rx page. */
		sc->ale_cdata.ale_rx_curp ^= 1;
		rx_page = *page =
		    &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
		/* Page flipped, sync CMB and the new Rx page. */
		bus_dmamap_sync(rx_page->page_tag, rx_page->page_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
		    BUS_DMASYNC_POSTREAD);
		/* Sync completed, cache the updated producer index. */
		*prod = *rx_page->cmb_addr;
	}
}
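
/*
 * Editorial sketch (not part of the driver): AR81xx Rx does not use a
 * classic descriptor ring.  Frames are packed back to back into one of
 * two DMA pages; the driver consumes entries until it reaches the
 * producer index published in the page's CMB, then hands the exhausted
 * page back to the hardware and flips to the other page.  The consumer
 * offset advances by the entry size rounded up to the page alignment.
 * Standalone illustration (hypothetical sizes; 16 stands in for
 * sizeof(struct rx_rs), and all "DEMO_" names are invented):
 */
#if 0
#include <stdio.h>

#define DEMO_PAGE_ALIGN		32
#define DEMO_ROUNDUP(x, a)	(((x) + (a) - 1) / (a) * (a))

int
main(void)
{
	unsigned lengths[] = { 60, 1514, 128 };	/* frame sizes in a page */
	unsigned cons = 0;
	int i, curp = 0;

	for (i = 0; i < 3; i++) {
		printf("frame %d at offset %u\n", i, cons);
		cons += DEMO_ROUNDUP(lengths[i] + 16, DEMO_PAGE_ALIGN);
	}
	/* Page exhausted: return it to hardware, flip to the other one. */
	curp ^= 1;
	printf("switch to page %d, consumer reset\n", curp);
	return (0);
}
#endif
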
/*
 * It seems that the AR81xx controller can compute partial checksums.
 * The partial checksum value can be used to accelerate checksum
 * computation for fragmented TCP/UDP packets.  The upper network stack
 * already takes advantage of the partial checksum value in the IP
 * reassembly stage.  But due to the lack of a data sheet, the
 * correctness of this partial hardware checksum assistance is unclear.
 * In addition, the Rx design of the controller, which requires copying
 * every frame, effectively nullifies one of the nicest offload
 * capabilities of the controller.
 */
static void
ale_rxcsum(struct ale_softc *sc, struct mbuf *m, uint32_t status)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ip *ip;
	char *p;

	m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
	if ((status & ALE_RD_IPCSUM_NOK) == 0)
		m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

	if ((sc->ale_flags & ALE_FLAG_RXCSUM_BUG) == 0) {
		if (((status & ALE_RD_IPV4_FRAG) == 0) &&
		    ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0) &&
		    ((status & ALE_RD_TCP_UDPCSUM_NOK) == 0)) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	} else {
		if ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0 &&
		    (status & ALE_RD_TCP_UDPCSUM_NOK) == 0) {
			p = mtod(m, char *);
			p += ETHER_HDR_LEN;
			if ((status & ALE_RD_802_3) != 0)
				p += LLC_SNAPFRAMELEN;
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0 &&
			    (status & ALE_RD_VLAN) != 0)
				p += EVL_ENCAPLEN;
			ip = (struct ip *)p;
			if (ip->ip_off != 0 && (status & ALE_RD_IPV4_DF) == 0)
				return;
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}
	/*
	 * Don't mark a bad checksum for TCP/UDP frames, as fragmented
	 * frames may always have the bad-checksum bit set in their
	 * frame status.
	 */
}
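
/*
 * Editorial sketch (not part of the driver): on chips with the Rx
 * checksum bug, ale_rxcsum() must locate the IP header itself before it
 * can trust the hardware verdict.  The offset is ETHER_HDR_LEN, plus the
 * LLC/SNAP header for 802.3 encapsulation, plus the VLAN encapsulation
 * length when the tag was not stripped by hardware.  Standalone
 * illustration with the standard sizes hard-coded rather than taken from
 * the kernel headers:
 */
#if 0
#include <stdio.h>

#define DEMO_ETHER_HDR_LEN	14	/* dst + src + ethertype */
#define DEMO_LLC_SNAPFRAMELEN	8	/* LLC/SNAP header */
#define DEMO_EVL_ENCAPLEN	4	/* 802.1Q tag */

int
main(void)
{
	int is_802_3 = 0;		/* ALE_RD_802_3 not set */
	int vlan_in_payload = 1;	/* tag left in the frame */
	int off = DEMO_ETHER_HDR_LEN;

	if (is_802_3)
		off += DEMO_LLC_SNAPFRAMELEN;
	if (vlan_in_payload)
		off += DEMO_EVL_ENCAPLEN;
	printf("IP header at offset %d\n", off);	/* prints 18 */
	return (0);
}
#endif
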
/* Process received frames. */
static int
ale_rxeof(struct ale_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ale_rx_page *rx_page;
	struct rx_rs *rs;
	struct mbuf *m;
	uint32_t length, prod, seqno, status, vtags;
	int prog;

	rx_page = &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
	bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(rx_page->page_tag, rx_page->page_map,
	    BUS_DMASYNC_POSTREAD);
	/*
	 * Don't access the producer index directly, as hardware may
	 * update it while the Rx handler is in progress.  It would be
	 * even better if there were a way to let the hardware know how
	 * far the driver has processed its received frames, or a way
	 * to disable CMB updates until the driver acknowledges the end
	 * of CMB access.
	 */
	prod = *rx_page->cmb_addr;
	for (prog = 0; ; prog++) {
		if (rx_page->cons >= prod)
			break;
		rs = (struct rx_rs *)(rx_page->page_addr + rx_page->cons);
		seqno = ALE_RX_SEQNO(le32toh(rs->seqno));
		if (sc->ale_cdata.ale_rx_seqno != seqno) {
			/*
			 * Normally this should not happen unless there is
			 * a severe driver bug or corrupted memory.  However,
			 * it seems to happen under certain conditions
			 * triggered by abrupt Rx events such as a remote
			 * host initiating a bulk transfer.  It is not easy
			 * to reproduce, and it seems unrelated to a hardware
			 * FIFO overflow or to Tx CMB update activity.
			 * Similar behaviour has been seen on the RealTek
			 * 8139, which uses a resembling Rx scheme.
			 */
			if (bootverbose)
				device_printf(sc->ale_dev,
				    "garbled seq: %u, expected: %u -- "
				    "resetting!\n", seqno,
				    sc->ale_cdata.ale_rx_seqno);
			return (EIO);
		}
		/* Frame received. */
		sc->ale_cdata.ale_rx_seqno++;
		length = ALE_RX_BYTES(le32toh(rs->length));
		status = le32toh(rs->flags);
		if ((status & ALE_RD_ERROR) != 0) {
			/*
			 * We want to pass the following frames to the upper
			 * layer regardless of the error bits in the Rx
			 * return status:
			 *
			 *  o IP/TCP/UDP checksum is bad.
			 *  o frame length and protocol-specific length
			 *    do not match.
			 */
			if ((status & (ALE_RD_CRC | ALE_RD_CODE |
			    ALE_RD_DRIBBLE | ALE_RD_RUNT | ALE_RD_OFLOW |
			    ALE_RD_TRUNC)) != 0) {
				ale_rx_update_page(sc, &rx_page, length, &prod);
				continue;
			}
		}
		/*
		 * m_devget(9) is the major bottleneck of ale(4); it comes
		 * from a hardware limitation.  For jumbo frames we could
		 * get slightly better performance if the driver used
		 * m_getjcl(9) with a proper buffer size argument.  However,
		 * that would make the code more complicated, and users are
		 * unlikely to expect good Rx performance numbers from these
		 * low-end consumer Ethernet controllers.
		 */
		m = m_devget((char *)(rs + 1), length - ETHER_CRC_LEN,
		    ETHER_ALIGN, ifp);
		if (m == NULL) {
			IFNET_STAT_INC(ifp, iqdrops, 1);
			ale_rx_update_page(sc, &rx_page, length, &prod);
			continue;
		}
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
		    (status & ALE_RD_IPV4) != 0)
			ale_rxcsum(sc, m, status);
		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (status & ALE_RD_VLAN) != 0) {
			vtags = ALE_RX_VLAN(le32toh(rs->vtags));
			m->m_pkthdr.ether_vlantag = ALE_RX_VLAN_TAG(vtags);
			m->m_flags |= M_VLANTAG;
		}

		/* Pass it to the upper layer. */
		ifp->if_input(ifp, m, NULL, -1);

		ale_rx_update_page(sc, &rx_page, length, &prod);
	}
	return (0);
}
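
/*
 * Editorial sketch (not part of the driver): because received frames are
 * packed into a shared DMA page that the hardware recycles wholesale,
 * ale(4) cannot hand page memory up the stack; each frame must be copied
 * out (m_devget(9)) before the page is returned to the chip.  The
 * standalone fragment below mimics that copy step with a plain memcpy
 * (all "demo" values are invented):
 */
#if 0
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char page[4096];	/* stand-in for the shared Rx DMA page */
	char frame[128];	/* stand-in for a freshly allocated mbuf */
	unsigned cons = 256;	/* consumer offset of the current frame */
	unsigned len = 64;	/* payload length from the return status */

	memset(page, 0xab, sizeof(page));
	/* Copy the frame out; the page can then be reused by "hardware". */
	memcpy(frame, page + cons, len);
	printf("copied %u bytes, first byte 0x%02x\n", len,
	    (unsigned char)frame[0]);
	return (0);
}
#endif
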
static void
ale_tick(void *xsc)
{
	struct ale_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	lwkt_serialize_enter(ifp->if_serializer);

	mii = device_get_softc(sc->ale_miibus);
	mii_tick(mii);
	ale_stats_update(sc);

	callout_reset(&sc->ale_tick_ch, hz, ale_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static void
ale_reset(struct ale_softc *sc)
{
	uint32_t reg;
	int i;

	/* Initialize PCIe module.  From Linux. */
	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);

	CSR_WRITE_4(sc, ALE_MASTER_CFG, MASTER_RESET);
	for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_4(sc, ALE_MASTER_CFG) & MASTER_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->ale_dev, "master reset timeout!\n");

	for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, ALE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		device_printf(sc->ale_dev, "reset timeout(0x%08x)!\n", reg);
}

static void
ale_init(void *xsc)
{
	struct ale_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, rxf_hi, rxf_lo;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc->ale_miibus);

	/*
	 * Cancel any pending I/O.
	 */
	ale_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	ale_reset(sc);

	/* Initialize Rx pages and the Tx ring (DMA memory blocks). */
	ale_init_rx_pages(sc);
	ale_init_tx_ring(sc);

	/* Reprogram the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, ALE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, ALE_PAR1, eaddr[0] << 8 | eaddr[1]);

	/*
	 * Clear WOL status and disable all WOL features, as WOL
	 * would interfere with Rx operation under normal conditions.
	 */
	CSR_READ_4(sc, ALE_WOL_CFG);
	CSR_WRITE_4(sc, ALE_WOL_CFG, 0);

	/*
	 * Set Tx descriptor/RXF0/CMB base addresses.  They share
	 * the same high address part of the DMAable region.
	 */
	paddr = sc->ale_cdata.ale_tx_ring_paddr;
	CSR_WRITE_4(sc, ALE_TPD_ADDR_HI, ALE_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALE_TPD_ADDR_LO, ALE_ADDR_LO(paddr));
	CSR_WRITE_4(sc, ALE_TPD_CNT,
	    (ALE_TX_RING_CNT << TPD_CNT_SHIFT) & TPD_CNT_MASK);

	/* Set Rx page base addresses; note we use a single queue. */
	paddr = sc->ale_cdata.ale_rx_page[0].page_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_PAGE0_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[1].page_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_PAGE1_ADDR_LO, ALE_ADDR_LO(paddr));

	/* Set Tx/Rx CMB addresses. */
	paddr = sc->ale_cdata.ale_tx_cmb_paddr;
	CSR_WRITE_4(sc, ALE_TX_CMB_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[0].cmb_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_CMB0_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[1].cmb_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_CMB1_ADDR_LO, ALE_ADDR_LO(paddr));

	/* Mark RXF0 valid. */
	CSR_WRITE_1(sc, ALE_RXF0_PAGE0, RXF_VALID);
	CSR_WRITE_1(sc, ALE_RXF0_PAGE1, RXF_VALID);
	/*
	 * No need to initialize RXF1/RXF2/RXF3.  We don't use
	 * multi-queue yet.
	 */

	/* Set Rx page size, excluding guard frame size. */
	CSR_WRITE_4(sc, ALE_RXF_PAGE_SIZE, ALE_RX_PAGE_SZ);

	/* Tell hardware that we're ready to load DMA blocks. */
	CSR_WRITE_4(sc, ALE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/* Set Rx/Tx interrupt trigger thresholds. */
	CSR_WRITE_4(sc, ALE_INT_TRIG_THRESH, (1 << INT_TRIG_RX_THRESH_SHIFT) |
	    (4 << INT_TRIG_TX_THRESH_SHIFT));
	/*
	 * XXX
	 * Set the interrupt trigger timer; its purpose and its relation
	 * to the interrupt moderation mechanism are not clear yet.
	 */
	CSR_WRITE_4(sc, ALE_INT_TRIG_TIMER,
	    ((ALE_USECS(10) << INT_TRIG_RX_TIMER_SHIFT) |
	    (ALE_USECS(1000) << INT_TRIG_TX_TIMER_SHIFT)));

	/* Configure interrupt moderation timer. */
	reg = ALE_USECS(sc->ale_int_rx_mod) << IM_TIMER_RX_SHIFT;
	reg |= ALE_USECS(sc->ale_int_tx_mod) << IM_TIMER_TX_SHIFT;
	CSR_WRITE_4(sc, ALE_IM_TIMER, reg);
	reg = CSR_READ_4(sc, ALE_MASTER_CFG);
	reg &= ~(MASTER_CHIP_REV_MASK | MASTER_CHIP_ID_MASK);
	reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
	if (ALE_USECS(sc->ale_int_rx_mod) != 0)
		reg |= MASTER_IM_RX_TIMER_ENB;
	if (ALE_USECS(sc->ale_int_tx_mod) != 0)
		reg |= MASTER_IM_TX_TIMER_ENB;
	CSR_WRITE_4(sc, ALE_MASTER_CFG, reg);
	CSR_WRITE_2(sc, ALE_INTR_CLR_TIMER, ALE_USECS(1000));
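
	/*
	 * Editorial aside (not part of the driver): ALE_IM_TIMER packs both
	 * moderation intervals into one register -- the Rx interval in the
	 * low field and the Tx interval in the high field, each converted
	 * from microseconds to hardware ticks by ALE_USECS().  Under the
	 * assumption of a 2us tick, the write above is equivalent to the
	 * sketch below for a 30us Rx / 1ms Tx setting:
	 */
#if 0
	uint32_t demo_reg;

	demo_reg = (30 / 2) << IM_TIMER_RX_SHIFT;	/* 30us Rx interval */
	demo_reg |= (1000 / 2) << IM_TIMER_TX_SHIFT;	/* 1ms Tx interval */
	CSR_WRITE_4(sc, ALE_IM_TIMER, demo_reg);
#endif
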
	/* Set the maximum frame size of the controller. */
	if (ifp->if_mtu < ETHERMTU)
		sc->ale_max_frame_size = ETHERMTU;
	else
		sc->ale_max_frame_size = ifp->if_mtu;
	sc->ale_max_frame_size += ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN;
	CSR_WRITE_4(sc, ALE_FRAME_SIZE, sc->ale_max_frame_size);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, ALE_IPG_IFG_CFG,
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, ALE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure Tx jumbo frame parameters. */
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
		if (ifp->if_mtu < ETHERMTU)
			reg = sc->ale_max_frame_size;
		else if (ifp->if_mtu < 6 * 1024)
			reg = (sc->ale_max_frame_size * 2) / 3;
		else
			reg = sc->ale_max_frame_size / 2;
		CSR_WRITE_4(sc, ALE_TX_JUMBO_THRESH,
		    roundup(reg, TX_JUMBO_THRESH_UNIT) >>
		    TX_JUMBO_THRESH_UNIT_SHIFT);
	}

	/* Configure TxQ. */
	reg = (128 << (sc->ale_dma_rd_burst >> DMA_CFG_RD_BURST_SHIFT))
	    << TXQ_CFG_TX_FIFO_BURST_SHIFT;
	reg |= (TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK;
	CSR_WRITE_4(sc, ALE_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE | TXQ_CFG_ENB);

	/* Configure Rx jumbo frame & flow control parameters. */
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
		reg = roundup(sc->ale_max_frame_size, RX_JUMBO_THRESH_UNIT);
		CSR_WRITE_4(sc, ALE_RX_JUMBO_THRESH,
		    (((reg >> RX_JUMBO_THRESH_UNIT_SHIFT) <<
		    RX_JUMBO_THRESH_MASK_SHIFT) & RX_JUMBO_THRESH_MASK) |
		    ((RX_JUMBO_LKAH_DEFAULT << RX_JUMBO_LKAH_SHIFT) &
		    RX_JUMBO_LKAH_MASK));
		reg = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);
		rxf_hi = (reg * 7) / 10;
		rxf_lo = (reg * 3) / 10;
		CSR_WRITE_4(sc, ALE_RX_FIFO_PAUSE_THRESH,
		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_HI_MASK));
	}
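
	/*
	 * Editorial aside (not part of the driver): the flow-control
	 * watermarks above are plain fractions of the Rx FIFO length read
	 * back from the chip -- pause frames are asserted when the FIFO
	 * fills past 7/10 of its length and deasserted when it drains
	 * below 3/10.  For a hypothetical 8KB FIFO the integer arithmetic
	 * works out as follows:
	 */
#if 0
	uint32_t demo_len = 8192;		/* hypothetical FIFO length */
	uint32_t demo_hi = (demo_len * 7) / 10;	/* pause on at 5734 */
	uint32_t demo_lo = (demo_len * 3) / 10;	/* pause off at 2457 */
#endif
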
	/* Disable RSS. */
	CSR_WRITE_4(sc, ALE_RSS_IDT_TABLE0, 0);
	CSR_WRITE_4(sc, ALE_RSS_CPU, 0);

	/* Configure RxQ. */
	CSR_WRITE_4(sc, ALE_RXQ_CFG,
	    RXQ_CFG_ALIGN_32 | RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);

	/* Configure DMA parameters. */
	reg = 0;
	if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0)
		reg |= DMA_CFG_TXCMB_ENB;
	CSR_WRITE_4(sc, ALE_DMA_CFG,
	    DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI | DMA_CFG_RCB_64 |
	    sc->ale_dma_rd_burst | reg |
	    sc->ale_dma_wr_burst | DMA_CFG_RXCMB_ENB |
	    ((DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
	    DMA_CFG_RD_DELAY_CNT_MASK) |
	    ((DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
	    DMA_CFG_WR_DELAY_CNT_MASK));

	/*
	 * Hardware can be configured to issue an SMB interrupt based
	 * on a programmed interval.  Since the driver already has a
	 * callout that runs every hz, we use that instead of relying
	 * on the periodic SMB interrupt.
	 */
	CSR_WRITE_4(sc, ALE_SMB_STAT_TIMER, ALE_USECS(0));

	/* Clear MAC statistics. */
	ale_stats_clear(sc);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 * Actual reconfiguration of the MAC for the resolved speed/duplex
	 * follows once link establishment is detected.  The AR81xx always
	 * performs checksum computation regardless of the
	 * MAC_CFG_RXCSUM_ENB bit; in fact, setting the bit causes an Rx
	 * handling issue for fragmented IP datagrams due to a silicon bug.
	 */
	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK);
	if ((sc->ale_flags & ALE_FLAG_FASTETHER) != 0)
		reg |= MAC_CFG_SPEED_10_100;
	else
		reg |= MAC_CFG_SPEED_1000;
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);

	/* Set up the receive filter. */
	ale_rxfilter(sc);
	ale_rxvlan(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_4(sc, ALE_INTR_MASK, ALE_INTRS);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0);

	sc->ale_flags &= ~ALE_FLAG_LINK;

	/* Switch to the current media. */
	mii_mediachg(mii);

	callout_reset(&sc->ale_tick_ch, hz, ale_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}

static void
ale_stop(struct ale_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ale_txdesc *txd;
	uint32_t reg;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	callout_stop(&sc->ale_tick_ch);
	sc->ale_flags &= ~ALE_FLAG_LINK;

	ale_stats_update(sc);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_MASK, 0);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);

	/* Disable queue processing and DMA. */
	reg = CSR_READ_4(sc, ALE_TXQ_CFG);
	reg &= ~TXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALE_TXQ_CFG, reg);
	reg = CSR_READ_4(sc, ALE_RXQ_CFG);
	reg &= ~RXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALE_RXQ_CFG, reg);
	reg = CSR_READ_4(sc, ALE_DMA_CFG);
	reg &= ~(DMA_CFG_TXCMB_ENB | DMA_CFG_RXCMB_ENB);
	CSR_WRITE_4(sc, ALE_DMA_CFG, reg);
	DELAY(1000);

	/* Stop Rx/Tx MACs. */
	ale_stop_mac(sc);

	/* Disable interrupts again? XXX */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Free TX mbufs still in the queues.
	 */
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->ale_cdata.ale_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

static void
ale_stop_mac(struct ale_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
		CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
	}

	for (i = ALE_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALE_IDLE_STATUS);
		if (reg == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->ale_dev,
		    "could not disable Tx/Rx MAC(0x%08x)!\n", reg);
}

static void
ale_init_tx_ring(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	int i;

	sc->ale_cdata.ale_tx_prod = 0;
	sc->ale_cdata.ale_tx_cons = 0;
	sc->ale_cdata.ale_tx_cnt = 0;

	bzero(sc->ale_cdata.ale_tx_ring, ALE_TX_RING_SZ);
	bzero(sc->ale_cdata.ale_tx_cmb, ALE_TX_CMB_SZ);
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		txd->tx_m = NULL;
	}
	*sc->ale_cdata.ale_tx_cmb = 0;
	bus_dmamap_sync(sc->ale_cdata.ale_tx_cmb_tag,
	    sc->ale_cdata.ale_tx_cmb_map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag,
	    sc->ale_cdata.ale_tx_ring_map,
	    BUS_DMASYNC_PREWRITE);
}

static void
ale_init_rx_pages(struct ale_softc *sc)
{
	struct ale_rx_page *rx_page;
	int i;

	sc->ale_cdata.ale_rx_seqno = 0;
	sc->ale_cdata.ale_rx_curp = 0;

	for (i = 0; i < ALE_RX_PAGES; i++) {
		rx_page = &sc->ale_cdata.ale_rx_page[i];
		bzero(rx_page->page_addr, sc->ale_pagesize);
		bzero(rx_page->cmb_addr, ALE_RX_CMB_SZ);
		rx_page->cons = 0;
		*rx_page->cmb_addr = 0;
		bus_dmamap_sync(rx_page->page_tag, rx_page->page_map,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
		    BUS_DMASYNC_PREWRITE);
	}
}

static void
ale_rxvlan(struct ale_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;

	ifp = &sc->arpcom.ac_if;
	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
}

static void
ale_rxfilter(struct ale_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	ifp = &sc->arpcom.ac_if;

	rxcfg = CSR_READ_4(sc, ALE_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxcfg |= MAC_CFG_BCAST;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxcfg |= MAC_CFG_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			rxcfg |= MAC_CFG_ALLMULTI;
		CSR_WRITE_4(sc, ALE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, ALE_MAR1, 0xFFFFFFFF);
		CSR_WRITE_4(sc, ALE_MAC_CFG, rxcfg);
		return;
	}

	/* Program the new filter. */
	bzero(mchash, sizeof(mchash));

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
	}

	CSR_WRITE_4(sc, ALE_MAR0, mchash[0]);
	CSR_WRITE_4(sc, ALE_MAR1, mchash[1]);
	CSR_WRITE_4(sc, ALE_MAC_CFG, rxcfg);
}
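
/*
 * Editorial sketch (not part of the driver): ale_rxfilter() hashes each
 * multicast address with CRC-32 and uses the top bit of the CRC to pick
 * one of the two 32-bit MAR registers, and bits 30..26 to pick the bit
 * within it, giving a 64-bit imperfect hash filter.  Standalone
 * illustration of the bit mapping (the crc value is made up for the
 * example rather than computed from a real address):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t mchash[2] = { 0, 0 };
	uint32_t crc = 0xdeadbeef;	/* pretend ether_crc32 result */

	/* crc >> 31 selects MAR0/MAR1; (crc >> 26) & 0x1f selects a bit. */
	mchash[crc >> 31] |= 1U << ((crc >> 26) & 0x1f);

	/* prints MAR0 0x00000000 MAR1 0x00800000 */
	printf("MAR0 0x%08x MAR1 0x%08x\n", mchash[0], mchash[1]);
	return (0);
}
#endif
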
static int
sysctl_hw_ale_int_mod(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    ALE_IM_TIMER_MIN, ALE_IM_TIMER_MAX));
}

static void
ale_dmamap_buf_cb(void *xctx, bus_dma_segment_t *segs, int nsegs,
		  bus_size_t mapsz __unused, int error)
{
	struct ale_dmamap_ctx *ctx = xctx;
	int i;

	if (error)
		return;

	/* Signal overflow to the caller by zeroing the segment count. */
	if (nsegs > ctx->nsegs) {
		ctx->nsegs = 0;
		return;
	}

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}
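
/*
 * Editorial sketch (not part of the driver): ale_dmamap_buf_cb() is the
 * usual busdma load-callback pattern -- the caller passes a ctx holding
 * the maximum number of segments it can accept, and the callback either
 * copies the segment array out or signals overflow by zeroing nsegs.  A
 * hypothetical caller would use it roughly as below; this fragment
 * assumes local "map" and "m" variables and the ALE_MAXTXSEGS constant
 * from the driver headers, with error handling trimmed.
 */
#if 0
	struct ale_dmamap_ctx ctx;
	bus_dma_segment_t segs[ALE_MAXTXSEGS];
	int error;

	ctx.nsegs = ALE_MAXTXSEGS;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(sc->ale_cdata.ale_tx_tag, map, m,
	    ale_dmamap_buf_cb, &ctx, BUS_DMA_NOWAIT);
	if (error == 0 && ctx.nsegs == 0)
		error = EFBIG;	/* too many segments for the ring */
#endif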