1 /*- 2 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * $FreeBSD: src/sys/dev/alc/if_alc.c,v 1.6 2009/09/29 23:03:16 yongari Exp $ 28 * $DragonFly$ 29 */ 30 31 /* Driver for Atheros AR8131/AR8132 PCIe Ethernet. */ 32 33 #include <sys/cdefs.h> 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/bus.h> 38 #include <sys/endian.h> 39 #include <sys/kernel.h> 40 #include <sys/lock.h> 41 #include <sys/malloc.h> 42 #include <sys/mbuf.h> 43 #include <sys/module.h> 44 #include <sys/spinlock.h> 45 #include <sys/rman.h> 46 #include <sys/queue.h> 47 #include <sys/socket.h> 48 #include <sys/sockio.h> 49 #include <sys/sysctl.h> 50 #include <sys/taskqueue.h> 51 52 #include <net/bpf.h> 53 #include <net/if.h> 54 #include <net/if_arp.h> 55 #include <net/ethernet.h> 56 #include <net/if_dl.h> 57 #include <net/if_llc.h> 58 #include <net/if_media.h> 59 #include <net/if_types.h> 60 #include <net/ifq_var.h> 61 #include <net/vlan/if_vlan_var.h> 62 #include <net/vlan/if_vlan_ether.h> 63 64 #include <netinet/in.h> 65 #include <netinet/in_systm.h> 66 #include <netinet/ip.h> 67 #include <netinet/tcp.h> 68 69 #include <dev/netif/mii_layer/mii.h> 70 #include <dev/netif/mii_layer/miivar.h> 71 72 #include <bus/pci/pcireg.h> 73 #include <bus/pci/pcivar.h> 74 75 #include <machine/atomic.h> 76 /* 77 XXX 78 #include <machine/bus.h> 79 #include <machine/in_cksum.h> 80 */ 81 82 #include "if_alcreg.h" 83 #include "if_alcvar.h" 84 85 /* "device miibus" required. See GENERIC if you get errors here. */ 86 #include "miibus_if.h" 87 #undef ALC_USE_CUSTOM_CSUM 88 89 #ifdef ALC_USE_CUSTOM_CSUM 90 #define ALC_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 91 #else 92 #define ALC_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 93 #endif 94 #ifndef IFCAP_VLAN_HWTSO 95 #define IFCAP_VLAN_HWTSO 0 96 #endif 97 98 MODULE_DEPEND(alc, pci, 1, 1, 1); 99 MODULE_DEPEND(alc, ether, 1, 1, 1); 100 MODULE_DEPEND(alc, miibus, 1, 1, 1); 101 102 /* Tunables. */ 103 static int msi_disable = 0; 104 static int msix_disable = 0; 105 TUNABLE_INT("hw.alc.msi_disable", &msi_disable); 106 TUNABLE_INT("hw.alc.msix_disable", &msix_disable); 107 108 /* 109 * Devices supported by this driver. 
110 */ 111 static struct alc_dev { 112 uint16_t alc_vendorid; 113 uint16_t alc_deviceid; 114 const char *alc_name; 115 } alc_devs[] = { 116 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8131, 117 "Atheros AR8131 PCIe Gigabit Ethernet" }, 118 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8132, 119 "Atheros AR8132 PCIe Fast Ethernet" } 120 }; 121 122 static void alc_aspm(struct alc_softc *); 123 static int alc_attach(device_t); 124 static int alc_check_boundary(struct alc_softc *); 125 static int alc_detach(device_t); 126 static void alc_disable_l0s_l1(struct alc_softc *); 127 static int alc_dma_alloc(struct alc_softc *); 128 static void alc_dma_free(struct alc_softc *); 129 static void alc_dmamap_cb(void *, bus_dma_segment_t *, int, int); 130 static int alc_encap(struct alc_softc *, struct mbuf **); 131 #ifndef __NO_STRICT_ALIGNMENT 132 static struct mbuf * 133 alc_fixup_rx(struct ifnet *, struct mbuf *); 134 #endif 135 static void alc_get_macaddr(struct alc_softc *); 136 static void alc_init(void *); 137 static void alc_init_cmb(struct alc_softc *); 138 static void alc_init_locked(struct alc_softc *); 139 static void alc_init_rr_ring(struct alc_softc *); 140 static int alc_init_rx_ring(struct alc_softc *); 141 static void alc_init_smb(struct alc_softc *); 142 static void alc_init_tx_ring(struct alc_softc *); 143 static void alc_int_task(void *, int); 144 static void alc_intr(void *); 145 static int alc_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 146 static void alc_mac_config(struct alc_softc *); 147 static int alc_miibus_readreg(device_t, int, int); 148 static void alc_miibus_statchg(device_t); 149 static int alc_miibus_writereg(device_t, int, int, int); 150 static int alc_mediachange(struct ifnet *); 151 static void alc_mediastatus(struct ifnet *, struct ifmediareq *); 152 static int alc_newbuf(struct alc_softc *, struct alc_rxdesc *); 153 static void alc_phy_down(struct alc_softc *); 154 static void alc_phy_reset(struct alc_softc *); 155 static int alc_probe(device_t); 156 static void alc_reset(struct alc_softc *); 157 static int alc_resume(device_t); 158 static void alc_rxeof(struct alc_softc *, struct rx_rdesc *); 159 static int alc_rxintr(struct alc_softc *, int); 160 static void alc_rxfilter(struct alc_softc *); 161 static void alc_rxvlan(struct alc_softc *); 162 #if 0 163 static void alc_setlinkspeed(struct alc_softc *); 164 /* XXX: WOL */ 165 static void alc_setwol(struct alc_softc *); 166 #endif 167 static int alc_shutdown(device_t); 168 static void alc_start(struct ifnet *); 169 static void alc_start_queue(struct alc_softc *); 170 static void alc_stats_clear(struct alc_softc *); 171 static void alc_stats_update(struct alc_softc *); 172 static void alc_stop(struct alc_softc *); 173 static void alc_stop_mac(struct alc_softc *); 174 static void alc_stop_queue(struct alc_softc *); 175 static int alc_suspend(device_t); 176 static void alc_sysctl_node(struct alc_softc *); 177 static void alc_tick(void *); 178 static void alc_tx_task(void *, int); 179 static void alc_txeof(struct alc_softc *); 180 static void alc_watchdog(struct alc_softc *); 181 static int sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS); 182 static int sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS); 183 184 static device_method_t alc_methods[] = { 185 /* Device interface. 
*/ 186 DEVMETHOD(device_probe, alc_probe), 187 DEVMETHOD(device_attach, alc_attach), 188 DEVMETHOD(device_detach, alc_detach), 189 DEVMETHOD(device_shutdown, alc_shutdown), 190 DEVMETHOD(device_suspend, alc_suspend), 191 DEVMETHOD(device_resume, alc_resume), 192 193 /* MII interface. */ 194 DEVMETHOD(miibus_readreg, alc_miibus_readreg), 195 DEVMETHOD(miibus_writereg, alc_miibus_writereg), 196 DEVMETHOD(miibus_statchg, alc_miibus_statchg), 197 198 { NULL, NULL } 199 }; 200 201 static driver_t alc_driver = { 202 "alc", 203 alc_methods, 204 sizeof(struct alc_softc) 205 }; 206 207 static devclass_t alc_devclass; 208 209 DRIVER_MODULE(alc, pci, alc_driver, alc_devclass, 0, 0); 210 DRIVER_MODULE(miibus, alc, miibus_driver, miibus_devclass, 0, 0); 211 212 static struct resource_spec alc_res_spec_mem[] = { 213 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, 214 { -1, 0, 0 } 215 }; 216 217 static struct resource_spec alc_irq_spec_legacy[] = { 218 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 219 { -1, 0, 0 } 220 }; 221 222 static struct resource_spec alc_irq_spec_msi[] = { 223 { SYS_RES_IRQ, 1, RF_ACTIVE }, 224 { -1, 0, 0 } 225 }; 226 227 static struct resource_spec alc_irq_spec_msix[] = { 228 { SYS_RES_IRQ, 1, RF_ACTIVE }, 229 { -1, 0, 0 } 230 }; 231 232 static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 }; 233 234 static int 235 alc_miibus_readreg(device_t dev, int phy, int reg) 236 { 237 struct alc_softc *sc; 238 uint32_t v; 239 int i; 240 241 sc = device_get_softc(dev); 242 243 if (phy != sc->alc_phyaddr) 244 return (0); 245 246 /* 247 * For AR8132 fast ethernet controller, do not report 1000baseT 248 * capability to mii(4). Even though AR8132 uses the same 249 * model/revision number of F1 gigabit PHY, the PHY has no 250 * ability to establish 1000baseT link. 
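	 * (Returning 0 for the extended status register, MII_EXTSR,
	 * hides the 1000baseT ability bits from mii(4), so no gigabit
	 * media is attached for AR8132.)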
251 */ 252 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 && 253 reg == MII_EXTSR) 254 return (0); 255 256 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | 257 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 258 for (i = ALC_PHY_TIMEOUT; i > 0; i--) { 259 DELAY(5); 260 v = CSR_READ_4(sc, ALC_MDIO); 261 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 262 break; 263 } 264 265 if (i == 0) { 266 device_printf(sc->alc_dev, "phy read timeout : %d\n", reg); 267 return (0); 268 } 269 270 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); 271 } 272 273 static int 274 alc_miibus_writereg(device_t dev, int phy, int reg, int val) 275 { 276 struct alc_softc *sc; 277 uint32_t v; 278 int i; 279 280 sc = device_get_softc(dev); 281 282 if (phy != sc->alc_phyaddr) 283 return (0); 284 285 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | 286 (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | 287 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 288 for (i = ALC_PHY_TIMEOUT; i > 0; i--) { 289 DELAY(5); 290 v = CSR_READ_4(sc, ALC_MDIO); 291 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 292 break; 293 } 294 295 if (i == 0) 296 device_printf(sc->alc_dev, "phy write timeout : %d\n", reg); 297 298 return (0); 299 } 300 301 static void 302 alc_miibus_statchg(device_t dev) 303 { 304 struct alc_softc *sc; 305 struct mii_data *mii; 306 struct ifnet *ifp; 307 uint32_t reg; 308 309 sc = device_get_softc(dev); 310 311 mii = device_get_softc(sc->alc_miibus); 312 ifp = sc->alc_ifp; 313 if (mii == NULL || ifp == NULL || 314 (ifp->if_flags & IFF_RUNNING) == 0) 315 return; 316 317 sc->alc_flags &= ~ALC_FLAG_LINK; 318 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 319 (IFM_ACTIVE | IFM_AVALID)) { 320 switch (IFM_SUBTYPE(mii->mii_media_active)) { 321 case IFM_10_T: 322 case IFM_100_TX: 323 sc->alc_flags |= ALC_FLAG_LINK; 324 break; 325 case IFM_1000_T: 326 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) 327 sc->alc_flags |= ALC_FLAG_LINK; 328 break; 329 default: 330 break; 331 } 332 } 333 alc_stop_queue(sc); 334 /* Stop Rx/Tx MACs. */ 335 alc_stop_mac(sc); 336 337 /* Program MACs with resolved speed/duplex/flow-control. */ 338 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { 339 alc_start_queue(sc); 340 alc_mac_config(sc); 341 /* Re-enable Tx/Rx MACs. 
*/ 342 reg = CSR_READ_4(sc, ALC_MAC_CFG); 343 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; 344 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 345 } 346 alc_aspm(sc); 347 } 348 349 static void 350 alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 351 { 352 struct alc_softc *sc; 353 struct mii_data *mii; 354 355 sc = ifp->if_softc; 356 ALC_LOCK(sc); 357 if ((ifp->if_flags & IFF_UP) == 0) { 358 ALC_UNLOCK(sc); 359 return; 360 } 361 mii = device_get_softc(sc->alc_miibus); 362 363 mii_pollstat(mii); 364 ALC_UNLOCK(sc); 365 ifmr->ifm_status = mii->mii_media_status; 366 ifmr->ifm_active = mii->mii_media_active; 367 } 368 369 static int 370 alc_mediachange(struct ifnet *ifp) 371 { 372 struct alc_softc *sc; 373 struct mii_data *mii; 374 struct mii_softc *miisc; 375 int error; 376 377 sc = ifp->if_softc; 378 ALC_LOCK(sc); 379 mii = device_get_softc(sc->alc_miibus); 380 if (mii->mii_instance != 0) { 381 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 382 mii_phy_reset(miisc); 383 } 384 error = mii_mediachg(mii); 385 ALC_UNLOCK(sc); 386 387 return (error); 388 } 389 390 static int 391 alc_probe(device_t dev) 392 { 393 struct alc_dev *sp; 394 int i; 395 uint16_t vendor, devid; 396 397 vendor = pci_get_vendor(dev); 398 devid = pci_get_device(dev); 399 sp = alc_devs; 400 for (i = 0; i < sizeof(alc_devs) / sizeof(alc_devs[0]); i++) { 401 if (vendor == sp->alc_vendorid && 402 devid == sp->alc_deviceid) { 403 device_set_desc(dev, sp->alc_name); 404 return (BUS_PROBE_DEFAULT); 405 } 406 sp++; 407 } 408 409 return (ENXIO); 410 } 411 412 static void 413 alc_get_macaddr(struct alc_softc *sc) 414 { 415 uint32_t ea[2], opt; 416 int i; 417 418 opt = CSR_READ_4(sc, ALC_OPT_CFG); 419 if ((CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) { 420 /* 421 * EEPROM found, let TWSI reload EEPROM configuration. 422 * This will set ethernet address of controller. 423 */ 424 if ((opt & OPT_CFG_CLK_ENB) == 0) { 425 opt |= OPT_CFG_CLK_ENB; 426 CSR_WRITE_4(sc, ALC_OPT_CFG, opt); 427 CSR_READ_4(sc, ALC_OPT_CFG); 428 DELAY(1000); 429 } 430 CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) | 431 TWSI_CFG_SW_LD_START); 432 for (i = 100; i > 0; i--) { 433 DELAY(1000); 434 if ((CSR_READ_4(sc, ALC_TWSI_CFG) & 435 TWSI_CFG_SW_LD_START) == 0) 436 break; 437 } 438 if (i == 0) 439 device_printf(sc->alc_dev, 440 "reloading EEPROM timeout!\n"); 441 } else { 442 if (bootverbose) 443 device_printf(sc->alc_dev, "EEPROM not found!\n"); 444 } 445 if ((opt & OPT_CFG_CLK_ENB) != 0) { 446 opt &= ~OPT_CFG_CLK_ENB; 447 CSR_WRITE_4(sc, ALC_OPT_CFG, opt); 448 CSR_READ_4(sc, ALC_OPT_CFG); 449 DELAY(1000); 450 } 451 452 ea[0] = CSR_READ_4(sc, ALC_PAR0); 453 ea[1] = CSR_READ_4(sc, ALC_PAR1); 454 sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF; 455 sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF; 456 sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF; 457 sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF; 458 sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF; 459 sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF; 460 } 461 462 static void 463 alc_disable_l0s_l1(struct alc_softc *sc) 464 { 465 uint32_t pmcfg; 466 467 /* Another magic from vendor. */ 468 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 469 pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 | 470 PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK | 471 PM_CFG_SERDES_PD_EX_L1); 472 pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB | 473 PM_CFG_SERDES_L1_ENB; 474 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 475 } 476 477 static void 478 alc_phy_reset(struct alc_softc *sc) 479 { 480 uint16_t data; 481 482 /* Reset magic from Linux. 
 */
	CSR_WRITE_2(sc, ALC_GPHY_CFG,
	    GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	CSR_WRITE_2(sc, ALC_GPHY_CFG,
	    GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
	    GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	/* Load DSP codes, vendor magic. */
	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
	    ANA_SERDES_EN_LCKDT;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
	    ANA_LONG_CABLE_TH_100_MASK) |
	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
	    ANA_SHORT_CABLE_TH_100_MASK) |
	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
	    ANA_OEN_125M;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);
	DELAY(1000);
}

static void
alc_phy_down(struct alc_softc *sc)
{

	/* Force PHY down.
*/ 544 CSR_WRITE_2(sc, ALC_GPHY_CFG, 545 GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | 546 GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW); 547 DELAY(1000); 548 } 549 550 static void 551 alc_aspm(struct alc_softc *sc) 552 { 553 uint32_t pmcfg; 554 555 ALC_LOCK_ASSERT(sc); 556 557 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 558 pmcfg &= ~PM_CFG_SERDES_PD_EX_L1; 559 pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB; 560 pmcfg |= PM_CFG_SERDES_L1_ENB; 561 pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK; 562 pmcfg |= PM_CFG_MAC_ASPM_CHK; 563 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { 564 pmcfg |= PM_CFG_SERDES_PLL_L1_ENB; 565 pmcfg &= ~PM_CFG_CLK_SWH_L1; 566 pmcfg &= ~PM_CFG_ASPM_L1_ENB; 567 pmcfg &= ~PM_CFG_ASPM_L0S_ENB; 568 } else { 569 pmcfg &= ~PM_CFG_SERDES_PLL_L1_ENB; 570 pmcfg |= PM_CFG_CLK_SWH_L1; 571 pmcfg &= ~PM_CFG_ASPM_L1_ENB; 572 pmcfg &= ~PM_CFG_ASPM_L0S_ENB; 573 } 574 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 575 } 576 577 static int 578 alc_attach(device_t dev) 579 { 580 struct alc_softc *sc; 581 struct ifnet *ifp; 582 char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/l1" }; 583 uint16_t burst; 584 int base, error, i, msic, msixc, state; 585 uint32_t cap, ctl, val; 586 587 error = 0; 588 sc = device_get_softc(dev); 589 sc->alc_dev = dev; 590 591 lockinit(&sc->alc_lock, "alc_lock", 0, LK_CANRECURSE); 592 callout_init_mp(&sc->alc_tick_ch); 593 TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc); 594 595 /* Map the device. */ 596 pci_enable_busmaster(dev); 597 sc->alc_res_spec = alc_res_spec_mem; 598 sc->alc_irq_spec = alc_irq_spec_legacy; 599 error = bus_alloc_resources(dev, sc->alc_res_spec, sc->alc_res); 600 if (error != 0) { 601 device_printf(dev, "cannot allocate memory resources.\n"); 602 goto fail; 603 } 604 605 /* Set PHY address. */ 606 sc->alc_phyaddr = ALC_PHY_ADDR; 607 608 /* Initialize DMA parameters. */ 609 sc->alc_dma_rd_burst = 0; 610 sc->alc_dma_wr_burst = 0; 611 sc->alc_rcb = DMA_CFG_RCB_64; 612 if (pci_find_extcap(dev, PCIY_EXPRESS, &base) == 0) { 613 sc->alc_flags |= ALC_FLAG_PCIE; 614 burst = CSR_READ_2(sc, base + PCIR_EXPRESS_DEVICE_CTL); 615 sc->alc_dma_rd_burst = 616 (burst & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12; 617 sc->alc_dma_wr_burst = (burst & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5; 618 if (bootverbose) { 619 device_printf(dev, "Read request size : %u bytes.\n", 620 alc_dma_burst[sc->alc_dma_rd_burst]); 621 device_printf(dev, "TLP payload size : %u bytes.\n", 622 alc_dma_burst[sc->alc_dma_wr_burst]); 623 } 624 /* Clear data link and flow-control protocol error. */ 625 val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV); 626 val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP); 627 CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val); 628 /* Disable ASPM L0S and L1. */ 629 cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CAP); 630 if ((cap & PCIM_LINK_CAP_ASPM) != 0) { 631 ctl = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CTL); 632 if ((ctl & 0x08) != 0) 633 sc->alc_rcb = DMA_CFG_RCB_128; 634 if (bootverbose) 635 device_printf(dev, "RCB %u bytes\n", 636 sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128); 637 state = ctl & 0x03; 638 if (bootverbose) 639 device_printf(sc->alc_dev, "ASPM %s %s\n", 640 aspm_state[state], 641 state == 0 ? "disabled" : "enabled"); 642 if (state != 0) 643 alc_disable_l0s_l1(sc); 644 } 645 } 646 647 /* Reset PHY. */ 648 alc_phy_reset(sc); 649 650 /* Reset the ethernet controller. */ 651 alc_reset(sc); 652 653 /* 654 * One odd thing is AR8132 uses the same PHY hardware(F1 655 * gigabit PHY) of AR8131. 
So atphy(4) of AR8132 reports
	 * the PHY supports 1000Mbps but that's not true. The PHY
	 * used in AR8132 can't establish gigabit link even if it
	 * shows the same PHY model/revision number of AR8131.
	 */
	if (pci_get_device(dev) == DEVICEID_ATHEROS_AR8132)
		sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_JUMBO;
	else
		sc->alc_flags |= ALC_FLAG_JUMBO | ALC_FLAG_ASPM_MON;
	/*
	 * It seems that AR8131/AR8132 has a silicon bug for SMB. In
	 * addition, Atheros said that enabling SMB wouldn't improve
	 * performance. However I think it's bad to access lots of
	 * registers to extract MAC statistics.
	 */
	sc->alc_flags |= ALC_FLAG_SMB_BUG;
	/*
	 * Don't use Tx CMB. It is known to have a silicon bug.
	 */
	sc->alc_flags |= ALC_FLAG_CMB_BUG;
	sc->alc_rev = pci_get_revid(dev);
	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->alc_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->alc_chip_rev);
	}
	device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n",
	    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
	    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);

	/* Allocate IRQ resources. */
	msixc = pci_msix_count(dev);
	msic = pci_msi_count(dev);
	if (bootverbose) {
		device_printf(dev, "MSIX count : %d\n", msixc);
		device_printf(dev, "MSI count : %d\n", msic);
	}
	/* Prefer MSIX over MSI. */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 && msixc == ALC_MSIX_MESSAGES &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == ALC_MSIX_MESSAGES) {
				device_printf(dev,
				    "Using %d MSIX message(s).\n", msixc);
				sc->alc_flags |= ALC_FLAG_MSIX;
				sc->alc_irq_spec = alc_irq_spec_msix;
			} else
				pci_release_msi(dev);
		}
		if (msi_disable == 0 && (sc->alc_flags & ALC_FLAG_MSIX) == 0 &&
		    msic == ALC_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == ALC_MSI_MESSAGES) {
				device_printf(dev,
				    "Using %d MSI message(s).\n", msic);
				sc->alc_flags |= ALC_FLAG_MSI;
				sc->alc_irq_spec = alc_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
	}

	error = bus_alloc_resources(dev, sc->alc_irq_spec, sc->alc_irq);
	if (error != 0) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		goto fail;
	}

	/* Create device sysctl node. */
	alc_sysctl_node(sc);

	if ((error = alc_dma_alloc(sc)) != 0)
		goto fail;

	/* Load station address. */
	alc_get_macaddr(sc);

	ifp = sc->alc_ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = alc_ioctl;
	ifp->if_start = alc_start;
	ifp->if_init = alc_init;
	ifp->if_snd.ifq_maxlen = ALC_TX_RING_CNT - 1;
	ifq_set_maxlen(&ifp->if_snd, ifp->if_snd.ifq_maxlen);
	ifq_set_ready(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
	ifp->if_hwassist = ALC_CSUM_FEATURES | CSUM_TSO;
#if 0
	/* XXX: WOL */
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus.
*/ 755 if ((error = mii_phy_probe(dev, &sc->alc_miibus, alc_mediachange, 756 alc_mediastatus)) != 0) { 757 device_printf(dev, "no PHY found!\n"); 758 goto fail; 759 } 760 761 ether_ifattach(ifp, sc->alc_eaddr, NULL); 762 763 /* VLAN capability setup. */ 764 ifp->if_capabilities |= IFCAP_VLAN_MTU; 765 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; 766 ifp->if_capenable = ifp->if_capabilities; 767 /* 768 * XXX 769 * It seems enabling Tx checksum offloading makes more trouble. 770 * Sometimes the controller does not receive any frames when 771 * Tx checksum offloading is enabled. I'm not sure whether this 772 * is a bug in Tx checksum offloading logic or I got broken 773 * sample boards. To safety, don't enable Tx checksum offloading 774 * by default but give chance to users to toggle it if they know 775 * their controllers work without problems. 776 */ 777 ifp->if_capenable &= ~IFCAP_TXCSUM; 778 ifp->if_hwassist &= ~ALC_CSUM_FEATURES; 779 780 /* Tell the upper layer(s) we support long frames. */ 781 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 782 783 /* Create local taskq. */ 784 TASK_INIT(&sc->alc_tx_task, 1, alc_tx_task, ifp); 785 sc->alc_tq = taskqueue_create("alc_taskq", M_WAITOK, 786 taskqueue_thread_enqueue, &sc->alc_tq); 787 if (sc->alc_tq == NULL) { 788 device_printf(dev, "could not create taskqueue.\n"); 789 ether_ifdetach(ifp); 790 error = ENXIO; 791 goto fail; 792 } 793 taskqueue_start_threads(&sc->alc_tq, 1, TDPRI_KERN_DAEMON, -1, "%s taskq", 794 device_get_nameunit(sc->alc_dev)); 795 796 if ((sc->alc_flags & ALC_FLAG_MSIX) != 0) 797 msic = ALC_MSIX_MESSAGES; 798 else if ((sc->alc_flags & ALC_FLAG_MSI) != 0) 799 msic = ALC_MSI_MESSAGES; 800 else 801 msic = 1; 802 for (i = 0; i < msic; i++) { 803 error = bus_setup_intr(dev, sc->alc_irq[i], INTR_MPSAFE, 804 alc_intr, sc, 805 &sc->alc_intrhand[i], NULL); 806 if (error != 0) 807 break; 808 } 809 if (error != 0) { 810 device_printf(dev, "could not set up interrupt handler.\n"); 811 taskqueue_free(sc->alc_tq); 812 sc->alc_tq = NULL; 813 ether_ifdetach(ifp); 814 goto fail; 815 } 816 817 fail: 818 if (error != 0) 819 alc_detach(dev); 820 821 return (error); 822 } 823 824 static int 825 alc_detach(device_t dev) 826 { 827 struct alc_softc *sc; 828 struct ifnet *ifp; 829 int i, msic; 830 831 sc = device_get_softc(dev); 832 833 ifp = sc->alc_ifp; 834 if (device_is_attached(dev)) { 835 ALC_LOCK(sc); 836 sc->alc_flags |= ALC_FLAG_DETACH; 837 alc_stop(sc); 838 ALC_UNLOCK(sc); 839 #if 0 840 /* XXX */ 841 callout_drain(&sc->alc_tick_ch); 842 #endif 843 taskqueue_drain(sc->alc_tq, &sc->alc_int_task); 844 taskqueue_drain(sc->alc_tq, &sc->alc_tx_task); 845 ether_ifdetach(ifp); 846 } 847 848 if (sc->alc_tq != NULL) { 849 taskqueue_drain(sc->alc_tq, &sc->alc_int_task); 850 taskqueue_free(sc->alc_tq); 851 sc->alc_tq = NULL; 852 } 853 854 if (sc->alc_miibus != NULL) { 855 device_delete_child(dev, sc->alc_miibus); 856 sc->alc_miibus = NULL; 857 } 858 bus_generic_detach(dev); 859 alc_dma_free(sc); 860 861 if (ifp != NULL) { 862 // XXX? 
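		/*
		 * ifp is the arpcom embedded in the softc (see
		 * alc_attach()), not a separately allocated ifnet, so
		 * if_free() here is suspect; that is presumably what
		 * the XXX above refers to.
		 */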
if_free(ifp); 863 sc->alc_ifp = NULL; 864 } 865 866 if ((sc->alc_flags & ALC_FLAG_MSIX) != 0) 867 msic = ALC_MSIX_MESSAGES; 868 else if ((sc->alc_flags & ALC_FLAG_MSI) != 0) 869 msic = ALC_MSI_MESSAGES; 870 else 871 msic = 1; 872 for (i = 0; i < msic; i++) { 873 if (sc->alc_intrhand[i] != NULL) { 874 bus_teardown_intr(dev, sc->alc_irq[i], 875 sc->alc_intrhand[i]); 876 sc->alc_intrhand[i] = NULL; 877 } 878 } 879 if (sc->alc_res[0] != NULL) 880 alc_phy_down(sc); 881 bus_release_resources(dev, sc->alc_irq_spec, sc->alc_irq); 882 if ((sc->alc_flags & (ALC_FLAG_MSI | ALC_FLAG_MSIX)) != 0) 883 pci_release_msi(dev); 884 bus_release_resources(dev, sc->alc_res_spec, sc->alc_res); 885 lockuninit(&sc->alc_lock); 886 887 return (0); 888 } 889 890 #define ALC_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 891 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 892 #define ALC_SYSCTL_STAT_ADD64(c, h, n, p, d) \ 893 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 894 895 static void 896 alc_sysctl_node(struct alc_softc *sc) 897 { 898 struct sysctl_ctx_list *ctx; 899 struct sysctl_oid *tree; 900 struct sysctl_oid_list *child, *parent; 901 struct alc_hw_stats *stats; 902 int error; 903 904 stats = &sc->alc_stats; 905 ctx = &sc->alc_sysctl_ctx; 906 sysctl_ctx_init(ctx); 907 908 tree = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw), 909 OID_AUTO, 910 device_get_nameunit(sc->alc_dev), 911 CTLFLAG_RD, 0, ""); 912 if (tree == NULL) { 913 device_printf(sc->alc_dev, "can't add sysctl node\n"); 914 return; 915 } 916 child = SYSCTL_CHILDREN(tree); 917 918 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod", 919 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_rx_mod, 0, 920 sysctl_hw_alc_int_mod, "I", "alc Rx interrupt moderation"); 921 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod", 922 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_tx_mod, 0, 923 sysctl_hw_alc_int_mod, "I", "alc Tx interrupt moderation"); 924 /* Pull in device tunables. */ 925 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT; 926 error = resource_int_value(device_get_name(sc->alc_dev), 927 device_get_unit(sc->alc_dev), "int_rx_mod", &sc->alc_int_rx_mod); 928 if (error == 0) { 929 if (sc->alc_int_rx_mod < ALC_IM_TIMER_MIN || 930 sc->alc_int_rx_mod > ALC_IM_TIMER_MAX) { 931 device_printf(sc->alc_dev, "int_rx_mod value out of " 932 "range; using default: %d\n", 933 ALC_IM_RX_TIMER_DEFAULT); 934 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT; 935 } 936 } 937 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT; 938 error = resource_int_value(device_get_name(sc->alc_dev), 939 device_get_unit(sc->alc_dev), "int_tx_mod", &sc->alc_int_tx_mod); 940 if (error == 0) { 941 if (sc->alc_int_tx_mod < ALC_IM_TIMER_MIN || 942 sc->alc_int_tx_mod > ALC_IM_TIMER_MAX) { 943 device_printf(sc->alc_dev, "int_tx_mod value out of " 944 "range; using default: %d\n", 945 ALC_IM_TX_TIMER_DEFAULT); 946 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT; 947 } 948 } 949 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit", 950 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0, 951 sysctl_hw_alc_proc_limit, "I", 952 "max number of Rx events to process"); 953 /* Pull in device tunables. 
*/ 954 sc->alc_process_limit = ALC_PROC_DEFAULT; 955 error = resource_int_value(device_get_name(sc->alc_dev), 956 device_get_unit(sc->alc_dev), "process_limit", 957 &sc->alc_process_limit); 958 if (error == 0) { 959 if (sc->alc_process_limit < ALC_PROC_MIN || 960 sc->alc_process_limit > ALC_PROC_MAX) { 961 device_printf(sc->alc_dev, 962 "process_limit value out of range; " 963 "using default: %d\n", ALC_PROC_DEFAULT); 964 sc->alc_process_limit = ALC_PROC_DEFAULT; 965 } 966 } 967 968 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 969 NULL, "ALC statistics"); 970 parent = SYSCTL_CHILDREN(tree); 971 972 /* Rx statistics. */ 973 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 974 NULL, "Rx MAC statistics"); 975 child = SYSCTL_CHILDREN(tree); 976 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 977 &stats->rx_frames, "Good frames"); 978 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", 979 &stats->rx_bcast_frames, "Good broadcast frames"); 980 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", 981 &stats->rx_mcast_frames, "Good multicast frames"); 982 ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 983 &stats->rx_pause_frames, "Pause control frames"); 984 ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames", 985 &stats->rx_control_frames, "Control frames"); 986 ALC_SYSCTL_STAT_ADD32(ctx, child, "crc_errs", 987 &stats->rx_crcerrs, "CRC errors"); 988 ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 989 &stats->rx_lenerrs, "Frames with length mismatched"); 990 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets", 991 &stats->rx_bytes, "Good octets"); 992 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets", 993 &stats->rx_bcast_bytes, "Good broadcast octets"); 994 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets", 995 &stats->rx_mcast_bytes, "Good multicast octets"); 996 ALC_SYSCTL_STAT_ADD32(ctx, child, "runts", 997 &stats->rx_runts, "Too short frames"); 998 ALC_SYSCTL_STAT_ADD32(ctx, child, "fragments", 999 &stats->rx_fragments, "Fragmented frames"); 1000 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 1001 &stats->rx_pkts_64, "64 bytes frames"); 1002 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 1003 &stats->rx_pkts_65_127, "65 to 127 bytes frames"); 1004 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 1005 &stats->rx_pkts_128_255, "128 to 255 bytes frames"); 1006 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 1007 &stats->rx_pkts_256_511, "256 to 511 bytes frames"); 1008 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 1009 &stats->rx_pkts_512_1023, "512 to 1023 bytes frames"); 1010 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 1011 &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames"); 1012 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", 1013 &stats->rx_pkts_1519_max, "1519 to max frames"); 1014 ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs", 1015 &stats->rx_pkts_truncated, "Truncated frames due to MTU size"); 1016 ALC_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", 1017 &stats->rx_fifo_oflows, "FIFO overflows"); 1018 ALC_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs", 1019 &stats->rx_rrs_errs, "Return status write-back errors"); 1020 ALC_SYSCTL_STAT_ADD32(ctx, child, "align_errs", 1021 &stats->rx_alignerrs, "Alignment errors"); 1022 ALC_SYSCTL_STAT_ADD32(ctx, child, "filtered", 1023 &stats->rx_pkts_filtered, 1024 "Frames dropped due to address filtering"); 1025 1026 /* Tx statistics. 
*/ 1027 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 1028 NULL, "Tx MAC statistics"); 1029 child = SYSCTL_CHILDREN(tree); 1030 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 1031 &stats->tx_frames, "Good frames"); 1032 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", 1033 &stats->tx_bcast_frames, "Good broadcast frames"); 1034 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", 1035 &stats->tx_mcast_frames, "Good multicast frames"); 1036 ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 1037 &stats->tx_pause_frames, "Pause control frames"); 1038 ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames", 1039 &stats->tx_control_frames, "Control frames"); 1040 ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_defers", 1041 &stats->tx_excess_defer, "Frames with excessive derferrals"); 1042 ALC_SYSCTL_STAT_ADD32(ctx, child, "defers", 1043 &stats->tx_excess_defer, "Frames with derferrals"); 1044 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets", 1045 &stats->tx_bytes, "Good octets"); 1046 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets", 1047 &stats->tx_bcast_bytes, "Good broadcast octets"); 1048 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets", 1049 &stats->tx_mcast_bytes, "Good multicast octets"); 1050 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 1051 &stats->tx_pkts_64, "64 bytes frames"); 1052 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 1053 &stats->tx_pkts_65_127, "65 to 127 bytes frames"); 1054 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 1055 &stats->tx_pkts_128_255, "128 to 255 bytes frames"); 1056 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 1057 &stats->tx_pkts_256_511, "256 to 511 bytes frames"); 1058 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 1059 &stats->tx_pkts_512_1023, "512 to 1023 bytes frames"); 1060 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 1061 &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames"); 1062 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", 1063 &stats->tx_pkts_1519_max, "1519 to max frames"); 1064 ALC_SYSCTL_STAT_ADD32(ctx, child, "single_colls", 1065 &stats->tx_single_colls, "Single collisions"); 1066 ALC_SYSCTL_STAT_ADD32(ctx, child, "multi_colls", 1067 &stats->tx_multi_colls, "Multiple collisions"); 1068 ALC_SYSCTL_STAT_ADD32(ctx, child, "late_colls", 1069 &stats->tx_late_colls, "Late collisions"); 1070 ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls", 1071 &stats->tx_excess_colls, "Excessive collisions"); 1072 ALC_SYSCTL_STAT_ADD32(ctx, child, "abort", 1073 &stats->tx_abort, "Aborted frames due to Excessive collisions"); 1074 ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns", 1075 &stats->tx_underrun, "FIFO underruns"); 1076 ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns", 1077 &stats->tx_desc_underrun, "Descriptor write-back errors"); 1078 ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 1079 &stats->tx_lenerrs, "Frames with length mismatched"); 1080 ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs", 1081 &stats->tx_pkts_truncated, "Truncated frames due to MTU size"); 1082 } 1083 1084 #undef ALC_SYSCTL_STAT_ADD32 1085 #undef ALC_SYSCTL_STAT_ADD64 1086 1087 struct alc_dmamap_arg { 1088 bus_addr_t alc_busaddr; 1089 }; 1090 1091 static void 1092 alc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1093 { 1094 struct alc_dmamap_arg *ctx; 1095 1096 if (error != 0) 1097 return; 1098 1099 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1100 1101 ctx = (struct alc_dmamap_arg *)arg; 1102 ctx->alc_busaddr = segs[0].ds_addr; 1103 } 1104 1105 /* 1106 * Normal and 
high Tx descriptors shares single Tx high address. 1107 * Four Rx descriptor/return rings and CMB shares the same Rx 1108 * high address. 1109 */ 1110 static int 1111 alc_check_boundary(struct alc_softc *sc) 1112 { 1113 bus_addr_t cmb_end, rx_ring_end, rr_ring_end, tx_ring_end; 1114 1115 rx_ring_end = sc->alc_rdata.alc_rx_ring_paddr + ALC_RX_RING_SZ; 1116 rr_ring_end = sc->alc_rdata.alc_rr_ring_paddr + ALC_RR_RING_SZ; 1117 cmb_end = sc->alc_rdata.alc_cmb_paddr + ALC_CMB_SZ; 1118 tx_ring_end = sc->alc_rdata.alc_tx_ring_paddr + ALC_TX_RING_SZ; 1119 1120 /* 4GB boundary crossing is not allowed. */ 1121 if ((ALC_ADDR_HI(rx_ring_end) != 1122 ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr)) || 1123 (ALC_ADDR_HI(rr_ring_end) != 1124 ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr)) || 1125 (ALC_ADDR_HI(cmb_end) != 1126 ALC_ADDR_HI(sc->alc_rdata.alc_cmb_paddr)) || 1127 (ALC_ADDR_HI(tx_ring_end) != 1128 ALC_ADDR_HI(sc->alc_rdata.alc_tx_ring_paddr))) 1129 return (EFBIG); 1130 /* 1131 * Make sure Rx return descriptor/Rx descriptor/CMB use 1132 * the same high address. 1133 */ 1134 if ((ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(rr_ring_end)) || 1135 (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(cmb_end))) 1136 return (EFBIG); 1137 1138 return (0); 1139 } 1140 1141 static int 1142 alc_dma_alloc(struct alc_softc *sc) 1143 { 1144 struct alc_txdesc *txd; 1145 struct alc_rxdesc *rxd; 1146 bus_addr_t lowaddr; 1147 struct alc_dmamap_arg ctx; 1148 int error, i; 1149 1150 lowaddr = BUS_SPACE_MAXADDR; 1151 again: 1152 /* Create parent DMA tag. */ 1153 error = bus_dma_tag_create( 1154 sc->alc_cdata.alc_parent_tag, /* parent */ 1155 1, 0, /* alignment, boundary */ 1156 lowaddr, /* lowaddr */ 1157 BUS_SPACE_MAXADDR, /* highaddr */ 1158 NULL, NULL, /* filter, filterarg */ 1159 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1160 0, /* nsegments */ 1161 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1162 0, /* flags */ 1163 &sc->alc_cdata.alc_parent_tag); 1164 if (error != 0) { 1165 device_printf(sc->alc_dev, 1166 "could not create parent DMA tag.\n"); 1167 goto fail; 1168 } 1169 1170 /* Create DMA tag for Tx descriptor ring. */ 1171 error = bus_dma_tag_create( 1172 sc->alc_cdata.alc_parent_tag, /* parent */ 1173 ALC_TX_RING_ALIGN, 0, /* alignment, boundary */ 1174 BUS_SPACE_MAXADDR, /* lowaddr */ 1175 BUS_SPACE_MAXADDR, /* highaddr */ 1176 NULL, NULL, /* filter, filterarg */ 1177 ALC_TX_RING_SZ, /* maxsize */ 1178 1, /* nsegments */ 1179 ALC_TX_RING_SZ, /* maxsegsize */ 1180 0, /* flags */ 1181 &sc->alc_cdata.alc_tx_ring_tag); 1182 if (error != 0) { 1183 device_printf(sc->alc_dev, 1184 "could not create Tx ring DMA tag.\n"); 1185 goto fail; 1186 } 1187 1188 /* Create DMA tag for Rx free descriptor ring. */ 1189 error = bus_dma_tag_create( 1190 sc->alc_cdata.alc_parent_tag, /* parent */ 1191 ALC_RX_RING_ALIGN, 0, /* alignment, boundary */ 1192 BUS_SPACE_MAXADDR, /* lowaddr */ 1193 BUS_SPACE_MAXADDR, /* highaddr */ 1194 NULL, NULL, /* filter, filterarg */ 1195 ALC_RX_RING_SZ, /* maxsize */ 1196 1, /* nsegments */ 1197 ALC_RX_RING_SZ, /* maxsegsize */ 1198 0, /* flags */ 1199 &sc->alc_cdata.alc_rx_ring_tag); 1200 if (error != 0) { 1201 device_printf(sc->alc_dev, 1202 "could not create Rx ring DMA tag.\n"); 1203 goto fail; 1204 } 1205 /* Create DMA tag for Rx return descriptor ring. 
*/ 1206 error = bus_dma_tag_create( 1207 sc->alc_cdata.alc_parent_tag, /* parent */ 1208 ALC_RR_RING_ALIGN, 0, /* alignment, boundary */ 1209 BUS_SPACE_MAXADDR, /* lowaddr */ 1210 BUS_SPACE_MAXADDR, /* highaddr */ 1211 NULL, NULL, /* filter, filterarg */ 1212 ALC_RR_RING_SZ, /* maxsize */ 1213 1, /* nsegments */ 1214 ALC_RR_RING_SZ, /* maxsegsize */ 1215 0, /* flags */ 1216 &sc->alc_cdata.alc_rr_ring_tag); 1217 if (error != 0) { 1218 device_printf(sc->alc_dev, 1219 "could not create Rx return ring DMA tag.\n"); 1220 goto fail; 1221 } 1222 1223 /* Create DMA tag for coalescing message block. */ 1224 error = bus_dma_tag_create( 1225 sc->alc_cdata.alc_parent_tag, /* parent */ 1226 ALC_CMB_ALIGN, 0, /* alignment, boundary */ 1227 BUS_SPACE_MAXADDR, /* lowaddr */ 1228 BUS_SPACE_MAXADDR, /* highaddr */ 1229 NULL, NULL, /* filter, filterarg */ 1230 ALC_CMB_SZ, /* maxsize */ 1231 1, /* nsegments */ 1232 ALC_CMB_SZ, /* maxsegsize */ 1233 0, /* flags */ 1234 &sc->alc_cdata.alc_cmb_tag); 1235 if (error != 0) { 1236 device_printf(sc->alc_dev, 1237 "could not create CMB DMA tag.\n"); 1238 goto fail; 1239 } 1240 /* Create DMA tag for status message block. */ 1241 error = bus_dma_tag_create( 1242 sc->alc_cdata.alc_parent_tag, /* parent */ 1243 ALC_SMB_ALIGN, 0, /* alignment, boundary */ 1244 BUS_SPACE_MAXADDR, /* lowaddr */ 1245 BUS_SPACE_MAXADDR, /* highaddr */ 1246 NULL, NULL, /* filter, filterarg */ 1247 ALC_SMB_SZ, /* maxsize */ 1248 1, /* nsegments */ 1249 ALC_SMB_SZ, /* maxsegsize */ 1250 0, /* flags */ 1251 &sc->alc_cdata.alc_smb_tag); 1252 if (error != 0) { 1253 device_printf(sc->alc_dev, 1254 "could not create SMB DMA tag.\n"); 1255 goto fail; 1256 } 1257 1258 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 1259 error = bus_dmamem_alloc(sc->alc_cdata.alc_tx_ring_tag, 1260 (void **)&sc->alc_rdata.alc_tx_ring, 1261 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1262 &sc->alc_cdata.alc_tx_ring_map); 1263 if (error != 0) { 1264 device_printf(sc->alc_dev, 1265 "could not allocate DMA'able memory for Tx ring.\n"); 1266 goto fail; 1267 } 1268 ctx.alc_busaddr = 0; 1269 error = bus_dmamap_load(sc->alc_cdata.alc_tx_ring_tag, 1270 sc->alc_cdata.alc_tx_ring_map, sc->alc_rdata.alc_tx_ring, 1271 ALC_TX_RING_SZ, alc_dmamap_cb, &ctx, 0); 1272 if (error != 0 || ctx.alc_busaddr == 0) { 1273 device_printf(sc->alc_dev, 1274 "could not load DMA'able memory for Tx ring.\n"); 1275 goto fail; 1276 } 1277 sc->alc_rdata.alc_tx_ring_paddr = ctx.alc_busaddr; 1278 1279 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 1280 error = bus_dmamem_alloc(sc->alc_cdata.alc_rx_ring_tag, 1281 (void **)&sc->alc_rdata.alc_rx_ring, 1282 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1283 &sc->alc_cdata.alc_rx_ring_map); 1284 if (error != 0) { 1285 device_printf(sc->alc_dev, 1286 "could not allocate DMA'able memory for Rx ring.\n"); 1287 goto fail; 1288 } 1289 ctx.alc_busaddr = 0; 1290 error = bus_dmamap_load(sc->alc_cdata.alc_rx_ring_tag, 1291 sc->alc_cdata.alc_rx_ring_map, sc->alc_rdata.alc_rx_ring, 1292 ALC_RX_RING_SZ, alc_dmamap_cb, &ctx, 0); 1293 if (error != 0 || ctx.alc_busaddr == 0) { 1294 device_printf(sc->alc_dev, 1295 "could not load DMA'able memory for Rx ring.\n"); 1296 goto fail; 1297 } 1298 sc->alc_rdata.alc_rx_ring_paddr = ctx.alc_busaddr; 1299 1300 /* Allocate DMA'able memory and load the DMA map for Rx return ring. 
 */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_rr_ring_tag,
	    (void **)&sc->alc_rdata.alc_rr_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_rr_ring_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_rr_ring_tag,
	    sc->alc_cdata.alc_rr_ring_map, sc->alc_rdata.alc_rr_ring,
	    ALC_RR_RING_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	sc->alc_rdata.alc_rr_ring_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for CMB. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_cmb_tag,
	    (void **)&sc->alc_rdata.alc_cmb,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_cmb_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for CMB.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_cmb_tag,
	    sc->alc_cdata.alc_cmb_map, sc->alc_rdata.alc_cmb,
	    ALC_CMB_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for CMB.\n");
		goto fail;
	}
	sc->alc_rdata.alc_cmb_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for SMB. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_smb_tag,
	    (void **)&sc->alc_rdata.alc_smb,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_smb_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for SMB.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_smb_tag,
	    sc->alc_cdata.alc_smb_map, sc->alc_rdata.alc_smb,
	    ALC_SMB_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for SMB.\n");
		goto fail;
	}
	sc->alc_rdata.alc_smb_paddr = ctx.alc_busaddr;

	/* Make sure we've not crossed 4GB boundary. */
	if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
	    (error = alc_check_boundary(sc)) != 0) {
		device_printf(sc->alc_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA addressing mode.\n");
		alc_dma_free(sc);
		/*
		 * Limit max allowable DMA address space to 32bit
		 * and try again.
		 */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/*
	 * Create Tx buffer parent tag.
	 * AR8131/AR8132 allows 64bit DMA addressing of Tx/Rx buffers,
	 * so it needs a separate parent DMA tag as the parent DMA
	 * address space could be restricted to be within 32bit address
	 * space by 4GB boundary crossing.
1383 */ 1384 error = bus_dma_tag_create( 1385 sc->alc_cdata.alc_parent_tag, /* parent */ 1386 1, 0, /* alignment, boundary */ 1387 BUS_SPACE_MAXADDR, /* lowaddr */ 1388 BUS_SPACE_MAXADDR, /* highaddr */ 1389 NULL, NULL, /* filter, filterarg */ 1390 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1391 0, /* nsegments */ 1392 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1393 0, /* flags */ 1394 &sc->alc_cdata.alc_buffer_tag); 1395 if (error != 0) { 1396 device_printf(sc->alc_dev, 1397 "could not create parent buffer DMA tag.\n"); 1398 goto fail; 1399 } 1400 1401 /* Create DMA tag for Tx buffers. */ 1402 error = bus_dma_tag_create( 1403 sc->alc_cdata.alc_buffer_tag, /* parent */ 1404 1, 0, /* alignment, boundary */ 1405 BUS_SPACE_MAXADDR, /* lowaddr */ 1406 BUS_SPACE_MAXADDR, /* highaddr */ 1407 NULL, NULL, /* filter, filterarg */ 1408 ALC_TSO_MAXSIZE, /* maxsize */ 1409 ALC_MAXTXSEGS, /* nsegments */ 1410 ALC_TSO_MAXSEGSIZE, /* maxsegsize */ 1411 0, /* flags */ 1412 &sc->alc_cdata.alc_tx_tag); 1413 if (error != 0) { 1414 device_printf(sc->alc_dev, "could not create Tx DMA tag.\n"); 1415 goto fail; 1416 } 1417 1418 /* Create DMA tag for Rx buffers. */ 1419 error = bus_dma_tag_create( 1420 sc->alc_cdata.alc_buffer_tag, /* parent */ 1421 ALC_RX_BUF_ALIGN, 0, /* alignment, boundary */ 1422 BUS_SPACE_MAXADDR, /* lowaddr */ 1423 BUS_SPACE_MAXADDR, /* highaddr */ 1424 NULL, NULL, /* filter, filterarg */ 1425 MCLBYTES, /* maxsize */ 1426 1, /* nsegments */ 1427 MCLBYTES, /* maxsegsize */ 1428 0, /* flags */ 1429 &sc->alc_cdata.alc_rx_tag); 1430 if (error != 0) { 1431 device_printf(sc->alc_dev, "could not create Rx DMA tag.\n"); 1432 goto fail; 1433 } 1434 /* Create DMA maps for Tx buffers. */ 1435 for (i = 0; i < ALC_TX_RING_CNT; i++) { 1436 txd = &sc->alc_cdata.alc_txdesc[i]; 1437 txd->tx_m = NULL; 1438 txd->tx_dmamap = NULL; 1439 error = bus_dmamap_create(sc->alc_cdata.alc_tx_tag, 1440 BUS_DMA_WAITOK, &txd->tx_dmamap); 1441 if (error != 0) { 1442 device_printf(sc->alc_dev, 1443 "could not create Tx dmamap.\n"); 1444 goto fail; 1445 } 1446 } 1447 /* Create DMA maps for Rx buffers. */ 1448 error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 1449 BUS_DMA_WAITOK, 1450 &sc->alc_cdata.alc_rx_sparemap); 1451 if (error) { 1452 device_printf(sc->alc_dev, 1453 "could not create spare Rx dmamap.\n"); 1454 goto fail; 1455 } 1456 for (i = 0; i < ALC_RX_RING_CNT; i++) { 1457 rxd = &sc->alc_cdata.alc_rxdesc[i]; 1458 rxd->rx_m = NULL; 1459 rxd->rx_dmamap = NULL; 1460 error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 1461 BUS_DMA_WAITOK, 1462 &rxd->rx_dmamap); 1463 if (error != 0) { 1464 device_printf(sc->alc_dev, 1465 "could not create Rx dmamap.\n"); 1466 goto fail; 1467 } 1468 } 1469 1470 fail: 1471 return (error); 1472 } 1473 1474 static void 1475 alc_dma_free(struct alc_softc *sc) 1476 { 1477 struct alc_txdesc *txd; 1478 struct alc_rxdesc *rxd; 1479 int i; 1480 1481 /* Tx buffers. 
*/ 1482 if (sc->alc_cdata.alc_tx_tag != NULL) { 1483 for (i = 0; i < ALC_TX_RING_CNT; i++) { 1484 txd = &sc->alc_cdata.alc_txdesc[i]; 1485 if (txd->tx_dmamap != NULL) { 1486 bus_dmamap_destroy(sc->alc_cdata.alc_tx_tag, 1487 txd->tx_dmamap); 1488 txd->tx_dmamap = NULL; 1489 } 1490 } 1491 bus_dma_tag_destroy(sc->alc_cdata.alc_tx_tag); 1492 sc->alc_cdata.alc_tx_tag = NULL; 1493 } 1494 /* Rx buffers */ 1495 if (sc->alc_cdata.alc_rx_tag != NULL) { 1496 for (i = 0; i < ALC_RX_RING_CNT; i++) { 1497 rxd = &sc->alc_cdata.alc_rxdesc[i]; 1498 if (rxd->rx_dmamap != NULL) { 1499 bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag, 1500 rxd->rx_dmamap); 1501 rxd->rx_dmamap = NULL; 1502 } 1503 } 1504 if (sc->alc_cdata.alc_rx_sparemap != NULL) { 1505 bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag, 1506 sc->alc_cdata.alc_rx_sparemap); 1507 sc->alc_cdata.alc_rx_sparemap = NULL; 1508 } 1509 bus_dma_tag_destroy(sc->alc_cdata.alc_rx_tag); 1510 sc->alc_cdata.alc_rx_tag = NULL; 1511 } 1512 /* Tx descriptor ring. */ 1513 if (sc->alc_cdata.alc_tx_ring_tag != NULL) { 1514 if (sc->alc_cdata.alc_tx_ring_map != NULL) 1515 bus_dmamap_unload(sc->alc_cdata.alc_tx_ring_tag, 1516 sc->alc_cdata.alc_tx_ring_map); 1517 if (sc->alc_cdata.alc_tx_ring_map != NULL && 1518 sc->alc_rdata.alc_tx_ring != NULL) 1519 bus_dmamem_free(sc->alc_cdata.alc_tx_ring_tag, 1520 sc->alc_rdata.alc_tx_ring, 1521 sc->alc_cdata.alc_tx_ring_map); 1522 sc->alc_rdata.alc_tx_ring = NULL; 1523 sc->alc_cdata.alc_tx_ring_map = NULL; 1524 bus_dma_tag_destroy(sc->alc_cdata.alc_tx_ring_tag); 1525 sc->alc_cdata.alc_tx_ring_tag = NULL; 1526 } 1527 /* Rx ring. */ 1528 if (sc->alc_cdata.alc_rx_ring_tag != NULL) { 1529 if (sc->alc_cdata.alc_rx_ring_map != NULL) 1530 bus_dmamap_unload(sc->alc_cdata.alc_rx_ring_tag, 1531 sc->alc_cdata.alc_rx_ring_map); 1532 if (sc->alc_cdata.alc_rx_ring_map != NULL && 1533 sc->alc_rdata.alc_rx_ring != NULL) 1534 bus_dmamem_free(sc->alc_cdata.alc_rx_ring_tag, 1535 sc->alc_rdata.alc_rx_ring, 1536 sc->alc_cdata.alc_rx_ring_map); 1537 sc->alc_rdata.alc_rx_ring = NULL; 1538 sc->alc_cdata.alc_rx_ring_map = NULL; 1539 bus_dma_tag_destroy(sc->alc_cdata.alc_rx_ring_tag); 1540 sc->alc_cdata.alc_rx_ring_tag = NULL; 1541 } 1542 /* Rx return ring. 
*/ 1543 if (sc->alc_cdata.alc_rr_ring_tag != NULL) { 1544 if (sc->alc_cdata.alc_rr_ring_map != NULL) 1545 bus_dmamap_unload(sc->alc_cdata.alc_rr_ring_tag, 1546 sc->alc_cdata.alc_rr_ring_map); 1547 if (sc->alc_cdata.alc_rr_ring_map != NULL && 1548 sc->alc_rdata.alc_rr_ring != NULL) 1549 bus_dmamem_free(sc->alc_cdata.alc_rr_ring_tag, 1550 sc->alc_rdata.alc_rr_ring, 1551 sc->alc_cdata.alc_rr_ring_map); 1552 sc->alc_rdata.alc_rr_ring = NULL; 1553 sc->alc_cdata.alc_rr_ring_map = NULL; 1554 bus_dma_tag_destroy(sc->alc_cdata.alc_rr_ring_tag); 1555 sc->alc_cdata.alc_rr_ring_tag = NULL; 1556 } 1557 /* CMB block */ 1558 if (sc->alc_cdata.alc_cmb_tag != NULL) { 1559 if (sc->alc_cdata.alc_cmb_map != NULL) 1560 bus_dmamap_unload(sc->alc_cdata.alc_cmb_tag, 1561 sc->alc_cdata.alc_cmb_map); 1562 if (sc->alc_cdata.alc_cmb_map != NULL && 1563 sc->alc_rdata.alc_cmb != NULL) 1564 bus_dmamem_free(sc->alc_cdata.alc_cmb_tag, 1565 sc->alc_rdata.alc_cmb, 1566 sc->alc_cdata.alc_cmb_map); 1567 sc->alc_rdata.alc_cmb = NULL; 1568 sc->alc_cdata.alc_cmb_map = NULL; 1569 bus_dma_tag_destroy(sc->alc_cdata.alc_cmb_tag); 1570 sc->alc_cdata.alc_cmb_tag = NULL; 1571 } 1572 /* SMB block */ 1573 if (sc->alc_cdata.alc_smb_tag != NULL) { 1574 if (sc->alc_cdata.alc_smb_map != NULL) 1575 bus_dmamap_unload(sc->alc_cdata.alc_smb_tag, 1576 sc->alc_cdata.alc_smb_map); 1577 if (sc->alc_cdata.alc_smb_map != NULL && 1578 sc->alc_rdata.alc_smb != NULL) 1579 bus_dmamem_free(sc->alc_cdata.alc_smb_tag, 1580 sc->alc_rdata.alc_smb, 1581 sc->alc_cdata.alc_smb_map); 1582 sc->alc_rdata.alc_smb = NULL; 1583 sc->alc_cdata.alc_smb_map = NULL; 1584 bus_dma_tag_destroy(sc->alc_cdata.alc_smb_tag); 1585 sc->alc_cdata.alc_smb_tag = NULL; 1586 } 1587 if (sc->alc_cdata.alc_buffer_tag != NULL) { 1588 bus_dma_tag_destroy(sc->alc_cdata.alc_buffer_tag); 1589 sc->alc_cdata.alc_buffer_tag = NULL; 1590 } 1591 if (sc->alc_cdata.alc_parent_tag != NULL) { 1592 bus_dma_tag_destroy(sc->alc_cdata.alc_parent_tag); 1593 sc->alc_cdata.alc_parent_tag = NULL; 1594 } 1595 } 1596 1597 static int 1598 alc_shutdown(device_t dev) 1599 { 1600 1601 return (alc_suspend(dev)); 1602 } 1603 1604 #if 0 1605 /* XXX: LINK SPEED */ 1606 /* 1607 * Note, this driver resets the link speed to 10/100Mbps by 1608 * restarting auto-negotiation in suspend/shutdown phase but we 1609 * don't know whether that auto-negotiation would succeed or not 1610 * as driver has no control after powering off/suspend operation. 1611 * If the renegotiation fail WOL may not work. Running at 1Gbps 1612 * will draw more power than 375mA at 3.3V which is specified in 1613 * PCI specification and that would result in complete 1614 * shutdowning power to ethernet controller. 1615 * 1616 * TODO 1617 * Save current negotiated media speed/duplex/flow-control to 1618 * softc and restore the same link again after resuming. PHY 1619 * handling such as power down/resetting to 100Mbps may be better 1620 * handled in suspend method in phy driver. 
1621 */ 1622 static void 1623 alc_setlinkspeed(struct alc_softc *sc) 1624 { 1625 struct mii_data *mii; 1626 int aneg, i; 1627 1628 mii = device_get_softc(sc->alc_miibus); 1629 mii_pollstat(mii); 1630 aneg = 0; 1631 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 1632 (IFM_ACTIVE | IFM_AVALID)) { 1633 switch IFM_SUBTYPE(mii->mii_media_active) { 1634 case IFM_10_T: 1635 case IFM_100_TX: 1636 return; 1637 case IFM_1000_T: 1638 aneg++; 1639 break; 1640 default: 1641 break; 1642 } 1643 } 1644 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_100T2CR, 0); 1645 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 1646 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 1647 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 1648 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); 1649 DELAY(1000); 1650 if (aneg != 0) { 1651 /* 1652 * Poll link state until alc(4) get a 10/100Mbps link. 1653 */ 1654 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 1655 mii_pollstat(mii); 1656 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 1657 == (IFM_ACTIVE | IFM_AVALID)) { 1658 switch (IFM_SUBTYPE( 1659 mii->mii_media_active)) { 1660 case IFM_10_T: 1661 case IFM_100_TX: 1662 alc_mac_config(sc); 1663 return; 1664 default: 1665 break; 1666 } 1667 } 1668 ALC_UNLOCK(sc); 1669 pause("alclnk", hz); 1670 ALC_LOCK(sc); 1671 } 1672 if (i == MII_ANEGTICKS_GIGE) 1673 device_printf(sc->alc_dev, 1674 "establishing a link failed, WOL may not work!"); 1675 } 1676 /* 1677 * No link, force MAC to have 100Mbps, full-duplex link. 1678 * This is the last resort and may/may not work. 1679 */ 1680 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 1681 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 1682 alc_mac_config(sc); 1683 } 1684 #endif 1685 1686 #if 0 1687 /* XXX: WOL */ 1688 static void 1689 alc_setwol(struct alc_softc *sc) 1690 { 1691 struct ifnet *ifp; 1692 uint32_t cap, reg, pmcs; 1693 uint16_t pmstat; 1694 int base, pmc; 1695 1696 ALC_LOCK_ASSERT(sc); 1697 1698 if (pci_find_extcap(sc->alc_dev, PCIY_EXPRESS, &base) == 0) { 1699 cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CAP); 1700 if ((cap & PCIM_LINK_CAP_ASPM) != 0) { 1701 cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CTL); 1702 alc_disable_l0s_l1(sc); 1703 } 1704 } 1705 if (pci_find_extcap(sc->alc_dev, PCIY_PMG, &pmc) != 0) { 1706 /* Disable WOL. */ 1707 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 1708 reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC); 1709 reg |= PCIE_PHYMISC_FORCE_RCV_DET; 1710 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg); 1711 /* Force PHY power down. 
*/ 1712 alc_phy_down(sc); 1713 return; 1714 } 1715 1716 ifp = sc->alc_ifp; 1717 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 1718 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) 1719 alc_setlinkspeed(sc); 1720 reg = CSR_READ_4(sc, ALC_MASTER_CFG); 1721 reg &= ~MASTER_CLK_SEL_DIS; 1722 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 1723 } 1724 1725 pmcs = 0; 1726 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 1727 pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB; 1728 CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs); 1729 reg = CSR_READ_4(sc, ALC_MAC_CFG); 1730 reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI | 1731 MAC_CFG_BCAST); 1732 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 1733 reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST; 1734 if ((ifp->if_capenable & IFCAP_WOL) != 0) 1735 reg |= MAC_CFG_RX_ENB; 1736 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 1737 1738 reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC); 1739 reg |= PCIE_PHYMISC_FORCE_RCV_DET; 1740 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg); 1741 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 1742 /* WOL disabled, PHY power down. */ 1743 alc_phy_down(sc); 1744 } 1745 /* Request PME. */ 1746 pmstat = pci_read_config(sc->alc_dev, pmc + PCIR_POWER_STATUS, 2); 1747 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 1748 if ((ifp->if_capenable & IFCAP_WOL) != 0) 1749 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 1750 pci_write_config(sc->alc_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 1751 } 1752 #endif 1753 1754 static int 1755 alc_suspend(device_t dev) 1756 { 1757 struct alc_softc *sc; 1758 1759 sc = device_get_softc(dev); 1760 1761 ALC_LOCK(sc); 1762 alc_stop(sc); 1763 #if 0 1764 /* XXX: WOL */ 1765 alc_setwol(sc); 1766 #endif 1767 ALC_UNLOCK(sc); 1768 1769 return (0); 1770 } 1771 1772 static int 1773 alc_resume(device_t dev) 1774 { 1775 struct alc_softc *sc; 1776 struct ifnet *ifp; 1777 int pmc; 1778 uint16_t pmstat; 1779 1780 sc = device_get_softc(dev); 1781 1782 ALC_LOCK(sc); 1783 if (pci_find_extcap(sc->alc_dev, PCIY_PMG, &pmc) == 0) { 1784 /* Disable PME and clear PME status. */ 1785 pmstat = pci_read_config(sc->alc_dev, 1786 pmc + PCIR_POWER_STATUS, 2); 1787 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 1788 pmstat &= ~PCIM_PSTAT_PMEENABLE; 1789 pci_write_config(sc->alc_dev, 1790 pmc + PCIR_POWER_STATUS, pmstat, 2); 1791 } 1792 } 1793 /* Reset PHY. */ 1794 alc_phy_reset(sc); 1795 ifp = sc->alc_ifp; 1796 if ((ifp->if_flags & IFF_UP) != 0) { 1797 ifp->if_flags &= ~IFF_RUNNING; 1798 alc_init_locked(sc); 1799 } 1800 ALC_UNLOCK(sc); 1801 1802 return (0); 1803 } 1804 1805 static int 1806 alc_encap(struct alc_softc *sc, struct mbuf **m_head) 1807 { 1808 struct alc_txdesc *txd, *txd_last; 1809 struct tx_desc *desc; 1810 struct mbuf *m; 1811 struct ip *ip; 1812 struct tcphdr *tcp; 1813 bus_dma_segment_t txsegs[ALC_MAXTXSEGS]; 1814 bus_dmamap_t map; 1815 uint32_t cflags, hdrlen, ip_off, poff, vtag; 1816 int error, idx, nsegs, prod; 1817 1818 ALC_LOCK_ASSERT(sc); 1819 1820 M_ASSERTPKTHDR((*m_head)); 1821 1822 m = *m_head; 1823 ip = NULL; 1824 tcp = NULL; 1825 ip_off = poff = 0; 1826 #if 0 1827 /* XXX: TSO */ 1828 if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) { 1829 /* 1830 * AR8131/AR8132 requires offset of TCP/UDP header in its 1831 * Tx descriptor to perform Tx checksum offloading. TSO 1832 * also requires TCP header offset and modification of 1833 * IP/TCP header. This kind of operation takes many CPU 1834 * cycles on FreeBSD so fast host CPU is required to get 1835 * smooth TSO performance. 
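		 *
		 * Illustrative example (editor's addition, not in the original
		 * comment): for a plain Ethernet + IPv4 + TCP frame without a
		 * VLAN tag and with a 20-byte IP header, the offsets computed
		 * below work out to
		 *	ip_off = sizeof(struct ether_header)	= 14
		 *	poff   = ip_off + (ip->ip_hl << 2)	= 14 + 20 = 34
		 * i.e. poff is the byte offset of the TCP/UDP header from the
		 * start of the frame, which is what the Tx descriptor expects.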
1836 */ 1837 struct ether_header *eh; 1838 1839 if (M_WRITABLE(m) == 0) { 1840 /* Get a writable copy. */ 1841 m = m_dup(*m_head, MB_DONTWAIT); 1842 /* Release original mbufs. */ 1843 m_freem(*m_head); 1844 if (m == NULL) { 1845 *m_head = NULL; 1846 return (ENOBUFS); 1847 } 1848 *m_head = m; 1849 } 1850 1851 ip_off = sizeof(struct ether_header); 1852 m = m_pullup(m, ip_off); 1853 if (m == NULL) { 1854 *m_head = NULL; 1855 return (ENOBUFS); 1856 } 1857 eh = mtod(m, struct ether_header *); 1858 /* 1859 * Check if hardware VLAN insertion is off. 1860 * Additional check for LLC/SNAP frame? 1861 */ 1862 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1863 ip_off = sizeof(struct ether_vlan_header); 1864 m = m_pullup(m, ip_off); 1865 if (m == NULL) { 1866 *m_head = NULL; 1867 return (ENOBUFS); 1868 } 1869 } 1870 m = m_pullup(m, ip_off + sizeof(struct ip)); 1871 if (m == NULL) { 1872 *m_head = NULL; 1873 return (ENOBUFS); 1874 } 1875 ip = (struct ip *)(mtod(m, char *) + ip_off); 1876 poff = ip_off + (ip->ip_hl << 2); 1877 1878 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1879 m = m_pullup(m, poff + sizeof(struct tcphdr)); 1880 if (m == NULL) { 1881 *m_head = NULL; 1882 return (ENOBUFS); 1883 } 1884 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 1885 m = m_pullup(m, poff + (tcp->th_off << 2)); 1886 if (m == NULL) { 1887 *m_head = NULL; 1888 return (ENOBUFS); 1889 } 1890 /* 1891 * Due to strict adherence of Microsoft NDIS 1892 * Large Send specification, hardware expects 1893 * a pseudo TCP checksum inserted by upper 1894 * stack. Unfortunately the pseudo TCP 1895 * checksum that NDIS refers to does not include 1896 * TCP payload length so driver should recompute 1897 * the pseudo checksum here. Hopefully this 1898 * wouldn't be much burden on modern CPUs. 1899 * 1900 * Reset IP checksum and recompute TCP pseudo 1901 * checksum as NDIS specification said. 1902 */ 1903 ip->ip_sum = 0; 1904 tcp->th_sum = in_pseudo(ip->ip_src.s_addr, 1905 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 1906 } 1907 *m_head = m; 1908 } 1909 #endif /* TSO */ 1910 1911 prod = sc->alc_cdata.alc_tx_prod; 1912 txd = &sc->alc_cdata.alc_txdesc[prod]; 1913 txd_last = txd; 1914 map = txd->tx_dmamap; 1915 1916 error = bus_dmamap_load_mbuf_defrag( 1917 sc->alc_cdata.alc_tx_tag, map, m_head, 1918 txsegs, ALC_MAXTXSEGS, &nsegs, BUS_DMA_NOWAIT); 1919 if (error) { 1920 m_freem(*m_head); 1921 *m_head = NULL; 1922 return (error); 1923 } 1924 if (nsegs == 0) { 1925 m_freem(*m_head); 1926 *m_head = NULL; 1927 return (EIO); 1928 } 1929 1930 /* Check descriptor overrun. */ 1931 if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) { 1932 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, map); 1933 return (ENOBUFS); 1934 } 1935 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, map, BUS_DMASYNC_PREWRITE); 1936 1937 m = *m_head; 1938 cflags = TD_ETHERNET; 1939 vtag = 0; 1940 desc = NULL; 1941 idx = 0; 1942 /* Configure VLAN hardware tag insertion. */ 1943 if ((m->m_flags & M_VLANTAG) != 0) { 1944 vtag = htons(m->m_pkthdr.ether_vlantag); 1945 vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK; 1946 cflags |= TD_INS_VLAN_TAG; 1947 } 1948 /* Configure Tx checksum offload. */ 1949 if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) { 1950 #ifdef ALC_USE_CUSTOM_CSUM 1951 cflags |= TD_CUSTOM_CSUM; 1952 /* Set checksum start offset. */ 1953 cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) & 1954 TD_PLOAD_OFFSET_MASK; 1955 /* Set checksum insertion position of TCP/UDP. 
*/ 1956 cflags |= (((poff + m->m_pkthdr.csum_data) >> 1) << 1957 TD_CUSTOM_CSUM_OFFSET_SHIFT) & TD_CUSTOM_CSUM_OFFSET_MASK; 1958 #else 1959 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 1960 cflags |= TD_IPCSUM; 1961 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 1962 cflags |= TD_TCPCSUM; 1963 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 1964 cflags |= TD_UDPCSUM; 1965 /* Set TCP/UDP header offset. */ 1966 cflags |= (poff << TD_L4HDR_OFFSET_SHIFT) & 1967 TD_L4HDR_OFFSET_MASK; 1968 #endif 1969 } else if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1970 /* Request TSO and set MSS. */ 1971 cflags |= TD_TSO | TD_TSO_DESCV1; 1972 #if 0 1973 /* XXX: TSO */ 1974 cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << TD_MSS_SHIFT) & 1975 TD_MSS_MASK; 1976 /* Set TCP header offset. */ 1977 #endif 1978 cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) & 1979 TD_TCPHDR_OFFSET_MASK; 1980 /* 1981 * AR8131/AR8132 requires the first buffer should 1982 * only hold IP/TCP header data. Payload should 1983 * be handled in other descriptors. 1984 */ 1985 hdrlen = poff + (tcp->th_off << 2); 1986 desc = &sc->alc_rdata.alc_tx_ring[prod]; 1987 desc->len = htole32(TX_BYTES(hdrlen | vtag)); 1988 desc->flags = htole32(cflags); 1989 desc->addr = htole64(txsegs[0].ds_addr); 1990 sc->alc_cdata.alc_tx_cnt++; 1991 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 1992 if (m->m_len - hdrlen > 0) { 1993 /* Handle remaining payload of the first fragment. */ 1994 desc = &sc->alc_rdata.alc_tx_ring[prod]; 1995 desc->len = htole32(TX_BYTES((m->m_len - hdrlen) | 1996 vtag)); 1997 desc->flags = htole32(cflags); 1998 desc->addr = htole64(txsegs[0].ds_addr + hdrlen); 1999 sc->alc_cdata.alc_tx_cnt++; 2000 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 2001 } 2002 /* Handle remaining fragments. */ 2003 idx = 1; 2004 } 2005 for (; idx < nsegs; idx++) { 2006 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2007 desc->len = htole32(TX_BYTES(txsegs[idx].ds_len) | vtag); 2008 desc->flags = htole32(cflags); 2009 desc->addr = htole64(txsegs[idx].ds_addr); 2010 sc->alc_cdata.alc_tx_cnt++; 2011 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 2012 } 2013 /* Update producer index. */ 2014 sc->alc_cdata.alc_tx_prod = prod; 2015 2016 /* Finally set EOP on the last descriptor. */ 2017 prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT; 2018 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2019 desc->flags |= htole32(TD_EOP); 2020 2021 /* Swap dmamap of the first and the last. */ 2022 txd = &sc->alc_cdata.alc_txdesc[prod]; 2023 map = txd_last->tx_dmamap; 2024 txd_last->tx_dmamap = txd->tx_dmamap; 2025 txd->tx_dmamap = map; 2026 txd->tx_m = m; 2027 2028 return (0); 2029 } 2030 2031 static void 2032 alc_tx_task(void *arg, int pending) 2033 { 2034 struct ifnet *ifp; 2035 2036 ifp = (struct ifnet *)arg; 2037 alc_start(ifp); 2038 } 2039 2040 static void 2041 alc_start(struct ifnet *ifp) 2042 { 2043 struct alc_softc *sc; 2044 struct mbuf *m_head; 2045 int enq; 2046 2047 sc = ifp->if_softc; 2048 2049 ALC_LOCK(sc); 2050 2051 /* Reclaim transmitted frames. */ 2052 if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT) 2053 alc_txeof(sc); 2054 2055 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) { 2056 ALC_UNLOCK(sc); 2057 return; 2058 } 2059 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) { 2060 ifq_purge(&ifp->if_snd); 2061 ALC_UNLOCK(sc); 2062 return; 2063 } 2064 2065 for (enq = 0; !ifq_is_empty(&ifp->if_snd); ) { 2066 m_head = ifq_dequeue(&ifp->if_snd, NULL); 2067 if (m_head == NULL) 2068 break; 2069 /* 2070 * Pack the data into the transmit ring. 
If we 2071 * don't have room, set the OACTIVE flag and wait 2072 * for the NIC to drain the ring. 2073 */ 2074 if (alc_encap(sc, &m_head)) { 2075 if (m_head == NULL) 2076 break; 2077 ifq_prepend(&ifp->if_snd, m_head); 2078 ifp->if_flags |= IFF_OACTIVE; 2079 break; 2080 } 2081 2082 enq++; 2083 /* 2084 * If there's a BPF listener, bounce a copy of this frame 2085 * to him. 2086 */ 2087 ETHER_BPF_MTAP(ifp, m_head); 2088 } 2089 2090 if (enq > 0) { 2091 /* Sync descriptors. */ 2092 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, 2093 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE); 2094 /* Kick. Assume we're using normal Tx priority queue. */ 2095 CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX, 2096 (sc->alc_cdata.alc_tx_prod << 2097 MBOX_TD_PROD_LO_IDX_SHIFT) & 2098 MBOX_TD_PROD_LO_IDX_MASK); 2099 /* Set a timeout in case the chip goes out to lunch. */ 2100 sc->alc_watchdog_timer = ALC_TX_TIMEOUT; 2101 } 2102 2103 ALC_UNLOCK(sc); 2104 } 2105 2106 static void 2107 alc_watchdog(struct alc_softc *sc) 2108 { 2109 struct ifnet *ifp; 2110 2111 ALC_LOCK_ASSERT(sc); 2112 2113 if (sc->alc_watchdog_timer == 0 || --sc->alc_watchdog_timer) 2114 return; 2115 2116 ifp = sc->alc_ifp; 2117 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) { 2118 if_printf(sc->alc_ifp, "watchdog timeout (lost link)\n"); 2119 ifp->if_oerrors++; 2120 ifp->if_flags &= ~IFF_RUNNING; 2121 alc_init_locked(sc); 2122 return; 2123 } 2124 if_printf(sc->alc_ifp, "watchdog timeout -- resetting\n"); 2125 ifp->if_oerrors++; 2126 ifp->if_flags &= ~IFF_RUNNING; 2127 alc_init_locked(sc); 2128 if (!ifq_is_empty(&ifp->if_snd)) 2129 taskqueue_enqueue(sc->alc_tq, &sc->alc_tx_task); 2130 } 2131 2132 static int 2133 alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr) 2134 { 2135 struct alc_softc *sc; 2136 struct ifreq *ifr; 2137 struct mii_data *mii; 2138 int error, mask; 2139 2140 (void)cr; 2141 sc = ifp->if_softc; 2142 ifr = (struct ifreq *)data; 2143 error = 0; 2144 switch (cmd) { 2145 case SIOCSIFMTU: 2146 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ALC_JUMBO_MTU || 2147 ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 && 2148 ifr->ifr_mtu > ETHERMTU)) 2149 error = EINVAL; 2150 else if (ifp->if_mtu != ifr->ifr_mtu) { 2151 ALC_LOCK(sc); 2152 ifp->if_mtu = ifr->ifr_mtu; 2153 /* AR8131/AR8132 has 13 bits MSS field. 
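			 * (A 13-bit MSS field can describe segments of at most
			 * 8191 bytes, so TSO is turned off below whenever the
			 * MTU exceeds ALC_TSO_MTU.)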
*/ 2154 if (ifp->if_mtu > ALC_TSO_MTU && 2155 (ifp->if_capenable & IFCAP_TSO4) != 0) { 2156 ifp->if_capenable &= ~IFCAP_TSO4; 2157 ifp->if_hwassist &= ~CSUM_TSO; 2158 } 2159 ALC_UNLOCK(sc); 2160 } 2161 break; 2162 case SIOCSIFFLAGS: 2163 ALC_LOCK(sc); 2164 if ((ifp->if_flags & IFF_UP) != 0) { 2165 if ((ifp->if_flags & IFF_RUNNING) != 0 && 2166 ((ifp->if_flags ^ sc->alc_if_flags) & 2167 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 2168 alc_rxfilter(sc); 2169 else if ((sc->alc_flags & ALC_FLAG_DETACH) == 0) 2170 alc_init_locked(sc); 2171 } else if ((ifp->if_flags & IFF_RUNNING) != 0) 2172 alc_stop(sc); 2173 sc->alc_if_flags = ifp->if_flags; 2174 ALC_UNLOCK(sc); 2175 break; 2176 case SIOCADDMULTI: 2177 case SIOCDELMULTI: 2178 ALC_LOCK(sc); 2179 if ((ifp->if_flags & IFF_RUNNING) != 0) 2180 alc_rxfilter(sc); 2181 ALC_UNLOCK(sc); 2182 break; 2183 case SIOCSIFMEDIA: 2184 case SIOCGIFMEDIA: 2185 mii = device_get_softc(sc->alc_miibus); 2186 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 2187 break; 2188 case SIOCSIFCAP: 2189 ALC_LOCK(sc); 2190 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2191 if ((mask & IFCAP_TXCSUM) != 0 && 2192 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 2193 ifp->if_capenable ^= IFCAP_TXCSUM; 2194 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2195 ifp->if_hwassist |= ALC_CSUM_FEATURES; 2196 else 2197 ifp->if_hwassist &= ~ALC_CSUM_FEATURES; 2198 } 2199 if ((mask & IFCAP_TSO4) != 0 && 2200 (ifp->if_capabilities & IFCAP_TSO4) != 0) { 2201 ifp->if_capenable ^= IFCAP_TSO4; 2202 if ((ifp->if_capenable & IFCAP_TSO4) != 0) { 2203 /* AR8131/AR8132 has 13 bits MSS field. */ 2204 if (ifp->if_mtu > ALC_TSO_MTU) { 2205 ifp->if_capenable &= ~IFCAP_TSO4; 2206 ifp->if_hwassist &= ~CSUM_TSO; 2207 } else 2208 ifp->if_hwassist |= CSUM_TSO; 2209 } else 2210 ifp->if_hwassist &= ~CSUM_TSO; 2211 } 2212 #if 0 2213 /* XXX: WOL */ 2214 if ((mask & IFCAP_WOL_MCAST) != 0 && 2215 (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0) 2216 ifp->if_capenable ^= IFCAP_WOL_MCAST; 2217 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2218 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 2219 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2220 #endif 2221 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2222 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 2223 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2224 alc_rxvlan(sc); 2225 } 2226 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 2227 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 2228 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 2229 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 2230 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) 2231 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 2232 /* 2233 * VLAN hardware tagging is required to do checksum 2234 * offload or TSO on VLAN interface. Checksum offload 2235 * on VLAN interface also requires hardware checksum 2236 * offload of parent interface. 
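		 * For example, clearing IFCAP_TXCSUM below also clears
		 * IFCAP_VLAN_HWCSUM, and clearing IFCAP_VLAN_HWTAGGING clears
		 * both IFCAP_VLAN_HWTSO and IFCAP_VLAN_HWCSUM.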
2237 */ 2238 if ((ifp->if_capenable & IFCAP_TXCSUM) == 0) 2239 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 2240 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 2241 ifp->if_capenable &= 2242 ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM); 2243 ALC_UNLOCK(sc); 2244 // XXX VLAN_CAPABILITIES(ifp); 2245 break; 2246 default: 2247 error = ether_ioctl(ifp, cmd, data); 2248 break; 2249 } 2250 2251 return (error); 2252 } 2253 2254 static void 2255 alc_mac_config(struct alc_softc *sc) 2256 { 2257 struct mii_data *mii; 2258 uint32_t reg; 2259 2260 ALC_LOCK_ASSERT(sc); 2261 2262 mii = device_get_softc(sc->alc_miibus); 2263 reg = CSR_READ_4(sc, ALC_MAC_CFG); 2264 reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC | 2265 MAC_CFG_SPEED_MASK); 2266 /* Reprogram MAC with resolved speed/duplex. */ 2267 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2268 case IFM_10_T: 2269 case IFM_100_TX: 2270 reg |= MAC_CFG_SPEED_10_100; 2271 break; 2272 case IFM_1000_T: 2273 reg |= MAC_CFG_SPEED_1000; 2274 break; 2275 } 2276 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 2277 reg |= MAC_CFG_FULL_DUPLEX; 2278 #ifdef notyet 2279 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 2280 reg |= MAC_CFG_TX_FC; 2281 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 2282 reg |= MAC_CFG_RX_FC; 2283 #endif 2284 } 2285 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 2286 } 2287 2288 static void 2289 alc_stats_clear(struct alc_softc *sc) 2290 { 2291 struct smb sb, *smb; 2292 uint32_t *reg; 2293 int i; 2294 2295 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2296 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 2297 sc->alc_cdata.alc_smb_map, 2298 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2299 smb = sc->alc_rdata.alc_smb; 2300 /* Update done, clear. */ 2301 smb->updated = 0; 2302 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 2303 sc->alc_cdata.alc_smb_map, 2304 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2305 } else { 2306 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; 2307 reg++) { 2308 CSR_READ_4(sc, ALC_RX_MIB_BASE + i); 2309 i += sizeof(uint32_t); 2310 } 2311 /* Read Tx statistics. */ 2312 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; 2313 reg++) { 2314 CSR_READ_4(sc, ALC_TX_MIB_BASE + i); 2315 i += sizeof(uint32_t); 2316 } 2317 } 2318 } 2319 2320 static void 2321 alc_stats_update(struct alc_softc *sc) 2322 { 2323 struct alc_hw_stats *stat; 2324 struct smb sb, *smb; 2325 struct ifnet *ifp; 2326 uint32_t *reg; 2327 int i; 2328 2329 ALC_LOCK_ASSERT(sc); 2330 2331 ifp = sc->alc_ifp; 2332 stat = &sc->alc_stats; 2333 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2334 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 2335 sc->alc_cdata.alc_smb_map, 2336 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2337 smb = sc->alc_rdata.alc_smb; 2338 if (smb->updated == 0) 2339 return; 2340 } else { 2341 smb = &sb; 2342 /* Read Rx statistics. */ 2343 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; 2344 reg++) { 2345 *reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i); 2346 i += sizeof(uint32_t); 2347 } 2348 /* Read Tx statistics. */ 2349 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; 2350 reg++) { 2351 *reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i); 2352 i += sizeof(uint32_t); 2353 } 2354 } 2355 2356 /* Rx stats. 
*/ 2357 stat->rx_frames += smb->rx_frames; 2358 stat->rx_bcast_frames += smb->rx_bcast_frames; 2359 stat->rx_mcast_frames += smb->rx_mcast_frames; 2360 stat->rx_pause_frames += smb->rx_pause_frames; 2361 stat->rx_control_frames += smb->rx_control_frames; 2362 stat->rx_crcerrs += smb->rx_crcerrs; 2363 stat->rx_lenerrs += smb->rx_lenerrs; 2364 stat->rx_bytes += smb->rx_bytes; 2365 stat->rx_runts += smb->rx_runts; 2366 stat->rx_fragments += smb->rx_fragments; 2367 stat->rx_pkts_64 += smb->rx_pkts_64; 2368 stat->rx_pkts_65_127 += smb->rx_pkts_65_127; 2369 stat->rx_pkts_128_255 += smb->rx_pkts_128_255; 2370 stat->rx_pkts_256_511 += smb->rx_pkts_256_511; 2371 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; 2372 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; 2373 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; 2374 stat->rx_pkts_truncated += smb->rx_pkts_truncated; 2375 stat->rx_fifo_oflows += smb->rx_fifo_oflows; 2376 stat->rx_rrs_errs += smb->rx_rrs_errs; 2377 stat->rx_alignerrs += smb->rx_alignerrs; 2378 stat->rx_bcast_bytes += smb->rx_bcast_bytes; 2379 stat->rx_mcast_bytes += smb->rx_mcast_bytes; 2380 stat->rx_pkts_filtered += smb->rx_pkts_filtered; 2381 2382 /* Tx stats. */ 2383 stat->tx_frames += smb->tx_frames; 2384 stat->tx_bcast_frames += smb->tx_bcast_frames; 2385 stat->tx_mcast_frames += smb->tx_mcast_frames; 2386 stat->tx_pause_frames += smb->tx_pause_frames; 2387 stat->tx_excess_defer += smb->tx_excess_defer; 2388 stat->tx_control_frames += smb->tx_control_frames; 2389 stat->tx_deferred += smb->tx_deferred; 2390 stat->tx_bytes += smb->tx_bytes; 2391 stat->tx_pkts_64 += smb->tx_pkts_64; 2392 stat->tx_pkts_65_127 += smb->tx_pkts_65_127; 2393 stat->tx_pkts_128_255 += smb->tx_pkts_128_255; 2394 stat->tx_pkts_256_511 += smb->tx_pkts_256_511; 2395 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023; 2396 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518; 2397 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max; 2398 stat->tx_single_colls += smb->tx_single_colls; 2399 stat->tx_multi_colls += smb->tx_multi_colls; 2400 stat->tx_late_colls += smb->tx_late_colls; 2401 stat->tx_excess_colls += smb->tx_excess_colls; 2402 stat->tx_abort += smb->tx_abort; 2403 stat->tx_underrun += smb->tx_underrun; 2404 stat->tx_desc_underrun += smb->tx_desc_underrun; 2405 stat->tx_lenerrs += smb->tx_lenerrs; 2406 stat->tx_pkts_truncated += smb->tx_pkts_truncated; 2407 stat->tx_bcast_bytes += smb->tx_bcast_bytes; 2408 stat->tx_mcast_bytes += smb->tx_mcast_bytes; 2409 2410 /* Update counters in ifnet. */ 2411 ifp->if_opackets += smb->tx_frames; 2412 2413 ifp->if_collisions += smb->tx_single_colls + 2414 smb->tx_multi_colls * 2 + smb->tx_late_colls + 2415 smb->tx_abort * HDPX_CFG_RETRY_DEFAULT; 2416 2417 /* 2418 * XXX 2419 * tx_pkts_truncated counter looks suspicious. It constantly 2420 * increments with no sign of Tx errors. This may indicate 2421 * the counter name is not correct one so I've removed the 2422 * counter in output errors. 2423 */ 2424 ifp->if_oerrors += smb->tx_abort + smb->tx_late_colls + 2425 smb->tx_underrun; 2426 2427 ifp->if_ipackets += smb->rx_frames; 2428 2429 ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs + 2430 smb->rx_runts + smb->rx_pkts_truncated + 2431 smb->rx_fifo_oflows + smb->rx_rrs_errs + 2432 smb->rx_alignerrs; 2433 2434 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2435 /* Update done, clear. 
*/ 2436 smb->updated = 0; 2437 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 2438 sc->alc_cdata.alc_smb_map, 2439 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2440 } 2441 } 2442 2443 static void 2444 alc_intr(void *arg) 2445 { 2446 struct alc_softc *sc; 2447 uint32_t status; 2448 2449 sc = (struct alc_softc *)arg; 2450 2451 status = CSR_READ_4(sc, ALC_INTR_STATUS); 2452 if ((status & ALC_INTRS) == 0) { 2453 return; 2454 } 2455 /* Disable interrupts. */ 2456 CSR_WRITE_4(sc, ALC_INTR_STATUS, INTR_DIS_INT); 2457 taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task); 2458 2459 return; 2460 } 2461 2462 static void 2463 alc_int_task(void *arg, int pending) 2464 { 2465 struct alc_softc *sc; 2466 struct ifnet *ifp; 2467 uint32_t status; 2468 int more; 2469 2470 sc = (struct alc_softc *)arg; 2471 ifp = sc->alc_ifp; 2472 2473 status = CSR_READ_4(sc, ALC_INTR_STATUS); 2474 more = atomic_readandclear_32(&sc->alc_morework); 2475 if (more != 0) 2476 status |= INTR_RX_PKT; 2477 if ((status & ALC_INTRS) == 0) 2478 goto done; 2479 2480 /* Acknowledge interrupts but still disable interrupts. */ 2481 CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT); 2482 2483 more = 0; 2484 if ((ifp->if_flags & IFF_RUNNING) != 0) { 2485 if ((status & INTR_RX_PKT) != 0) { 2486 more = alc_rxintr(sc, sc->alc_process_limit); 2487 if (more == EAGAIN) 2488 atomic_set_int(&sc->alc_morework, 1); 2489 else if (more == EIO) { 2490 ALC_LOCK(sc); 2491 ifp->if_flags &= ~IFF_RUNNING; 2492 alc_init_locked(sc); 2493 ALC_UNLOCK(sc); 2494 return; 2495 } 2496 } 2497 if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST | 2498 INTR_TXQ_TO_RST)) != 0) { 2499 if ((status & INTR_DMA_RD_TO_RST) != 0) 2500 device_printf(sc->alc_dev, 2501 "DMA read error! -- resetting\n"); 2502 if ((status & INTR_DMA_WR_TO_RST) != 0) 2503 device_printf(sc->alc_dev, 2504 "DMA write error! -- resetting\n"); 2505 if ((status & INTR_TXQ_TO_RST) != 0) 2506 device_printf(sc->alc_dev, 2507 "TxQ reset! -- resetting\n"); 2508 ALC_LOCK(sc); 2509 ifp->if_flags &= ~IFF_RUNNING; 2510 alc_init_locked(sc); 2511 ALC_UNLOCK(sc); 2512 return; 2513 } 2514 if ((ifp->if_flags & IFF_RUNNING) != 0 && 2515 !ifq_is_empty(&ifp->if_snd)) 2516 taskqueue_enqueue(sc->alc_tq, &sc->alc_tx_task); 2517 } 2518 2519 if (more == EAGAIN || 2520 (CSR_READ_4(sc, ALC_INTR_STATUS) & ALC_INTRS) != 0) { 2521 taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task); 2522 return; 2523 } 2524 2525 done: 2526 if ((ifp->if_flags & IFF_RUNNING) != 0) { 2527 /* Re-enable interrupts if we're running. */ 2528 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF); 2529 } 2530 } 2531 2532 static void 2533 alc_txeof(struct alc_softc *sc) 2534 { 2535 struct ifnet *ifp; 2536 struct alc_txdesc *txd; 2537 uint32_t cons, prod; 2538 int prog; 2539 2540 ALC_LOCK_ASSERT(sc); 2541 2542 ifp = sc->alc_ifp; 2543 2544 if (sc->alc_cdata.alc_tx_cnt == 0) 2545 return; 2546 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, 2547 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_POSTWRITE); 2548 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) { 2549 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, 2550 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD); 2551 prod = sc->alc_rdata.alc_cmb->cons; 2552 } else 2553 prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX); 2554 /* Assume we're using normal Tx priority queue. */ 2555 prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >> 2556 MBOX_TD_CONS_LO_IDX_SHIFT; 2557 cons = sc->alc_cdata.alc_tx_cons; 2558 /* 2559 * Go through our Tx list and free mbufs for those 2560 * frames which have been transmitted. 
2561 */ 2562 for (prog = 0; cons != prod; prog++, 2563 ALC_DESC_INC(cons, ALC_TX_RING_CNT)) { 2564 if (sc->alc_cdata.alc_tx_cnt <= 0) 2565 break; 2566 prog++; 2567 ifp->if_flags &= ~IFF_OACTIVE; 2568 sc->alc_cdata.alc_tx_cnt--; 2569 txd = &sc->alc_cdata.alc_txdesc[cons]; 2570 if (txd->tx_m != NULL) { 2571 /* Reclaim transmitted mbufs. */ 2572 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, 2573 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2574 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, 2575 txd->tx_dmamap); 2576 m_freem(txd->tx_m); 2577 txd->tx_m = NULL; 2578 } 2579 } 2580 2581 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) 2582 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, 2583 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_PREREAD); 2584 sc->alc_cdata.alc_tx_cons = cons; 2585 /* 2586 * Unarm watchdog timer only when there is no pending 2587 * frames in Tx queue. 2588 */ 2589 if (sc->alc_cdata.alc_tx_cnt == 0) 2590 sc->alc_watchdog_timer = 0; 2591 } 2592 2593 static int 2594 alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd) 2595 { 2596 struct mbuf *m; 2597 bus_dma_segment_t segs[1]; 2598 bus_dmamap_t map; 2599 int nsegs; 2600 int error; 2601 2602 m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR); 2603 if (m == NULL) 2604 return (ENOBUFS); 2605 m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX; 2606 #ifndef __NO_STRICT_ALIGNMENT 2607 m_adj(m, sizeof(uint64_t)); 2608 #endif 2609 2610 error = bus_dmamap_load_mbuf_segment( 2611 sc->alc_cdata.alc_rx_tag, 2612 sc->alc_cdata.alc_rx_sparemap, 2613 m, segs, 1, &nsegs, BUS_DMA_NOWAIT); 2614 if (error) { 2615 m_freem(m); 2616 return (ENOBUFS); 2617 } 2618 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 2619 2620 if (rxd->rx_m != NULL) { 2621 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap, 2622 BUS_DMASYNC_POSTREAD); 2623 bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap); 2624 } 2625 map = rxd->rx_dmamap; 2626 rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap; 2627 sc->alc_cdata.alc_rx_sparemap = map; 2628 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap, 2629 BUS_DMASYNC_PREREAD); 2630 rxd->rx_m = m; 2631 rxd->rx_desc->addr = htole64(segs[0].ds_addr); 2632 return (0); 2633 } 2634 2635 static int 2636 alc_rxintr(struct alc_softc *sc, int count) 2637 { 2638 struct ifnet *ifp; 2639 struct rx_rdesc *rrd; 2640 uint32_t nsegs, status; 2641 int rr_cons, prog; 2642 2643 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag, 2644 sc->alc_cdata.alc_rr_ring_map, 2645 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2646 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag, 2647 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_POSTWRITE); 2648 rr_cons = sc->alc_cdata.alc_rr_cons; 2649 ifp = sc->alc_ifp; 2650 for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) { 2651 if (count-- <= 0) 2652 break; 2653 rrd = &sc->alc_rdata.alc_rr_ring[rr_cons]; 2654 status = le32toh(rrd->status); 2655 if ((status & RRD_VALID) == 0) 2656 break; 2657 nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo)); 2658 if (nsegs == 0) { 2659 /* This should not happen! */ 2660 device_printf(sc->alc_dev, 2661 "unexpected segment count -- resetting\n"); 2662 return (EIO); 2663 } 2664 alc_rxeof(sc, rrd); 2665 /* Clear Rx return status. */ 2666 rrd->status = 0; 2667 ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT); 2668 sc->alc_cdata.alc_rx_cons += nsegs; 2669 sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT; 2670 prog += nsegs; 2671 } 2672 2673 if (prog > 0) { 2674 /* Update the consumer index. */ 2675 sc->alc_cdata.alc_rr_cons = rr_cons; 2676 /* Sync Rx return descriptors. 
		 */
		bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
		    sc->alc_cdata.alc_rr_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Sync updated Rx descriptors such that the controller
		 * sees the modified buffer addresses.
		 */
		bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
		    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
		/*
		 * Let controller know availability of new Rx buffers.
		 * Since alc(4) uses RXQ_CFG_RD_BURST_DEFAULT descriptors
		 * it may be possible to update ALC_MBOX_RD0_PROD_IDX
		 * only when Rx buffer pre-fetching is required. In
		 * addition we already set ALC_RX_RD_FREE_THRESH to
		 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However
		 * it still seems that pre-fetching needs more
		 * experimentation.
		 */
		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
		    sc->alc_cdata.alc_rx_cons);
	}

	return (count > 0 ? 0 : EAGAIN);
}

#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
alc_fixup_rx(struct ifnet *ifp, struct mbuf *m)
{
	struct mbuf *n;
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 3;

	if (m->m_next == NULL) {
		for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
			*dst++ = *src++;
		m->m_data -= 6;
		return (m);
	}
	/*
	 * Append a new mbuf to the received mbuf chain and copy the
	 * ethernet header from the mbuf chain. This can save lots of
	 * CPU cycles for jumbo frames.
	 */
	MGETHDR(n, MB_DONTWAIT, MT_DATA);
	if (n == NULL) {
		ifp->if_iqdrops++;
		m_freem(m);
		return (NULL);
	}
	bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
	m->m_data += ETHER_HDR_LEN;
	m->m_len -= ETHER_HDR_LEN;
	n->m_len = ETHER_HDR_LEN;
	M_MOVE_PKTHDR(n, m);
	n->m_next = m;
	return (n);
}
#endif

/* Receive a frame. */
static void
alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
{
	struct alc_rxdesc *rxd;
	struct ifnet *ifp;
	struct mbuf *mp, *m;
	uint32_t rdinfo, status, vtag;
	int count, nsegs, rx_cons;

	ifp = sc->alc_ifp;
	status = le32toh(rrd->status);
	rdinfo = le32toh(rrd->rdinfo);
	rx_cons = RRD_RD_IDX(rdinfo);
	nsegs = RRD_RD_CNT(rdinfo);

	sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
	if ((status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) != 0) {
		/*
		 * We want to pass the following frames to the upper
		 * layer regardless of the error status of the Rx
		 * return ring.
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length and protocol specific length
		 *    do not match.
		 *
		 * Force the network stack to compute the checksum for
		 * errored frames.
		 */
		status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
		if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
		    RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
			return;
	}

	for (count = 0; count < nsegs; count++,
	    ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
		mp = rxd->rx_m;
		/* Add a new receive buffer to the ring. */
		if (alc_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			/* Reuse Rx buffers. */
			if (sc->alc_cdata.alc_rxhead != NULL)
				m_freem(sc->alc_cdata.alc_rxhead);
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of a
		 * multi-segmented frame.
		 */
		mp->m_len = sc->alc_buf_size;

		/* Chain received mbufs.
*/ 2798 if (sc->alc_cdata.alc_rxhead == NULL) { 2799 sc->alc_cdata.alc_rxhead = mp; 2800 sc->alc_cdata.alc_rxtail = mp; 2801 } else { 2802 mp->m_flags &= ~M_PKTHDR; 2803 sc->alc_cdata.alc_rxprev_tail = 2804 sc->alc_cdata.alc_rxtail; 2805 sc->alc_cdata.alc_rxtail->m_next = mp; 2806 sc->alc_cdata.alc_rxtail = mp; 2807 } 2808 2809 if (count == nsegs - 1) { 2810 /* Last desc. for this frame. */ 2811 m = sc->alc_cdata.alc_rxhead; 2812 m->m_flags |= M_PKTHDR; 2813 /* 2814 * It seems that L1C/L2C controller has no way 2815 * to tell hardware to strip CRC bytes. 2816 */ 2817 m->m_pkthdr.len = 2818 sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN; 2819 if (nsegs > 1) { 2820 /* Set last mbuf size. */ 2821 mp->m_len = sc->alc_cdata.alc_rxlen - 2822 (nsegs - 1) * sc->alc_buf_size; 2823 /* Remove the CRC bytes in chained mbufs. */ 2824 if (mp->m_len <= ETHER_CRC_LEN) { 2825 sc->alc_cdata.alc_rxtail = 2826 sc->alc_cdata.alc_rxprev_tail; 2827 sc->alc_cdata.alc_rxtail->m_len -= 2828 (ETHER_CRC_LEN - mp->m_len); 2829 sc->alc_cdata.alc_rxtail->m_next = NULL; 2830 m_freem(mp); 2831 } else { 2832 mp->m_len -= ETHER_CRC_LEN; 2833 } 2834 } else 2835 m->m_len = m->m_pkthdr.len; 2836 m->m_pkthdr.rcvif = ifp; 2837 /* 2838 * Due to hardware bugs, Rx checksum offloading 2839 * was intentionally disabled. 2840 */ 2841 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && 2842 (status & RRD_VLAN_TAG) != 0) { 2843 vtag = RRD_VLAN(le32toh(rrd->vtag)); 2844 m->m_pkthdr.ether_vlantag = ntohs(vtag); 2845 m->m_flags |= M_VLANTAG; 2846 } 2847 #ifndef __NO_STRICT_ALIGNMENT 2848 m = alc_fixup_rx(ifp, m); 2849 if (m != NULL) 2850 #endif 2851 { 2852 /* Pass it on. */ 2853 (*ifp->if_input)(ifp, m); 2854 } 2855 } 2856 } 2857 /* Reset mbuf chains. */ 2858 ALC_RXCHAIN_RESET(sc); 2859 } 2860 2861 static void 2862 alc_tick(void *arg) 2863 { 2864 struct alc_softc *sc; 2865 struct mii_data *mii; 2866 2867 sc = (struct alc_softc *)arg; 2868 2869 ALC_LOCK(sc); 2870 2871 mii = device_get_softc(sc->alc_miibus); 2872 mii_tick(mii); 2873 alc_stats_update(sc); 2874 /* 2875 * alc(4) does not rely on Tx completion interrupts to reclaim 2876 * transferred buffers. Instead Tx completion interrupts are 2877 * used to hint for scheduling Tx task. So it's necessary to 2878 * release transmitted buffers by kicking Tx completion 2879 * handler. This limits the maximum reclamation delay to a hz. 
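	 * Put differently, even if a Tx completion interrupt is lost or
	 * never serviced, transmitted mbufs are reclaimed by the
	 * once-per-second callout below, so the worst-case hold time is
	 * roughly one second (hz ticks).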
2880 */ 2881 alc_txeof(sc); 2882 alc_watchdog(sc); 2883 callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc); 2884 ALC_UNLOCK(sc); 2885 } 2886 2887 static void 2888 alc_reset(struct alc_softc *sc) 2889 { 2890 uint32_t reg; 2891 int i; 2892 2893 CSR_WRITE_4(sc, ALC_MASTER_CFG, MASTER_RESET); 2894 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 2895 DELAY(10); 2896 if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0) 2897 break; 2898 } 2899 if (i == 0) 2900 device_printf(sc->alc_dev, "master reset timeout!\n"); 2901 2902 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 2903 if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0) 2904 break; 2905 DELAY(10); 2906 } 2907 2908 if (i == 0) 2909 device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg); 2910 } 2911 2912 static void 2913 alc_init(void *xsc) 2914 { 2915 struct alc_softc *sc; 2916 2917 sc = (struct alc_softc *)xsc; 2918 ALC_LOCK(sc); 2919 alc_init_locked(sc); 2920 ALC_UNLOCK(sc); 2921 } 2922 2923 static void 2924 alc_init_locked(struct alc_softc *sc) 2925 { 2926 struct ifnet *ifp; 2927 struct mii_data *mii; 2928 uint8_t eaddr[ETHER_ADDR_LEN]; 2929 bus_addr_t paddr; 2930 uint32_t reg, rxf_hi, rxf_lo; 2931 2932 ALC_LOCK_ASSERT(sc); 2933 2934 ifp = sc->alc_ifp; 2935 mii = device_get_softc(sc->alc_miibus); 2936 2937 if ((ifp->if_flags & IFF_RUNNING) != 0) 2938 return; 2939 /* 2940 * Cancel any pending I/O. 2941 */ 2942 alc_stop(sc); 2943 /* 2944 * Reset the chip to a known state. 2945 */ 2946 alc_reset(sc); 2947 2948 /* Initialize Rx descriptors. */ 2949 if (alc_init_rx_ring(sc) != 0) { 2950 device_printf(sc->alc_dev, "no memory for Rx buffers.\n"); 2951 alc_stop(sc); 2952 return; 2953 } 2954 alc_init_rr_ring(sc); 2955 alc_init_tx_ring(sc); 2956 alc_init_cmb(sc); 2957 alc_init_smb(sc); 2958 2959 /* Reprogram the station address. */ 2960 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 2961 CSR_WRITE_4(sc, ALC_PAR0, 2962 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 2963 CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]); 2964 /* 2965 * Clear WOL status and disable all WOL feature as WOL 2966 * would interfere Rx operation under normal environments. 2967 */ 2968 CSR_READ_4(sc, ALC_WOL_CFG); 2969 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 2970 /* Set Tx descriptor base addresses. */ 2971 paddr = sc->alc_rdata.alc_tx_ring_paddr; 2972 CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 2973 CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 2974 /* We don't use high priority ring. */ 2975 CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0); 2976 /* Set Tx descriptor counter. */ 2977 CSR_WRITE_4(sc, ALC_TD_RING_CNT, 2978 (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK); 2979 /* Set Rx descriptor base addresses. */ 2980 paddr = sc->alc_rdata.alc_rx_ring_paddr; 2981 CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 2982 CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 2983 /* We use one Rx ring. */ 2984 CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0); 2985 CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0); 2986 CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0); 2987 /* Set Rx descriptor counter. */ 2988 CSR_WRITE_4(sc, ALC_RD_RING_CNT, 2989 (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK); 2990 2991 /* 2992 * Let hardware split jumbo frames into alc_max_buf_sized chunks. 2993 * if it do not fit the buffer size. Rx return descriptor holds 2994 * a counter that indicates how many fragments were made by the 2995 * hardware. The buffer size should be multiple of 8 bytes. 
2996 * Since hardware has limit on the size of buffer size, always 2997 * use the maximum value. 2998 * For strict-alignment architectures make sure to reduce buffer 2999 * size by 8 bytes to make room for alignment fixup. 3000 */ 3001 #ifndef __NO_STRICT_ALIGNMENT 3002 sc->alc_buf_size = RX_BUF_SIZE_MAX - sizeof(uint64_t); 3003 #else 3004 sc->alc_buf_size = RX_BUF_SIZE_MAX; 3005 #endif 3006 CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size); 3007 3008 paddr = sc->alc_rdata.alc_rr_ring_paddr; 3009 /* Set Rx return descriptor base addresses. */ 3010 CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 3011 /* We use one Rx return ring. */ 3012 CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0); 3013 CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0); 3014 CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0); 3015 /* Set Rx return descriptor counter. */ 3016 CSR_WRITE_4(sc, ALC_RRD_RING_CNT, 3017 (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK); 3018 paddr = sc->alc_rdata.alc_cmb_paddr; 3019 CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr)); 3020 paddr = sc->alc_rdata.alc_smb_paddr; 3021 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 3022 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr)); 3023 3024 /* Tell hardware that we're ready to load DMA blocks. */ 3025 CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD); 3026 3027 /* Configure interrupt moderation timer. */ 3028 reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT; 3029 reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT; 3030 CSR_WRITE_4(sc, ALC_IM_TIMER, reg); 3031 reg = CSR_READ_4(sc, ALC_MASTER_CFG); 3032 reg &= ~(MASTER_CHIP_REV_MASK | MASTER_CHIP_ID_MASK); 3033 /* 3034 * We don't want to automatic interrupt clear as task queue 3035 * for the interrupt should know interrupt status. 3036 */ 3037 reg &= ~MASTER_INTR_RD_CLR; 3038 reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB); 3039 if (ALC_USECS(sc->alc_int_rx_mod) != 0) 3040 reg |= MASTER_IM_RX_TIMER_ENB; 3041 if (ALC_USECS(sc->alc_int_tx_mod) != 0) 3042 reg |= MASTER_IM_TX_TIMER_ENB; 3043 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 3044 /* 3045 * Disable interrupt re-trigger timer. We don't want automatic 3046 * re-triggering of un-ACKed interrupts. 3047 */ 3048 CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0)); 3049 /* Configure CMB. */ 3050 CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4); 3051 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) 3052 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000)); 3053 else 3054 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0)); 3055 /* 3056 * Hardware can be configured to issue SMB interrupt based 3057 * on programmed interval. Since there is a callout that is 3058 * invoked for every hz in driver we use that instead of 3059 * relying on periodic SMB interrupt. 3060 */ 3061 CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0)); 3062 /* Clear MAC statistics. */ 3063 alc_stats_clear(sc); 3064 3065 /* 3066 * Always use maximum frame size that controller can support. 3067 * Otherwise received frames that has larger frame length 3068 * than alc(4) MTU would be silently dropped in hardware. This 3069 * would make path-MTU discovery hard as sender wouldn't get 3070 * any responses from receiver. alc(4) supports 3071 * multi-fragmented frames on Rx path so it has no issue on 3072 * assembling fragmented frames. Using maximum frame size also 3073 * removes the need to reinitialize hardware when interface 3074 * MTU configuration was changed. 
3075 * 3076 * Be conservative in what you do, be liberal in what you 3077 * accept from others - RFC 793. 3078 */ 3079 CSR_WRITE_4(sc, ALC_FRAME_SIZE, ALC_JUMBO_FRAMELEN); 3080 3081 /* Disable header split(?) */ 3082 CSR_WRITE_4(sc, ALC_HDS_CFG, 0); 3083 3084 /* Configure IPG/IFG parameters. */ 3085 CSR_WRITE_4(sc, ALC_IPG_IFG_CFG, 3086 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) | 3087 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) | 3088 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) | 3089 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK)); 3090 /* Set parameters for half-duplex media. */ 3091 CSR_WRITE_4(sc, ALC_HDPX_CFG, 3092 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) & 3093 HDPX_CFG_LCOL_MASK) | 3094 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) & 3095 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN | 3096 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) & 3097 HDPX_CFG_ABEBT_MASK) | 3098 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) & 3099 HDPX_CFG_JAMIPG_MASK)); 3100 /* 3101 * Set TSO/checksum offload threshold. For frames that is 3102 * larger than this threshold, hardware wouldn't do 3103 * TSO/checksum offloading. 3104 */ 3105 CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH, 3106 (ALC_JUMBO_FRAMELEN >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) & 3107 TSO_OFFLOAD_THRESH_MASK); 3108 /* Configure TxQ. */ 3109 reg = (alc_dma_burst[sc->alc_dma_rd_burst] << 3110 TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK; 3111 reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) & 3112 TXQ_CFG_TD_BURST_MASK; 3113 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE); 3114 3115 /* Configure Rx free descriptor pre-fetching. */ 3116 CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH, 3117 ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) & 3118 RX_RD_FREE_THRESH_HI_MASK) | 3119 ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) & 3120 RX_RD_FREE_THRESH_LO_MASK)); 3121 3122 /* 3123 * Configure flow control parameters. 3124 * XON : 80% of Rx FIFO 3125 * XOFF : 30% of Rx FIFO 3126 */ 3127 reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN); 3128 rxf_hi = (reg * 8) / 10; 3129 rxf_lo = (reg * 3)/ 10; 3130 CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH, 3131 ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) & 3132 RX_FIFO_PAUSE_THRESH_LO_MASK) | 3133 ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) & 3134 RX_FIFO_PAUSE_THRESH_HI_MASK)); 3135 3136 /* Disable RSS until I understand L1C/L2C's RSS logic. */ 3137 CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0); 3138 CSR_WRITE_4(sc, ALC_RSS_CPU, 0); 3139 3140 /* Configure RxQ. */ 3141 reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) & 3142 RXQ_CFG_RD_BURST_MASK; 3143 reg |= RXQ_CFG_RSS_MODE_DIS; 3144 if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0) 3145 reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M; 3146 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 3147 3148 /* Configure Rx DMAW request thresold. */ 3149 CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 3150 ((RD_DMA_CFG_THRESH_DEFAULT << RD_DMA_CFG_THRESH_SHIFT) & 3151 RD_DMA_CFG_THRESH_MASK) | 3152 ((ALC_RD_DMA_CFG_USECS(0) << RD_DMA_CFG_TIMER_SHIFT) & 3153 RD_DMA_CFG_TIMER_MASK)); 3154 /* Configure DMA parameters. 
*/ 3155 reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI; 3156 reg |= sc->alc_rcb; 3157 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) 3158 reg |= DMA_CFG_CMB_ENB; 3159 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) 3160 reg |= DMA_CFG_SMB_ENB; 3161 else 3162 reg |= DMA_CFG_SMB_DIS; 3163 reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) << 3164 DMA_CFG_RD_BURST_SHIFT; 3165 reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) << 3166 DMA_CFG_WR_BURST_SHIFT; 3167 reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) & 3168 DMA_CFG_RD_DELAY_CNT_MASK; 3169 reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) & 3170 DMA_CFG_WR_DELAY_CNT_MASK; 3171 CSR_WRITE_4(sc, ALC_DMA_CFG, reg); 3172 3173 /* 3174 * Configure Tx/Rx MACs. 3175 * - Auto-padding for short frames. 3176 * - Enable CRC generation. 3177 * Actual reconfiguration of MAC for resolved speed/duplex 3178 * is followed after detection of link establishment. 3179 * AR8131/AR8132 always does checksum computation regardless 3180 * of MAC_CFG_RXCSUM_ENB bit. Also the controller is known to 3181 * have bug in protocol field in Rx return structure so 3182 * these controllers can't handle fragmented frames. Disable 3183 * Rx checksum offloading until there is a newer controller 3184 * that has sane implementation. 3185 */ 3186 reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX | 3187 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & 3188 MAC_CFG_PREAMBLE_MASK); 3189 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0) 3190 reg |= MAC_CFG_SPEED_10_100; 3191 else 3192 reg |= MAC_CFG_SPEED_1000; 3193 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3194 3195 /* Set up the receive filter. */ 3196 alc_rxfilter(sc); 3197 alc_rxvlan(sc); 3198 3199 /* Acknowledge all pending interrupts and clear it. */ 3200 CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS); 3201 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 3202 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0); 3203 3204 sc->alc_flags &= ~ALC_FLAG_LINK; 3205 /* Switch to the current media. */ 3206 mii_mediachg(mii); 3207 3208 callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc); 3209 3210 ifp->if_flags |= IFF_RUNNING; 3211 ifp->if_flags &= ~IFF_OACTIVE; 3212 } 3213 3214 static void 3215 alc_stop(struct alc_softc *sc) 3216 { 3217 struct ifnet *ifp; 3218 struct alc_txdesc *txd; 3219 struct alc_rxdesc *rxd; 3220 uint32_t reg; 3221 int i; 3222 3223 ALC_LOCK_ASSERT(sc); 3224 /* 3225 * Mark the interface down and cancel the watchdog timer. 3226 */ 3227 ifp = sc->alc_ifp; 3228 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3229 sc->alc_flags &= ~ALC_FLAG_LINK; 3230 callout_stop(&sc->alc_tick_ch); 3231 sc->alc_watchdog_timer = 0; 3232 alc_stats_update(sc); 3233 /* Disable interrupts. */ 3234 CSR_WRITE_4(sc, ALC_INTR_MASK, 0); 3235 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 3236 alc_stop_queue(sc); 3237 /* Disable DMA. */ 3238 reg = CSR_READ_4(sc, ALC_DMA_CFG); 3239 reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB); 3240 reg |= DMA_CFG_SMB_DIS; 3241 CSR_WRITE_4(sc, ALC_DMA_CFG, reg); 3242 DELAY(1000); 3243 /* Stop Rx/Tx MACs. */ 3244 alc_stop_mac(sc); 3245 /* Disable interrupts which might be touched in taskq handler. */ 3246 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 3247 3248 /* Reclaim Rx buffers that have been processed. */ 3249 if (sc->alc_cdata.alc_rxhead != NULL) 3250 m_freem(sc->alc_cdata.alc_rxhead); 3251 ALC_RXCHAIN_RESET(sc); 3252 /* 3253 * Free Tx/Rx mbufs still in the queues. 
3254 */ 3255 for (i = 0; i < ALC_RX_RING_CNT; i++) { 3256 rxd = &sc->alc_cdata.alc_rxdesc[i]; 3257 if (rxd->rx_m != NULL) { 3258 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, 3259 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3260 bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, 3261 rxd->rx_dmamap); 3262 m_freem(rxd->rx_m); 3263 rxd->rx_m = NULL; 3264 } 3265 } 3266 for (i = 0; i < ALC_TX_RING_CNT; i++) { 3267 txd = &sc->alc_cdata.alc_txdesc[i]; 3268 if (txd->tx_m != NULL) { 3269 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, 3270 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 3271 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, 3272 txd->tx_dmamap); 3273 m_freem(txd->tx_m); 3274 txd->tx_m = NULL; 3275 } 3276 } 3277 } 3278 3279 static void 3280 alc_stop_mac(struct alc_softc *sc) 3281 { 3282 uint32_t reg; 3283 int i; 3284 3285 ALC_LOCK_ASSERT(sc); 3286 3287 /* Disable Rx/Tx MAC. */ 3288 reg = CSR_READ_4(sc, ALC_MAC_CFG); 3289 if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) { 3290 reg &= ~MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; 3291 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3292 } 3293 for (i = ALC_TIMEOUT; i > 0; i--) { 3294 reg = CSR_READ_4(sc, ALC_IDLE_STATUS); 3295 if (reg == 0) 3296 break; 3297 DELAY(10); 3298 } 3299 if (i == 0) 3300 device_printf(sc->alc_dev, 3301 "could not disable Rx/Tx MAC(0x%08x)!\n", reg); 3302 } 3303 3304 static void 3305 alc_start_queue(struct alc_softc *sc) 3306 { 3307 uint32_t qcfg[] = { 3308 0, 3309 RXQ_CFG_QUEUE0_ENB, 3310 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB, 3311 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB, 3312 RXQ_CFG_ENB 3313 }; 3314 uint32_t cfg; 3315 3316 ALC_LOCK_ASSERT(sc); 3317 3318 /* Enable RxQ. */ 3319 cfg = CSR_READ_4(sc, ALC_RXQ_CFG); 3320 cfg &= ~RXQ_CFG_ENB; 3321 cfg |= qcfg[1]; 3322 CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg); 3323 /* Enable TxQ. */ 3324 cfg = CSR_READ_4(sc, ALC_TXQ_CFG); 3325 cfg |= TXQ_CFG_ENB; 3326 CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg); 3327 } 3328 3329 static void 3330 alc_stop_queue(struct alc_softc *sc) 3331 { 3332 uint32_t reg; 3333 int i; 3334 3335 ALC_LOCK_ASSERT(sc); 3336 3337 /* Disable RxQ. */ 3338 reg = CSR_READ_4(sc, ALC_RXQ_CFG); 3339 if ((reg & RXQ_CFG_ENB) != 0) { 3340 reg &= ~RXQ_CFG_ENB; 3341 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 3342 } 3343 /* Disable TxQ. 
*/ 3344 reg = CSR_READ_4(sc, ALC_TXQ_CFG); 3345 if ((reg & TXQ_CFG_ENB) == 0) { 3346 reg &= ~TXQ_CFG_ENB; 3347 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg); 3348 } 3349 for (i = ALC_TIMEOUT; i > 0; i--) { 3350 reg = CSR_READ_4(sc, ALC_IDLE_STATUS); 3351 if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0) 3352 break; 3353 DELAY(10); 3354 } 3355 if (i == 0) 3356 device_printf(sc->alc_dev, 3357 "could not disable RxQ/TxQ (0x%08x)!\n", reg); 3358 } 3359 3360 static void 3361 alc_init_tx_ring(struct alc_softc *sc) 3362 { 3363 struct alc_ring_data *rd; 3364 struct alc_txdesc *txd; 3365 int i; 3366 3367 ALC_LOCK_ASSERT(sc); 3368 3369 sc->alc_cdata.alc_tx_prod = 0; 3370 sc->alc_cdata.alc_tx_cons = 0; 3371 sc->alc_cdata.alc_tx_cnt = 0; 3372 3373 rd = &sc->alc_rdata; 3374 bzero(rd->alc_tx_ring, ALC_TX_RING_SZ); 3375 for (i = 0; i < ALC_TX_RING_CNT; i++) { 3376 txd = &sc->alc_cdata.alc_txdesc[i]; 3377 txd->tx_m = NULL; 3378 } 3379 3380 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, 3381 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE); 3382 } 3383 3384 static int 3385 alc_init_rx_ring(struct alc_softc *sc) 3386 { 3387 struct alc_ring_data *rd; 3388 struct alc_rxdesc *rxd; 3389 int i; 3390 3391 ALC_LOCK_ASSERT(sc); 3392 3393 sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1; 3394 sc->alc_morework = 0; 3395 rd = &sc->alc_rdata; 3396 bzero(rd->alc_rx_ring, ALC_RX_RING_SZ); 3397 for (i = 0; i < ALC_RX_RING_CNT; i++) { 3398 rxd = &sc->alc_cdata.alc_rxdesc[i]; 3399 rxd->rx_m = NULL; 3400 rxd->rx_desc = &rd->alc_rx_ring[i]; 3401 if (alc_newbuf(sc, rxd) != 0) 3402 return (ENOBUFS); 3403 } 3404 3405 /* 3406 * Since controller does not update Rx descriptors, driver 3407 * does have to read Rx descriptors back so BUS_DMASYNC_PREWRITE 3408 * is enough to ensure coherence. 3409 */ 3410 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag, 3411 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE); 3412 /* Let controller know availability of new Rx buffers. 
*/ 3413 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons); 3414 3415 return (0); 3416 } 3417 3418 static void 3419 alc_init_rr_ring(struct alc_softc *sc) 3420 { 3421 struct alc_ring_data *rd; 3422 3423 ALC_LOCK_ASSERT(sc); 3424 3425 sc->alc_cdata.alc_rr_cons = 0; 3426 ALC_RXCHAIN_RESET(sc); 3427 3428 rd = &sc->alc_rdata; 3429 bzero(rd->alc_rr_ring, ALC_RR_RING_SZ); 3430 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag, 3431 sc->alc_cdata.alc_rr_ring_map, 3432 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3433 } 3434 3435 static void 3436 alc_init_cmb(struct alc_softc *sc) 3437 { 3438 struct alc_ring_data *rd; 3439 3440 ALC_LOCK_ASSERT(sc); 3441 3442 rd = &sc->alc_rdata; 3443 bzero(rd->alc_cmb, ALC_CMB_SZ); 3444 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map, 3445 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3446 } 3447 3448 static void 3449 alc_init_smb(struct alc_softc *sc) 3450 { 3451 struct alc_ring_data *rd; 3452 3453 ALC_LOCK_ASSERT(sc); 3454 3455 rd = &sc->alc_rdata; 3456 bzero(rd->alc_smb, ALC_SMB_SZ); 3457 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map, 3458 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3459 } 3460 3461 static void 3462 alc_rxvlan(struct alc_softc *sc) 3463 { 3464 struct ifnet *ifp; 3465 uint32_t reg; 3466 3467 ALC_LOCK_ASSERT(sc); 3468 3469 ifp = sc->alc_ifp; 3470 reg = CSR_READ_4(sc, ALC_MAC_CFG); 3471 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3472 reg |= MAC_CFG_VLAN_TAG_STRIP; 3473 else 3474 reg &= ~MAC_CFG_VLAN_TAG_STRIP; 3475 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3476 } 3477 3478 static void 3479 alc_rxfilter(struct alc_softc *sc) 3480 { 3481 struct ifnet *ifp; 3482 struct ifmultiaddr *ifma; 3483 uint32_t crc; 3484 uint32_t mchash[2]; 3485 uint32_t rxcfg; 3486 3487 ALC_LOCK_ASSERT(sc); 3488 3489 ifp = sc->alc_ifp; 3490 3491 bzero(mchash, sizeof(mchash)); 3492 rxcfg = CSR_READ_4(sc, ALC_MAC_CFG); 3493 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC); 3494 if ((ifp->if_flags & IFF_BROADCAST) != 0) 3495 rxcfg |= MAC_CFG_BCAST; 3496 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 3497 if ((ifp->if_flags & IFF_PROMISC) != 0) 3498 rxcfg |= MAC_CFG_PROMISC; 3499 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 3500 rxcfg |= MAC_CFG_ALLMULTI; 3501 mchash[0] = 0xFFFFFFFF; 3502 mchash[1] = 0xFFFFFFFF; 3503 goto chipit; 3504 } 3505 3506 #if 0 3507 /* XXX */ 3508 if_maddr_rlock(ifp); 3509 #endif 3510 TAILQ_FOREACH(ifma, &sc->alc_ifp->if_multiaddrs, ifma_link) { 3511 if (ifma->ifma_addr->sa_family != AF_LINK) 3512 continue; 3513 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 3514 ifma->ifma_addr), ETHER_ADDR_LEN); 3515 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 3516 } 3517 #if 0 3518 /* XXX */ 3519 if_maddr_runlock(ifp); 3520 #endif 3521 3522 chipit: 3523 CSR_WRITE_4(sc, ALC_MAR0, mchash[0]); 3524 CSR_WRITE_4(sc, ALC_MAR1, mchash[1]); 3525 CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg); 3526 } 3527 3528 static int 3529 sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS) 3530 { 3531 return (sysctl_int_range(oidp, arg1, arg2, req, 3532 ALC_PROC_MIN, ALC_PROC_MAX)); 3533 } 3534 3535 static int 3536 sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS) 3537 { 3538 3539 return (sysctl_int_range(oidp, arg1, arg2, req, 3540 ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX)); 3541 } 3542
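/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the two range-checking handlers above are intended to back read/write
 * sysctl nodes created elsewhere in the driver (alc_sysctl_node()).  A
 * minimal registration, assuming a sysctl context and child list are
 * already at hand, could look like:
 *
 *	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "process_limit",
 *	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0,
 *	    sysctl_hw_alc_proc_limit, "I",
 *	    "max number of Rx events to process");
 *
 * Values written from userland are then validated against the
 * [ALC_PROC_MIN, ALC_PROC_MAX] range by sysctl_int_range().
 */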