1 /*- 2 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * $FreeBSD: src/sys/dev/alc/if_alc.c,v 1.6 2009/09/29 23:03:16 yongari Exp $ 28 */ 29 30 /* Driver for Atheros AR8131/AR8132 PCIe Ethernet. */ 31 32 #include <sys/param.h> 33 #include <sys/bitops.h> 34 #include <sys/endian.h> 35 #include <sys/kernel.h> 36 #include <sys/bus.h> 37 #include <sys/interrupt.h> 38 #include <sys/malloc.h> 39 #include <sys/proc.h> 40 #include <sys/rman.h> 41 #include <sys/serialize.h> 42 #include <sys/socket.h> 43 #include <sys/sockio.h> 44 #include <sys/sysctl.h> 45 46 #include <net/ethernet.h> 47 #include <net/if.h> 48 #include <net/bpf.h> 49 #include <net/if_arp.h> 50 #include <net/if_dl.h> 51 #include <net/if_media.h> 52 #include <net/ifq_var.h> 53 #include <net/vlan/if_vlan_var.h> 54 #include <net/vlan/if_vlan_ether.h> 55 56 #include <netinet/tcp.h> 57 58 #include <dev/netif/mii_layer/mii.h> 59 #include <dev/netif/mii_layer/miivar.h> 60 61 #include <bus/pci/pcireg.h> 62 #include <bus/pci/pcivar.h> 63 #include "pcidevs.h" 64 65 #include <dev/netif/alc/if_alcreg.h> 66 #include <dev/netif/alc/if_alcvar.h> 67 68 /* "device miibus" required. See GENERIC if you get errors here. */ 69 #include "miibus_if.h" 70 71 #undef ALC_USE_CUSTOM_CSUM 72 #ifdef ALC_USE_CUSTOM_CSUM 73 #define ALC_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 74 #else 75 #define ALC_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 76 #endif 77 78 /* Tunables. */ 79 static int alc_msi_enable = 1; 80 TUNABLE_INT("hw.alc.msi.enable", &alc_msi_enable); 81 82 /* 83 * Devices supported by this driver. 
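 *
 * (A usage note on the tunable above, assuming the stock loader tunable
 * mechanism: hw.alc.msi.enable can be preset from /boot/loader.conf,
 * e.g. hw.alc.msi.enable="0" to fall back to a legacy INTx interrupt
 * instead of MSI.)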
84 */ 85 86 static struct alc_ident alc_ident_table[] = { 87 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8131, 9 * 1024, 88 "Atheros AR8131 PCIe Gigabit Ethernet" }, 89 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8132, 9 * 1024, 90 "Atheros AR8132 PCIe Fast Ethernet" }, 91 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151, 6 * 1024, 92 "Atheros AR8151 v1.0 PCIe Gigabit Ethernet" }, 93 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151_V2, 6 * 1024, 94 "Atheros AR8151 v2.0 PCIe Gigabit Ethernet" }, 95 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B, 6 * 1024, 96 "Atheros AR8152 v1.1 PCIe Fast Ethernet" }, 97 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B2, 6 * 1024, 98 "Atheros AR8152 v2.0 PCIe Fast Ethernet" }, 99 { 0, 0, 0, NULL } 100 }; 101 102 static int alc_attach(device_t); 103 static int alc_probe(device_t); 104 static int alc_detach(device_t); 105 static int alc_shutdown(device_t); 106 static int alc_suspend(device_t); 107 static int alc_resume(device_t); 108 static int alc_miibus_readreg(device_t, int, int); 109 static void alc_miibus_statchg(device_t); 110 static int alc_miibus_writereg(device_t, int, int, int); 111 112 static void alc_init(void *); 113 static void alc_start(struct ifnet *, struct ifaltq_subque *); 114 static void alc_watchdog(struct alc_softc *); 115 static int alc_mediachange(struct ifnet *); 116 static void alc_mediastatus(struct ifnet *, struct ifmediareq *); 117 static int alc_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 118 119 static void alc_aspm(struct alc_softc *, int); 120 #ifdef foo 121 static int alc_check_boundary(struct alc_softc *); 122 #endif 123 static void alc_disable_l0s_l1(struct alc_softc *); 124 static int alc_dma_alloc(struct alc_softc *); 125 static void alc_dma_free(struct alc_softc *); 126 static void alc_dmamap_cb(void *, bus_dma_segment_t *, int, int); 127 static int alc_encap(struct alc_softc *, struct mbuf **); 128 static struct alc_ident *alc_find_ident(device_t); 129 static void alc_get_macaddr(struct alc_softc *); 130 static void alc_init_cmb(struct alc_softc *); 131 static void alc_init_rr_ring(struct alc_softc *); 132 static int alc_init_rx_ring(struct alc_softc *); 133 static void alc_init_smb(struct alc_softc *); 134 static void alc_init_tx_ring(struct alc_softc *); 135 static void alc_intr(void *); 136 static void alc_mac_config(struct alc_softc *); 137 static int alc_newbuf(struct alc_softc *, struct alc_rxdesc *, boolean_t); 138 static void alc_phy_down(struct alc_softc *); 139 static void alc_phy_reset(struct alc_softc *); 140 static void alc_reset(struct alc_softc *); 141 static void alc_rxeof(struct alc_softc *, struct rx_rdesc *); 142 static int alc_rxintr(struct alc_softc *); 143 static void alc_rxfilter(struct alc_softc *); 144 static void alc_rxvlan(struct alc_softc *); 145 #if 0 146 static void alc_setlinkspeed(struct alc_softc *); 147 /* XXX: WOL */ 148 static void alc_setwol(struct alc_softc *); 149 #endif 150 static void alc_start_queue(struct alc_softc *); 151 static void alc_stats_clear(struct alc_softc *); 152 static void alc_stats_update(struct alc_softc *); 153 static void alc_stop(struct alc_softc *); 154 static void alc_stop_mac(struct alc_softc *); 155 static void alc_stop_queue(struct alc_softc *); 156 static void alc_sysctl_node(struct alc_softc *); 157 static void alc_tick(void *); 158 static void alc_txeof(struct alc_softc *); 159 static int sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS); 160 static int sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS); 161 162 static device_method_t alc_methods[] = { 163 /* Device 
interface. */ 164 DEVMETHOD(device_probe, alc_probe), 165 DEVMETHOD(device_attach, alc_attach), 166 DEVMETHOD(device_detach, alc_detach), 167 DEVMETHOD(device_shutdown, alc_shutdown), 168 DEVMETHOD(device_suspend, alc_suspend), 169 DEVMETHOD(device_resume, alc_resume), 170 171 /* MII interface. */ 172 DEVMETHOD(miibus_readreg, alc_miibus_readreg), 173 DEVMETHOD(miibus_writereg, alc_miibus_writereg), 174 DEVMETHOD(miibus_statchg, alc_miibus_statchg), 175 176 { NULL, NULL } 177 }; 178 179 static DEFINE_CLASS_0(alc, alc_driver, alc_methods, sizeof(struct alc_softc)); 180 static devclass_t alc_devclass; 181 182 DECLARE_DUMMY_MODULE(if_alc); 183 DRIVER_MODULE(if_alc, pci, alc_driver, alc_devclass, NULL, NULL); 184 DRIVER_MODULE(miibus, alc, miibus_driver, miibus_devclass, NULL, NULL); 185 186 static const uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 }; 187 188 static int 189 alc_miibus_readreg(device_t dev, int phy, int reg) 190 { 191 struct alc_softc *sc; 192 uint32_t v; 193 int i; 194 195 sc = device_get_softc(dev); 196 197 if (phy != sc->alc_phyaddr) 198 return (0); 199 200 /* 201 * For AR8132 fast ethernet controller, do not report 1000baseT 202 * capability to mii(4). Even though AR8132 uses the same 203 * model/revision number of F1 gigabit PHY, the PHY has no 204 * ability to establish 1000baseT link. 205 */ 206 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 && 207 reg == MII_EXTSR) 208 return (0); 209 210 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | 211 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 212 for (i = ALC_PHY_TIMEOUT; i > 0; i--) { 213 DELAY(5); 214 v = CSR_READ_4(sc, ALC_MDIO); 215 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 216 break; 217 } 218 219 if (i == 0) { 220 device_printf(sc->alc_dev, "phy read timeout : %d\n", reg); 221 return (0); 222 } 223 224 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); 225 } 226 227 static int 228 alc_miibus_writereg(device_t dev, int phy, int reg, int val) 229 { 230 struct alc_softc *sc; 231 uint32_t v; 232 int i; 233 234 sc = device_get_softc(dev); 235 236 if (phy != sc->alc_phyaddr) 237 return (0); 238 239 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | 240 (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | 241 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 242 for (i = ALC_PHY_TIMEOUT; i > 0; i--) { 243 DELAY(5); 244 v = CSR_READ_4(sc, ALC_MDIO); 245 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 246 break; 247 } 248 249 if (i == 0) 250 device_printf(sc->alc_dev, "phy write timeout : %d\n", reg); 251 252 return (0); 253 } 254 255 static void 256 alc_miibus_statchg(device_t dev) 257 { 258 struct alc_softc *sc; 259 struct mii_data *mii; 260 struct ifnet *ifp; 261 uint32_t reg; 262 263 sc = device_get_softc(dev); 264 265 mii = device_get_softc(sc->alc_miibus); 266 ifp = sc->alc_ifp; 267 if (mii == NULL || ifp == NULL || 268 (ifp->if_flags & IFF_RUNNING) == 0) 269 return; 270 271 sc->alc_flags &= ~ALC_FLAG_LINK; 272 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 273 (IFM_ACTIVE | IFM_AVALID)) { 274 switch (IFM_SUBTYPE(mii->mii_media_active)) { 275 case IFM_10_T: 276 case IFM_100_TX: 277 sc->alc_flags |= ALC_FLAG_LINK; 278 break; 279 case IFM_1000_T: 280 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) 281 sc->alc_flags |= ALC_FLAG_LINK; 282 break; 283 default: 284 break; 285 } 286 } 287 alc_stop_queue(sc); 288 /* Stop Rx/Tx MACs. */ 289 alc_stop_mac(sc); 290 291 /* Program MACs with resolved speed/duplex/flow-control. 
*/ 292 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { 293 alc_start_queue(sc); 294 alc_mac_config(sc); 295 /* Re-enable Tx/Rx MACs. */ 296 reg = CSR_READ_4(sc, ALC_MAC_CFG); 297 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; 298 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 299 alc_aspm(sc, IFM_SUBTYPE(mii->mii_media_active)); 300 } 301 } 302 303 static void 304 alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 305 { 306 struct alc_softc *sc; 307 struct mii_data *mii; 308 309 sc = ifp->if_softc; 310 if ((ifp->if_flags & IFF_UP) == 0) 311 return; 312 mii = device_get_softc(sc->alc_miibus); 313 314 mii_pollstat(mii); 315 ifmr->ifm_status = mii->mii_media_status; 316 ifmr->ifm_active = mii->mii_media_active; 317 } 318 319 static int 320 alc_mediachange(struct ifnet *ifp) 321 { 322 struct alc_softc *sc; 323 struct mii_data *mii; 324 struct mii_softc *miisc; 325 int error; 326 327 sc = ifp->if_softc; 328 mii = device_get_softc(sc->alc_miibus); 329 if (mii->mii_instance != 0) { 330 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 331 mii_phy_reset(miisc); 332 } 333 error = mii_mediachg(mii); 334 335 return (error); 336 } 337 338 static struct alc_ident * 339 alc_find_ident(device_t dev) 340 { 341 struct alc_ident *ident; 342 uint16_t vendor, devid; 343 344 vendor = pci_get_vendor(dev); 345 devid = pci_get_device(dev); 346 for (ident = alc_ident_table; ident->name != NULL; ident++) { 347 if (vendor == ident->vendorid && devid == ident->deviceid) 348 return (ident); 349 } 350 return (NULL); 351 } 352 353 static int 354 alc_probe(device_t dev) 355 { 356 struct alc_ident *ident; 357 358 ident = alc_find_ident(dev); 359 if (ident != NULL) { 360 device_set_desc(dev, ident->name); 361 return (BUS_PROBE_DEFAULT); 362 } 363 return (ENXIO); 364 } 365 366 static void 367 alc_get_macaddr(struct alc_softc *sc) 368 { 369 uint32_t ea[2], opt; 370 uint16_t val; 371 int eeprom, i; 372 373 eeprom = 0; 374 opt = CSR_READ_4(sc, ALC_OPT_CFG); 375 if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 && 376 (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) { 377 /* 378 * EEPROM found, let TWSI reload EEPROM configuration. 379 * This will set ethernet address of controller. 
380 */ 381 eeprom++; 382 switch (sc->alc_ident->deviceid) { 383 case DEVICEID_ATHEROS_AR8131: 384 case DEVICEID_ATHEROS_AR8132: 385 if ((opt & OPT_CFG_CLK_ENB) == 0) { 386 opt |= OPT_CFG_CLK_ENB; 387 CSR_WRITE_4(sc, ALC_OPT_CFG, opt); 388 CSR_READ_4(sc, ALC_OPT_CFG); 389 DELAY(1000); 390 } 391 break; 392 case DEVICEID_ATHEROS_AR8151: 393 case DEVICEID_ATHEROS_AR8151_V2: 394 case DEVICEID_ATHEROS_AR8152_B: 395 case DEVICEID_ATHEROS_AR8152_B2: 396 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 397 ALC_MII_DBG_ADDR, 0x00); 398 val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, 399 ALC_MII_DBG_DATA); 400 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 401 ALC_MII_DBG_DATA, val & 0xFF7F); 402 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 403 ALC_MII_DBG_ADDR, 0x3B); 404 val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, 405 ALC_MII_DBG_DATA); 406 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 407 ALC_MII_DBG_DATA, val | 0x0008); 408 DELAY(20); 409 break; 410 } 411 412 CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG, 413 CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB); 414 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 415 CSR_READ_4(sc, ALC_WOL_CFG); 416 417 CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) | 418 TWSI_CFG_SW_LD_START); 419 420 for (i = 100; i > 0; i--) { 421 DELAY(1000); 422 if ((CSR_READ_4(sc, ALC_TWSI_CFG) & 423 TWSI_CFG_SW_LD_START) == 0) 424 break; 425 } 426 if (i == 0) 427 device_printf(sc->alc_dev, 428 "reloading EEPROM timeout!\n"); 429 } else { 430 if (bootverbose) 431 device_printf(sc->alc_dev, "EEPROM not found!\n"); 432 } 433 434 if (eeprom != 0) { 435 switch (sc->alc_ident->deviceid) { 436 case DEVICEID_ATHEROS_AR8131: 437 case DEVICEID_ATHEROS_AR8132: 438 if ((opt & OPT_CFG_CLK_ENB) != 0) { 439 opt &= ~OPT_CFG_CLK_ENB; 440 CSR_WRITE_4(sc, ALC_OPT_CFG, opt); 441 CSR_READ_4(sc, ALC_OPT_CFG); 442 DELAY(1000); 443 } 444 break; 445 case DEVICEID_ATHEROS_AR8151: 446 case DEVICEID_ATHEROS_AR8151_V2: 447 case DEVICEID_ATHEROS_AR8152_B: 448 case DEVICEID_ATHEROS_AR8152_B2: 449 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 450 ALC_MII_DBG_ADDR, 0x00); 451 val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, 452 ALC_MII_DBG_DATA); 453 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 454 ALC_MII_DBG_DATA, val | 0x0080); 455 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 456 ALC_MII_DBG_ADDR, 0x3B); 457 val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, 458 ALC_MII_DBG_DATA); 459 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 460 ALC_MII_DBG_DATA, val & 0xFFF7); 461 DELAY(20); 462 break; 463 } 464 } 465 466 ea[0] = CSR_READ_4(sc, ALC_PAR0); 467 ea[1] = CSR_READ_4(sc, ALC_PAR1); 468 sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF; 469 sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF; 470 sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF; 471 sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF; 472 sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF; 473 sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF; 474 } 475 476 static void 477 alc_disable_l0s_l1(struct alc_softc *sc) 478 { 479 uint32_t pmcfg; 480 481 /* Another magic from vendor. */ 482 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 483 pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 | 484 PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK | 485 PM_CFG_SERDES_PD_EX_L1); 486 pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB | 487 PM_CFG_SERDES_L1_ENB; 488 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 489 } 490 491 static void 492 alc_phy_reset(struct alc_softc *sc) 493 { 494 uint16_t data; 495 496 /* Reset magic from Linux. 
*/ 497 CSR_WRITE_2(sc, ALC_GPHY_CFG, 498 GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET); 499 CSR_READ_2(sc, ALC_GPHY_CFG); 500 DELAY(10 * 1000); 501 502 CSR_WRITE_2(sc, ALC_GPHY_CFG, 503 GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | 504 GPHY_CFG_SEL_ANA_RESET); 505 CSR_READ_2(sc, ALC_GPHY_CFG); 506 DELAY(10 * 1000); 507 508 /* DSP fixup, Vendor magic. */ 509 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) { 510 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 511 ALC_MII_DBG_ADDR, 0x000A); 512 data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, 513 ALC_MII_DBG_DATA); 514 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 515 ALC_MII_DBG_DATA, data & 0xDFFF); 516 } 517 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 || 518 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || 519 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B || 520 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) { 521 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 522 ALC_MII_DBG_ADDR, 0x003B); 523 data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, 524 ALC_MII_DBG_DATA); 525 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 526 ALC_MII_DBG_DATA, data & 0xFFF7); 527 DELAY(20 * 1000); 528 } 529 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151) { 530 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 531 ALC_MII_DBG_ADDR, 0x0029); 532 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 533 ALC_MII_DBG_DATA, 0x929D); 534 } 535 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 || 536 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132 || 537 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || 538 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) { 539 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 540 ALC_MII_DBG_ADDR, 0x0029); 541 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 542 ALC_MII_DBG_DATA, 0xB6DD); 543 } 544 545 /* Load DSP codes, vendor magic. 
 */
	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
	    ANA_SERDES_EN_LCKDT;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
	    ANA_LONG_CABLE_TH_100_MASK) |
	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
	    ANA_SHORT_CABLE_TH_100_MASK) |
	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
	    ANA_OEN_125M;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);
	DELAY(1000);
}

static void
alc_phy_down(struct alc_softc *sc)
{
	switch (sc->alc_ident->deviceid) {
	case DEVICEID_ATHEROS_AR8151:
	case DEVICEID_ATHEROS_AR8151_V2:
		/*
		 * GPHY power down caused more problems on AR8151 v2.0.
		 * When the driver is reloaded after a GPHY power down,
		 * accesses to PHY/MAC registers hung the system and only
		 * a cold boot recovered from it.  I'm not sure whether
		 * AR8151 v1.0 also requires this workaround as I don't
		 * have an AR8151 v1.0 controller at hand.
		 * The only option left is to isolate the PHY and then
		 * power it down, which in turn saves more power while
		 * the driver is unloaded.
		 */
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    MII_BMCR, BMCR_ISO | BMCR_PDOWN);
		break;
	default:
		/*
		 * Force PHY down.
*/ 612 CSR_WRITE_2(sc, ALC_GPHY_CFG, 613 GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | 614 GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ | 615 GPHY_CFG_PWDOWN_HW); 616 DELAY(1000); 617 break; 618 } 619 620 } 621 622 static void 623 alc_aspm(struct alc_softc *sc, int media) 624 { 625 uint32_t pmcfg; 626 uint16_t linkcfg; 627 628 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 629 if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) == 630 (ALC_FLAG_APS | ALC_FLAG_PCIE)) { 631 linkcfg = CSR_READ_2(sc, sc->alc_expcap + 632 PCIR_EXPRESS_LINK_CTL); 633 } else { 634 linkcfg = 0; 635 } 636 637 pmcfg &= ~PM_CFG_SERDES_PD_EX_L1; 638 pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK); 639 pmcfg |= PM_CFG_MAC_ASPM_CHK; 640 pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT); 641 pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB); 642 643 if ((sc->alc_flags & ALC_FLAG_APS) != 0) { 644 /* Disable extended sync except AR8152 B v1.0 */ 645 linkcfg &= ~0x80; 646 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B && 647 sc->alc_rev == ATHEROS_AR8152_B_V10) 648 linkcfg |= 0x80; 649 CSR_WRITE_2(sc, sc->alc_expcap + PCIR_EXPRESS_LINK_CTL, 650 linkcfg); 651 pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB | 652 PM_CFG_HOTRST); 653 pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT << 654 PM_CFG_L1_ENTRY_TIMER_SHIFT); 655 pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK; 656 pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT << 657 PM_CFG_PM_REQ_TIMER_SHIFT); 658 pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV; 659 } 660 661 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { 662 if ((sc->alc_flags & ALC_FLAG_L0S) != 0) 663 pmcfg |= PM_CFG_ASPM_L0S_ENB; 664 if ((sc->alc_flags & ALC_FLAG_L1S) != 0) 665 pmcfg |= PM_CFG_ASPM_L1_ENB; 666 if ((sc->alc_flags & ALC_FLAG_APS) != 0) { 667 if (sc->alc_ident->deviceid == 668 DEVICEID_ATHEROS_AR8152_B) { 669 pmcfg &= ~PM_CFG_ASPM_L0S_ENB; 670 } 671 pmcfg &= ~(PM_CFG_SERDES_L1_ENB | 672 PM_CFG_SERDES_PLL_L1_ENB | 673 PM_CFG_SERDES_BUDS_RX_L1_ENB); 674 pmcfg |= PM_CFG_CLK_SWH_L1; 675 if (media == IFM_100_TX || media == IFM_1000_T) { 676 pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK; 677 switch (sc->alc_ident->deviceid) { 678 case DEVICEID_ATHEROS_AR8152_B: 679 pmcfg |= (7 << 680 PM_CFG_L1_ENTRY_TIMER_SHIFT); 681 break; 682 case DEVICEID_ATHEROS_AR8152_B2: 683 case DEVICEID_ATHEROS_AR8151_V2: 684 pmcfg |= (4 << 685 PM_CFG_L1_ENTRY_TIMER_SHIFT); 686 break; 687 default: 688 pmcfg |= (15 << 689 PM_CFG_L1_ENTRY_TIMER_SHIFT); 690 break; 691 } 692 } 693 } else { 694 pmcfg |= PM_CFG_SERDES_L1_ENB | 695 PM_CFG_SERDES_PLL_L1_ENB | 696 PM_CFG_SERDES_BUDS_RX_L1_ENB; 697 pmcfg &= ~(PM_CFG_CLK_SWH_L1 | 698 PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB); 699 } 700 } else { 701 pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB | 702 PM_CFG_SERDES_PLL_L1_ENB); 703 pmcfg |= PM_CFG_CLK_SWH_L1; 704 if ((sc->alc_flags & ALC_FLAG_L1S) != 0) 705 pmcfg |= PM_CFG_ASPM_L1_ENB; 706 } 707 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 708 } 709 710 static int 711 alc_attach(device_t dev) 712 { 713 struct alc_softc *sc; 714 struct ifnet *ifp; 715 const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" }; 716 uint16_t burst; 717 int base, error, state; 718 uint32_t cap, ctl, val; 719 u_int intr_flags; 720 721 error = 0; 722 sc = device_get_softc(dev); 723 sc->alc_dev = dev; 724 725 callout_init_mp(&sc->alc_tick_ch); 726 sc->alc_ident = alc_find_ident(dev); 727 728 /* Enable bus mastering */ 729 pci_enable_busmaster(dev); 730 731 /* Map the device. 
 */
	sc->alc_res_rid = PCIR_BAR(0);
	sc->alc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->alc_res_rid, RF_ACTIVE);
	if (sc->alc_res == NULL) {
		device_printf(dev, "cannot allocate memory resources.\n");
		error = ENXIO;
		goto fail;
	}
	sc->alc_res_btag = rman_get_bustag(sc->alc_res);
	sc->alc_res_bhand = rman_get_bushandle(sc->alc_res);

	/* Set PHY address. */
	sc->alc_phyaddr = ALC_PHY_ADDR;

	/* Initialize DMA parameters. */
	sc->alc_dma_rd_burst = 0;
	sc->alc_dma_wr_burst = 0;
	sc->alc_rcb = DMA_CFG_RCB_64;
	if (pci_find_extcap(dev, PCIY_EXPRESS, &base) == 0) {
		sc->alc_flags |= ALC_FLAG_PCIE;
		sc->alc_expcap = base;
		burst = CSR_READ_2(sc, base + PCIR_EXPRESS_DEVICE_CTL);
		sc->alc_dma_rd_burst =
		    (burst & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12;
		sc->alc_dma_wr_burst = (burst & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5;
		if (bootverbose) {
			device_printf(dev, "Read request size : %u bytes.\n",
			    alc_dma_burst[sc->alc_dma_rd_burst]);
			device_printf(dev, "TLP payload size : %u bytes.\n",
			    alc_dma_burst[sc->alc_dma_wr_burst]);
		}
		if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024)
			sc->alc_dma_rd_burst = 3;
		if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
			sc->alc_dma_wr_burst = 3;
		/* Clear data link and flow-control protocol error. */
		val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
		val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
		CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
		    CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
		    PCIE_PHYMISC_FORCE_RCV_DET);
		if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10) {
			val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
			val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
			    PCIE_PHYMISC2_SERDES_TH_MASK);
			val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
			val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
			CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
		}

		/* Disable ASPM L0S and L1. */
		cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CAP);
		if ((cap & PCIM_LINK_CAP_ASPM) != 0) {
			ctl = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CTL);
			if ((ctl & 0x08) != 0)
				sc->alc_rcb = DMA_CFG_RCB_128;
			if (bootverbose)
				device_printf(dev, "RCB %u bytes\n",
				    sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
			state = ctl & 0x03;
			if (state & 0x01)
				sc->alc_flags |= ALC_FLAG_L0S;
			if (state & 0x02)
				sc->alc_flags |= ALC_FLAG_L1S;
			if (bootverbose)
				device_printf(sc->alc_dev, "ASPM %s %s\n",
				    aspm_state[state],
				    state == 0 ? "disabled" : "enabled");
			alc_disable_l0s_l1(sc);
		} else {
			if (bootverbose)
				device_printf(sc->alc_dev,
				    "no ASPM support\n");
		}
	}

	/* Reset PHY. */
	alc_phy_reset(sc);

	/* Reset the ethernet controller. */
	alc_reset(sc);

	/*
	 * One odd thing is that the AR8132 uses the same PHY hardware
	 * (F1 gigabit PHY) as the AR8131, so atphy(4) reports that the
	 * AR8132's PHY supports 1000Mbps, but that's not true.  The PHY
	 * used in the AR8132 can't establish a gigabit link even though
	 * it shows the same PHY model/revision number as the AR8131.
	 */
	switch (sc->alc_ident->deviceid) {
	case DEVICEID_ATHEROS_AR8152_B:
	case DEVICEID_ATHEROS_AR8152_B2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	case DEVICEID_ATHEROS_AR8132:
		sc->alc_flags |= ALC_FLAG_FASTETHER;
		break;
	case DEVICEID_ATHEROS_AR8151:
	case DEVICEID_ATHEROS_AR8151_V2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	default:
		break;
	}
	sc->alc_flags |= ALC_FLAG_ASPM_MON | ALC_FLAG_JUMBO;

	/*
	 * It seems that AR813x/AR815x has a silicon bug in the SMB.  In
	 * addition, Atheros said that enabling the SMB wouldn't improve
	 * performance.  However I think it's bad to have to access lots
	 * of registers just to extract MAC statistics.
	 */
	sc->alc_flags |= ALC_FLAG_SMB_BUG;

	/*
	 * Don't use the Tx CMB.  It is known to have a silicon bug.
	 */
	sc->alc_flags |= ALC_FLAG_CMB_BUG;
	sc->alc_rev = pci_get_revid(dev);
	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->alc_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->alc_chip_rev);
	}
	device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n",
	    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
	    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);

	sc->alc_irq_type = pci_alloc_1intr(dev, alc_msi_enable,
	    &sc->alc_irq_rid, &intr_flags);

	/* Allocate IRQ resources. */
	sc->alc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->alc_irq_rid, intr_flags);
	if (sc->alc_irq == NULL) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		error = ENXIO;
		goto fail;
	}

	/* Create device sysctl node. */
	alc_sysctl_node(sc);

	if ((error = alc_dma_alloc(sc)) != 0)
		goto fail;

	/* Load station address. */
	alc_get_macaddr(sc);

	ifp = sc->alc_ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = alc_ioctl;
	ifp->if_start = alc_start;
	ifp->if_init = alc_init;
	ifq_set_maxlen(&ifp->if_snd, ALC_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_TXCSUM;
	ifp->if_hwassist = ALC_CSUM_FEATURES;
#if 0
	/* XXX: WOL */
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
		ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
		sc->alc_flags |= ALC_FLAG_PM;
		sc->alc_pmcap = base;
	}
#endif
	ifp->if_capenable = ifp->if_capabilities;

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * XXX
	 * It seems that enabling Tx checksum offloading makes more
	 * trouble.  Sometimes the controller does not receive any frames
	 * when Tx checksum offloading is enabled.  I'm not sure whether
	 * this is a bug in the Tx checksum offloading logic or whether I
	 * got broken sample boards.  To be safe, don't enable Tx checksum
	 * offloading by default, but give users a chance to toggle it if
	 * they know their controllers work without problems.
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;
	ifp->if_hwassist &= ~ALC_CSUM_FEATURES;

	/*
	 * Set up MII bus.
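	 *
	 * (A usage sketch for the Tx checksum default above, assuming an
	 * interface named alc0: controllers that are known to work can
	 * have the offload re-enabled at runtime with "ifconfig alc0
	 * txcsum" and disabled again with "ifconfig alc0 -txcsum".)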
*/ 925 if ((error = mii_phy_probe(dev, &sc->alc_miibus, alc_mediachange, 926 alc_mediastatus)) != 0) { 927 device_printf(dev, "no PHY found!\n"); 928 goto fail; 929 } 930 931 ether_ifattach(ifp, sc->alc_eaddr, NULL); 932 933 /* Tell the upper layer(s) we support long frames. */ 934 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 935 936 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->alc_irq)); 937 #if 0 938 /* Create local taskq. */ 939 TASK_INIT(&sc->alc_tx_task, 1, alc_tx_task, ifp); 940 sc->alc_tq = taskqueue_create("alc_taskq", M_WAITOK, 941 taskqueue_thread_enqueue, &sc->alc_tq); 942 if (sc->alc_tq == NULL) { 943 device_printf(dev, "could not create taskqueue.\n"); 944 ether_ifdetach(ifp); 945 error = ENXIO; 946 goto fail; 947 } 948 taskqueue_start_threads(&sc->alc_tq, 1, TDPRI_KERN_DAEMON, -1, "%s taskq", 949 device_get_nameunit(sc->alc_dev)); 950 951 if ((sc->alc_flags & ALC_FLAG_MSIX) != 0) 952 msic = ALC_MSIX_MESSAGES; 953 else if ((sc->alc_flags & ALC_FLAG_MSI) != 0) 954 msic = ALC_MSI_MESSAGES; 955 else 956 msic = 1; 957 for (i = 0; i < msic; i++) { 958 error = bus_setup_intr(dev, sc->alc_irq[i], INTR_MPSAFE, 959 alc_intr, sc, 960 &sc->alc_intrhand[i], NULL); 961 if (error != 0) 962 break; 963 } 964 if (error != 0) { 965 device_printf(dev, "could not set up interrupt handler.\n"); 966 taskqueue_free(sc->alc_tq); 967 sc->alc_tq = NULL; 968 ether_ifdetach(ifp); 969 goto fail; 970 } 971 #else 972 error = bus_setup_intr(dev, sc->alc_irq, INTR_MPSAFE, alc_intr, sc, 973 &sc->alc_intrhand, ifp->if_serializer); 974 if (error) { 975 device_printf(dev, "could not set up interrupt handler.\n"); 976 ether_ifdetach(ifp); 977 goto fail; 978 } 979 #endif 980 981 fail: 982 if (error != 0) 983 alc_detach(dev); 984 985 return (error); 986 } 987 988 static int 989 alc_detach(device_t dev) 990 { 991 struct alc_softc *sc = device_get_softc(dev); 992 993 if (device_is_attached(dev)) { 994 struct ifnet *ifp = sc->alc_ifp; 995 996 lwkt_serialize_enter(ifp->if_serializer); 997 alc_stop(sc); 998 bus_teardown_intr(dev, sc->alc_irq, sc->alc_intrhand); 999 lwkt_serialize_exit(ifp->if_serializer); 1000 1001 ether_ifdetach(ifp); 1002 } 1003 1004 if (sc->alc_miibus != NULL) 1005 device_delete_child(dev, sc->alc_miibus); 1006 bus_generic_detach(dev); 1007 1008 if (sc->alc_res != NULL) 1009 alc_phy_down(sc); 1010 1011 if (sc->alc_irq != NULL) { 1012 bus_release_resource(dev, SYS_RES_IRQ, sc->alc_irq_rid, 1013 sc->alc_irq); 1014 } 1015 if (sc->alc_irq_type == PCI_INTR_TYPE_MSI) 1016 pci_release_msi(dev); 1017 1018 if (sc->alc_res != NULL) { 1019 bus_release_resource(dev, SYS_RES_MEMORY, sc->alc_res_rid, 1020 sc->alc_res); 1021 } 1022 1023 alc_dma_free(sc); 1024 1025 return (0); 1026 } 1027 1028 #define ALC_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 1029 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 1030 #define ALC_SYSCTL_STAT_ADD64(c, h, n, p, d) \ 1031 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 1032 1033 static void 1034 alc_sysctl_node(struct alc_softc *sc) 1035 { 1036 struct sysctl_ctx_list *ctx; 1037 struct sysctl_oid *tree; 1038 struct sysctl_oid_list *child, *parent; 1039 struct alc_hw_stats *stats; 1040 int error; 1041 1042 stats = &sc->alc_stats; 1043 ctx = device_get_sysctl_ctx(sc->alc_dev); 1044 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->alc_dev)); 1045 1046 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod", 1047 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_rx_mod, 0, 1048 sysctl_hw_alc_int_mod, "I", "alc Rx interrupt moderation"); 1049 SYSCTL_ADD_PROC(ctx, child, 
OID_AUTO, "int_tx_mod", 1050 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_tx_mod, 0, 1051 sysctl_hw_alc_int_mod, "I", "alc Tx interrupt moderation"); 1052 /* Pull in device tunables. */ 1053 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT; 1054 error = resource_int_value(device_get_name(sc->alc_dev), 1055 device_get_unit(sc->alc_dev), "int_rx_mod", &sc->alc_int_rx_mod); 1056 if (error == 0) { 1057 if (sc->alc_int_rx_mod < ALC_IM_TIMER_MIN || 1058 sc->alc_int_rx_mod > ALC_IM_TIMER_MAX) { 1059 device_printf(sc->alc_dev, "int_rx_mod value out of " 1060 "range; using default: %d\n", 1061 ALC_IM_RX_TIMER_DEFAULT); 1062 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT; 1063 } 1064 } 1065 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT; 1066 error = resource_int_value(device_get_name(sc->alc_dev), 1067 device_get_unit(sc->alc_dev), "int_tx_mod", &sc->alc_int_tx_mod); 1068 if (error == 0) { 1069 if (sc->alc_int_tx_mod < ALC_IM_TIMER_MIN || 1070 sc->alc_int_tx_mod > ALC_IM_TIMER_MAX) { 1071 device_printf(sc->alc_dev, "int_tx_mod value out of " 1072 "range; using default: %d\n", 1073 ALC_IM_TX_TIMER_DEFAULT); 1074 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT; 1075 } 1076 } 1077 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit", 1078 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0, 1079 sysctl_hw_alc_proc_limit, "I", 1080 "max number of Rx events to process"); 1081 /* Pull in device tunables. */ 1082 sc->alc_process_limit = ALC_PROC_DEFAULT; 1083 error = resource_int_value(device_get_name(sc->alc_dev), 1084 device_get_unit(sc->alc_dev), "process_limit", 1085 &sc->alc_process_limit); 1086 if (error == 0) { 1087 if (sc->alc_process_limit < ALC_PROC_MIN || 1088 sc->alc_process_limit > ALC_PROC_MAX) { 1089 device_printf(sc->alc_dev, 1090 "process_limit value out of range; " 1091 "using default: %d\n", ALC_PROC_DEFAULT); 1092 sc->alc_process_limit = ALC_PROC_DEFAULT; 1093 } 1094 } 1095 1096 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 1097 NULL, "ALC statistics"); 1098 parent = SYSCTL_CHILDREN(tree); 1099 1100 /* Rx statistics. 
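	 *
	 * (A tuning sketch for the knobs created above, assuming device
	 * unit 0: the moderation timers and the Rx processing limit show
	 * up as dev.alc.0.int_rx_mod, dev.alc.0.int_tx_mod and
	 * dev.alc.0.process_limit, e.g. "sysctl dev.alc.0.process_limit"
	 * to inspect or set them; the same names may also be preset as
	 * device hints, which resource_int_value() picks up at attach
	 * time.)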
*/ 1101 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 1102 NULL, "Rx MAC statistics"); 1103 child = SYSCTL_CHILDREN(tree); 1104 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 1105 &stats->rx_frames, "Good frames"); 1106 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", 1107 &stats->rx_bcast_frames, "Good broadcast frames"); 1108 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", 1109 &stats->rx_mcast_frames, "Good multicast frames"); 1110 ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 1111 &stats->rx_pause_frames, "Pause control frames"); 1112 ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames", 1113 &stats->rx_control_frames, "Control frames"); 1114 ALC_SYSCTL_STAT_ADD32(ctx, child, "crc_errs", 1115 &stats->rx_crcerrs, "CRC errors"); 1116 ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 1117 &stats->rx_lenerrs, "Frames with length mismatched"); 1118 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets", 1119 &stats->rx_bytes, "Good octets"); 1120 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets", 1121 &stats->rx_bcast_bytes, "Good broadcast octets"); 1122 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets", 1123 &stats->rx_mcast_bytes, "Good multicast octets"); 1124 ALC_SYSCTL_STAT_ADD32(ctx, child, "runts", 1125 &stats->rx_runts, "Too short frames"); 1126 ALC_SYSCTL_STAT_ADD32(ctx, child, "fragments", 1127 &stats->rx_fragments, "Fragmented frames"); 1128 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 1129 &stats->rx_pkts_64, "64 bytes frames"); 1130 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 1131 &stats->rx_pkts_65_127, "65 to 127 bytes frames"); 1132 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 1133 &stats->rx_pkts_128_255, "128 to 255 bytes frames"); 1134 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 1135 &stats->rx_pkts_256_511, "256 to 511 bytes frames"); 1136 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 1137 &stats->rx_pkts_512_1023, "512 to 1023 bytes frames"); 1138 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 1139 &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames"); 1140 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", 1141 &stats->rx_pkts_1519_max, "1519 to max frames"); 1142 ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs", 1143 &stats->rx_pkts_truncated, "Truncated frames due to MTU size"); 1144 ALC_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", 1145 &stats->rx_fifo_oflows, "FIFO overflows"); 1146 ALC_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs", 1147 &stats->rx_rrs_errs, "Return status write-back errors"); 1148 ALC_SYSCTL_STAT_ADD32(ctx, child, "align_errs", 1149 &stats->rx_alignerrs, "Alignment errors"); 1150 ALC_SYSCTL_STAT_ADD32(ctx, child, "filtered", 1151 &stats->rx_pkts_filtered, 1152 "Frames dropped due to address filtering"); 1153 1154 /* Tx statistics. 
 */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->tx_bcast_frames, "Good broadcast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->tx_mcast_frames, "Good multicast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->tx_control_frames, "Control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
	    &stats->tx_excess_defer, "Frames with excessive deferrals");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "defers",
	    &stats->tx_excess_defer, "Frames with deferrals");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->tx_bytes, "Good octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->tx_bcast_bytes, "Good broadcast octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->tx_mcast_bytes, "Good multicast octets");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->tx_pkts_64, "64 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->tx_pkts_1519_max, "1519 to max frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
	    &stats->tx_excess_colls, "Excessive collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "abort",
	    &stats->tx_abort, "Aborted frames due to excessive collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underrun, "FIFO underruns");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
	    &stats->tx_desc_underrun, "Descriptor write-back errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->tx_lenerrs, "Frames with length mismatched");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->tx_pkts_truncated, "Truncated frames due to MTU size");
}

#undef ALC_SYSCTL_STAT_ADD32
#undef ALC_SYSCTL_STAT_ADD64

struct alc_dmamap_arg {
	bus_addr_t	alc_busaddr;
};

static void
alc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct alc_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct alc_dmamap_arg *)arg;
	ctx->alc_busaddr = segs[0].ds_addr;
}

#ifdef foo
/*
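 * The check below guards against any descriptor ring or message block
 * crossing a 4GB boundary.  A quick worked example, assuming ALC_ADDR_HI()
 * yields the upper 32 bits of a bus address: a ring starting at bus
 * address 0xFFFFF000 with an 8KB size ends at 0x100001000, so
 * ALC_ADDR_HI() of its start (0) and of its end (1) differ, and a single
 * high-address register cannot describe the whole ring.
 *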
1235 * Normal and high Tx descriptors shares single Tx high address. 1236 * Four Rx descriptor/return rings and CMB shares the same Rx 1237 * high address. 1238 */ 1239 static int 1240 alc_check_boundary(struct alc_softc *sc) 1241 { 1242 bus_addr_t cmb_end, rx_ring_end, rr_ring_end, tx_ring_end; 1243 1244 rx_ring_end = sc->alc_rdata.alc_rx_ring_paddr + ALC_RX_RING_SZ; 1245 rr_ring_end = sc->alc_rdata.alc_rr_ring_paddr + ALC_RR_RING_SZ; 1246 cmb_end = sc->alc_rdata.alc_cmb_paddr + ALC_CMB_SZ; 1247 tx_ring_end = sc->alc_rdata.alc_tx_ring_paddr + ALC_TX_RING_SZ; 1248 1249 /* 4GB boundary crossing is not allowed. */ 1250 if ((ALC_ADDR_HI(rx_ring_end) != 1251 ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr)) || 1252 (ALC_ADDR_HI(rr_ring_end) != 1253 ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr)) || 1254 (ALC_ADDR_HI(cmb_end) != 1255 ALC_ADDR_HI(sc->alc_rdata.alc_cmb_paddr)) || 1256 (ALC_ADDR_HI(tx_ring_end) != 1257 ALC_ADDR_HI(sc->alc_rdata.alc_tx_ring_paddr))) 1258 return (EFBIG); 1259 /* 1260 * Make sure Rx return descriptor/Rx descriptor/CMB use 1261 * the same high address. 1262 */ 1263 if ((ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(rr_ring_end)) || 1264 (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(cmb_end))) 1265 return (EFBIG); 1266 1267 return (0); 1268 } 1269 #endif 1270 1271 static int 1272 alc_dma_alloc(struct alc_softc *sc) 1273 { 1274 struct alc_txdesc *txd; 1275 struct alc_rxdesc *rxd; 1276 struct alc_dmamap_arg ctx; 1277 int error, i; 1278 1279 /* Create parent DMA tag. */ 1280 error = bus_dma_tag_create( 1281 sc->alc_cdata.alc_parent_tag, /* parent */ 1282 1, 0, /* alignment, boundary */ 1283 BUS_SPACE_MAXADDR, /* lowaddr */ 1284 BUS_SPACE_MAXADDR, /* highaddr */ 1285 NULL, NULL, /* filter, filterarg */ 1286 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1287 0, /* nsegments */ 1288 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1289 0, /* flags */ 1290 &sc->alc_cdata.alc_parent_tag); 1291 if (error != 0) { 1292 device_printf(sc->alc_dev, 1293 "could not create parent DMA tag.\n"); 1294 goto fail; 1295 } 1296 1297 /* Create DMA tag for Tx descriptor ring. */ 1298 error = bus_dma_tag_create( 1299 sc->alc_cdata.alc_parent_tag, /* parent */ 1300 ALC_TX_RING_ALIGN, 0, /* alignment, boundary */ 1301 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1302 BUS_SPACE_MAXADDR, /* highaddr */ 1303 NULL, NULL, /* filter, filterarg */ 1304 ALC_TX_RING_SZ, /* maxsize */ 1305 1, /* nsegments */ 1306 ALC_TX_RING_SZ, /* maxsegsize */ 1307 0, /* flags */ 1308 &sc->alc_cdata.alc_tx_ring_tag); 1309 if (error != 0) { 1310 device_printf(sc->alc_dev, 1311 "could not create Tx ring DMA tag.\n"); 1312 goto fail; 1313 } 1314 1315 /* Create DMA tag for Rx free descriptor ring. */ 1316 error = bus_dma_tag_create( 1317 sc->alc_cdata.alc_parent_tag, /* parent */ 1318 ALC_RX_RING_ALIGN, 0, /* alignment, boundary */ 1319 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1320 BUS_SPACE_MAXADDR, /* highaddr */ 1321 NULL, NULL, /* filter, filterarg */ 1322 ALC_RX_RING_SZ, /* maxsize */ 1323 1, /* nsegments */ 1324 ALC_RX_RING_SZ, /* maxsegsize */ 1325 0, /* flags */ 1326 &sc->alc_cdata.alc_rx_ring_tag); 1327 if (error != 0) { 1328 device_printf(sc->alc_dev, 1329 "could not create Rx ring DMA tag.\n"); 1330 goto fail; 1331 } 1332 /* Create DMA tag for Rx return descriptor ring. 
*/ 1333 error = bus_dma_tag_create( 1334 sc->alc_cdata.alc_parent_tag, /* parent */ 1335 ALC_RR_RING_ALIGN, 0, /* alignment, boundary */ 1336 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1337 BUS_SPACE_MAXADDR, /* highaddr */ 1338 NULL, NULL, /* filter, filterarg */ 1339 ALC_RR_RING_SZ, /* maxsize */ 1340 1, /* nsegments */ 1341 ALC_RR_RING_SZ, /* maxsegsize */ 1342 0, /* flags */ 1343 &sc->alc_cdata.alc_rr_ring_tag); 1344 if (error != 0) { 1345 device_printf(sc->alc_dev, 1346 "could not create Rx return ring DMA tag.\n"); 1347 goto fail; 1348 } 1349 1350 /* Create DMA tag for coalescing message block. */ 1351 error = bus_dma_tag_create( 1352 sc->alc_cdata.alc_parent_tag, /* parent */ 1353 ALC_CMB_ALIGN, 0, /* alignment, boundary */ 1354 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1355 BUS_SPACE_MAXADDR, /* highaddr */ 1356 NULL, NULL, /* filter, filterarg */ 1357 ALC_CMB_SZ, /* maxsize */ 1358 1, /* nsegments */ 1359 ALC_CMB_SZ, /* maxsegsize */ 1360 0, /* flags */ 1361 &sc->alc_cdata.alc_cmb_tag); 1362 if (error != 0) { 1363 device_printf(sc->alc_dev, 1364 "could not create CMB DMA tag.\n"); 1365 goto fail; 1366 } 1367 /* Create DMA tag for status message block. */ 1368 error = bus_dma_tag_create( 1369 sc->alc_cdata.alc_parent_tag, /* parent */ 1370 ALC_SMB_ALIGN, 0, /* alignment, boundary */ 1371 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1372 BUS_SPACE_MAXADDR, /* highaddr */ 1373 NULL, NULL, /* filter, filterarg */ 1374 ALC_SMB_SZ, /* maxsize */ 1375 1, /* nsegments */ 1376 ALC_SMB_SZ, /* maxsegsize */ 1377 0, /* flags */ 1378 &sc->alc_cdata.alc_smb_tag); 1379 if (error != 0) { 1380 device_printf(sc->alc_dev, 1381 "could not create SMB DMA tag.\n"); 1382 goto fail; 1383 } 1384 1385 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 1386 error = bus_dmamem_alloc(sc->alc_cdata.alc_tx_ring_tag, 1387 (void **)&sc->alc_rdata.alc_tx_ring, 1388 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1389 &sc->alc_cdata.alc_tx_ring_map); 1390 if (error != 0) { 1391 device_printf(sc->alc_dev, 1392 "could not allocate DMA'able memory for Tx ring.\n"); 1393 goto fail; 1394 } 1395 ctx.alc_busaddr = 0; 1396 error = bus_dmamap_load(sc->alc_cdata.alc_tx_ring_tag, 1397 sc->alc_cdata.alc_tx_ring_map, sc->alc_rdata.alc_tx_ring, 1398 ALC_TX_RING_SZ, alc_dmamap_cb, &ctx, 0); 1399 if (error != 0 || ctx.alc_busaddr == 0) { 1400 device_printf(sc->alc_dev, 1401 "could not load DMA'able memory for Tx ring.\n"); 1402 goto fail; 1403 } 1404 sc->alc_rdata.alc_tx_ring_paddr = ctx.alc_busaddr; 1405 1406 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 1407 error = bus_dmamem_alloc(sc->alc_cdata.alc_rx_ring_tag, 1408 (void **)&sc->alc_rdata.alc_rx_ring, 1409 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1410 &sc->alc_cdata.alc_rx_ring_map); 1411 if (error != 0) { 1412 device_printf(sc->alc_dev, 1413 "could not allocate DMA'able memory for Rx ring.\n"); 1414 goto fail; 1415 } 1416 ctx.alc_busaddr = 0; 1417 error = bus_dmamap_load(sc->alc_cdata.alc_rx_ring_tag, 1418 sc->alc_cdata.alc_rx_ring_map, sc->alc_rdata.alc_rx_ring, 1419 ALC_RX_RING_SZ, alc_dmamap_cb, &ctx, 0); 1420 if (error != 0 || ctx.alc_busaddr == 0) { 1421 device_printf(sc->alc_dev, 1422 "could not load DMA'able memory for Rx ring.\n"); 1423 goto fail; 1424 } 1425 sc->alc_rdata.alc_rx_ring_paddr = ctx.alc_busaddr; 1426 1427 /* Allocate DMA'able memory and load the DMA map for Rx return ring. 
 */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_rr_ring_tag,
	    (void **)&sc->alc_rdata.alc_rr_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_rr_ring_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_rr_ring_tag,
	    sc->alc_cdata.alc_rr_ring_map, sc->alc_rdata.alc_rr_ring,
	    ALC_RR_RING_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	sc->alc_rdata.alc_rr_ring_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for CMB. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_cmb_tag,
	    (void **)&sc->alc_rdata.alc_cmb,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_cmb_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for CMB.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_cmb_tag,
	    sc->alc_cdata.alc_cmb_map, sc->alc_rdata.alc_cmb,
	    ALC_CMB_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for CMB.\n");
		goto fail;
	}
	sc->alc_rdata.alc_cmb_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for SMB. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_smb_tag,
	    (void **)&sc->alc_rdata.alc_smb,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_smb_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for SMB.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_smb_tag,
	    sc->alc_cdata.alc_smb_map, sc->alc_rdata.alc_smb,
	    ALC_SMB_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for SMB.\n");
		goto fail;
	}
	sc->alc_rdata.alc_smb_paddr = ctx.alc_busaddr;

#ifdef foo
	/*
	 * All of the status blocks and descriptor rings are
	 * allocated in the lower 4GB, so the high 32 bits of
	 * their addresses are the same (all 0).
	 */

	/* Make sure we've not crossed the 4GB boundary. */
	if ((error = alc_check_boundary(sc)) != 0) {
		device_printf(sc->alc_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA addressing mode.\n");
		alc_dma_free(sc);
		/*
		 * Limit max allowable DMA address space to 32bit
		 * and try again.
		 */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}
#endif

	/*
	 * Create Tx buffer parent tag.
	 * AR813x/AR815x allows 64bit DMA addressing of Tx/Rx buffers,
	 * so it needs a separate parent DMA tag, as the ring parent DMA
	 * address space could have been restricted to the 32bit address
	 * space by a 4GB boundary crossing.
1517 */ 1518 error = bus_dma_tag_create( 1519 sc->alc_cdata.alc_parent_tag, /* parent */ 1520 1, 0, /* alignment, boundary */ 1521 BUS_SPACE_MAXADDR, /* lowaddr */ 1522 BUS_SPACE_MAXADDR, /* highaddr */ 1523 NULL, NULL, /* filter, filterarg */ 1524 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1525 0, /* nsegments */ 1526 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1527 0, /* flags */ 1528 &sc->alc_cdata.alc_buffer_tag); 1529 if (error != 0) { 1530 device_printf(sc->alc_dev, 1531 "could not create parent buffer DMA tag.\n"); 1532 goto fail; 1533 } 1534 1535 /* Create DMA tag for Tx buffers. */ 1536 error = bus_dma_tag_create( 1537 sc->alc_cdata.alc_buffer_tag, /* parent */ 1538 1, 0, /* alignment, boundary */ 1539 BUS_SPACE_MAXADDR, /* lowaddr */ 1540 BUS_SPACE_MAXADDR, /* highaddr */ 1541 NULL, NULL, /* filter, filterarg */ 1542 ALC_TSO_MAXSIZE, /* maxsize */ 1543 ALC_MAXTXSEGS, /* nsegments */ 1544 ALC_TSO_MAXSEGSIZE, /* maxsegsize */ 1545 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, /* flags */ 1546 &sc->alc_cdata.alc_tx_tag); 1547 if (error != 0) { 1548 device_printf(sc->alc_dev, "could not create Tx DMA tag.\n"); 1549 goto fail; 1550 } 1551 1552 /* Create DMA tag for Rx buffers. */ 1553 error = bus_dma_tag_create( 1554 sc->alc_cdata.alc_buffer_tag, /* parent */ 1555 ALC_RX_BUF_ALIGN, 0, /* alignment, boundary */ 1556 BUS_SPACE_MAXADDR, /* lowaddr */ 1557 BUS_SPACE_MAXADDR, /* highaddr */ 1558 NULL, NULL, /* filter, filterarg */ 1559 MCLBYTES, /* maxsize */ 1560 1, /* nsegments */ 1561 MCLBYTES, /* maxsegsize */ 1562 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED, /* flags */ 1563 &sc->alc_cdata.alc_rx_tag); 1564 if (error != 0) { 1565 device_printf(sc->alc_dev, "could not create Rx DMA tag.\n"); 1566 goto fail; 1567 } 1568 /* Create DMA maps for Tx buffers. */ 1569 for (i = 0; i < ALC_TX_RING_CNT; i++) { 1570 txd = &sc->alc_cdata.alc_txdesc[i]; 1571 txd->tx_m = NULL; 1572 txd->tx_dmamap = NULL; 1573 error = bus_dmamap_create(sc->alc_cdata.alc_tx_tag, 1574 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 1575 &txd->tx_dmamap); 1576 if (error != 0) { 1577 device_printf(sc->alc_dev, 1578 "could not create Tx dmamap.\n"); 1579 goto fail; 1580 } 1581 } 1582 /* Create DMA maps for Rx buffers. */ 1583 error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 1584 BUS_DMA_WAITOK, 1585 &sc->alc_cdata.alc_rx_sparemap); 1586 if (error) { 1587 device_printf(sc->alc_dev, 1588 "could not create spare Rx dmamap.\n"); 1589 goto fail; 1590 } 1591 for (i = 0; i < ALC_RX_RING_CNT; i++) { 1592 rxd = &sc->alc_cdata.alc_rxdesc[i]; 1593 rxd->rx_m = NULL; 1594 rxd->rx_dmamap = NULL; 1595 error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 1596 BUS_DMA_WAITOK, 1597 &rxd->rx_dmamap); 1598 if (error != 0) { 1599 device_printf(sc->alc_dev, 1600 "could not create Rx dmamap.\n"); 1601 goto fail; 1602 } 1603 } 1604 1605 fail: 1606 return (error); 1607 } 1608 1609 static void 1610 alc_dma_free(struct alc_softc *sc) 1611 { 1612 struct alc_txdesc *txd; 1613 struct alc_rxdesc *rxd; 1614 int i; 1615 1616 /* Tx buffers. 
*/ 1617 if (sc->alc_cdata.alc_tx_tag != NULL) { 1618 for (i = 0; i < ALC_TX_RING_CNT; i++) { 1619 txd = &sc->alc_cdata.alc_txdesc[i]; 1620 if (txd->tx_dmamap != NULL) { 1621 bus_dmamap_destroy(sc->alc_cdata.alc_tx_tag, 1622 txd->tx_dmamap); 1623 txd->tx_dmamap = NULL; 1624 } 1625 } 1626 bus_dma_tag_destroy(sc->alc_cdata.alc_tx_tag); 1627 sc->alc_cdata.alc_tx_tag = NULL; 1628 } 1629 /* Rx buffers */ 1630 if (sc->alc_cdata.alc_rx_tag != NULL) { 1631 for (i = 0; i < ALC_RX_RING_CNT; i++) { 1632 rxd = &sc->alc_cdata.alc_rxdesc[i]; 1633 if (rxd->rx_dmamap != NULL) { 1634 bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag, 1635 rxd->rx_dmamap); 1636 rxd->rx_dmamap = NULL; 1637 } 1638 } 1639 if (sc->alc_cdata.alc_rx_sparemap != NULL) { 1640 bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag, 1641 sc->alc_cdata.alc_rx_sparemap); 1642 sc->alc_cdata.alc_rx_sparemap = NULL; 1643 } 1644 bus_dma_tag_destroy(sc->alc_cdata.alc_rx_tag); 1645 sc->alc_cdata.alc_rx_tag = NULL; 1646 } 1647 /* Tx descriptor ring. */ 1648 if (sc->alc_cdata.alc_tx_ring_tag != NULL) { 1649 if (sc->alc_cdata.alc_tx_ring_map != NULL) 1650 bus_dmamap_unload(sc->alc_cdata.alc_tx_ring_tag, 1651 sc->alc_cdata.alc_tx_ring_map); 1652 if (sc->alc_cdata.alc_tx_ring_map != NULL && 1653 sc->alc_rdata.alc_tx_ring != NULL) 1654 bus_dmamem_free(sc->alc_cdata.alc_tx_ring_tag, 1655 sc->alc_rdata.alc_tx_ring, 1656 sc->alc_cdata.alc_tx_ring_map); 1657 sc->alc_rdata.alc_tx_ring = NULL; 1658 sc->alc_cdata.alc_tx_ring_map = NULL; 1659 bus_dma_tag_destroy(sc->alc_cdata.alc_tx_ring_tag); 1660 sc->alc_cdata.alc_tx_ring_tag = NULL; 1661 } 1662 /* Rx ring. */ 1663 if (sc->alc_cdata.alc_rx_ring_tag != NULL) { 1664 if (sc->alc_cdata.alc_rx_ring_map != NULL) 1665 bus_dmamap_unload(sc->alc_cdata.alc_rx_ring_tag, 1666 sc->alc_cdata.alc_rx_ring_map); 1667 if (sc->alc_cdata.alc_rx_ring_map != NULL && 1668 sc->alc_rdata.alc_rx_ring != NULL) 1669 bus_dmamem_free(sc->alc_cdata.alc_rx_ring_tag, 1670 sc->alc_rdata.alc_rx_ring, 1671 sc->alc_cdata.alc_rx_ring_map); 1672 sc->alc_rdata.alc_rx_ring = NULL; 1673 sc->alc_cdata.alc_rx_ring_map = NULL; 1674 bus_dma_tag_destroy(sc->alc_cdata.alc_rx_ring_tag); 1675 sc->alc_cdata.alc_rx_ring_tag = NULL; 1676 } 1677 /* Rx return ring. 
*/ 1678 if (sc->alc_cdata.alc_rr_ring_tag != NULL) { 1679 if (sc->alc_cdata.alc_rr_ring_map != NULL) 1680 bus_dmamap_unload(sc->alc_cdata.alc_rr_ring_tag, 1681 sc->alc_cdata.alc_rr_ring_map); 1682 if (sc->alc_cdata.alc_rr_ring_map != NULL && 1683 sc->alc_rdata.alc_rr_ring != NULL) 1684 bus_dmamem_free(sc->alc_cdata.alc_rr_ring_tag, 1685 sc->alc_rdata.alc_rr_ring, 1686 sc->alc_cdata.alc_rr_ring_map); 1687 sc->alc_rdata.alc_rr_ring = NULL; 1688 sc->alc_cdata.alc_rr_ring_map = NULL; 1689 bus_dma_tag_destroy(sc->alc_cdata.alc_rr_ring_tag); 1690 sc->alc_cdata.alc_rr_ring_tag = NULL; 1691 } 1692 /* CMB block */ 1693 if (sc->alc_cdata.alc_cmb_tag != NULL) { 1694 if (sc->alc_cdata.alc_cmb_map != NULL) 1695 bus_dmamap_unload(sc->alc_cdata.alc_cmb_tag, 1696 sc->alc_cdata.alc_cmb_map); 1697 if (sc->alc_cdata.alc_cmb_map != NULL && 1698 sc->alc_rdata.alc_cmb != NULL) 1699 bus_dmamem_free(sc->alc_cdata.alc_cmb_tag, 1700 sc->alc_rdata.alc_cmb, 1701 sc->alc_cdata.alc_cmb_map); 1702 sc->alc_rdata.alc_cmb = NULL; 1703 sc->alc_cdata.alc_cmb_map = NULL; 1704 bus_dma_tag_destroy(sc->alc_cdata.alc_cmb_tag); 1705 sc->alc_cdata.alc_cmb_tag = NULL; 1706 } 1707 /* SMB block */ 1708 if (sc->alc_cdata.alc_smb_tag != NULL) { 1709 if (sc->alc_cdata.alc_smb_map != NULL) 1710 bus_dmamap_unload(sc->alc_cdata.alc_smb_tag, 1711 sc->alc_cdata.alc_smb_map); 1712 if (sc->alc_cdata.alc_smb_map != NULL && 1713 sc->alc_rdata.alc_smb != NULL) 1714 bus_dmamem_free(sc->alc_cdata.alc_smb_tag, 1715 sc->alc_rdata.alc_smb, 1716 sc->alc_cdata.alc_smb_map); 1717 sc->alc_rdata.alc_smb = NULL; 1718 sc->alc_cdata.alc_smb_map = NULL; 1719 bus_dma_tag_destroy(sc->alc_cdata.alc_smb_tag); 1720 sc->alc_cdata.alc_smb_tag = NULL; 1721 } 1722 if (sc->alc_cdata.alc_buffer_tag != NULL) { 1723 bus_dma_tag_destroy(sc->alc_cdata.alc_buffer_tag); 1724 sc->alc_cdata.alc_buffer_tag = NULL; 1725 } 1726 if (sc->alc_cdata.alc_parent_tag != NULL) { 1727 bus_dma_tag_destroy(sc->alc_cdata.alc_parent_tag); 1728 sc->alc_cdata.alc_parent_tag = NULL; 1729 } 1730 } 1731 1732 static int 1733 alc_shutdown(device_t dev) 1734 { 1735 1736 return (alc_suspend(dev)); 1737 } 1738 1739 #if 0 1740 /* XXX: LINK SPEED */ 1741 /* 1742 * Note, this driver resets the link speed to 10/100Mbps by 1743 * restarting auto-negotiation in suspend/shutdown phase but we 1744 * don't know whether that auto-negotiation would succeed or not 1745 * as driver has no control after powering off/suspend operation. 1746 * If the renegotiation fail WOL may not work. Running at 1Gbps 1747 * will draw more power than 375mA at 3.3V which is specified in 1748 * PCI specification and that would result in complete 1749 * shutdowning power to ethernet controller. 1750 * 1751 * TODO 1752 * Save current negotiated media speed/duplex/flow-control to 1753 * softc and restore the same link again after resuming. PHY 1754 * handling such as power down/resetting to 100Mbps may be better 1755 * handled in suspend method in phy driver. 
1756 */ 1757 static void 1758 alc_setlinkspeed(struct alc_softc *sc) 1759 { 1760 struct mii_data *mii; 1761 int aneg, i; 1762 1763 mii = device_get_softc(sc->alc_miibus); 1764 mii_pollstat(mii); 1765 aneg = 0; 1766 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 1767 (IFM_ACTIVE | IFM_AVALID)) { 1768 switch IFM_SUBTYPE(mii->mii_media_active) { 1769 case IFM_10_T: 1770 case IFM_100_TX: 1771 return; 1772 case IFM_1000_T: 1773 aneg++; 1774 break; 1775 default: 1776 break; 1777 } 1778 } 1779 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_100T2CR, 0); 1780 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 1781 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 1782 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 1783 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); 1784 DELAY(1000); 1785 if (aneg != 0) { 1786 /* 1787 * Poll link state until alc(4) get a 10/100Mbps link. 1788 */ 1789 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 1790 mii_pollstat(mii); 1791 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 1792 == (IFM_ACTIVE | IFM_AVALID)) { 1793 switch (IFM_SUBTYPE( 1794 mii->mii_media_active)) { 1795 case IFM_10_T: 1796 case IFM_100_TX: 1797 alc_mac_config(sc); 1798 return; 1799 default: 1800 break; 1801 } 1802 } 1803 ALC_UNLOCK(sc); 1804 pause("alclnk", hz); 1805 ALC_LOCK(sc); 1806 } 1807 if (i == MII_ANEGTICKS_GIGE) 1808 device_printf(sc->alc_dev, 1809 "establishing a link failed, WOL may not work!"); 1810 } 1811 /* 1812 * No link, force MAC to have 100Mbps, full-duplex link. 1813 * This is the last resort and may/may not work. 1814 */ 1815 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 1816 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 1817 alc_mac_config(sc); 1818 } 1819 #endif 1820 1821 #if 0 1822 /* XXX: WOL */ 1823 static void 1824 alc_setwol(struct alc_softc *sc) 1825 { 1826 struct ifnet *ifp; 1827 uint32_t reg, pmcs; 1828 uint16_t pmstat; 1829 1830 ALC_LOCK_ASSERT(sc); 1831 1832 alc_disable_l0s_l1(sc); 1833 ifp = sc->alc_ifp; 1834 if ((sc->alc_flags & ALC_FLAG_PM) == 0) { 1835 /* Disable WOL. */ 1836 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 1837 reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC); 1838 reg |= PCIE_PHYMISC_FORCE_RCV_DET; 1839 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg); 1840 /* Force PHY power down. */ 1841 alc_phy_down(sc); 1842 CSR_WRITE_4(sc, ALC_MASTER_CFG, 1843 CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS); 1844 return; 1845 } 1846 1847 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 1848 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) 1849 alc_setlinkspeed(sc); 1850 CSR_WRITE_4(sc, ALC_MASTER_CFG, 1851 CSR_READ_4(sc, ALC_MASTER_CFG) & ~MASTER_CLK_SEL_DIS); 1852 } 1853 1854 pmcs = 0; 1855 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 1856 pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB; 1857 CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs); 1858 reg = CSR_READ_4(sc, ALC_MAC_CFG); 1859 reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI | 1860 MAC_CFG_BCAST); 1861 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 1862 reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST; 1863 if ((ifp->if_capenable & IFCAP_WOL) != 0) 1864 reg |= MAC_CFG_RX_ENB; 1865 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 1866 1867 reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC); 1868 reg |= PCIE_PHYMISC_FORCE_RCV_DET; 1869 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg); 1870 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 1871 /* WOL disabled, PHY power down. */ 1872 alc_phy_down(sc); 1873 CSR_WRITE_4(sc, ALC_MASTER_CFG, 1874 CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS); 1875 1876 } 1877 /* Request PME. 
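 * PME (Power Management Event) is controlled through the PMCSR
 * register of the PCI power-management capability: any stale PME
 * status/enable bits are cleared first and PME is re-armed only when
 * IFCAP_WOL is enabled, so the chip may wake the system from the
 * powered-down state.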
*/ 1878 pmstat = pci_read_config(sc->alc_dev, 1879 sc->alc_pmcap + PCIR_POWER_STATUS, 2); 1880 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 1881 if ((ifp->if_capenable & IFCAP_WOL) != 0) 1882 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 1883 pci_write_config(sc->alc_dev, 1884 sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2); 1885 } 1886 #endif 1887 1888 static int 1889 alc_suspend(device_t dev) 1890 { 1891 struct alc_softc *sc = device_get_softc(dev); 1892 struct ifnet *ifp = &sc->arpcom.ac_if; 1893 1894 lwkt_serialize_enter(ifp->if_serializer); 1895 alc_stop(sc); 1896 #if 0 1897 /* XXX: WOL */ 1898 alc_setwol(sc); 1899 #endif 1900 lwkt_serialize_exit(ifp->if_serializer); 1901 1902 return (0); 1903 } 1904 1905 static int 1906 alc_resume(device_t dev) 1907 { 1908 struct alc_softc *sc = device_get_softc(dev); 1909 struct ifnet *ifp = &sc->arpcom.ac_if; 1910 uint16_t pmstat; 1911 1912 lwkt_serialize_enter(ifp->if_serializer); 1913 1914 if ((sc->alc_flags & ALC_FLAG_PM) != 0) { 1915 /* Disable PME and clear PME status. */ 1916 pmstat = pci_read_config(sc->alc_dev, 1917 sc->alc_pmcap + PCIR_POWER_STATUS, 2); 1918 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 1919 pmstat &= ~PCIM_PSTAT_PMEENABLE; 1920 pci_write_config(sc->alc_dev, 1921 sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2); 1922 } 1923 } 1924 1925 /* Reset PHY. */ 1926 alc_phy_reset(sc); 1927 if (ifp->if_flags & IFF_UP) 1928 alc_init(sc); 1929 1930 lwkt_serialize_exit(ifp->if_serializer); 1931 1932 return (0); 1933 } 1934 1935 static int 1936 alc_encap(struct alc_softc *sc, struct mbuf **m_head) 1937 { 1938 struct alc_txdesc *txd, *txd_last; 1939 struct tx_desc *desc; 1940 struct mbuf *m; 1941 #if 0 /* XXX: TSO */ 1942 struct ip *ip; 1943 #endif 1944 struct tcphdr *tcp; 1945 bus_dma_segment_t txsegs[ALC_MAXTXSEGS]; 1946 bus_dmamap_t map; 1947 uint32_t cflags, hdrlen, poff, vtag; 1948 #if 0 /* XXX: TSO */ 1949 uint32_t ip_off; 1950 #endif 1951 int error, idx, nsegs, prod; 1952 1953 M_ASSERTPKTHDR((*m_head)); 1954 1955 m = *m_head; 1956 tcp = NULL; 1957 poff = 0; 1958 #if 0 /* XXX: TSO */ 1959 ip_off = 0; 1960 ip = NULL; 1961 1962 if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) { 1963 /* 1964 * AR813x/AR815x requires offset of TCP/UDP header in its 1965 * Tx descriptor to perform Tx checksum offloading. TSO 1966 * also requires TCP header offset and modification of 1967 * IP/TCP header. This kind of operation takes many CPU 1968 * cycles on FreeBSD so fast host CPU is required to get 1969 * smooth TSO performance. 1970 */ 1971 struct ether_header *eh; 1972 1973 if (M_WRITABLE(m) == 0) { 1974 /* Get a writable copy. */ 1975 m = m_dup(*m_head, MB_DONTWAIT); 1976 /* Release original mbufs. */ 1977 m_freem(*m_head); 1978 if (m == NULL) { 1979 *m_head = NULL; 1980 return (ENOBUFS); 1981 } 1982 *m_head = m; 1983 } 1984 1985 ip_off = sizeof(struct ether_header); 1986 m = m_pullup(m, ip_off + sizeof(struct ip)); 1987 if (m == NULL) { 1988 *m_head = NULL; 1989 return (ENOBUFS); 1990 } 1991 eh = mtod(m, struct ether_header *); 1992 /* 1993 * Check if hardware VLAN insertion is off. 1994 * Additional check for LLC/SNAP frame? 
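 * If the frame carries an in-band 802.1Q tag (i.e. hardware tag
 * insertion was not used), the IP header starts after the
 * ether_vlan_header, so ip_off is bumped accordingly before the
 * IP/TCP headers are located below.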
1995 */ 1996 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1997 ip_off = sizeof(struct ether_vlan_header); 1998 m = m_pullup(m, ip_off); 1999 if (m == NULL) { 2000 *m_head = NULL; 2001 return (ENOBUFS); 2002 } 2003 } 2004 m = m_pullup(m, ip_off + sizeof(struct ip)); 2005 if (m == NULL) { 2006 *m_head = NULL; 2007 return (ENOBUFS); 2008 } 2009 ip = (struct ip *)(mtod(m, char *) + ip_off); 2010 poff = ip_off + (ip->ip_hl << 2); 2011 2012 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2013 m = m_pullup(m, poff + sizeof(struct tcphdr)); 2014 if (m == NULL) { 2015 *m_head = NULL; 2016 return (ENOBUFS); 2017 } 2018 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 2019 m = m_pullup(m, poff + (tcp->th_off << 2)); 2020 if (m == NULL) { 2021 *m_head = NULL; 2022 return (ENOBUFS); 2023 } 2024 /* 2025 * Due to strict adherence of Microsoft NDIS 2026 * Large Send specification, hardware expects 2027 * a pseudo TCP checksum inserted by upper 2028 * stack. Unfortunately the pseudo TCP 2029 * checksum that NDIS refers to does not include 2030 * TCP payload length so driver should recompute 2031 * the pseudo checksum here. Hopefully this 2032 * wouldn't be much burden on modern CPUs. 2033 * 2034 * Reset IP checksum and recompute TCP pseudo 2035 * checksum as NDIS specification said. 2036 */ 2037 ip->ip_sum = 0; 2038 tcp->th_sum = in_pseudo(ip->ip_src.s_addr, 2039 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 2040 } 2041 *m_head = m; 2042 } 2043 #endif /* TSO */ 2044 2045 prod = sc->alc_cdata.alc_tx_prod; 2046 txd = &sc->alc_cdata.alc_txdesc[prod]; 2047 txd_last = txd; 2048 map = txd->tx_dmamap; 2049 2050 error = bus_dmamap_load_mbuf_defrag( 2051 sc->alc_cdata.alc_tx_tag, map, m_head, 2052 txsegs, ALC_MAXTXSEGS, &nsegs, BUS_DMA_NOWAIT); 2053 if (error) { 2054 m_freem(*m_head); 2055 *m_head = NULL; 2056 return (error); 2057 } 2058 if (nsegs == 0) { 2059 m_freem(*m_head); 2060 *m_head = NULL; 2061 return (EIO); 2062 } 2063 2064 /* Check descriptor overrun. */ 2065 if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) { 2066 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, map); 2067 return (ENOBUFS); 2068 } 2069 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, map, BUS_DMASYNC_PREWRITE); 2070 2071 m = *m_head; 2072 cflags = TD_ETHERNET; 2073 vtag = 0; 2074 desc = NULL; 2075 idx = 0; 2076 /* Configure VLAN hardware tag insertion. */ 2077 if ((m->m_flags & M_VLANTAG) != 0) { 2078 vtag = htons(m->m_pkthdr.ether_vlantag); 2079 vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK; 2080 cflags |= TD_INS_VLAN_TAG; 2081 } 2082 /* Configure Tx checksum offload. */ 2083 if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) { 2084 #ifdef ALC_USE_CUSTOM_CSUM 2085 cflags |= TD_CUSTOM_CSUM; 2086 /* Set checksum start offset. */ 2087 cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) & 2088 TD_PLOAD_OFFSET_MASK; 2089 /* Set checksum insertion position of TCP/UDP. */ 2090 cflags |= (((poff + m->m_pkthdr.csum_data) >> 1) << 2091 TD_CUSTOM_CSUM_OFFSET_SHIFT) & TD_CUSTOM_CSUM_OFFSET_MASK; 2092 #else 2093 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 2094 cflags |= TD_IPCSUM; 2095 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 2096 cflags |= TD_TCPCSUM; 2097 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2098 cflags |= TD_UDPCSUM; 2099 /* Set TCP/UDP header offset. */ 2100 cflags |= (poff << TD_L4HDR_OFFSET_SHIFT) & 2101 TD_L4HDR_OFFSET_MASK; 2102 #endif 2103 } else if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2104 /* Request TSO and set MSS. 
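 * TD_TSO requests segmentation and TD_TSO_DESCV1 apparently selects
 * the version-1 TSO descriptor layout.  MSS programming and the rest
 * of TSO support are still disabled (see the #if 0 block below), so
 * only the TCP header offset is filled in here.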
*/ 2105 cflags |= TD_TSO | TD_TSO_DESCV1; 2106 #if 0 2107 /* XXX: TSO */ 2108 cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << TD_MSS_SHIFT) & 2109 TD_MSS_MASK; 2110 /* Set TCP header offset. */ 2111 #endif 2112 cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) & 2113 TD_TCPHDR_OFFSET_MASK; 2114 /* 2115 * AR813x/AR815x requires the first buffer should 2116 * only hold IP/TCP header data. Payload should 2117 * be handled in other descriptors. 2118 */ 2119 hdrlen = poff + (tcp->th_off << 2); 2120 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2121 desc->len = htole32(TX_BYTES(hdrlen | vtag)); 2122 desc->flags = htole32(cflags); 2123 desc->addr = htole64(txsegs[0].ds_addr); 2124 sc->alc_cdata.alc_tx_cnt++; 2125 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 2126 if (m->m_len - hdrlen > 0) { 2127 /* Handle remaining payload of the first fragment. */ 2128 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2129 desc->len = htole32(TX_BYTES((m->m_len - hdrlen) | 2130 vtag)); 2131 desc->flags = htole32(cflags); 2132 desc->addr = htole64(txsegs[0].ds_addr + hdrlen); 2133 sc->alc_cdata.alc_tx_cnt++; 2134 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 2135 } 2136 /* Handle remaining fragments. */ 2137 idx = 1; 2138 } 2139 for (; idx < nsegs; idx++) { 2140 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2141 desc->len = htole32(TX_BYTES(txsegs[idx].ds_len) | vtag); 2142 desc->flags = htole32(cflags); 2143 desc->addr = htole64(txsegs[idx].ds_addr); 2144 sc->alc_cdata.alc_tx_cnt++; 2145 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 2146 } 2147 /* Update producer index. */ 2148 sc->alc_cdata.alc_tx_prod = prod; 2149 2150 /* Finally set EOP on the last descriptor. */ 2151 prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT; 2152 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2153 desc->flags |= htole32(TD_EOP); 2154 2155 /* Swap dmamap of the first and the last. */ 2156 txd = &sc->alc_cdata.alc_txdesc[prod]; 2157 map = txd_last->tx_dmamap; 2158 txd_last->tx_dmamap = txd->tx_dmamap; 2159 txd->tx_dmamap = map; 2160 txd->tx_m = m; 2161 2162 return (0); 2163 } 2164 2165 static void 2166 alc_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 2167 { 2168 struct alc_softc *sc = ifp->if_softc; 2169 struct mbuf *m_head; 2170 int enq; 2171 2172 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 2173 ASSERT_SERIALIZED(ifp->if_serializer); 2174 2175 /* Reclaim transmitted frames. */ 2176 if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT) 2177 alc_txeof(sc); 2178 2179 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd)) 2180 return; 2181 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) { 2182 ifq_purge(&ifp->if_snd); 2183 return; 2184 } 2185 2186 for (enq = 0; !ifq_is_empty(&ifp->if_snd); ) { 2187 m_head = ifq_dequeue(&ifp->if_snd); 2188 if (m_head == NULL) 2189 break; 2190 /* 2191 * Pack the data into the transmit ring. If we 2192 * don't have room, set the OACTIVE flag and wait 2193 * for the NIC to drain the ring. 2194 */ 2195 if (alc_encap(sc, &m_head)) { 2196 if (m_head == NULL) 2197 break; 2198 ifq_prepend(&ifp->if_snd, m_head); 2199 ifq_set_oactive(&ifp->if_snd); 2200 break; 2201 } 2202 2203 enq++; 2204 /* 2205 * If there's a BPF listener, bounce a copy of this frame 2206 * to him. 2207 */ 2208 ETHER_BPF_MTAP(ifp, m_head); 2209 } 2210 2211 if (enq > 0) { 2212 /* Sync descriptors. */ 2213 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, 2214 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE); 2215 /* Kick. Assume we're using normal Tx priority queue. 
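 * Writing the new producer index to the TD mailbox register is the
 * doorbell that tells the MAC more Tx descriptors are ready; the
 * normal-priority queue index is placed with the
 * MBOX_TD_PROD_LO_IDX_SHIFT/_MASK pair below.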
*/ 2216 CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX, 2217 (sc->alc_cdata.alc_tx_prod << 2218 MBOX_TD_PROD_LO_IDX_SHIFT) & 2219 MBOX_TD_PROD_LO_IDX_MASK); 2220 /* Set a timeout in case the chip goes out to lunch. */ 2221 sc->alc_watchdog_timer = ALC_TX_TIMEOUT; 2222 } 2223 } 2224 2225 static void 2226 alc_watchdog(struct alc_softc *sc) 2227 { 2228 struct ifnet *ifp = &sc->arpcom.ac_if; 2229 2230 ASSERT_SERIALIZED(ifp->if_serializer); 2231 2232 if (sc->alc_watchdog_timer == 0 || --sc->alc_watchdog_timer) 2233 return; 2234 2235 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) { 2236 if_printf(sc->alc_ifp, "watchdog timeout (lost link)\n"); 2237 IFNET_STAT_INC(ifp, oerrors, 1); 2238 alc_init(sc); 2239 return; 2240 } 2241 if_printf(sc->alc_ifp, "watchdog timeout -- resetting\n"); 2242 IFNET_STAT_INC(ifp, oerrors, 1); 2243 alc_init(sc); 2244 if (!ifq_is_empty(&ifp->if_snd)) 2245 if_devstart(ifp); 2246 } 2247 2248 static int 2249 alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr) 2250 { 2251 struct alc_softc *sc; 2252 struct ifreq *ifr; 2253 struct mii_data *mii; 2254 int error, mask; 2255 2256 ASSERT_SERIALIZED(ifp->if_serializer); 2257 2258 sc = ifp->if_softc; 2259 ifr = (struct ifreq *)data; 2260 error = 0; 2261 switch (cmd) { 2262 case SIOCSIFMTU: 2263 if (ifr->ifr_mtu < ETHERMIN || 2264 ifr->ifr_mtu > (sc->alc_ident->max_framelen - 2265 sizeof(struct ether_vlan_header) - ETHER_CRC_LEN) || 2266 ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 && 2267 ifr->ifr_mtu > ETHERMTU)) { 2268 error = EINVAL; 2269 } else if (ifp->if_mtu != ifr->ifr_mtu) { 2270 ifp->if_mtu = ifr->ifr_mtu; 2271 #if 0 2272 /* AR813x/AR815x has 13 bits MSS field. */ 2273 if (ifp->if_mtu > ALC_TSO_MTU && 2274 (ifp->if_capenable & IFCAP_TSO4) != 0) { 2275 ifp->if_capenable &= ~IFCAP_TSO4; 2276 ifp->if_hwassist &= ~CSUM_TSO; 2277 } 2278 #endif 2279 } 2280 break; 2281 case SIOCSIFFLAGS: 2282 if ((ifp->if_flags & IFF_UP) != 0) { 2283 if ((ifp->if_flags & IFF_RUNNING) != 0 && 2284 ((ifp->if_flags ^ sc->alc_if_flags) & 2285 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 2286 alc_rxfilter(sc); 2287 else if ((ifp->if_flags & IFF_RUNNING) == 0) 2288 alc_init(sc); 2289 } else if ((ifp->if_flags & IFF_RUNNING) != 0) 2290 alc_stop(sc); 2291 sc->alc_if_flags = ifp->if_flags; 2292 break; 2293 case SIOCADDMULTI: 2294 case SIOCDELMULTI: 2295 if ((ifp->if_flags & IFF_RUNNING) != 0) 2296 alc_rxfilter(sc); 2297 break; 2298 case SIOCSIFMEDIA: 2299 case SIOCGIFMEDIA: 2300 mii = device_get_softc(sc->alc_miibus); 2301 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 2302 break; 2303 case SIOCSIFCAP: 2304 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2305 if ((mask & IFCAP_TXCSUM) != 0 && 2306 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 2307 ifp->if_capenable ^= IFCAP_TXCSUM; 2308 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2309 ifp->if_hwassist |= ALC_CSUM_FEATURES; 2310 else 2311 ifp->if_hwassist &= ~ALC_CSUM_FEATURES; 2312 } 2313 #if 0 2314 /* XXX: WOL */ 2315 if ((mask & IFCAP_WOL_MCAST) != 0 && 2316 (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0) 2317 ifp->if_capenable ^= IFCAP_WOL_MCAST; 2318 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2319 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 2320 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2321 #endif 2322 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2323 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 2324 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2325 alc_rxvlan(sc); 2326 } 2327 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 2328 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 2329 ifp->if_capenable ^= 
IFCAP_VLAN_HWCSUM; 2330 2331 /* 2332 * VLAN hardware tagging is required to do checksum 2333 * offload or TSO on VLAN interface. Checksum offload 2334 * on VLAN interface also requires hardware checksum 2335 * offload of parent interface. 2336 */ 2337 if ((ifp->if_capenable & IFCAP_TXCSUM) == 0) 2338 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 2339 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 2340 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 2341 // XXX VLAN_CAPABILITIES(ifp); 2342 break; 2343 default: 2344 error = ether_ioctl(ifp, cmd, data); 2345 break; 2346 } 2347 2348 return (error); 2349 } 2350 2351 static void 2352 alc_mac_config(struct alc_softc *sc) 2353 { 2354 struct mii_data *mii; 2355 uint32_t reg; 2356 2357 mii = device_get_softc(sc->alc_miibus); 2358 reg = CSR_READ_4(sc, ALC_MAC_CFG); 2359 reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC | 2360 MAC_CFG_SPEED_MASK); 2361 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 || 2362 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || 2363 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) { 2364 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW; 2365 } 2366 /* Reprogram MAC with resolved speed/duplex. */ 2367 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2368 case IFM_10_T: 2369 case IFM_100_TX: 2370 reg |= MAC_CFG_SPEED_10_100; 2371 break; 2372 case IFM_1000_T: 2373 reg |= MAC_CFG_SPEED_1000; 2374 break; 2375 } 2376 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 2377 reg |= MAC_CFG_FULL_DUPLEX; 2378 #ifdef notyet 2379 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 2380 reg |= MAC_CFG_TX_FC; 2381 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 2382 reg |= MAC_CFG_RX_FC; 2383 #endif 2384 } 2385 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 2386 } 2387 2388 static void 2389 alc_stats_clear(struct alc_softc *sc) 2390 { 2391 struct smb sb, *smb; 2392 uint32_t *reg; 2393 int i; 2394 2395 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2396 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 2397 sc->alc_cdata.alc_smb_map, 2398 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2399 smb = sc->alc_rdata.alc_smb; 2400 /* Update done, clear. */ 2401 smb->updated = 0; 2402 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 2403 sc->alc_cdata.alc_smb_map, 2404 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2405 } else { 2406 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; 2407 reg++) { 2408 CSR_READ_4(sc, ALC_RX_MIB_BASE + i); 2409 i += sizeof(uint32_t); 2410 } 2411 /* Read Tx statistics. */ 2412 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; 2413 reg++) { 2414 CSR_READ_4(sc, ALC_TX_MIB_BASE + i); 2415 i += sizeof(uint32_t); 2416 } 2417 } 2418 } 2419 2420 static void 2421 alc_stats_update(struct alc_softc *sc) 2422 { 2423 struct alc_hw_stats *stat; 2424 struct smb sb, *smb; 2425 struct ifnet *ifp; 2426 uint32_t *reg; 2427 int i; 2428 2429 ifp = sc->alc_ifp; 2430 stat = &sc->alc_stats; 2431 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2432 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 2433 sc->alc_cdata.alc_smb_map, 2434 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2435 smb = sc->alc_rdata.alc_smb; 2436 if (smb->updated == 0) 2437 return; 2438 } else { 2439 smb = &sb; 2440 /* Read Rx statistics. */ 2441 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; 2442 reg++) { 2443 *reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i); 2444 i += sizeof(uint32_t); 2445 } 2446 /* Read Tx statistics. 
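 * On controllers with the SMB bug the counters are read directly from
 * the MIB register window instead of the DMA'ed statistics block: the
 * struct smb fields are walked in declaration order while the
 * register offset advances by 4 bytes per 32-bit counter.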
*/ 2447 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; 2448 reg++) { 2449 *reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i); 2450 i += sizeof(uint32_t); 2451 } 2452 } 2453 2454 /* Rx stats. */ 2455 stat->rx_frames += smb->rx_frames; 2456 stat->rx_bcast_frames += smb->rx_bcast_frames; 2457 stat->rx_mcast_frames += smb->rx_mcast_frames; 2458 stat->rx_pause_frames += smb->rx_pause_frames; 2459 stat->rx_control_frames += smb->rx_control_frames; 2460 stat->rx_crcerrs += smb->rx_crcerrs; 2461 stat->rx_lenerrs += smb->rx_lenerrs; 2462 stat->rx_bytes += smb->rx_bytes; 2463 stat->rx_runts += smb->rx_runts; 2464 stat->rx_fragments += smb->rx_fragments; 2465 stat->rx_pkts_64 += smb->rx_pkts_64; 2466 stat->rx_pkts_65_127 += smb->rx_pkts_65_127; 2467 stat->rx_pkts_128_255 += smb->rx_pkts_128_255; 2468 stat->rx_pkts_256_511 += smb->rx_pkts_256_511; 2469 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; 2470 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; 2471 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; 2472 stat->rx_pkts_truncated += smb->rx_pkts_truncated; 2473 stat->rx_fifo_oflows += smb->rx_fifo_oflows; 2474 stat->rx_rrs_errs += smb->rx_rrs_errs; 2475 stat->rx_alignerrs += smb->rx_alignerrs; 2476 stat->rx_bcast_bytes += smb->rx_bcast_bytes; 2477 stat->rx_mcast_bytes += smb->rx_mcast_bytes; 2478 stat->rx_pkts_filtered += smb->rx_pkts_filtered; 2479 2480 /* Tx stats. */ 2481 stat->tx_frames += smb->tx_frames; 2482 stat->tx_bcast_frames += smb->tx_bcast_frames; 2483 stat->tx_mcast_frames += smb->tx_mcast_frames; 2484 stat->tx_pause_frames += smb->tx_pause_frames; 2485 stat->tx_excess_defer += smb->tx_excess_defer; 2486 stat->tx_control_frames += smb->tx_control_frames; 2487 stat->tx_deferred += smb->tx_deferred; 2488 stat->tx_bytes += smb->tx_bytes; 2489 stat->tx_pkts_64 += smb->tx_pkts_64; 2490 stat->tx_pkts_65_127 += smb->tx_pkts_65_127; 2491 stat->tx_pkts_128_255 += smb->tx_pkts_128_255; 2492 stat->tx_pkts_256_511 += smb->tx_pkts_256_511; 2493 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023; 2494 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518; 2495 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max; 2496 stat->tx_single_colls += smb->tx_single_colls; 2497 stat->tx_multi_colls += smb->tx_multi_colls; 2498 stat->tx_late_colls += smb->tx_late_colls; 2499 stat->tx_excess_colls += smb->tx_excess_colls; 2500 stat->tx_abort += smb->tx_abort; 2501 stat->tx_underrun += smb->tx_underrun; 2502 stat->tx_desc_underrun += smb->tx_desc_underrun; 2503 stat->tx_lenerrs += smb->tx_lenerrs; 2504 stat->tx_pkts_truncated += smb->tx_pkts_truncated; 2505 stat->tx_bcast_bytes += smb->tx_bcast_bytes; 2506 stat->tx_mcast_bytes += smb->tx_mcast_bytes; 2507 2508 /* Update counters in ifnet. */ 2509 IFNET_STAT_INC(ifp, opackets, smb->tx_frames); 2510 2511 IFNET_STAT_INC(ifp, collisions, smb->tx_single_colls + 2512 smb->tx_multi_colls * 2 + smb->tx_late_colls + 2513 smb->tx_abort * HDPX_CFG_RETRY_DEFAULT); 2514 2515 /* 2516 * XXX 2517 * tx_pkts_truncated counter looks suspicious. It constantly 2518 * increments with no sign of Tx errors. This may indicate 2519 * the counter name is not correct one so I've removed the 2520 * counter in output errors. 
2521 */ 2522 IFNET_STAT_INC(ifp, oerrors, smb->tx_abort + smb->tx_late_colls + 2523 smb->tx_underrun); 2524 2525 IFNET_STAT_INC(ifp, ipackets, smb->rx_frames); 2526 2527 IFNET_STAT_INC(ifp, ierrors, smb->rx_crcerrs + smb->rx_lenerrs + 2528 smb->rx_runts + smb->rx_pkts_truncated + 2529 smb->rx_fifo_oflows + smb->rx_rrs_errs + 2530 smb->rx_alignerrs); 2531 2532 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2533 /* Update done, clear. */ 2534 smb->updated = 0; 2535 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 2536 sc->alc_cdata.alc_smb_map, 2537 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2538 } 2539 } 2540 2541 static void 2542 alc_intr(void *arg) 2543 { 2544 struct alc_softc *sc = arg; 2545 struct ifnet *ifp = &sc->arpcom.ac_if; 2546 uint32_t status; 2547 2548 ASSERT_SERIALIZED(ifp->if_serializer); 2549 2550 status = CSR_READ_4(sc, ALC_INTR_STATUS); 2551 if ((status & ALC_INTRS) == 0) 2552 return; 2553 2554 /* Acknowledge interrupts and disable interrupts. */ 2555 CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT); 2556 2557 if (ifp->if_flags & IFF_RUNNING) { 2558 if (status & INTR_RX_PKT) { 2559 if (alc_rxintr(sc)) { 2560 alc_init(sc); 2561 return; 2562 } 2563 } 2564 if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST | 2565 INTR_TXQ_TO_RST)) { 2566 if (status & INTR_DMA_RD_TO_RST) { 2567 if_printf(ifp, 2568 "DMA read error! -- resetting\n"); 2569 } 2570 if (status & INTR_DMA_WR_TO_RST) { 2571 if_printf(ifp, 2572 "DMA write error! -- resetting\n"); 2573 } 2574 if (status & INTR_TXQ_TO_RST) 2575 if_printf(ifp, "TxQ reset! -- resetting\n"); 2576 alc_init(sc); 2577 return; 2578 } 2579 if (!ifq_is_empty(&ifp->if_snd)) 2580 if_devstart(ifp); 2581 2582 /* Re-enable interrupts */ 2583 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF); 2584 } 2585 } 2586 2587 static void 2588 alc_txeof(struct alc_softc *sc) 2589 { 2590 struct ifnet *ifp; 2591 struct alc_txdesc *txd; 2592 uint32_t cons, prod; 2593 int prog; 2594 2595 ifp = sc->alc_ifp; 2596 2597 if (sc->alc_cdata.alc_tx_cnt == 0) 2598 return; 2599 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, 2600 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_POSTWRITE); 2601 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) { 2602 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, 2603 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD); 2604 prod = sc->alc_rdata.alc_cmb->cons; 2605 } else 2606 prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX); 2607 /* Assume we're using normal Tx priority queue. */ 2608 prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >> 2609 MBOX_TD_CONS_LO_IDX_SHIFT; 2610 cons = sc->alc_cdata.alc_tx_cons; 2611 /* 2612 * Go through our Tx list and free mbufs for those 2613 * frames which have been transmitted. 2614 */ 2615 for (prog = 0; cons != prod; prog++, 2616 ALC_DESC_INC(cons, ALC_TX_RING_CNT)) { 2617 if (sc->alc_cdata.alc_tx_cnt <= 0) 2618 break; 2619 prog++; 2620 ifq_clr_oactive(&ifp->if_snd); 2621 sc->alc_cdata.alc_tx_cnt--; 2622 txd = &sc->alc_cdata.alc_txdesc[cons]; 2623 if (txd->tx_m != NULL) { 2624 /* Reclaim transmitted mbufs. */ 2625 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, 2626 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2627 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, 2628 txd->tx_dmamap); 2629 m_freem(txd->tx_m); 2630 txd->tx_m = NULL; 2631 } 2632 } 2633 2634 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) 2635 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, 2636 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_PREREAD); 2637 sc->alc_cdata.alc_tx_cons = cons; 2638 /* 2639 * Unarm watchdog timer only when there is no pending 2640 * frames in Tx queue. 
2641 */ 2642 if (sc->alc_cdata.alc_tx_cnt == 0) 2643 sc->alc_watchdog_timer = 0; 2644 } 2645 2646 static int 2647 alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd, boolean_t wait) 2648 { 2649 struct mbuf *m; 2650 bus_dma_segment_t segs[1]; 2651 bus_dmamap_t map; 2652 int nsegs; 2653 int error; 2654 2655 m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 2656 if (m == NULL) 2657 return (ENOBUFS); 2658 m->m_len = m->m_pkthdr.len = MCLBYTES; 2659 #ifdef foo 2660 /* Hardware require 4 bytes align */ 2661 m_adj(m, ETHER_ALIGN); 2662 #endif 2663 2664 error = bus_dmamap_load_mbuf_segment( 2665 sc->alc_cdata.alc_rx_tag, 2666 sc->alc_cdata.alc_rx_sparemap, 2667 m, segs, 1, &nsegs, BUS_DMA_NOWAIT); 2668 if (error) { 2669 m_freem(m); 2670 return (ENOBUFS); 2671 } 2672 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 2673 2674 if (rxd->rx_m != NULL) { 2675 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap, 2676 BUS_DMASYNC_POSTREAD); 2677 bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap); 2678 } 2679 map = rxd->rx_dmamap; 2680 rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap; 2681 sc->alc_cdata.alc_rx_sparemap = map; 2682 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap, 2683 BUS_DMASYNC_PREREAD); 2684 rxd->rx_m = m; 2685 rxd->rx_desc->addr = htole64(segs[0].ds_addr); 2686 return (0); 2687 } 2688 2689 static int 2690 alc_rxintr(struct alc_softc *sc) 2691 { 2692 struct ifnet *ifp; 2693 struct rx_rdesc *rrd; 2694 uint32_t nsegs, status; 2695 int rr_cons, prog; 2696 2697 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag, 2698 sc->alc_cdata.alc_rr_ring_map, 2699 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2700 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag, 2701 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_POSTWRITE); 2702 rr_cons = sc->alc_cdata.alc_rr_cons; 2703 ifp = sc->alc_ifp; 2704 for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) { 2705 rrd = &sc->alc_rdata.alc_rr_ring[rr_cons]; 2706 status = le32toh(rrd->status); 2707 if ((status & RRD_VALID) == 0) 2708 break; 2709 nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo)); 2710 if (nsegs == 0) { 2711 /* This should not happen! */ 2712 device_printf(sc->alc_dev, 2713 "unexpected segment count -- resetting\n"); 2714 return (EIO); 2715 } 2716 alc_rxeof(sc, rrd); 2717 /* Clear Rx return status. */ 2718 rrd->status = 0; 2719 ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT); 2720 sc->alc_cdata.alc_rx_cons += nsegs; 2721 sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT; 2722 prog += nsegs; 2723 } 2724 2725 if (prog > 0) { 2726 /* Update the consumer index. */ 2727 sc->alc_cdata.alc_rr_cons = rr_cons; 2728 /* Sync Rx return descriptors. */ 2729 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag, 2730 sc->alc_cdata.alc_rr_ring_map, 2731 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2732 /* 2733 * Sync updated Rx descriptors such that controller see 2734 * modified buffer addresses. 2735 */ 2736 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag, 2737 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE); 2738 /* 2739 * Let controller know availability of new Rx buffers. 2740 * Since alc(4) use RXQ_CFG_RD_BURST_DEFAULT descriptors 2741 * it may be possible to update ALC_MBOX_RD0_PROD_IDX 2742 * only when Rx buffer pre-fetching is required. In 2743 * addition we already set ALC_RX_RD_FREE_THRESH to 2744 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However 2745 * it still seems that pre-fetching needs more 2746 * experimentation. 
2747 */ 2748 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 2749 sc->alc_cdata.alc_rx_cons); 2750 } 2751 2752 return 0; 2753 } 2754 2755 /* Receive a frame. */ 2756 static void 2757 alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd) 2758 { 2759 struct alc_rxdesc *rxd; 2760 struct ifnet *ifp; 2761 struct mbuf *mp, *m; 2762 uint32_t rdinfo, status, vtag; 2763 int count, nsegs, rx_cons; 2764 2765 ifp = sc->alc_ifp; 2766 status = le32toh(rrd->status); 2767 rdinfo = le32toh(rrd->rdinfo); 2768 rx_cons = RRD_RD_IDX(rdinfo); 2769 nsegs = RRD_RD_CNT(rdinfo); 2770 2771 sc->alc_cdata.alc_rxlen = RRD_BYTES(status); 2772 if ((status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) != 0) { 2773 /* 2774 * We want to pass the following frames to upper 2775 * layer regardless of error status of Rx return 2776 * ring. 2777 * 2778 * o IP/TCP/UDP checksum is bad. 2779 * o frame length and protocol specific length 2780 * does not match. 2781 * 2782 * Force network stack compute checksum for 2783 * errored frames. 2784 */ 2785 status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK; 2786 if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN | 2787 RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0) 2788 return; 2789 } 2790 2791 for (count = 0; count < nsegs; count++, 2792 ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) { 2793 rxd = &sc->alc_cdata.alc_rxdesc[rx_cons]; 2794 mp = rxd->rx_m; 2795 /* Add a new receive buffer to the ring. */ 2796 if (alc_newbuf(sc, rxd, FALSE) != 0) { 2797 IFNET_STAT_INC(ifp, iqdrops, 1); 2798 /* Reuse Rx buffers. */ 2799 if (sc->alc_cdata.alc_rxhead != NULL) 2800 m_freem(sc->alc_cdata.alc_rxhead); 2801 break; 2802 } 2803 2804 /* 2805 * Assume we've received a full sized frame. 2806 * Actual size is fixed when we encounter the end of 2807 * multi-segmented frame. 2808 */ 2809 mp->m_len = sc->alc_buf_size; 2810 2811 /* Chain received mbufs. */ 2812 if (sc->alc_cdata.alc_rxhead == NULL) { 2813 sc->alc_cdata.alc_rxhead = mp; 2814 sc->alc_cdata.alc_rxtail = mp; 2815 } else { 2816 sc->alc_cdata.alc_rxprev_tail = 2817 sc->alc_cdata.alc_rxtail; 2818 sc->alc_cdata.alc_rxtail->m_next = mp; 2819 sc->alc_cdata.alc_rxtail = mp; 2820 } 2821 2822 if (count == nsegs - 1) { 2823 /* Last desc. for this frame. */ 2824 m = sc->alc_cdata.alc_rxhead; 2825 /* 2826 * It seems that L1C/L2C controller has no way 2827 * to tell hardware to strip CRC bytes. 2828 */ 2829 m->m_pkthdr.len = 2830 sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN; 2831 if (nsegs > 1) { 2832 /* Set last mbuf size. */ 2833 mp->m_len = sc->alc_cdata.alc_rxlen - 2834 (nsegs - 1) * sc->alc_buf_size; 2835 /* Remove the CRC bytes in chained mbufs. */ 2836 if (mp->m_len <= ETHER_CRC_LEN) { 2837 sc->alc_cdata.alc_rxtail = 2838 sc->alc_cdata.alc_rxprev_tail; 2839 sc->alc_cdata.alc_rxtail->m_len -= 2840 (ETHER_CRC_LEN - mp->m_len); 2841 sc->alc_cdata.alc_rxtail->m_next = NULL; 2842 m_freem(mp); 2843 } else { 2844 mp->m_len -= ETHER_CRC_LEN; 2845 } 2846 } else 2847 m->m_len = m->m_pkthdr.len; 2848 m->m_pkthdr.rcvif = ifp; 2849 /* 2850 * Due to hardware bugs, Rx checksum offloading 2851 * was intentionally disabled. 2852 */ 2853 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && 2854 (status & RRD_VLAN_TAG) != 0) { 2855 vtag = RRD_VLAN(le32toh(rrd->vtag)); 2856 m->m_pkthdr.ether_vlantag = ntohs(vtag); 2857 m->m_flags |= M_VLANTAG; 2858 } 2859 2860 /* Pass it on. */ 2861 ifp->if_input(ifp, m, NULL, -1); 2862 } 2863 } 2864 /* Reset mbuf chains. 
*/ 2865 ALC_RXCHAIN_RESET(sc); 2866 } 2867 2868 static void 2869 alc_tick(void *arg) 2870 { 2871 struct alc_softc *sc = arg; 2872 struct ifnet *ifp = &sc->arpcom.ac_if; 2873 struct mii_data *mii; 2874 2875 lwkt_serialize_enter(ifp->if_serializer); 2876 2877 mii = device_get_softc(sc->alc_miibus); 2878 mii_tick(mii); 2879 alc_stats_update(sc); 2880 /* 2881 * alc(4) does not rely on Tx completion interrupts to reclaim 2882 * transferred buffers. Instead Tx completion interrupts are 2883 * used to hint for scheduling Tx task. So it's necessary to 2884 * release transmitted buffers by kicking Tx completion 2885 * handler. This limits the maximum reclamation delay to a hz. 2886 */ 2887 alc_txeof(sc); 2888 alc_watchdog(sc); 2889 callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc); 2890 2891 lwkt_serialize_exit(ifp->if_serializer); 2892 } 2893 2894 static void 2895 alc_reset(struct alc_softc *sc) 2896 { 2897 uint32_t reg; 2898 int i; 2899 2900 reg = CSR_READ_4(sc, ALC_MASTER_CFG) & 0xFFFF; 2901 reg |= MASTER_OOB_DIS_OFF | MASTER_RESET; 2902 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 2903 2904 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 2905 DELAY(10); 2906 if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0) 2907 break; 2908 } 2909 if (i == 0) 2910 device_printf(sc->alc_dev, "master reset timeout!\n"); 2911 2912 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 2913 if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0) 2914 break; 2915 DELAY(10); 2916 } 2917 2918 if (i == 0) 2919 device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg); 2920 } 2921 2922 static void 2923 alc_init(void *xsc) 2924 { 2925 struct alc_softc *sc = xsc; 2926 struct ifnet *ifp = &sc->arpcom.ac_if; 2927 struct mii_data *mii; 2928 uint8_t eaddr[ETHER_ADDR_LEN]; 2929 bus_addr_t paddr; 2930 uint32_t reg, rxf_hi, rxf_lo; 2931 2932 ASSERT_SERIALIZED(ifp->if_serializer); 2933 2934 mii = device_get_softc(sc->alc_miibus); 2935 2936 /* 2937 * Cancel any pending I/O. 2938 */ 2939 alc_stop(sc); 2940 /* 2941 * Reset the chip to a known state. 2942 */ 2943 alc_reset(sc); 2944 2945 /* Initialize Rx descriptors. */ 2946 if (alc_init_rx_ring(sc) != 0) { 2947 device_printf(sc->alc_dev, "no memory for Rx buffers.\n"); 2948 alc_stop(sc); 2949 return; 2950 } 2951 alc_init_rr_ring(sc); 2952 alc_init_tx_ring(sc); 2953 alc_init_cmb(sc); 2954 alc_init_smb(sc); 2955 2956 /* Enable all clocks. */ 2957 CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0); 2958 2959 /* Reprogram the station address. */ 2960 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 2961 CSR_WRITE_4(sc, ALC_PAR0, 2962 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 2963 CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]); 2964 /* 2965 * Clear WOL status and disable all WOL feature as WOL 2966 * would interfere Rx operation under normal environments. 2967 */ 2968 CSR_READ_4(sc, ALC_WOL_CFG); 2969 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 2970 /* Set Tx descriptor base addresses. */ 2971 paddr = sc->alc_rdata.alc_tx_ring_paddr; 2972 CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 2973 CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 2974 /* We don't use high priority ring. */ 2975 CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0); 2976 /* Set Tx descriptor counter. */ 2977 CSR_WRITE_4(sc, ALC_TD_RING_CNT, 2978 (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK); 2979 /* Set Rx descriptor base addresses. 
*/ 2980 paddr = sc->alc_rdata.alc_rx_ring_paddr; 2981 CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 2982 CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 2983 /* We use one Rx ring. */ 2984 CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0); 2985 CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0); 2986 CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0); 2987 /* Set Rx descriptor counter. */ 2988 CSR_WRITE_4(sc, ALC_RD_RING_CNT, 2989 (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK); 2990 2991 /* 2992 * Let hardware split jumbo frames into alc_max_buf_sized chunks. 2993 * if it do not fit the buffer size. Rx return descriptor holds 2994 * a counter that indicates how many fragments were made by the 2995 * hardware. The buffer size should be multiple of 8 bytes. 2996 * Since hardware has limit on the size of buffer size, always 2997 * use the maximum value. 2998 * For strict-alignment architectures make sure to reduce buffer 2999 * size by 8 bytes to make room for alignment fixup. 3000 */ 3001 sc->alc_buf_size = RX_BUF_SIZE_MAX; 3002 CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size); 3003 3004 paddr = sc->alc_rdata.alc_rr_ring_paddr; 3005 /* Set Rx return descriptor base addresses. */ 3006 CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 3007 /* We use one Rx return ring. */ 3008 CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0); 3009 CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0); 3010 CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0); 3011 /* Set Rx return descriptor counter. */ 3012 CSR_WRITE_4(sc, ALC_RRD_RING_CNT, 3013 (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK); 3014 paddr = sc->alc_rdata.alc_cmb_paddr; 3015 CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr)); 3016 paddr = sc->alc_rdata.alc_smb_paddr; 3017 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 3018 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr)); 3019 3020 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) { 3021 /* Reconfigure SRAM - Vendor magic. */ 3022 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0); 3023 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100); 3024 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000); 3025 CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0); 3026 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0); 3027 CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0); 3028 CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000); 3029 CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000); 3030 } 3031 3032 /* Tell hardware that we're ready to load DMA blocks. */ 3033 CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD); 3034 3035 /* Configure interrupt moderation timer. */ 3036 reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT; 3037 reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT; 3038 CSR_WRITE_4(sc, ALC_IM_TIMER, reg); 3039 /* 3040 * We don't want to automatic interrupt clear as task queue 3041 * for the interrupt should know interrupt status. 3042 */ 3043 reg = MASTER_SA_TIMER_ENB; 3044 if (ALC_USECS(sc->alc_int_rx_mod) != 0) 3045 reg |= MASTER_IM_RX_TIMER_ENB; 3046 if (ALC_USECS(sc->alc_int_tx_mod) != 0) 3047 reg |= MASTER_IM_TX_TIMER_ENB; 3048 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 3049 /* 3050 * Disable interrupt re-trigger timer. We don't want automatic 3051 * re-triggering of un-ACKed interrupts. 3052 */ 3053 CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0)); 3054 /* Configure CMB. 
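 * The Coalescing Message Block lets the controller DMA the Tx
 * consumer index into host memory so alc_txeof() can pick it up
 * without a register read.  When the CMB is usable it appears to be
 * updated every 4 Tx descriptors or on a 5ms timer (ALC_CMB_TD_THRESH
 * and ALC_CMB_TX_TIMER below); otherwise the timer is simply disabled.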
*/ 3055 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) { 3056 CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4); 3057 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000)); 3058 } else { 3059 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0)); 3060 } 3061 /* 3062 * Hardware can be configured to issue SMB interrupt based 3063 * on programmed interval. Since there is a callout that is 3064 * invoked for every hz in driver we use that instead of 3065 * relying on periodic SMB interrupt. 3066 */ 3067 CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0)); 3068 /* Clear MAC statistics. */ 3069 alc_stats_clear(sc); 3070 3071 /* 3072 * Always use maximum frame size that controller can support. 3073 * Otherwise received frames that has larger frame length 3074 * than alc(4) MTU would be silently dropped in hardware. This 3075 * would make path-MTU discovery hard as sender wouldn't get 3076 * any responses from receiver. alc(4) supports 3077 * multi-fragmented frames on Rx path so it has no issue on 3078 * assembling fragmented frames. Using maximum frame size also 3079 * removes the need to reinitialize hardware when interface 3080 * MTU configuration was changed. 3081 * 3082 * Be conservative in what you do, be liberal in what you 3083 * accept from others - RFC 793. 3084 */ 3085 CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen); 3086 3087 /* Disable header split(?) */ 3088 CSR_WRITE_4(sc, ALC_HDS_CFG, 0); 3089 3090 /* Configure IPG/IFG parameters. */ 3091 CSR_WRITE_4(sc, ALC_IPG_IFG_CFG, 3092 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) | 3093 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) | 3094 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) | 3095 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK)); 3096 /* Set parameters for half-duplex media. */ 3097 CSR_WRITE_4(sc, ALC_HDPX_CFG, 3098 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) & 3099 HDPX_CFG_LCOL_MASK) | 3100 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) & 3101 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN | 3102 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) & 3103 HDPX_CFG_ABEBT_MASK) | 3104 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) & 3105 HDPX_CFG_JAMIPG_MASK)); 3106 /* 3107 * Set TSO/checksum offload threshold. For frames that is 3108 * larger than this threshold, hardware wouldn't do 3109 * TSO/checksum offloading. 3110 */ 3111 CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH, 3112 (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) & 3113 TSO_OFFLOAD_THRESH_MASK); 3114 /* Configure TxQ. */ 3115 reg = (alc_dma_burst[sc->alc_dma_rd_burst] << 3116 TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK; 3117 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B || 3118 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) { 3119 reg >>= 1; 3120 } 3121 reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) & 3122 TXQ_CFG_TD_BURST_MASK; 3123 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE); 3124 3125 /* Configure Rx free descriptor pre-fetching. */ 3126 CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH, 3127 ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) & 3128 RX_RD_FREE_THRESH_HI_MASK) | 3129 ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) & 3130 RX_RD_FREE_THRESH_LO_MASK)); 3131 3132 /* 3133 * Configure flow control parameters. 
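 * On AR8131/AR8132 the pause thresholds are derived from the Rx FIFO
 * length read back from SRAM (ALC_SRAM_RX_FIFO_LEN):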
3134 * XON : 80% of Rx FIFO 3135 * XOFF : 30% of Rx FIFO 3136 */ 3137 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 || 3138 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132) { 3139 reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN); 3140 rxf_hi = (reg * 8) / 10; 3141 rxf_lo = (reg * 3) / 10; 3142 CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH, 3143 ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) & 3144 RX_FIFO_PAUSE_THRESH_LO_MASK) | 3145 ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) & 3146 RX_FIFO_PAUSE_THRESH_HI_MASK)); 3147 } 3148 3149 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B || 3150 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2) { 3151 CSR_WRITE_4(sc, ALC_SERDES_LOCK, 3152 CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN | 3153 SERDES_PHY_CLK_SLOWDOWN); 3154 } 3155 3156 /* Disable RSS until I understand L1C/L2C's RSS logic. */ 3157 CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0); 3158 CSR_WRITE_4(sc, ALC_RSS_CPU, 0); 3159 3160 /* Configure RxQ. */ 3161 reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) & 3162 RXQ_CFG_RD_BURST_MASK; 3163 reg |= RXQ_CFG_RSS_MODE_DIS; 3164 if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0) 3165 reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M; 3166 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 3167 3168 /* Configure DMA parameters. */ 3169 reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI; 3170 reg |= sc->alc_rcb; 3171 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) 3172 reg |= DMA_CFG_CMB_ENB; 3173 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) 3174 reg |= DMA_CFG_SMB_ENB; 3175 else 3176 reg |= DMA_CFG_SMB_DIS; 3177 reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) << 3178 DMA_CFG_RD_BURST_SHIFT; 3179 reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) << 3180 DMA_CFG_WR_BURST_SHIFT; 3181 reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) & 3182 DMA_CFG_RD_DELAY_CNT_MASK; 3183 reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) & 3184 DMA_CFG_WR_DELAY_CNT_MASK; 3185 CSR_WRITE_4(sc, ALC_DMA_CFG, reg); 3186 3187 /* 3188 * Configure Tx/Rx MACs. 3189 * - Auto-padding for short frames. 3190 * - Enable CRC generation. 3191 * Actual reconfiguration of MAC for resolved speed/duplex 3192 * is followed after detection of link establishment. 3193 * AR813x/AR815x always does checksum computation regardless 3194 * of MAC_CFG_RXCSUM_ENB bit. Also the controller is known to 3195 * have bug in protocol field in Rx return structure so 3196 * these controllers can't handle fragmented frames. Disable 3197 * Rx checksum offloading until there is a newer controller 3198 * that has sane implementation. 3199 */ 3200 reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX | 3201 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & 3202 MAC_CFG_PREAMBLE_MASK); 3203 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 || 3204 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || 3205 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) { 3206 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW; 3207 } 3208 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0) 3209 reg |= MAC_CFG_SPEED_10_100; 3210 else 3211 reg |= MAC_CFG_SPEED_1000; 3212 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3213 3214 /* Set up the receive filter. */ 3215 alc_rxfilter(sc); 3216 alc_rxvlan(sc); 3217 3218 /* Acknowledge all pending interrupts and clear it. 
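 * Writing all-ones to ALC_INTR_STATUS acknowledges any stale events
 * once the mask has been programmed; the final write of 0 appears to
 * clear the interrupt-disable bit (cf. the 0x7FFFFFFF write in
 * alc_intr()) so the controller can raise interrupts again.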
*/ 3219 CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS); 3220 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 3221 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0); 3222 3223 sc->alc_flags &= ~ALC_FLAG_LINK; 3224 /* Switch to the current media. */ 3225 mii_mediachg(mii); 3226 3227 callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc); 3228 3229 ifp->if_flags |= IFF_RUNNING; 3230 ifq_clr_oactive(&ifp->if_snd); 3231 } 3232 3233 static void 3234 alc_stop(struct alc_softc *sc) 3235 { 3236 struct ifnet *ifp = &sc->arpcom.ac_if; 3237 struct alc_txdesc *txd; 3238 struct alc_rxdesc *rxd; 3239 uint32_t reg; 3240 int i; 3241 3242 ASSERT_SERIALIZED(ifp->if_serializer); 3243 3244 /* 3245 * Mark the interface down and cancel the watchdog timer. 3246 */ 3247 ifp->if_flags &= ~IFF_RUNNING; 3248 ifq_clr_oactive(&ifp->if_snd); 3249 sc->alc_flags &= ~ALC_FLAG_LINK; 3250 callout_stop(&sc->alc_tick_ch); 3251 sc->alc_watchdog_timer = 0; 3252 alc_stats_update(sc); 3253 /* Disable interrupts. */ 3254 CSR_WRITE_4(sc, ALC_INTR_MASK, 0); 3255 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 3256 alc_stop_queue(sc); 3257 /* Disable DMA. */ 3258 reg = CSR_READ_4(sc, ALC_DMA_CFG); 3259 reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB); 3260 reg |= DMA_CFG_SMB_DIS; 3261 CSR_WRITE_4(sc, ALC_DMA_CFG, reg); 3262 DELAY(1000); 3263 /* Stop Rx/Tx MACs. */ 3264 alc_stop_mac(sc); 3265 /* Disable interrupts which might be touched in taskq handler. */ 3266 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 3267 3268 /* Reclaim Rx buffers that have been processed. */ 3269 if (sc->alc_cdata.alc_rxhead != NULL) 3270 m_freem(sc->alc_cdata.alc_rxhead); 3271 ALC_RXCHAIN_RESET(sc); 3272 /* 3273 * Free Tx/Rx mbufs still in the queues. 3274 */ 3275 for (i = 0; i < ALC_RX_RING_CNT; i++) { 3276 rxd = &sc->alc_cdata.alc_rxdesc[i]; 3277 if (rxd->rx_m != NULL) { 3278 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, 3279 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3280 bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, 3281 rxd->rx_dmamap); 3282 m_freem(rxd->rx_m); 3283 rxd->rx_m = NULL; 3284 } 3285 } 3286 for (i = 0; i < ALC_TX_RING_CNT; i++) { 3287 txd = &sc->alc_cdata.alc_txdesc[i]; 3288 if (txd->tx_m != NULL) { 3289 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, 3290 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 3291 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, 3292 txd->tx_dmamap); 3293 m_freem(txd->tx_m); 3294 txd->tx_m = NULL; 3295 } 3296 } 3297 } 3298 3299 static void 3300 alc_stop_mac(struct alc_softc *sc) 3301 { 3302 uint32_t reg; 3303 int i; 3304 3305 /* Disable Rx/Tx MAC. */ 3306 reg = CSR_READ_4(sc, ALC_MAC_CFG); 3307 if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) { 3308 reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB); 3309 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3310 } 3311 for (i = ALC_TIMEOUT; i > 0; i--) { 3312 reg = CSR_READ_4(sc, ALC_IDLE_STATUS); 3313 if (reg == 0) 3314 break; 3315 DELAY(10); 3316 } 3317 if (i == 0) 3318 device_printf(sc->alc_dev, 3319 "could not disable Rx/Tx MAC(0x%08x)!\n", reg); 3320 } 3321 3322 static void 3323 alc_start_queue(struct alc_softc *sc) 3324 { 3325 uint32_t qcfg[] = { 3326 0, 3327 RXQ_CFG_QUEUE0_ENB, 3328 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB, 3329 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB, 3330 RXQ_CFG_ENB 3331 }; 3332 uint32_t cfg; 3333 3334 /* Enable RxQ. */ 3335 cfg = CSR_READ_4(sc, ALC_RXQ_CFG); 3336 cfg &= ~RXQ_CFG_ENB; 3337 cfg |= qcfg[1]; 3338 CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg); 3339 /* Enable TxQ. 
*/ 3340 cfg = CSR_READ_4(sc, ALC_TXQ_CFG); 3341 cfg |= TXQ_CFG_ENB; 3342 CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg); 3343 } 3344 3345 static void 3346 alc_stop_queue(struct alc_softc *sc) 3347 { 3348 uint32_t reg; 3349 int i; 3350 3351 /* Disable RxQ. */ 3352 reg = CSR_READ_4(sc, ALC_RXQ_CFG); 3353 if ((reg & RXQ_CFG_ENB) != 0) { 3354 reg &= ~RXQ_CFG_ENB; 3355 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 3356 } 3357 /* Disable TxQ. */ 3358 reg = CSR_READ_4(sc, ALC_TXQ_CFG); 3359 if ((reg & TXQ_CFG_ENB) != 0) { 3360 reg &= ~TXQ_CFG_ENB; 3361 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg); 3362 } 3363 for (i = ALC_TIMEOUT; i > 0; i--) { 3364 reg = CSR_READ_4(sc, ALC_IDLE_STATUS); 3365 if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0) 3366 break; 3367 DELAY(10); 3368 } 3369 if (i == 0) 3370 device_printf(sc->alc_dev, 3371 "could not disable RxQ/TxQ (0x%08x)!\n", reg); 3372 } 3373 3374 static void 3375 alc_init_tx_ring(struct alc_softc *sc) 3376 { 3377 struct alc_ring_data *rd; 3378 struct alc_txdesc *txd; 3379 int i; 3380 3381 sc->alc_cdata.alc_tx_prod = 0; 3382 sc->alc_cdata.alc_tx_cons = 0; 3383 sc->alc_cdata.alc_tx_cnt = 0; 3384 3385 rd = &sc->alc_rdata; 3386 bzero(rd->alc_tx_ring, ALC_TX_RING_SZ); 3387 for (i = 0; i < ALC_TX_RING_CNT; i++) { 3388 txd = &sc->alc_cdata.alc_txdesc[i]; 3389 txd->tx_m = NULL; 3390 } 3391 3392 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, 3393 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE); 3394 } 3395 3396 static int 3397 alc_init_rx_ring(struct alc_softc *sc) 3398 { 3399 struct alc_ring_data *rd; 3400 struct alc_rxdesc *rxd; 3401 int i; 3402 3403 sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1; 3404 rd = &sc->alc_rdata; 3405 bzero(rd->alc_rx_ring, ALC_RX_RING_SZ); 3406 for (i = 0; i < ALC_RX_RING_CNT; i++) { 3407 rxd = &sc->alc_cdata.alc_rxdesc[i]; 3408 rxd->rx_m = NULL; 3409 rxd->rx_desc = &rd->alc_rx_ring[i]; 3410 if (alc_newbuf(sc, rxd, TRUE) != 0) 3411 return (ENOBUFS); 3412 } 3413 3414 /* 3415 * Since the controller does not update Rx descriptors, the driver 3416 * does not have to read Rx descriptors back, so BUS_DMASYNC_PREWRITE 3417 * is enough to ensure coherence. 3418 */ 3419 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag, 3420 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE); 3421 /* Let controller know availability of new Rx buffers.
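 * As on the Tx side, the Rx producer index is posted through its
 * mailbox register (ALC_MBOX_RD0_PROD_IDX) so the MAC knows how far
 * it may fill the ring.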
*/ 3422 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons); 3423 3424 return (0); 3425 } 3426 3427 static void 3428 alc_init_rr_ring(struct alc_softc *sc) 3429 { 3430 struct alc_ring_data *rd; 3431 3432 sc->alc_cdata.alc_rr_cons = 0; 3433 ALC_RXCHAIN_RESET(sc); 3434 3435 rd = &sc->alc_rdata; 3436 bzero(rd->alc_rr_ring, ALC_RR_RING_SZ); 3437 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag, 3438 sc->alc_cdata.alc_rr_ring_map, 3439 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3440 } 3441 3442 static void 3443 alc_init_cmb(struct alc_softc *sc) 3444 { 3445 struct alc_ring_data *rd; 3446 3447 rd = &sc->alc_rdata; 3448 bzero(rd->alc_cmb, ALC_CMB_SZ); 3449 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map, 3450 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3451 } 3452 3453 static void 3454 alc_init_smb(struct alc_softc *sc) 3455 { 3456 struct alc_ring_data *rd; 3457 3458 rd = &sc->alc_rdata; 3459 bzero(rd->alc_smb, ALC_SMB_SZ); 3460 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map, 3461 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3462 } 3463 3464 static void 3465 alc_rxvlan(struct alc_softc *sc) 3466 { 3467 struct ifnet *ifp; 3468 uint32_t reg; 3469 3470 ifp = sc->alc_ifp; 3471 reg = CSR_READ_4(sc, ALC_MAC_CFG); 3472 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3473 reg |= MAC_CFG_VLAN_TAG_STRIP; 3474 else 3475 reg &= ~MAC_CFG_VLAN_TAG_STRIP; 3476 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3477 } 3478 3479 static void 3480 alc_rxfilter(struct alc_softc *sc) 3481 { 3482 struct ifnet *ifp; 3483 struct ifmultiaddr *ifma; 3484 uint32_t crc; 3485 uint32_t mchash[2]; 3486 uint32_t rxcfg; 3487 3488 ifp = sc->alc_ifp; 3489 3490 bzero(mchash, sizeof(mchash)); 3491 rxcfg = CSR_READ_4(sc, ALC_MAC_CFG); 3492 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC); 3493 if ((ifp->if_flags & IFF_BROADCAST) != 0) 3494 rxcfg |= MAC_CFG_BCAST; 3495 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 3496 if ((ifp->if_flags & IFF_PROMISC) != 0) 3497 rxcfg |= MAC_CFG_PROMISC; 3498 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 3499 rxcfg |= MAC_CFG_ALLMULTI; 3500 mchash[0] = 0xFFFFFFFF; 3501 mchash[1] = 0xFFFFFFFF; 3502 goto chipit; 3503 } 3504 3505 #if 0 3506 /* XXX */ 3507 if_maddr_rlock(ifp); 3508 #endif 3509 TAILQ_FOREACH(ifma, &sc->alc_ifp->if_multiaddrs, ifma_link) { 3510 if (ifma->ifma_addr->sa_family != AF_LINK) 3511 continue; 3512 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 3513 ifma->ifma_addr), ETHER_ADDR_LEN); 3514 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 3515 } 3516 #if 0 3517 /* XXX */ 3518 if_maddr_runlock(ifp); 3519 #endif 3520 3521 chipit: 3522 CSR_WRITE_4(sc, ALC_MAR0, mchash[0]); 3523 CSR_WRITE_4(sc, ALC_MAR1, mchash[1]); 3524 CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg); 3525 } 3526 3527 static int 3528 sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS) 3529 { 3530 return (sysctl_int_range(oidp, arg1, arg2, req, 3531 ALC_PROC_MIN, ALC_PROC_MAX)); 3532 } 3533 3534 static int 3535 sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS) 3536 { 3537 3538 return (sysctl_int_range(oidp, arg1, arg2, req, 3539 ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX)); 3540 } 3541