1 /* $OpenBSD: if_alc.c,v 1.58 2024/05/24 06:02:53 jsg Exp $ */ 2 /*- 3 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice unmodified, this list of conditions, and the following 11 * disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 /* Driver for Atheros AR813x/AR815x/AR816x/AR817x PCIe Ethernet. */ 30 31 #include "bpfilter.h" 32 #include "vlan.h" 33 34 #include <sys/param.h> 35 #include <sys/endian.h> 36 #include <sys/systm.h> 37 #include <sys/sockio.h> 38 #include <sys/mbuf.h> 39 #include <sys/queue.h> 40 #include <sys/device.h> 41 #include <sys/timeout.h> 42 43 #include <machine/bus.h> 44 45 #include <net/if.h> 46 #include <net/if_dl.h> 47 #include <net/if_media.h> 48 49 #include <netinet/in.h> 50 #include <netinet/if_ether.h> 51 52 #if NBPFILTER > 0 53 #include <net/bpf.h> 54 #endif 55 56 #include <dev/mii/mii.h> 57 #include <dev/mii/miivar.h> 58 59 #include <dev/pci/pcireg.h> 60 #include <dev/pci/pcivar.h> 61 #include <dev/pci/pcidevs.h> 62 63 #include <dev/pci/if_alcreg.h> 64 65 int alc_match(struct device *, void *, void *); 66 void alc_attach(struct device *, struct device *, void *); 67 int alc_detach(struct device *, int); 68 int alc_activate(struct device *, int); 69 70 int alc_init(struct ifnet *); 71 void alc_start(struct ifnet *); 72 int alc_ioctl(struct ifnet *, u_long, caddr_t); 73 void alc_watchdog(struct ifnet *); 74 int alc_mediachange(struct ifnet *); 75 void alc_mediastatus(struct ifnet *, struct ifmediareq *); 76 77 void alc_aspm(struct alc_softc *, int, uint64_t); 78 void alc_aspm_813x(struct alc_softc *, uint64_t); 79 void alc_aspm_816x(struct alc_softc *, int); 80 void alc_disable_l0s_l1(struct alc_softc *); 81 int alc_dma_alloc(struct alc_softc *); 82 void alc_dma_free(struct alc_softc *); 83 int alc_encap(struct alc_softc *, struct mbuf *); 84 void alc_get_macaddr(struct alc_softc *); 85 void alc_get_macaddr_813x(struct alc_softc *); 86 void alc_get_macaddr_816x(struct alc_softc *); 87 void alc_get_macaddr_par(struct alc_softc *); 88 void alc_init_cmb(struct alc_softc *); 89 void alc_init_rr_ring(struct alc_softc *); 90 int alc_init_rx_ring(struct alc_softc *); 91 void alc_init_smb(struct alc_softc *); 92 void alc_init_tx_ring(struct alc_softc *); 93 int alc_intr(void *); 94 void alc_mac_config(struct alc_softc *); 95 int 
alc_mii_readreg_813x(struct device *, int, int); 96 int alc_mii_readreg_816x(struct device *, int, int); 97 void alc_mii_writereg_813x(struct device *, int, int, int); 98 void alc_mii_writereg_816x(struct device *, int, int, int); 99 void alc_dsp_fixup(struct alc_softc *, int); 100 int alc_miibus_readreg(struct device *, int, int); 101 void alc_miibus_statchg(struct device *); 102 void alc_miibus_writereg(struct device *, int, int, int); 103 int alc_miidbg_readreg(struct alc_softc *, int); 104 void alc_miidbg_writereg(struct alc_softc *, int, int); 105 int alc_miiext_readreg(struct alc_softc *, int, int); 106 void alc_miiext_writereg(struct alc_softc *, int, int, int); 107 void alc_phy_reset_813x(struct alc_softc *); 108 void alc_phy_reset_816x(struct alc_softc *); 109 int alc_newbuf(struct alc_softc *, struct alc_rxdesc *); 110 void alc_phy_down(struct alc_softc *); 111 void alc_phy_reset(struct alc_softc *); 112 void alc_reset(struct alc_softc *); 113 void alc_rxeof(struct alc_softc *, struct rx_rdesc *); 114 int alc_rxintr(struct alc_softc *); 115 void alc_iff(struct alc_softc *); 116 void alc_rxvlan(struct alc_softc *); 117 void alc_start_queue(struct alc_softc *); 118 void alc_stats_clear(struct alc_softc *); 119 void alc_stats_update(struct alc_softc *); 120 void alc_stop(struct alc_softc *); 121 void alc_stop_mac(struct alc_softc *); 122 void alc_stop_queue(struct alc_softc *); 123 void alc_tick(void *); 124 void alc_txeof(struct alc_softc *); 125 void alc_init_pcie(struct alc_softc *, int); 126 void alc_config_msi(struct alc_softc *); 127 int alc_dma_alloc(struct alc_softc *); 128 void alc_dma_free(struct alc_softc *); 129 int alc_encap(struct alc_softc *, struct mbuf *); 130 void alc_osc_reset(struct alc_softc *); 131 132 uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0, 0 }; 133 134 const struct pci_matchid alc_devices[] = { 135 { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1C }, 136 { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C }, 137 { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1D }, 138 { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1D_1 }, 139 { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C_1 }, 140 { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C_2 }, 141 { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8161 }, 142 { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8162 }, 143 { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8171 }, 144 { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8172 }, 145 { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2200 }, 146 { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2400 }, 147 { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2500 } 148 }; 149 150 const struct cfattach alc_ca = { 151 sizeof (struct alc_softc), alc_match, alc_attach, alc_detach, 152 alc_activate 153 }; 154 155 struct cfdriver alc_cd = { 156 NULL, "alc", DV_IFNET 157 }; 158 159 int alcdebug = 0; 160 #define DPRINTF(x) do { if (alcdebug) printf x; } while (0) 161 162 #define ALC_CSUM_FEATURES (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT) 163 164 int 165 alc_miibus_readreg(struct device *dev, int phy, int reg) 166 { 167 struct alc_softc *sc = (struct alc_softc *)dev; 168 uint32_t v; 169 170 if (phy != sc->alc_phyaddr) 171 return (0); 172 173 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 174 v = alc_mii_readreg_816x(dev, phy, reg); 175 else 176 v = alc_mii_readreg_813x(dev, phy, reg); 177 178 return (v); 179 } 180 181 int 182 alc_mii_readreg_813x(struct device *dev, int phy, int reg) 183 { 184 struct alc_softc *sc = (struct alc_softc *)dev; 185 uint32_t v; 186 int i; 187 188 /* 189 * For AR8132 
fast ethernet controller, do not report 1000baseT 190 * capability to mii(4). Even though AR8132 uses the same 191 * model/revision number of F1 gigabit PHY, the PHY has no 192 * ability to establish 1000baseT link. 193 */ 194 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 && 195 reg == MII_EXTSR) 196 return (0); 197 198 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | 199 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 200 for (i = ALC_PHY_TIMEOUT; i > 0; i--) { 201 DELAY(5); 202 v = CSR_READ_4(sc, ALC_MDIO); 203 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 204 break; 205 } 206 207 if (i == 0) { 208 printf("%s: phy read timeout: phy %d, reg %d\n", 209 sc->sc_dev.dv_xname, phy, reg); 210 return (0); 211 } 212 213 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); 214 } 215 216 int 217 alc_mii_readreg_816x(struct device *dev, int phy, int reg) 218 { 219 struct alc_softc *sc = (struct alc_softc *)dev; 220 uint32_t clk, v; 221 int i; 222 223 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) 224 clk = MDIO_CLK_25_128; 225 else 226 clk = MDIO_CLK_25_4; 227 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | 228 MDIO_SUP_PREAMBLE | clk | MDIO_REG_ADDR(reg)); 229 for (i = ALC_PHY_TIMEOUT; i > 0; i--) { 230 DELAY(5); 231 v = CSR_READ_4(sc, ALC_MDIO); 232 if ((v & MDIO_OP_BUSY) == 0) 233 break; 234 } 235 236 if (i == 0) { 237 printf("%s: phy read timeout: phy %d, reg %d\n", 238 sc->sc_dev.dv_xname, phy, reg); 239 return (0); 240 } 241 242 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); 243 } 244 245 void 246 alc_miibus_writereg(struct device *dev, int phy, int reg, int val) 247 { 248 struct alc_softc *sc = (struct alc_softc *)dev; 249 250 if (phy != sc->alc_phyaddr) 251 return; 252 253 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 254 alc_mii_writereg_816x(dev, phy, reg, val); 255 else 256 alc_mii_writereg_813x(dev, phy, reg, val); 257 } 258 259 void 260 alc_mii_writereg_813x(struct device *dev, int phy, int reg, int val) 261 { 262 struct alc_softc *sc = (struct alc_softc *)dev; 263 uint32_t v; 264 int i; 265 266 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | 267 (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | 268 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 269 for (i = ALC_PHY_TIMEOUT; i > 0; i--) { 270 DELAY(5); 271 v = CSR_READ_4(sc, ALC_MDIO); 272 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 273 break; 274 } 275 276 if (i == 0) 277 printf("%s: phy write timeout: phy %d, reg %d\n", 278 sc->sc_dev.dv_xname, phy, reg); 279 } 280 281 void 282 alc_mii_writereg_816x(struct device *dev, int phy, int reg, int val) 283 { 284 struct alc_softc *sc = (struct alc_softc *)dev; 285 uint32_t clk, v; 286 int i; 287 288 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) 289 clk = MDIO_CLK_25_128; 290 else 291 clk = MDIO_CLK_25_4; 292 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | 293 ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) | MDIO_REG_ADDR(reg) | 294 MDIO_SUP_PREAMBLE | clk); 295 for (i = ALC_PHY_TIMEOUT; i > 0; i--) { 296 DELAY(5); 297 v = CSR_READ_4(sc, ALC_MDIO); 298 if ((v & MDIO_OP_BUSY) == 0) 299 break; 300 } 301 302 if (i == 0) 303 printf("%s: phy write timeout: phy %d, reg %d\n", 304 sc->sc_dev.dv_xname, phy, reg); 305 } 306 307 void 308 alc_miibus_statchg(struct device *dev) 309 { 310 struct alc_softc *sc = (struct alc_softc *)dev; 311 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 312 struct mii_data *mii = &sc->sc_miibus; 313 uint32_t reg; 314 315 if ((ifp->if_flags & IFF_RUNNING) == 0) 316 return; 317 318 sc->alc_flags &= ~ALC_FLAG_LINK; 319 if 
((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 320 (IFM_ACTIVE | IFM_AVALID)) { 321 switch (IFM_SUBTYPE(mii->mii_media_active)) { 322 case IFM_10_T: 323 case IFM_100_TX: 324 sc->alc_flags |= ALC_FLAG_LINK; 325 break; 326 case IFM_1000_T: 327 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) 328 sc->alc_flags |= ALC_FLAG_LINK; 329 break; 330 default: 331 break; 332 } 333 } 334 /* Stop Rx/Tx MACs. */ 335 alc_stop_mac(sc); 336 337 /* Program MACs with resolved speed/duplex/flow-control. */ 338 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { 339 alc_start_queue(sc); 340 alc_mac_config(sc); 341 /* Re-enable Tx/Rx MACs. */ 342 reg = CSR_READ_4(sc, ALC_MAC_CFG); 343 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; 344 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 345 } 346 alc_aspm(sc, 0, IFM_SUBTYPE(mii->mii_media_active)); 347 alc_dsp_fixup(sc, IFM_SUBTYPE(mii->mii_media_active)); 348 } 349 350 int 351 alc_miidbg_readreg(struct alc_softc *sc, int reg) 352 { 353 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, 354 reg); 355 return (alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr, 356 ALC_MII_DBG_DATA)); 357 } 358 359 360 void 361 alc_miidbg_writereg(struct alc_softc *sc, int reg, int val) 362 { 363 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, 364 reg); 365 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, 366 val); 367 } 368 369 int 370 alc_miiext_readreg(struct alc_softc *sc, int devaddr, int reg) 371 { 372 uint32_t clk, v; 373 int i; 374 375 CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) | 376 EXT_MDIO_DEVADDR(devaddr)); 377 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) 378 clk = MDIO_CLK_25_128; 379 else 380 clk = MDIO_CLK_25_4; 381 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | 382 MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT); 383 for (i = ALC_PHY_TIMEOUT; i > 0; i--) { 384 DELAY(5); 385 v = CSR_READ_4(sc, ALC_MDIO); 386 if ((v & MDIO_OP_BUSY) == 0) 387 break; 388 } 389 390 if (i == 0) { 391 printf("%s: phy ext read timeout: phy %d, reg %d\n", 392 sc->sc_dev.dv_xname, devaddr, reg); 393 return (0); 394 } 395 396 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); 397 } 398 399 void 400 alc_miiext_writereg(struct alc_softc *sc, int devaddr, int reg, int val) 401 { 402 uint32_t clk, v; 403 int i; 404 405 CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) | 406 EXT_MDIO_DEVADDR(devaddr)); 407 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) 408 clk = MDIO_CLK_25_128; 409 else 410 clk = MDIO_CLK_25_4; 411 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | 412 ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) | 413 MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT); 414 for (i = ALC_PHY_TIMEOUT; i > 0; i--) { 415 DELAY(5); 416 v = CSR_READ_4(sc, ALC_MDIO); 417 if ((v & MDIO_OP_BUSY) == 0) 418 break; 419 } 420 421 if (i == 0) 422 printf("%s: phy ext write timeout: phy %d, reg %d\n", 423 sc->sc_dev.dv_xname, devaddr, reg); 424 } 425 426 void 427 alc_dsp_fixup(struct alc_softc *sc, int media) 428 { 429 uint16_t agc, len, val; 430 431 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) 432 return; 433 if (AR816X_REV(sc->alc_rev) >= AR816X_REV_C0) 434 return; 435 436 /* 437 * Vendor PHY magic.
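* Read the measured cable length (EXT_CLDCTL6) and AGC gain (DBG_AGC) and adjust the analog-detect/AFE thresholds for long cables: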
438 * 1000BT/AZ, wrong cable length 439 */ 440 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { 441 len = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL6); 442 len = (len >> EXT_CLDCTL6_CAB_LEN_SHIFT) & 443 EXT_CLDCTL6_CAB_LEN_MASK; 444 agc = alc_miidbg_readreg(sc, MII_DBG_AGC); 445 agc = (agc >> DBG_AGC_2_VGA_SHIFT) & DBG_AGC_2_VGA_MASK; 446 if ((media == IFM_1000_T && len > EXT_CLDCTL6_CAB_LEN_SHORT1G && 447 agc > DBG_AGC_LONG1G_LIMT) || 448 (media == IFM_100_TX && len > DBG_AGC_LONG100M_LIMT && 449 agc > DBG_AGC_LONG1G_LIMT)) { 450 alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT, 451 DBG_AZ_ANADECT_LONG); 452 val = alc_miiext_readreg(sc, MII_EXT_ANEG, 453 MII_EXT_ANEG_AFE); 454 val |= ANEG_AFEE_10BT_100M_TH; 455 alc_miiext_writereg(sc, MII_EXT_ANEG, 456 MII_EXT_ANEG_AFE, val); 457 } else { 458 alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT, 459 DBG_AZ_ANADECT_DEFAULT); 460 val = alc_miiext_readreg(sc, MII_EXT_ANEG, 461 MII_EXT_ANEG_AFE); 462 val &= ~ANEG_AFEE_10BT_100M_TH; 463 alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, 464 val); 465 } 466 if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 && 467 AR816X_REV(sc->alc_rev) == AR816X_REV_B0) { 468 if (media == IFM_1000_T) { 469 /* 470 * Giga link threshold, raise the tolerance of 471 * noise 50%. 472 */ 473 val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB); 474 val &= ~DBG_MSE20DB_TH_MASK; 475 val |= (DBG_MSE20DB_TH_HI << 476 DBG_MSE20DB_TH_SHIFT); 477 alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val); 478 } else if (media == IFM_100_TX) 479 alc_miidbg_writereg(sc, MII_DBG_MSE16DB, 480 DBG_MSE16DB_UP); 481 } 482 } else { 483 val = alc_miiext_readreg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE); 484 val &= ~ANEG_AFEE_10BT_100M_TH; 485 alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, val); 486 if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 && 487 AR816X_REV(sc->alc_rev) == AR816X_REV_B0) { 488 alc_miidbg_writereg(sc, MII_DBG_MSE16DB, 489 DBG_MSE16DB_DOWN); 490 val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB); 491 val &= ~DBG_MSE20DB_TH_MASK; 492 val |= (DBG_MSE20DB_TH_DEFAULT << DBG_MSE20DB_TH_SHIFT); 493 alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val); 494 } 495 } 496 } 497 498 void 499 alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 500 { 501 struct alc_softc *sc = ifp->if_softc; 502 struct mii_data *mii = &sc->sc_miibus; 503 504 if ((ifp->if_flags & IFF_UP) == 0) 505 return; 506 507 mii_pollstat(mii); 508 ifmr->ifm_status = mii->mii_media_status; 509 ifmr->ifm_active = mii->mii_media_active; 510 } 511 512 int 513 alc_mediachange(struct ifnet *ifp) 514 { 515 struct alc_softc *sc = ifp->if_softc; 516 struct mii_data *mii = &sc->sc_miibus; 517 int error; 518 519 if (mii->mii_instance != 0) { 520 struct mii_softc *miisc; 521 522 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 523 mii_phy_reset(miisc); 524 } 525 error = mii_mediachg(mii); 526 527 return (error); 528 } 529 530 int 531 alc_match(struct device *dev, void *match, void *aux) 532 { 533 return pci_matchbyid((struct pci_attach_args *)aux, alc_devices, 534 nitems(alc_devices)); 535 } 536 537 void 538 alc_get_macaddr(struct alc_softc *sc) 539 { 540 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 541 alc_get_macaddr_816x(sc); 542 else 543 alc_get_macaddr_813x(sc); 544 } 545 546 void 547 alc_get_macaddr_813x(struct alc_softc *sc) 548 { 549 uint32_t opt; 550 uint16_t val; 551 int eeprom, i; 552 553 eeprom = 0; 554 opt = CSR_READ_4(sc, ALC_OPT_CFG); 555 if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 && 556 (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) { 557 /* 558 * 
EEPROM found, let TWSI reload EEPROM configuration. 559 * This will set ethernet address of controller. 560 */ 561 eeprom++; 562 switch (sc->sc_product) { 563 case PCI_PRODUCT_ATTANSIC_L1C: 564 case PCI_PRODUCT_ATTANSIC_L2C: 565 if ((opt & OPT_CFG_CLK_ENB) == 0) { 566 opt |= OPT_CFG_CLK_ENB; 567 CSR_WRITE_4(sc, ALC_OPT_CFG, opt); 568 CSR_READ_4(sc, ALC_OPT_CFG); 569 DELAY(1000); 570 } 571 break; 572 case PCI_PRODUCT_ATTANSIC_L1D: 573 case PCI_PRODUCT_ATTANSIC_L1D_1: 574 case PCI_PRODUCT_ATTANSIC_L2C_1: 575 case PCI_PRODUCT_ATTANSIC_L2C_2: 576 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 577 ALC_MII_DBG_ADDR, 0x00); 578 val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr, 579 ALC_MII_DBG_DATA); 580 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 581 ALC_MII_DBG_DATA, val & 0xFF7F); 582 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 583 ALC_MII_DBG_ADDR, 0x3B); 584 val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr, 585 ALC_MII_DBG_DATA); 586 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 587 ALC_MII_DBG_DATA, val | 0x0008); 588 DELAY(20); 589 break; 590 } 591 592 CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG, 593 CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB); 594 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 595 CSR_READ_4(sc, ALC_WOL_CFG); 596 597 CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) | 598 TWSI_CFG_SW_LD_START); 599 for (i = 100; i > 0; i--) { 600 DELAY(1000); 601 if ((CSR_READ_4(sc, ALC_TWSI_CFG) & 602 TWSI_CFG_SW_LD_START) == 0) 603 break; 604 } 605 if (i == 0) 606 printf("%s: reloading EEPROM timeout!\n", 607 sc->sc_dev.dv_xname); 608 } else { 609 if (alcdebug) 610 printf("%s: EEPROM not found!\n", sc->sc_dev.dv_xname); 611 } 612 if (eeprom != 0) { 613 switch (sc->sc_product) { 614 case PCI_PRODUCT_ATTANSIC_L1C: 615 case PCI_PRODUCT_ATTANSIC_L2C: 616 if ((opt & OPT_CFG_CLK_ENB) != 0) { 617 opt &= ~OPT_CFG_CLK_ENB; 618 CSR_WRITE_4(sc, ALC_OPT_CFG, opt); 619 CSR_READ_4(sc, ALC_OPT_CFG); 620 DELAY(1000); 621 } 622 break; 623 case PCI_PRODUCT_ATTANSIC_L1D: 624 case PCI_PRODUCT_ATTANSIC_L1D_1: 625 case PCI_PRODUCT_ATTANSIC_L2C_1: 626 case PCI_PRODUCT_ATTANSIC_L2C_2: 627 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 628 ALC_MII_DBG_ADDR, 0x00); 629 val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr, 630 ALC_MII_DBG_DATA); 631 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 632 ALC_MII_DBG_DATA, val | 0x0080); 633 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 634 ALC_MII_DBG_ADDR, 0x3B); 635 val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr, 636 ALC_MII_DBG_DATA); 637 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 638 ALC_MII_DBG_DATA, val & 0xFFF7); 639 DELAY(20); 640 break; 641 } 642 } 643 644 alc_get_macaddr_par(sc); 645 } 646 647 void 648 alc_get_macaddr_816x(struct alc_softc *sc) 649 { 650 uint32_t reg; 651 int i, reloaded; 652 653 reloaded = 0; 654 /* Try to reload station address via TWSI. */ 655 for (i = 100; i > 0; i--) { 656 reg = CSR_READ_4(sc, ALC_SLD); 657 if ((reg & (SLD_PROGRESS | SLD_START)) == 0) 658 break; 659 DELAY(1000); 660 } 661 if (i != 0) { 662 CSR_WRITE_4(sc, ALC_SLD, reg | SLD_START); 663 for (i = 100; i > 0; i--) { 664 DELAY(1000); 665 reg = CSR_READ_4(sc, ALC_SLD); 666 if ((reg & SLD_START) == 0) 667 break; 668 } 669 if (i != 0) 670 reloaded++; 671 else if (alcdebug) 672 printf("%s: reloading station address via TWSI timed" 673 "out!\n", sc->sc_dev.dv_xname); 674 } 675 676 /* Try to reload station address from EEPROM or FLASH. 
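* (only attempted when the TWSI reload above did not succeed)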
*/ 677 if (reloaded == 0) { 678 reg = CSR_READ_4(sc, ALC_EEPROM_LD); 679 if ((reg & (EEPROM_LD_EEPROM_EXIST | 680 EEPROM_LD_FLASH_EXIST)) != 0) { 681 for (i = 100; i > 0; i--) { 682 reg = CSR_READ_4(sc, ALC_EEPROM_LD); 683 if ((reg & (EEPROM_LD_PROGRESS | 684 EEPROM_LD_START)) == 0) 685 break; 686 DELAY(1000); 687 } 688 if (i != 0) { 689 CSR_WRITE_4(sc, ALC_EEPROM_LD, reg | 690 EEPROM_LD_START); 691 for (i = 100; i > 0; i--) { 692 DELAY(1000); 693 reg = CSR_READ_4(sc, ALC_EEPROM_LD); 694 if ((reg & EEPROM_LD_START) == 0) 695 break; 696 } 697 } else if (alcdebug) 698 printf("%s: reloading EEPROM/FLASH timed out!\n", 699 sc->sc_dev.dv_xname); 700 } 701 } 702 703 alc_get_macaddr_par(sc); 704 } 705 706 void 707 alc_get_macaddr_par(struct alc_softc *sc) 708 { 709 uint32_t ea[2]; 710 711 ea[0] = CSR_READ_4(sc, ALC_PAR0); 712 ea[1] = CSR_READ_4(sc, ALC_PAR1); 713 sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF; 714 sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF; 715 sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF; 716 sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF; 717 sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF; 718 sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF; 719 } 720 721 void 722 alc_disable_l0s_l1(struct alc_softc *sc) 723 { 724 uint32_t pmcfg; 725 726 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 727 /* Another magic from vendor. */ 728 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 729 pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 | 730 PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | 731 PM_CFG_MAC_ASPM_CHK | PM_CFG_SERDES_PD_EX_L1); 732 pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | 733 PM_CFG_SERDES_PLL_L1_ENB | PM_CFG_SERDES_L1_ENB; 734 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 735 } 736 } 737 738 void 739 alc_phy_reset(struct alc_softc *sc) 740 { 741 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 742 alc_phy_reset_816x(sc); 743 else 744 alc_phy_reset_813x(sc); 745 } 746 747 void 748 alc_phy_reset_813x(struct alc_softc *sc) 749 { 750 uint16_t data; 751 752 /* Reset magic from Linux. */ 753 CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_SEL_ANA_RESET); 754 CSR_READ_2(sc, ALC_GPHY_CFG); 755 DELAY(10 * 1000); 756 757 CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET | 758 GPHY_CFG_SEL_ANA_RESET); 759 CSR_READ_2(sc, ALC_GPHY_CFG); 760 DELAY(10 * 1000); 761 762 /* DSP fixup, Vendor magic. 
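* (per-chip PHY debug register writes; the values are otherwise undocumented)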
*/ 763 if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1) { 764 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 765 ALC_MII_DBG_ADDR, 0x000A); 766 data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr, 767 ALC_MII_DBG_DATA); 768 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 769 ALC_MII_DBG_DATA, data & 0xDFFF); 770 } 771 if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D || 772 sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 || 773 sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 || 774 sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2) { 775 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 776 ALC_MII_DBG_ADDR, 0x003B); 777 data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr, 778 ALC_MII_DBG_DATA); 779 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 780 ALC_MII_DBG_DATA, data & 0xFFF7); 781 DELAY(20 * 1000); 782 } 783 if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D) { 784 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 785 ALC_MII_DBG_ADDR, 0x0029); 786 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 787 ALC_MII_DBG_DATA, 0x929D); 788 } 789 if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1C || 790 sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C || 791 sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 || 792 sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2) { 793 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 794 ALC_MII_DBG_ADDR, 0x0029); 795 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 796 ALC_MII_DBG_DATA, 0xB6DD); 797 } 798 799 /* Load DSP codes, vendor magic. */ 800 data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE | 801 ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK); 802 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 803 ALC_MII_DBG_ADDR, MII_ANA_CFG18); 804 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 805 ALC_MII_DBG_DATA, data); 806 807 data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) | 808 ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL | 809 ANA_SERDES_EN_LCKDT; 810 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 811 ALC_MII_DBG_ADDR, MII_ANA_CFG5); 812 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 813 ALC_MII_DBG_DATA, data); 814 815 data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) & 816 ANA_LONG_CABLE_TH_100_MASK) | 817 ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) & 818 ANA_SHORT_CABLE_TH_100_SHIFT) | 819 ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW; 820 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 821 ALC_MII_DBG_ADDR, MII_ANA_CFG54); 822 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 823 ALC_MII_DBG_DATA, data); 824 825 data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) | 826 ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) | 827 ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) | 828 ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK); 829 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 830 ALC_MII_DBG_ADDR, MII_ANA_CFG4); 831 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 832 ALC_MII_DBG_DATA, data); 833 834 data = ((7 & ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) | 835 ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB | 836 ANA_OEN_125M; 837 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 838 ALC_MII_DBG_ADDR, MII_ANA_CFG0); 839 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 840 ALC_MII_DBG_DATA, data); 841 DELAY(1000); 842 843 /* Disable hibernation. 
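* (clear bit 15 (0x8000) of PHY debug registers 0x29 and 0x0B)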
*/ 844 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, 845 0x0029); 846 data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr, 847 ALC_MII_DBG_DATA); 848 data &= ~0x8000; 849 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, 850 data); 851 852 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, 853 0x000B); 854 data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr, 855 ALC_MII_DBG_DATA); 856 data &= ~0x8000; 857 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, 858 data); 859 } 860 861 void 862 alc_phy_reset_816x(struct alc_softc *sc) 863 { 864 uint32_t val; 865 866 val = CSR_READ_4(sc, ALC_GPHY_CFG); 867 val &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE | 868 GPHY_CFG_GATE_25M_ENB | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PHY_PLL_ON | 869 GPHY_CFG_PWDOWN_HW | GPHY_CFG_100AB_ENB); 870 val |= GPHY_CFG_SEL_ANA_RESET; 871 /* Disable PHY hibernation. */ 872 val &= ~(GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN); 873 CSR_WRITE_4(sc, ALC_GPHY_CFG, val); 874 DELAY(10); 875 CSR_WRITE_4(sc, ALC_GPHY_CFG, val | GPHY_CFG_EXT_RESET); 876 DELAY(800); 877 /* Vendor PHY magic. */ 878 /* Disable PHY hibernation. */ 879 alc_miidbg_writereg(sc, MII_DBG_LEGCYPS, 880 DBG_LEGCYPS_DEFAULT & ~DBG_LEGCYPS_ENB); 881 alc_miidbg_writereg(sc, MII_DBG_HIBNEG, DBG_HIBNEG_DEFAULT & 882 ~(DBG_HIBNEG_PSHIB_EN | DBG_HIBNEG_HIB_PULSE)); 883 alc_miidbg_writereg(sc, MII_DBG_GREENCFG, DBG_GREENCFG_DEFAULT); 884 /* XXX Disable EEE. */ 885 val = CSR_READ_4(sc, ALC_LPI_CTL); 886 val &= ~LPI_CTL_ENB; 887 CSR_WRITE_4(sc, ALC_LPI_CTL, val); 888 alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_LOCAL_EEEADV, 0); 889 /* PHY power saving. */ 890 alc_miidbg_writereg(sc, MII_DBG_TST10BTCFG, DBG_TST10BTCFG_DEFAULT); 891 alc_miidbg_writereg(sc, MII_DBG_SRDSYSMOD, DBG_SRDSYSMOD_DEFAULT); 892 alc_miidbg_writereg(sc, MII_DBG_TST100BTCFG, DBG_TST100BTCFG_DEFAULT); 893 alc_miidbg_writereg(sc, MII_DBG_ANACTL, DBG_ANACTL_DEFAULT); 894 val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2); 895 val &= ~DBG_GREENCFG2_GATE_DFSE_EN; 896 alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val); 897 /* RTL8139C, 120m issue. */ 898 alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_NLP78, 899 ANEG_NLP78_120M_DEFAULT); 900 alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10, 901 ANEG_S3DIG10_DEFAULT); 902 if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0) { 903 /* Turn off half amplitude. */ 904 val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3); 905 val |= EXT_CLDCTL3_BP_CABLE1TH_DET_GT; 906 alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3, val); 907 /* Turn off Green feature. */ 908 val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2); 909 val |= DBG_GREENCFG2_BP_GREEN; 910 alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val); 911 /* Turn off half bias. 
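* (set EXT_CLDCTL5_BP_VD_HLFBIAS in the PCS CLDCTL5 register)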
*/ 912 val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5); 913 val |= EXT_CLDCTL5_BP_VD_HLFBIAS; 914 alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5, val); 915 } 916 } 917 918 void 919 alc_phy_down(struct alc_softc *sc) 920 { 921 uint32_t gphy; 922 923 switch (sc->sc_product) { 924 case PCI_PRODUCT_ATTANSIC_AR8161: 925 case PCI_PRODUCT_ATTANSIC_E2200: 926 case PCI_PRODUCT_ATTANSIC_E2400: 927 case PCI_PRODUCT_ATTANSIC_E2500: 928 case PCI_PRODUCT_ATTANSIC_AR8162: 929 case PCI_PRODUCT_ATTANSIC_AR8171: 930 case PCI_PRODUCT_ATTANSIC_AR8172: 931 gphy = CSR_READ_4(sc, ALC_GPHY_CFG); 932 gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE | 933 GPHY_CFG_100AB_ENB | GPHY_CFG_PHY_PLL_ON); 934 gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | 935 GPHY_CFG_SEL_ANA_RESET; 936 gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW; 937 CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy); 938 break; 939 case PCI_PRODUCT_ATTANSIC_L1D: 940 case PCI_PRODUCT_ATTANSIC_L1D_1: 941 case PCI_PRODUCT_ATTANSIC_L2C_1: 942 case PCI_PRODUCT_ATTANSIC_L2C_2: 943 /* 944 * GPHY power down caused more problems on AR8151 v2.0. 945 * When driver is reloaded after GPHY power down, 946 * accesses to PHY/MAC registers hung the system. Only 947 * cold boot recovered from it. I'm not sure whether 948 * AR8151 v1.0 also requires this one though. I don't 949 * have AR8151 v1.0 controller in hand. 950 * The only option left is to isolate the PHY and 951 * initiates power down the PHY which in turn saves 952 * more power when driver is unloaded. 953 */ 954 alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, 955 MII_BMCR, BMCR_ISO | BMCR_PDOWN); 956 break; 957 default: 958 /* Force PHY down. */ 959 CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET | 960 GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ | 961 GPHY_CFG_PWDOWN_HW); 962 DELAY(1000); 963 break; 964 } 965 } 966 967 void 968 alc_aspm(struct alc_softc *sc, int init, uint64_t media) 969 { 970 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 971 alc_aspm_816x(sc, init); 972 else 973 alc_aspm_813x(sc, media); 974 } 975 976 void 977 alc_aspm_813x(struct alc_softc *sc, uint64_t media) 978 { 979 uint32_t pmcfg; 980 uint16_t linkcfg; 981 982 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 983 if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) == 984 (ALC_FLAG_APS | ALC_FLAG_PCIE)) 985 linkcfg = CSR_READ_2(sc, sc->alc_expcap + PCI_PCIE_LCSR); 986 else 987 linkcfg = 0; 988 pmcfg &= ~PM_CFG_SERDES_PD_EX_L1; 989 pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK); 990 pmcfg |= PM_CFG_MAC_ASPM_CHK; 991 pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT); 992 pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB); 993 994 if ((sc->alc_flags & ALC_FLAG_APS) != 0) { 995 /* Disable extended sync except AR8152 B v1.0 */ 996 linkcfg &= ~0x80; 997 if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 && 998 sc->alc_rev == ATHEROS_AR8152_B_V10) 999 linkcfg |= 0x80; 1000 CSR_WRITE_2(sc, sc->alc_expcap + PCI_PCIE_LCSR, linkcfg); 1001 pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB | 1002 PM_CFG_HOTRST); 1003 pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT << 1004 PM_CFG_L1_ENTRY_TIMER_SHIFT); 1005 pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK; 1006 pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT << 1007 PM_CFG_PM_REQ_TIMER_SHIFT); 1008 pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV; 1009 } 1010 1011 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { 1012 if ((sc->alc_flags & ALC_FLAG_L0S) != 0) 1013 pmcfg |= PM_CFG_ASPM_L0S_ENB; 1014 if ((sc->alc_flags & ALC_FLAG_L1S) != 0) 1015 pmcfg |= PM_CFG_ASPM_L1_ENB; 1016 if ((sc->alc_flags & 
ALC_FLAG_APS) != 0) { 1017 if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1) 1018 pmcfg &= ~PM_CFG_ASPM_L0S_ENB; 1019 pmcfg &= ~(PM_CFG_SERDES_L1_ENB | 1020 PM_CFG_SERDES_PLL_L1_ENB | 1021 PM_CFG_SERDES_BUDS_RX_L1_ENB); 1022 pmcfg |= PM_CFG_CLK_SWH_L1; 1023 if (media == IFM_100_TX || media == IFM_1000_T) { 1024 pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK; 1025 switch (sc->sc_product) { 1026 case PCI_PRODUCT_ATTANSIC_L2C_1: 1027 pmcfg |= (7 << 1028 PM_CFG_L1_ENTRY_TIMER_SHIFT); 1029 break; 1030 case PCI_PRODUCT_ATTANSIC_L1D_1: 1031 case PCI_PRODUCT_ATTANSIC_L2C_2: 1032 pmcfg |= (4 << 1033 PM_CFG_L1_ENTRY_TIMER_SHIFT); 1034 break; 1035 default: 1036 pmcfg |= (15 << 1037 PM_CFG_L1_ENTRY_TIMER_SHIFT); 1038 break; 1039 } 1040 } 1041 } else { 1042 pmcfg |= PM_CFG_SERDES_L1_ENB | 1043 PM_CFG_SERDES_PLL_L1_ENB | 1044 PM_CFG_SERDES_BUDS_RX_L1_ENB; 1045 pmcfg &= ~(PM_CFG_CLK_SWH_L1 | 1046 PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB); 1047 } 1048 } else { 1049 pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB | 1050 PM_CFG_SERDES_PLL_L1_ENB); 1051 pmcfg |= PM_CFG_CLK_SWH_L1; 1052 if ((sc->alc_flags & ALC_FLAG_L1S) != 0) 1053 pmcfg |= PM_CFG_ASPM_L1_ENB; 1054 } 1055 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 1056 } 1057 1058 void 1059 alc_aspm_816x(struct alc_softc *sc, int init) 1060 { 1061 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1062 uint32_t pmcfg; 1063 1064 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 1065 pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_816X_MASK; 1066 pmcfg |= PM_CFG_L1_ENTRY_TIMER_816X_DEFAULT; 1067 pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK; 1068 pmcfg |= PM_CFG_PM_REQ_TIMER_816X_DEFAULT; 1069 pmcfg &= ~PM_CFG_LCKDET_TIMER_MASK; 1070 pmcfg |= PM_CFG_LCKDET_TIMER_DEFAULT; 1071 pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_CLK_SWH_L1 | PM_CFG_PCIE_RECV; 1072 pmcfg &= ~(PM_CFG_RX_L1_AFTER_L0S | PM_CFG_TX_L1_AFTER_L0S | 1073 PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB | 1074 PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB | 1075 PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SA_DLY_ENB | 1076 PM_CFG_MAC_ASPM_CHK | PM_CFG_HOTRST); 1077 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 && 1078 (sc->alc_rev & 0x01) != 0) 1079 pmcfg |= PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB; 1080 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { 1081 /* Link up, enable both L0s, L1s. */ 1082 pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | 1083 PM_CFG_MAC_ASPM_CHK; 1084 } else { 1085 if (init != 0) 1086 pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | 1087 PM_CFG_MAC_ASPM_CHK; 1088 else if ((ifp->if_flags & IFF_RUNNING) != 0) 1089 pmcfg |= PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK; 1090 } 1091 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 1092 } 1093 1094 void 1095 alc_init_pcie(struct alc_softc *sc, int base) 1096 { 1097 const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" }; 1098 uint32_t cap, ctl, val; 1099 int state; 1100 1101 /* Clear data link and flow-control protocol error. 
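* (mask the data link protocol (DLP) and flow control protocol (FCP) bits in the PCIe uncorrectable error severity register)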
*/ 1102 val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV); 1103 val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP); 1104 CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val); 1105 1106 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 1107 CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG, 1108 CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB); 1109 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, 1110 CSR_READ_4(sc, ALC_PCIE_PHYMISC) | 1111 PCIE_PHYMISC_FORCE_RCV_DET); 1112 if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 && 1113 sc->alc_rev == ATHEROS_AR8152_B_V10) { 1114 val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2); 1115 val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK | 1116 PCIE_PHYMISC2_SERDES_TH_MASK); 1117 val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT; 1118 val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT; 1119 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val); 1120 } 1121 /* Disable ASPM L0S and L1. */ 1122 cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 1123 base + PCI_PCIE_LCAP) >> 16; 1124 if ((cap & 0x00000c00) != 0) { 1125 ctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 1126 base + PCI_PCIE_LCSR) >> 16; 1127 if ((ctl & 0x08) != 0) 1128 sc->alc_rcb = DMA_CFG_RCB_128; 1129 if (alcdebug) 1130 printf("%s: RCB %u bytes\n", 1131 sc->sc_dev.dv_xname, 1132 sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128); 1133 state = ctl & 0x03; 1134 if (state & 0x01) 1135 sc->alc_flags |= ALC_FLAG_L0S; 1136 if (state & 0x02) 1137 sc->alc_flags |= ALC_FLAG_L1S; 1138 if (alcdebug) 1139 printf("%s: ASPM %s %s\n", 1140 sc->sc_dev.dv_xname, 1141 aspm_state[state], 1142 state == 0 ? "disabled" : "enabled"); 1143 alc_disable_l0s_l1(sc); 1144 } 1145 } else { 1146 val = CSR_READ_4(sc, ALC_PDLL_TRNS1); 1147 val &= ~PDLL_TRNS1_D3PLLOFF_ENB; 1148 CSR_WRITE_4(sc, ALC_PDLL_TRNS1, val); 1149 val = CSR_READ_4(sc, ALC_MASTER_CFG); 1150 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 && 1151 (sc->alc_rev & 0x01) != 0) { 1152 if ((val & MASTER_WAKEN_25M) == 0 || 1153 (val & MASTER_CLK_SEL_DIS) == 0) { 1154 val |= MASTER_WAKEN_25M | MASTER_CLK_SEL_DIS; 1155 CSR_WRITE_4(sc, ALC_MASTER_CFG, val); 1156 } 1157 } else { 1158 if ((val & MASTER_WAKEN_25M) == 0 || 1159 (val & MASTER_CLK_SEL_DIS) != 0) { 1160 val |= MASTER_WAKEN_25M; 1161 val &= ~MASTER_CLK_SEL_DIS; 1162 CSR_WRITE_4(sc, ALC_MASTER_CFG, val); 1163 } 1164 } 1165 } 1166 } 1167 1168 void 1169 alc_config_msi(struct alc_softc *sc) 1170 { 1171 uint32_t ctl, mod; 1172 1173 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 1174 /* 1175 * It seems interrupt moderation is controlled by 1176 * ALC_MSI_RETRANS_TIMER register if MSI/MSIX is active. 1177 * Driver uses RX interrupt moderation parameter to 1178 * program ALC_MSI_RETRANS_TIMER register. 1179 */ 1180 ctl = CSR_READ_4(sc, ALC_MSI_RETRANS_TIMER); 1181 ctl &= ~MSI_RETRANS_TIMER_MASK; 1182 ctl &= ~MSI_RETRANS_MASK_SEL_LINE; 1183 mod = ALC_USECS(sc->alc_int_rx_mod); 1184 if (mod == 0) 1185 mod = 1; 1186 ctl |= mod; 1187 if ((sc->alc_flags & ALC_FLAG_MSI) != 0) 1188 CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, ctl | 1189 MSI_RETRANS_MASK_SEL_LINE); 1190 else 1191 CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, 0); 1192 } 1193 } 1194 1195 void 1196 alc_attach(struct device *parent, struct device *self, void *aux) 1197 { 1198 struct alc_softc *sc = (struct alc_softc *)self; 1199 struct pci_attach_args *pa = aux; 1200 pci_chipset_tag_t pc = pa->pa_pc; 1201 pci_intr_handle_t ih; 1202 const char *intrstr; 1203 struct ifnet *ifp; 1204 pcireg_t memtype; 1205 uint16_t burst; 1206 int base, error = 0; 1207 1208 /* Set PHY address. */ 1209 sc->alc_phyaddr = ALC_PHY_ADDR; 1210 1211 /* Get PCI and chip id/revision. 
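* (the product ID and revision select the per-chip quirks applied below)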
*/ 1212 sc->sc_product = PCI_PRODUCT(pa->pa_id); 1213 sc->alc_rev = PCI_REVISION(pa->pa_class); 1214 1215 /* 1216 * One odd thing is AR8132 uses the same PHY hardware(F1 1217 * gigabit PHY) of AR8131. So atphy(4) of AR8132 reports 1218 * the PHY supports 1000Mbps but that's not true. The PHY 1219 * used in AR8132 can't establish gigabit link even if it 1220 * shows the same PHY model/revision number of AR8131. 1221 */ 1222 switch (sc->sc_product) { 1223 case PCI_PRODUCT_ATTANSIC_E2200: 1224 case PCI_PRODUCT_ATTANSIC_E2400: 1225 case PCI_PRODUCT_ATTANSIC_E2500: 1226 sc->alc_flags |= ALC_FLAG_E2X00; 1227 /* FALLTHROUGH */ 1228 case PCI_PRODUCT_ATTANSIC_AR8161: 1229 if (AR816X_REV(sc->alc_rev) == 0) 1230 sc->alc_flags |= ALC_FLAG_LINK_WAR; 1231 /* FALLTHROUGH */ 1232 case PCI_PRODUCT_ATTANSIC_AR8171: 1233 sc->alc_flags |= ALC_FLAG_AR816X_FAMILY; 1234 break; 1235 case PCI_PRODUCT_ATTANSIC_AR8162: 1236 case PCI_PRODUCT_ATTANSIC_AR8172: 1237 sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_AR816X_FAMILY; 1238 break; 1239 case PCI_PRODUCT_ATTANSIC_L2C_1: 1240 case PCI_PRODUCT_ATTANSIC_L2C_2: 1241 sc->alc_flags |= ALC_FLAG_APS; 1242 /* FALLTHROUGH */ 1243 case PCI_PRODUCT_ATTANSIC_L2C: 1244 sc->alc_flags |= ALC_FLAG_FASTETHER; 1245 break; 1246 case PCI_PRODUCT_ATTANSIC_L1D: 1247 case PCI_PRODUCT_ATTANSIC_L1D_1: 1248 sc->alc_flags |= ALC_FLAG_APS; 1249 /* FALLTHROUGH */ 1250 default: 1251 break; 1252 } 1253 sc->alc_flags |= ALC_FLAG_JUMBO; 1254 1255 /* 1256 * Allocate IO memory 1257 */ 1258 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ALC_PCIR_BAR); 1259 if (pci_mapreg_map(pa, ALC_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, 1260 &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) { 1261 printf(": can't map mem space\n"); 1262 return; 1263 } 1264 1265 sc->alc_flags |= ALC_FLAG_MSI; 1266 if (pci_intr_map_msi(pa, &ih) != 0) { 1267 if (pci_intr_map(pa, &ih) != 0) { 1268 printf(": can't map interrupt\n"); 1269 goto fail; 1270 } 1271 sc->alc_flags &= ~ALC_FLAG_MSI; 1272 } 1273 1274 /* 1275 * Allocate IRQ 1276 */ 1277 intrstr = pci_intr_string(pc, ih); 1278 sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, alc_intr, sc, 1279 sc->sc_dev.dv_xname); 1280 if (sc->sc_irq_handle == NULL) { 1281 printf(": could not establish interrupt"); 1282 if (intrstr != NULL) 1283 printf(" at %s", intrstr); 1284 printf("\n"); 1285 goto fail; 1286 } 1287 printf(": %s", intrstr); 1288 1289 alc_config_msi(sc); 1290 1291 sc->sc_dmat = pa->pa_dmat; 1292 sc->sc_pct = pa->pa_pc; 1293 sc->sc_pcitag = pa->pa_tag; 1294 1295 switch (sc->sc_product) { 1296 case PCI_PRODUCT_ATTANSIC_L1D: 1297 case PCI_PRODUCT_ATTANSIC_L1D_1: 1298 case PCI_PRODUCT_ATTANSIC_L2C_1: 1299 case PCI_PRODUCT_ATTANSIC_L2C_2: 1300 sc->alc_max_framelen = 6 * 1024; 1301 break; 1302 default: 1303 sc->alc_max_framelen = 9 * 1024; 1304 break; 1305 } 1306 1307 /* 1308 * It seems that AR813x/AR815x has silicon bug for SMB. In 1309 * addition, Atheros said that enabling SMB wouldn't improve 1310 * performance. However I think it's bad to access lots of 1311 * registers to extract MAC statistics. 1312 */ 1313 sc->alc_flags |= ALC_FLAG_SMB_BUG; 1314 /* 1315 * Don't use Tx CMB. It is known to have silicon bug. 
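* Setting ALC_FLAG_CMB_BUG below keeps the Tx CMB permanently disabled.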
1316 */ 1317 sc->alc_flags |= ALC_FLAG_CMB_BUG; 1318 sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >> 1319 MASTER_CHIP_REV_SHIFT; 1320 if (alcdebug) { 1321 printf("%s: PCI device revision : 0x%04x\n", 1322 sc->sc_dev.dv_xname, sc->alc_rev); 1323 printf("%s: Chip id/revision : 0x%04x\n", 1324 sc->sc_dev.dv_xname, sc->alc_chip_rev); 1325 printf("%s: %u Tx FIFO, %u Rx FIFO\n", sc->sc_dev.dv_xname, 1326 CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8, 1327 CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8); 1328 } 1329 1330 /* Initialize DMA parameters. */ 1331 sc->alc_dma_rd_burst = 0; 1332 sc->alc_dma_wr_burst = 0; 1333 sc->alc_rcb = DMA_CFG_RCB_64; 1334 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, 1335 &base, NULL)) { 1336 sc->alc_flags |= ALC_FLAG_PCIE; 1337 sc->alc_expcap = base; 1338 burst = CSR_READ_2(sc, base + PCI_PCIE_DCSR); 1339 sc->alc_dma_rd_burst = (burst & 0x7000) >> 12; 1340 sc->alc_dma_wr_burst = (burst & 0x00e0) >> 5; 1341 if (alcdebug) { 1342 printf("%s: Read request size : %u bytes.\n", 1343 sc->sc_dev.dv_xname, 1344 alc_dma_burst[sc->alc_dma_rd_burst]); 1345 printf("%s: TLP payload size : %u bytes.\n", 1346 sc->sc_dev.dv_xname, 1347 alc_dma_burst[sc->alc_dma_wr_burst]); 1348 } 1349 if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024) 1350 sc->alc_dma_rd_burst = 3; 1351 if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024) 1352 sc->alc_dma_wr_burst = 3; 1353 /* 1354 * Force maximum payload size to 128 bytes for 1355 * E2200/E2400/E2500/AR8162/AR8171/AR8172. 1356 * Otherwise it triggers DMA write error. 1357 */ 1358 if ((sc->alc_flags & 1359 (ALC_FLAG_E2X00 | ALC_FLAG_AR816X_FAMILY)) != 0) 1360 sc->alc_dma_wr_burst = 0; 1361 alc_init_pcie(sc, base); 1362 } 1363 1364 /* Reset PHY. */ 1365 alc_phy_reset(sc); 1366 1367 /* Reset the ethernet controller. */ 1368 alc_stop_mac(sc); 1369 alc_reset(sc); 1370 1371 error = alc_dma_alloc(sc); 1372 if (error) 1373 goto fail; 1374 1375 /* Load station address. */ 1376 alc_get_macaddr(sc); 1377 1378 ifp = &sc->sc_arpcom.ac_if; 1379 ifp->if_softc = sc; 1380 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1381 ifp->if_ioctl = alc_ioctl; 1382 ifp->if_start = alc_start; 1383 ifp->if_watchdog = alc_watchdog; 1384 ifq_init_maxlen(&ifp->if_snd, ALC_TX_RING_CNT - 1); 1385 bcopy(sc->alc_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN); 1386 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 1387 1388 ifp->if_capabilities = IFCAP_VLAN_MTU; 1389 1390 #ifdef ALC_CHECKSUM 1391 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | 1392 IFCAP_CSUM_UDPv4; 1393 #endif 1394 1395 #if NVLAN > 0 1396 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 1397 #endif 1398 1399 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); 1400 1401 /* Set up MII bus. 
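* (register the MDIO read/write and link status-change callbacks, then probe for PHYs with mii_attach())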
*/ 1402 sc->sc_miibus.mii_ifp = ifp; 1403 sc->sc_miibus.mii_readreg = alc_miibus_readreg; 1404 sc->sc_miibus.mii_writereg = alc_miibus_writereg; 1405 sc->sc_miibus.mii_statchg = alc_miibus_statchg; 1406 1407 ifmedia_init(&sc->sc_miibus.mii_media, 0, alc_mediachange, 1408 alc_mediastatus); 1409 mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY, 1410 MII_OFFSET_ANY, MIIF_DOPAUSE); 1411 1412 if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { 1413 printf("%s: no PHY found!\n", sc->sc_dev.dv_xname); 1414 ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, 1415 0, NULL); 1416 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); 1417 } else 1418 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); 1419 1420 if_attach(ifp); 1421 ether_ifattach(ifp); 1422 1423 timeout_set(&sc->alc_tick_ch, alc_tick, sc); 1424 1425 return; 1426 fail: 1427 alc_dma_free(sc); 1428 if (sc->sc_irq_handle != NULL) 1429 pci_intr_disestablish(pc, sc->sc_irq_handle); 1430 if (sc->sc_mem_size) 1431 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 1432 } 1433 1434 int 1435 alc_detach(struct device *self, int flags) 1436 { 1437 struct alc_softc *sc = (struct alc_softc *)self; 1438 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1439 int s; 1440 1441 s = splnet(); 1442 alc_stop(sc); 1443 splx(s); 1444 1445 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); 1446 1447 /* Delete all remaining media. */ 1448 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); 1449 1450 ether_ifdetach(ifp); 1451 if_detach(ifp); 1452 alc_dma_free(sc); 1453 1454 alc_phy_down(sc); 1455 if (sc->sc_irq_handle != NULL) { 1456 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 1457 sc->sc_irq_handle = NULL; 1458 } 1459 1460 return (0); 1461 } 1462 1463 int 1464 alc_activate(struct device *self, int act) 1465 { 1466 struct alc_softc *sc = (struct alc_softc *)self; 1467 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1468 int rv = 0; 1469 1470 switch (act) { 1471 case DVACT_SUSPEND: 1472 if (ifp->if_flags & IFF_RUNNING) 1473 alc_stop(sc); 1474 rv = config_activate_children(self, act); 1475 break; 1476 case DVACT_RESUME: 1477 if (ifp->if_flags & IFF_UP) 1478 alc_init(ifp); 1479 break; 1480 default: 1481 rv = config_activate_children(self, act); 1482 break; 1483 } 1484 return (rv); 1485 } 1486 1487 int 1488 alc_dma_alloc(struct alc_softc *sc) 1489 { 1490 struct alc_txdesc *txd; 1491 struct alc_rxdesc *rxd; 1492 int nsegs, error, i; 1493 1494 /* 1495 * Create DMA stuffs for TX ring 1496 */ 1497 error = bus_dmamap_create(sc->sc_dmat, ALC_TX_RING_SZ, 1, 1498 ALC_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_tx_ring_map); 1499 if (error) 1500 return (ENOBUFS); 1501 1502 /* Allocate DMA'able memory for TX ring */ 1503 error = bus_dmamem_alloc(sc->sc_dmat, ALC_TX_RING_SZ, 1504 ETHER_ALIGN, 0, &sc->alc_rdata.alc_tx_ring_seg, 1, 1505 &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO); 1506 if (error) { 1507 printf("%s: could not allocate DMA'able memory for Tx ring.\n", 1508 sc->sc_dev.dv_xname); 1509 return (error); 1510 } 1511 1512 error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_tx_ring_seg, 1513 nsegs, ALC_TX_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_tx_ring, 1514 BUS_DMA_NOWAIT); 1515 if (error) 1516 return (ENOBUFS); 1517 1518 /* Load the DMA map for Tx ring. 
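* (the physical address is recorded below and handed to the chip at init time)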
*/ 1519 error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 1520 sc->alc_rdata.alc_tx_ring, ALC_TX_RING_SZ, NULL, BUS_DMA_WAITOK); 1521 if (error) { 1522 printf("%s: could not load DMA'able memory for Tx ring.\n", 1523 sc->sc_dev.dv_xname); 1524 bus_dmamem_free(sc->sc_dmat, 1525 (bus_dma_segment_t *)&sc->alc_rdata.alc_tx_ring, 1); 1526 return (error); 1527 } 1528 1529 sc->alc_rdata.alc_tx_ring_paddr = 1530 sc->alc_cdata.alc_tx_ring_map->dm_segs[0].ds_addr; 1531 1532 /* 1533 * Create DMA stuffs for RX ring 1534 */ 1535 error = bus_dmamap_create(sc->sc_dmat, ALC_RX_RING_SZ, 1, 1536 ALC_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_ring_map); 1537 if (error) 1538 return (ENOBUFS); 1539 1540 /* Allocate DMA'able memory for RX ring */ 1541 error = bus_dmamem_alloc(sc->sc_dmat, ALC_RX_RING_SZ, 1542 ETHER_ALIGN, 0, &sc->alc_rdata.alc_rx_ring_seg, 1, 1543 &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO); 1544 if (error) { 1545 printf("%s: could not allocate DMA'able memory for Rx ring.\n", 1546 sc->sc_dev.dv_xname); 1547 return (error); 1548 } 1549 1550 error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rx_ring_seg, 1551 nsegs, ALC_RX_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_rx_ring, 1552 BUS_DMA_NOWAIT); 1553 if (error) 1554 return (ENOBUFS); 1555 1556 /* Load the DMA map for Rx ring. */ 1557 error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 1558 sc->alc_rdata.alc_rx_ring, ALC_RX_RING_SZ, NULL, BUS_DMA_WAITOK); 1559 if (error) { 1560 printf("%s: could not load DMA'able memory for Rx ring.\n", 1561 sc->sc_dev.dv_xname); 1562 bus_dmamem_free(sc->sc_dmat, 1563 (bus_dma_segment_t *)sc->alc_rdata.alc_rx_ring, 1); 1564 return (error); 1565 } 1566 1567 sc->alc_rdata.alc_rx_ring_paddr = 1568 sc->alc_cdata.alc_rx_ring_map->dm_segs[0].ds_addr; 1569 1570 /* 1571 * Create DMA stuffs for RX return ring 1572 */ 1573 error = bus_dmamap_create(sc->sc_dmat, ALC_RR_RING_SZ, 1, 1574 ALC_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rr_ring_map); 1575 if (error) 1576 return (ENOBUFS); 1577 1578 /* Allocate DMA'able memory for RX return ring */ 1579 error = bus_dmamem_alloc(sc->sc_dmat, ALC_RR_RING_SZ, 1580 ETHER_ALIGN, 0, &sc->alc_rdata.alc_rr_ring_seg, 1, 1581 &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO); 1582 if (error) { 1583 printf("%s: could not allocate DMA'able memory for Rx " 1584 "return ring.\n", sc->sc_dev.dv_xname); 1585 return (error); 1586 } 1587 1588 error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rr_ring_seg, 1589 nsegs, ALC_RR_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_rr_ring, 1590 BUS_DMA_NOWAIT); 1591 if (error) 1592 return (ENOBUFS); 1593 1594 /* Load the DMA map for Rx return ring. */ 1595 error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 1596 sc->alc_rdata.alc_rr_ring, ALC_RR_RING_SZ, NULL, BUS_DMA_WAITOK); 1597 if (error) { 1598 printf("%s: could not load DMA'able memory for Rx return ring." 
1599 "\n", sc->sc_dev.dv_xname); 1600 bus_dmamem_free(sc->sc_dmat, 1601 (bus_dma_segment_t *)&sc->alc_rdata.alc_rr_ring, 1); 1602 return (error); 1603 } 1604 1605 sc->alc_rdata.alc_rr_ring_paddr = 1606 sc->alc_cdata.alc_rr_ring_map->dm_segs[0].ds_addr; 1607 1608 /* 1609 * Create DMA stuffs for CMB block 1610 */ 1611 error = bus_dmamap_create(sc->sc_dmat, ALC_CMB_SZ, 1, 1612 ALC_CMB_SZ, 0, BUS_DMA_NOWAIT, 1613 &sc->alc_cdata.alc_cmb_map); 1614 if (error) 1615 return (ENOBUFS); 1616 1617 /* Allocate DMA'able memory for CMB block */ 1618 error = bus_dmamem_alloc(sc->sc_dmat, ALC_CMB_SZ, 1619 ETHER_ALIGN, 0, &sc->alc_rdata.alc_cmb_seg, 1, 1620 &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO); 1621 if (error) { 1622 printf("%s: could not allocate DMA'able memory for " 1623 "CMB block\n", sc->sc_dev.dv_xname); 1624 return (error); 1625 } 1626 1627 error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_cmb_seg, 1628 nsegs, ALC_CMB_SZ, (caddr_t *)&sc->alc_rdata.alc_cmb, 1629 BUS_DMA_NOWAIT); 1630 if (error) 1631 return (ENOBUFS); 1632 1633 /* Load the DMA map for CMB block. */ 1634 error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 1635 sc->alc_rdata.alc_cmb, ALC_CMB_SZ, NULL, 1636 BUS_DMA_WAITOK); 1637 if (error) { 1638 printf("%s: could not load DMA'able memory for CMB block\n", 1639 sc->sc_dev.dv_xname); 1640 bus_dmamem_free(sc->sc_dmat, 1641 (bus_dma_segment_t *)&sc->alc_rdata.alc_cmb, 1); 1642 return (error); 1643 } 1644 1645 sc->alc_rdata.alc_cmb_paddr = 1646 sc->alc_cdata.alc_cmb_map->dm_segs[0].ds_addr; 1647 1648 /* 1649 * Create DMA stuffs for SMB block 1650 */ 1651 error = bus_dmamap_create(sc->sc_dmat, ALC_SMB_SZ, 1, 1652 ALC_SMB_SZ, 0, BUS_DMA_NOWAIT, 1653 &sc->alc_cdata.alc_smb_map); 1654 if (error) 1655 return (ENOBUFS); 1656 1657 /* Allocate DMA'able memory for SMB block */ 1658 error = bus_dmamem_alloc(sc->sc_dmat, ALC_SMB_SZ, 1659 ETHER_ALIGN, 0, &sc->alc_rdata.alc_smb_seg, 1, 1660 &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO); 1661 if (error) { 1662 printf("%s: could not allocate DMA'able memory for " 1663 "SMB block\n", sc->sc_dev.dv_xname); 1664 return (error); 1665 } 1666 1667 error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_smb_seg, 1668 nsegs, ALC_SMB_SZ, (caddr_t *)&sc->alc_rdata.alc_smb, 1669 BUS_DMA_NOWAIT); 1670 if (error) 1671 return (ENOBUFS); 1672 1673 /* Load the DMA map for SMB block */ 1674 error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 1675 sc->alc_rdata.alc_smb, ALC_SMB_SZ, NULL, 1676 BUS_DMA_WAITOK); 1677 if (error) { 1678 printf("%s: could not load DMA'able memory for SMB block\n", 1679 sc->sc_dev.dv_xname); 1680 bus_dmamem_free(sc->sc_dmat, 1681 (bus_dma_segment_t *)&sc->alc_rdata.alc_smb, 1); 1682 return (error); 1683 } 1684 1685 sc->alc_rdata.alc_smb_paddr = 1686 sc->alc_cdata.alc_smb_map->dm_segs[0].ds_addr; 1687 1688 1689 /* Create DMA maps for Tx buffers. */ 1690 for (i = 0; i < ALC_TX_RING_CNT; i++) { 1691 txd = &sc->alc_cdata.alc_txdesc[i]; 1692 txd->tx_m = NULL; 1693 txd->tx_dmamap = NULL; 1694 error = bus_dmamap_create(sc->sc_dmat, ALC_TSO_MAXSIZE, 1695 ALC_MAXTXSEGS, ALC_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT, 1696 &txd->tx_dmamap); 1697 if (error) { 1698 printf("%s: could not create Tx dmamap.\n", 1699 sc->sc_dev.dv_xname); 1700 return (error); 1701 } 1702 } 1703 1704 /* Create DMA maps for Rx buffers. 
*/ 1705 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 1706 BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_sparemap); 1707 if (error) { 1708 printf("%s: could not create spare Rx dmamap.\n", 1709 sc->sc_dev.dv_xname); 1710 return (error); 1711 } 1712 1713 for (i = 0; i < ALC_RX_RING_CNT; i++) { 1714 rxd = &sc->alc_cdata.alc_rxdesc[i]; 1715 rxd->rx_m = NULL; 1716 rxd->rx_dmamap = NULL; 1717 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 1718 MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap); 1719 if (error) { 1720 printf("%s: could not create Rx dmamap.\n", 1721 sc->sc_dev.dv_xname); 1722 return (error); 1723 } 1724 } 1725 1726 return (0); 1727 } 1728 1729 void 1730 alc_dma_free(struct alc_softc *sc) 1731 { 1732 struct alc_txdesc *txd; 1733 struct alc_rxdesc *rxd; 1734 int i; 1735 1736 /* Tx buffers */ 1737 for (i = 0; i < ALC_TX_RING_CNT; i++) { 1738 txd = &sc->alc_cdata.alc_txdesc[i]; 1739 if (txd->tx_dmamap != NULL) { 1740 bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap); 1741 txd->tx_dmamap = NULL; 1742 } 1743 } 1744 /* Rx buffers */ 1745 for (i = 0; i < ALC_RX_RING_CNT; i++) { 1746 rxd = &sc->alc_cdata.alc_rxdesc[i]; 1747 if (rxd->rx_dmamap != NULL) { 1748 bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap); 1749 rxd->rx_dmamap = NULL; 1750 } 1751 } 1752 if (sc->alc_cdata.alc_rx_sparemap != NULL) { 1753 bus_dmamap_destroy(sc->sc_dmat, sc->alc_cdata.alc_rx_sparemap); 1754 sc->alc_cdata.alc_rx_sparemap = NULL; 1755 } 1756 1757 /* Tx ring. */ 1758 if (sc->alc_cdata.alc_tx_ring_map != NULL) 1759 bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map); 1760 if (sc->alc_cdata.alc_tx_ring_map != NULL && 1761 sc->alc_rdata.alc_tx_ring != NULL) 1762 bus_dmamem_free(sc->sc_dmat, 1763 (bus_dma_segment_t *)sc->alc_rdata.alc_tx_ring, 1); 1764 sc->alc_rdata.alc_tx_ring = NULL; 1765 sc->alc_cdata.alc_tx_ring_map = NULL; 1766 1767 /* Rx ring. */ 1768 if (sc->alc_cdata.alc_rx_ring_map != NULL) 1769 bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map); 1770 if (sc->alc_cdata.alc_rx_ring_map != NULL && 1771 sc->alc_rdata.alc_rx_ring != NULL) 1772 bus_dmamem_free(sc->sc_dmat, 1773 (bus_dma_segment_t *)sc->alc_rdata.alc_rx_ring, 1); 1774 sc->alc_rdata.alc_rx_ring = NULL; 1775 sc->alc_cdata.alc_rx_ring_map = NULL; 1776 1777 /* Rx return ring. 
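* (unload each DMA map before freeing the ring memory behind it)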
*/ 1778 if (sc->alc_cdata.alc_rr_ring_map != NULL) 1779 bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map); 1780 if (sc->alc_cdata.alc_rr_ring_map != NULL && 1781 sc->alc_rdata.alc_rr_ring != NULL) 1782 bus_dmamem_free(sc->sc_dmat, 1783 (bus_dma_segment_t *)sc->alc_rdata.alc_rr_ring, 1); 1784 sc->alc_rdata.alc_rr_ring = NULL; 1785 sc->alc_cdata.alc_rr_ring_map = NULL; 1786 1787 /* CMB block */ 1788 if (sc->alc_cdata.alc_cmb_map != NULL) 1789 bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_cmb_map); 1790 if (sc->alc_cdata.alc_cmb_map != NULL && 1791 sc->alc_rdata.alc_cmb != NULL) 1792 bus_dmamem_free(sc->sc_dmat, 1793 (bus_dma_segment_t *)sc->alc_rdata.alc_cmb, 1); 1794 sc->alc_rdata.alc_cmb = NULL; 1795 sc->alc_cdata.alc_cmb_map = NULL; 1796 1797 /* SMB block */ 1798 if (sc->alc_cdata.alc_smb_map != NULL) 1799 bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_smb_map); 1800 if (sc->alc_cdata.alc_smb_map != NULL && 1801 sc->alc_rdata.alc_smb != NULL) 1802 bus_dmamem_free(sc->sc_dmat, 1803 (bus_dma_segment_t *)sc->alc_rdata.alc_smb, 1); 1804 sc->alc_rdata.alc_smb = NULL; 1805 sc->alc_cdata.alc_smb_map = NULL; 1806 } 1807 1808 int 1809 alc_encap(struct alc_softc *sc, struct mbuf *m) 1810 { 1811 struct alc_txdesc *txd, *txd_last; 1812 struct tx_desc *desc; 1813 bus_dmamap_t map; 1814 uint32_t cflags, poff, vtag; 1815 int error, idx, prod; 1816 1817 cflags = vtag = 0; 1818 poff = 0; 1819 1820 prod = sc->alc_cdata.alc_tx_prod; 1821 txd = &sc->alc_cdata.alc_txdesc[prod]; 1822 txd_last = txd; 1823 map = txd->tx_dmamap; 1824 1825 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT); 1826 if (error != 0 && error != EFBIG) 1827 goto drop; 1828 if (error != 0) { 1829 if (m_defrag(m, M_DONTWAIT)) { 1830 error = ENOBUFS; 1831 goto drop; 1832 } 1833 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1834 BUS_DMA_NOWAIT); 1835 if (error != 0) 1836 goto drop; 1837 } 1838 1839 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1840 BUS_DMASYNC_PREWRITE); 1841 1842 desc = NULL; 1843 idx = 0; 1844 #if NVLAN > 0 1845 /* Configure VLAN hardware tag insertion. */ 1846 if (m->m_flags & M_VLANTAG) { 1847 vtag = htons(m->m_pkthdr.ether_vtag); 1848 vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK; 1849 cflags |= TD_INS_VLAN_TAG; 1850 } 1851 #endif 1852 /* Configure Tx checksum offload. */ 1853 if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) { 1854 cflags |= TD_CUSTOM_CSUM; 1855 /* Set checksum start offset. */ 1856 cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) & 1857 TD_PLOAD_OFFSET_MASK; 1858 } 1859 1860 for (; idx < map->dm_nsegs; idx++) { 1861 desc = &sc->alc_rdata.alc_tx_ring[prod]; 1862 desc->len = 1863 htole32(TX_BYTES(map->dm_segs[idx].ds_len) | vtag); 1864 desc->flags = htole32(cflags); 1865 desc->addr = htole64(map->dm_segs[idx].ds_addr); 1866 sc->alc_cdata.alc_tx_cnt++; 1867 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 1868 } 1869 1870 /* Update producer index. */ 1871 sc->alc_cdata.alc_tx_prod = prod; 1872 1873 /* Finally set EOP on the last descriptor. */ 1874 prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT; 1875 desc = &sc->alc_rdata.alc_tx_ring[prod]; 1876 desc->flags |= htole32(TD_EOP); 1877 1878 /* Swap dmamap of the first and the last. 
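This leaves the loaded map on the descriptor that carries the mbuf (the EOP one), so alc_txeof() can unload it when that descriptor is reclaimed.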
*/ 1879 txd = &sc->alc_cdata.alc_txdesc[prod]; 1880 map = txd_last->tx_dmamap; 1881 txd_last->tx_dmamap = txd->tx_dmamap; 1882 txd->tx_dmamap = map; 1883 txd->tx_m = m; 1884 1885 return (0); 1886 1887 drop: 1888 m_freem(m); 1889 return (error); 1890 } 1891 1892 void 1893 alc_start(struct ifnet *ifp) 1894 { 1895 struct alc_softc *sc = ifp->if_softc; 1896 struct mbuf *m; 1897 int enq = 0; 1898 1899 /* Reclaim transmitted frames. */ 1900 if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT) 1901 alc_txeof(sc); 1902 1903 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd)) 1904 return; 1905 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) 1906 return; 1907 if (ifq_empty(&ifp->if_snd)) 1908 return; 1909 1910 for (;;) { 1911 if (sc->alc_cdata.alc_tx_cnt + ALC_MAXTXSEGS >= 1912 ALC_TX_RING_CNT - 3) { 1913 ifq_set_oactive(&ifp->if_snd); 1914 break; 1915 } 1916 1917 m = ifq_dequeue(&ifp->if_snd); 1918 if (m == NULL) 1919 break; 1920 1921 if (alc_encap(sc, m) != 0) { 1922 ifp->if_oerrors++; 1923 continue; 1924 } 1925 enq++; 1926 1927 #if NBPFILTER > 0 1928 /* 1929 * If there's a BPF listener, bounce a copy of this frame 1930 * to him. 1931 */ 1932 if (ifp->if_bpf != NULL) 1933 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1934 #endif 1935 } 1936 1937 if (enq > 0) { 1938 /* Sync descriptors. */ 1939 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0, 1940 sc->alc_cdata.alc_tx_ring_map->dm_mapsize, 1941 BUS_DMASYNC_PREWRITE); 1942 /* Kick. Assume we're using normal Tx priority queue. */ 1943 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 1944 CSR_WRITE_2(sc, ALC_MBOX_TD_PRI0_PROD_IDX, 1945 (uint16_t)sc->alc_cdata.alc_tx_prod); 1946 else 1947 CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX, 1948 (sc->alc_cdata.alc_tx_prod << 1949 MBOX_TD_PROD_LO_IDX_SHIFT) & 1950 MBOX_TD_PROD_LO_IDX_MASK); 1951 /* Set a timeout in case the chip goes out to lunch. 
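If it expires before alc_txeof() clears it, alc_watchdog() logs the timeout and reinitializes the chip.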
*/ 1952 ifp->if_timer = ALC_TX_TIMEOUT; 1953 } 1954 } 1955 1956 void 1957 alc_watchdog(struct ifnet *ifp) 1958 { 1959 struct alc_softc *sc = ifp->if_softc; 1960 1961 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) { 1962 printf("%s: watchdog timeout (missed link)\n", 1963 sc->sc_dev.dv_xname); 1964 ifp->if_oerrors++; 1965 alc_init(ifp); 1966 return; 1967 } 1968 1969 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 1970 ifp->if_oerrors++; 1971 alc_init(ifp); 1972 alc_start(ifp); 1973 } 1974 1975 int 1976 alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1977 { 1978 struct alc_softc *sc = ifp->if_softc; 1979 struct mii_data *mii = &sc->sc_miibus; 1980 struct ifreq *ifr = (struct ifreq *)data; 1981 int s, error = 0; 1982 1983 s = splnet(); 1984 1985 switch (cmd) { 1986 case SIOCSIFADDR: 1987 ifp->if_flags |= IFF_UP; 1988 if (!(ifp->if_flags & IFF_RUNNING)) 1989 alc_init(ifp); 1990 break; 1991 1992 case SIOCSIFFLAGS: 1993 if (ifp->if_flags & IFF_UP) { 1994 if (ifp->if_flags & IFF_RUNNING) 1995 error = ENETRESET; 1996 else 1997 alc_init(ifp); 1998 } else { 1999 if (ifp->if_flags & IFF_RUNNING) 2000 alc_stop(sc); 2001 } 2002 break; 2003 2004 case SIOCSIFMEDIA: 2005 case SIOCGIFMEDIA: 2006 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 2007 break; 2008 2009 default: 2010 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data); 2011 break; 2012 } 2013 2014 if (error == ENETRESET) { 2015 if (ifp->if_flags & IFF_RUNNING) 2016 alc_iff(sc); 2017 error = 0; 2018 } 2019 2020 splx(s); 2021 return (error); 2022 } 2023 2024 void 2025 alc_mac_config(struct alc_softc *sc) 2026 { 2027 struct mii_data *mii; 2028 uint32_t reg; 2029 2030 mii = &sc->sc_miibus; 2031 reg = CSR_READ_4(sc, ALC_MAC_CFG); 2032 reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC | 2033 MAC_CFG_SPEED_MASK); 2034 if ((sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D || 2035 sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 || 2036 sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2 || 2037 sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 2038 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW; 2039 /* Reprogram MAC with resolved speed/duplex. */ 2040 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2041 case IFM_10_T: 2042 case IFM_100_TX: 2043 reg |= MAC_CFG_SPEED_10_100; 2044 break; 2045 case IFM_1000_T: 2046 reg |= MAC_CFG_SPEED_1000; 2047 break; 2048 } 2049 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 2050 reg |= MAC_CFG_FULL_DUPLEX; 2051 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 2052 reg |= MAC_CFG_TX_FC; 2053 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 2054 reg |= MAC_CFG_RX_FC; 2055 } 2056 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 2057 } 2058 2059 void 2060 alc_stats_clear(struct alc_softc *sc) 2061 { 2062 struct smb sb, *smb; 2063 uint32_t *reg; 2064 int i; 2065 2066 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2067 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0, 2068 sc->alc_cdata.alc_smb_map->dm_mapsize, 2069 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2070 smb = sc->alc_rdata.alc_smb; 2071 /* Update done, clear. */ 2072 smb->updated = 0; 2073 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0, 2074 sc->alc_cdata.alc_smb_map->dm_mapsize, 2075 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2076 } else { 2077 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; 2078 reg++) { 2079 CSR_READ_4(sc, ALC_RX_MIB_BASE + i); 2080 i += sizeof(uint32_t); 2081 } 2082 /* Read Tx statistics. 
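The values read here are deliberately discarded; on chips with the SMB bug the read itself is what clears the MIB counters.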
*/ 2083 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; 2084 reg++) { 2085 CSR_READ_4(sc, ALC_TX_MIB_BASE + i); 2086 i += sizeof(uint32_t); 2087 } 2088 } 2089 } 2090 2091 void 2092 alc_stats_update(struct alc_softc *sc) 2093 { 2094 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 2095 struct alc_hw_stats *stat; 2096 struct smb sb, *smb; 2097 uint32_t *reg; 2098 int i; 2099 2100 stat = &sc->alc_stats; 2101 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2102 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0, 2103 sc->alc_cdata.alc_smb_map->dm_mapsize, 2104 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2105 smb = sc->alc_rdata.alc_smb; 2106 if (smb->updated == 0) 2107 return; 2108 } else { 2109 smb = &sb; 2110 /* Read Rx statistics. */ 2111 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; 2112 reg++) { 2113 *reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i); 2114 i += sizeof(uint32_t); 2115 } 2116 /* Read Tx statistics. */ 2117 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; 2118 reg++) { 2119 *reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i); 2120 i += sizeof(uint32_t); 2121 } 2122 } 2123 2124 /* Rx stats. */ 2125 stat->rx_frames += smb->rx_frames; 2126 stat->rx_bcast_frames += smb->rx_bcast_frames; 2127 stat->rx_mcast_frames += smb->rx_mcast_frames; 2128 stat->rx_pause_frames += smb->rx_pause_frames; 2129 stat->rx_control_frames += smb->rx_control_frames; 2130 stat->rx_crcerrs += smb->rx_crcerrs; 2131 stat->rx_lenerrs += smb->rx_lenerrs; 2132 stat->rx_bytes += smb->rx_bytes; 2133 stat->rx_runts += smb->rx_runts; 2134 stat->rx_fragments += smb->rx_fragments; 2135 stat->rx_pkts_64 += smb->rx_pkts_64; 2136 stat->rx_pkts_65_127 += smb->rx_pkts_65_127; 2137 stat->rx_pkts_128_255 += smb->rx_pkts_128_255; 2138 stat->rx_pkts_256_511 += smb->rx_pkts_256_511; 2139 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; 2140 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; 2141 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; 2142 stat->rx_pkts_truncated += smb->rx_pkts_truncated; 2143 stat->rx_fifo_oflows += smb->rx_fifo_oflows; 2144 stat->rx_rrs_errs += smb->rx_rrs_errs; 2145 stat->rx_alignerrs += smb->rx_alignerrs; 2146 stat->rx_bcast_bytes += smb->rx_bcast_bytes; 2147 stat->rx_mcast_bytes += smb->rx_mcast_bytes; 2148 stat->rx_pkts_filtered += smb->rx_pkts_filtered; 2149 2150 /* Tx stats. 
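The Tx counters are accumulated the same way; collision and error totals derived from them feed if_collisions and if_oerrors below.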
*/ 2151 stat->tx_frames += smb->tx_frames; 2152 stat->tx_bcast_frames += smb->tx_bcast_frames; 2153 stat->tx_mcast_frames += smb->tx_mcast_frames; 2154 stat->tx_pause_frames += smb->tx_pause_frames; 2155 stat->tx_excess_defer += smb->tx_excess_defer; 2156 stat->tx_control_frames += smb->tx_control_frames; 2157 stat->tx_deferred += smb->tx_deferred; 2158 stat->tx_bytes += smb->tx_bytes; 2159 stat->tx_pkts_64 += smb->tx_pkts_64; 2160 stat->tx_pkts_65_127 += smb->tx_pkts_65_127; 2161 stat->tx_pkts_128_255 += smb->tx_pkts_128_255; 2162 stat->tx_pkts_256_511 += smb->tx_pkts_256_511; 2163 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023; 2164 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518; 2165 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max; 2166 stat->tx_single_colls += smb->tx_single_colls; 2167 stat->tx_multi_colls += smb->tx_multi_colls; 2168 stat->tx_late_colls += smb->tx_late_colls; 2169 stat->tx_excess_colls += smb->tx_excess_colls; 2170 stat->tx_underrun += smb->tx_underrun; 2171 stat->tx_desc_underrun += smb->tx_desc_underrun; 2172 stat->tx_lenerrs += smb->tx_lenerrs; 2173 stat->tx_pkts_truncated += smb->tx_pkts_truncated; 2174 stat->tx_bcast_bytes += smb->tx_bcast_bytes; 2175 stat->tx_mcast_bytes += smb->tx_mcast_bytes; 2176 2177 ifp->if_collisions += smb->tx_single_colls + 2178 smb->tx_multi_colls * 2 + smb->tx_late_colls + 2179 smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT; 2180 2181 ifp->if_oerrors += smb->tx_late_colls + smb->tx_excess_colls + 2182 smb->tx_underrun + smb->tx_pkts_truncated; 2183 2184 ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs + 2185 smb->rx_runts + smb->rx_pkts_truncated + 2186 smb->rx_fifo_oflows + smb->rx_rrs_errs + 2187 smb->rx_alignerrs; 2188 2189 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2190 /* Update done, clear. */ 2191 smb->updated = 0; 2192 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0, 2193 sc->alc_cdata.alc_smb_map->dm_mapsize, 2194 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2195 } 2196 } 2197 2198 int 2199 alc_intr(void *arg) 2200 { 2201 struct alc_softc *sc = arg; 2202 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 2203 uint32_t status; 2204 int claimed = 0; 2205 2206 status = CSR_READ_4(sc, ALC_INTR_STATUS); 2207 if ((status & ALC_INTRS) == 0) 2208 return (0); 2209 2210 /* Disable interrupts. */ 2211 CSR_WRITE_4(sc, ALC_INTR_STATUS, INTR_DIS_INT); 2212 2213 status = CSR_READ_4(sc, ALC_INTR_STATUS); 2214 if ((status & ALC_INTRS) == 0) 2215 goto back; 2216 2217 /* Acknowledge and disable interrupts. */ 2218 CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT); 2219 2220 if (ifp->if_flags & IFF_RUNNING) { 2221 int error = 0; 2222 2223 if (status & INTR_RX_PKT) { 2224 error = alc_rxintr(sc); 2225 if (error) { 2226 alc_init(ifp); 2227 return (0); 2228 } 2229 } 2230 if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST | 2231 INTR_TXQ_TO_RST)) { 2232 if (status & INTR_DMA_RD_TO_RST) 2233 printf("%s: DMA read error! -- resetting\n", 2234 sc->sc_dev.dv_xname); 2235 if (status & INTR_DMA_WR_TO_RST) 2236 printf("%s: DMA write error! -- resetting\n", 2237 sc->sc_dev.dv_xname); 2238 if (status & INTR_TXQ_TO_RST) 2239 printf("%s: TxQ reset! -- resetting\n", 2240 sc->sc_dev.dv_xname); 2241 alc_init(ifp); 2242 return (0); 2243 } 2244 2245 alc_txeof(sc); 2246 alc_start(ifp); 2247 } 2248 2249 claimed = 1; 2250 back: 2251 /* Re-enable interrupts. 
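Writing the status register back with everything but the disable bit set acknowledges any remaining events and re-arms interrupt generation.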
*/ 2252 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF); 2253 return (claimed); 2254 } 2255 2256 void 2257 alc_txeof(struct alc_softc *sc) 2258 { 2259 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 2260 struct alc_txdesc *txd; 2261 uint32_t cons, prod; 2262 int prog; 2263 2264 if (sc->alc_cdata.alc_tx_cnt == 0) 2265 return; 2266 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0, 2267 sc->alc_cdata.alc_tx_ring_map->dm_mapsize, 2268 BUS_DMASYNC_POSTWRITE); 2269 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) { 2270 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0, 2271 sc->alc_cdata.alc_cmb_map->dm_mapsize, 2272 BUS_DMASYNC_POSTREAD); 2273 prod = sc->alc_rdata.alc_cmb->cons; 2274 } else { 2275 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 2276 prod = CSR_READ_2(sc, ALC_MBOX_TD_PRI0_CONS_IDX); 2277 else { 2278 prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX); 2279 /* Assume we're using normal Tx priority queue. */ 2280 prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >> 2281 MBOX_TD_CONS_LO_IDX_SHIFT; 2282 } 2283 } 2284 cons = sc->alc_cdata.alc_tx_cons; 2285 /* 2286 * Go through our Tx list and free mbufs for those 2287 * frames which have been transmitted. 2288 */ 2289 for (prog = 0; cons != prod; prog++, 2290 ALC_DESC_INC(cons, ALC_TX_RING_CNT)) { 2291 if (sc->alc_cdata.alc_tx_cnt <= 0) 2292 break; 2293 prog++; 2294 ifq_clr_oactive(&ifp->if_snd); 2295 sc->alc_cdata.alc_tx_cnt--; 2296 txd = &sc->alc_cdata.alc_txdesc[cons]; 2297 if (txd->tx_m != NULL) { 2298 /* Reclaim transmitted mbufs. */ 2299 bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0, 2300 txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2301 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); 2302 m_freem(txd->tx_m); 2303 txd->tx_m = NULL; 2304 } 2305 } 2306 2307 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) 2308 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0, 2309 sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2310 sc->alc_cdata.alc_tx_cons = cons; 2311 /* 2312 * Unarm watchdog timer only when there is no pending 2313 * frames in Tx queue. 
2314 */ 2315 if (sc->alc_cdata.alc_tx_cnt == 0) 2316 ifp->if_timer = 0; 2317 } 2318 2319 int 2320 alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd) 2321 { 2322 struct mbuf *m; 2323 bus_dmamap_t map; 2324 int error; 2325 2326 MGETHDR(m, M_DONTWAIT, MT_DATA); 2327 if (m == NULL) 2328 return (ENOBUFS); 2329 MCLGET(m, M_DONTWAIT); 2330 if (!(m->m_flags & M_EXT)) { 2331 m_freem(m); 2332 return (ENOBUFS); 2333 } 2334 2335 m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX; 2336 2337 error = bus_dmamap_load_mbuf(sc->sc_dmat, 2338 sc->alc_cdata.alc_rx_sparemap, m, BUS_DMA_NOWAIT); 2339 2340 if (error != 0) { 2341 m_freem(m); 2342 printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname); 2343 return (error); 2344 } 2345 2346 if (rxd->rx_m != NULL) { 2347 bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0, 2348 rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 2349 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); 2350 } 2351 map = rxd->rx_dmamap; 2352 rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap; 2353 sc->alc_cdata.alc_rx_sparemap = map; 2354 bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0, rxd->rx_dmamap->dm_mapsize, 2355 BUS_DMASYNC_PREREAD); 2356 rxd->rx_m = m; 2357 rxd->rx_desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr); 2358 return (0); 2359 } 2360 2361 int 2362 alc_rxintr(struct alc_softc *sc) 2363 { 2364 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 2365 struct rx_rdesc *rrd; 2366 uint32_t nsegs, status; 2367 int rr_cons, prog; 2368 2369 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0, 2370 sc->alc_cdata.alc_rr_ring_map->dm_mapsize, 2371 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2372 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0, 2373 sc->alc_cdata.alc_rx_ring_map->dm_mapsize, 2374 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2375 rr_cons = sc->alc_cdata.alc_rr_cons; 2376 for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) { 2377 rrd = &sc->alc_rdata.alc_rr_ring[rr_cons]; 2378 status = letoh32(rrd->status); 2379 if ((status & RRD_VALID) == 0) 2380 break; 2381 nsegs = RRD_RD_CNT(letoh32(rrd->rdinfo)); 2382 if (nsegs == 0) { 2383 /* This should not happen! */ 2384 if (alcdebug) 2385 printf("%s: unexpected segment count -- " 2386 "resetting\n", sc->sc_dev.dv_xname); 2387 return (EIO); 2388 } 2389 alc_rxeof(sc, rrd); 2390 /* Clear Rx return status. */ 2391 rrd->status = 0; 2392 ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT); 2393 sc->alc_cdata.alc_rx_cons += nsegs; 2394 sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT; 2395 prog += nsegs; 2396 } 2397 2398 if (prog > 0) { 2399 /* Update the consumer index. */ 2400 sc->alc_cdata.alc_rr_cons = rr_cons; 2401 /* Sync Rx return descriptors. */ 2402 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0, 2403 sc->alc_cdata.alc_rr_ring_map->dm_mapsize, 2404 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2405 /* 2406 * Sync updated Rx descriptors such that controller see 2407 * modified buffer addresses. 2408 */ 2409 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0, 2410 sc->alc_cdata.alc_rx_ring_map->dm_mapsize, 2411 BUS_DMASYNC_PREWRITE); 2412 /* 2413 * Let controller know availability of new Rx buffers. 2414 * Since alc(4) use RXQ_CFG_RD_BURST_DEFAULT descriptors 2415 * it may be possible to update ALC_MBOX_RD0_PROD_IDX 2416 * only when Rx buffer pre-fetching is required. In 2417 * addition we already set ALC_RX_RD_FREE_THRESH to 2418 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However 2419 * it still seems that pre-fetching needs more 2420 * experimentation. 
2421 */ 2422 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 2423 CSR_WRITE_2(sc, ALC_MBOX_RD0_PROD_IDX, 2424 (uint16_t)sc->alc_cdata.alc_rx_cons); 2425 else 2426 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 2427 sc->alc_cdata.alc_rx_cons); 2428 } 2429 2430 return (0); 2431 } 2432 2433 /* Receive a frame. */ 2434 void 2435 alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd) 2436 { 2437 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 2438 struct alc_rxdesc *rxd; 2439 struct mbuf_list ml = MBUF_LIST_INITIALIZER(); 2440 struct mbuf *mp, *m; 2441 uint32_t rdinfo, status; 2442 int count, nsegs, rx_cons; 2443 2444 status = letoh32(rrd->status); 2445 rdinfo = letoh32(rrd->rdinfo); 2446 rx_cons = RRD_RD_IDX(rdinfo); 2447 nsegs = RRD_RD_CNT(rdinfo); 2448 2449 sc->alc_cdata.alc_rxlen = RRD_BYTES(status); 2450 if (status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) { 2451 /* 2452 * We want to pass the following frames to upper 2453 * layer regardless of error status of Rx return 2454 * ring. 2455 * 2456 * o IP/TCP/UDP checksum is bad. 2457 * o frame length and protocol specific length 2458 * does not match. 2459 * 2460 * Force network stack compute checksum for 2461 * errored frames. 2462 */ 2463 if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN | 2464 RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0) 2465 return; 2466 } 2467 2468 for (count = 0; count < nsegs; count++, 2469 ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) { 2470 rxd = &sc->alc_cdata.alc_rxdesc[rx_cons]; 2471 mp = rxd->rx_m; 2472 /* Add a new receive buffer to the ring. */ 2473 if (alc_newbuf(sc, rxd) != 0) { 2474 ifp->if_iqdrops++; 2475 /* Reuse Rx buffers. */ 2476 m_freem(sc->alc_cdata.alc_rxhead); 2477 break; 2478 } 2479 2480 /* 2481 * Assume we've received a full sized frame. 2482 * Actual size is fixed when we encounter the end of 2483 * multi-segmented frame. 2484 */ 2485 mp->m_len = sc->alc_buf_size; 2486 2487 /* Chain received mbufs. */ 2488 if (sc->alc_cdata.alc_rxhead == NULL) { 2489 sc->alc_cdata.alc_rxhead = mp; 2490 sc->alc_cdata.alc_rxtail = mp; 2491 } else { 2492 mp->m_flags &= ~M_PKTHDR; 2493 sc->alc_cdata.alc_rxprev_tail = 2494 sc->alc_cdata.alc_rxtail; 2495 sc->alc_cdata.alc_rxtail->m_next = mp; 2496 sc->alc_cdata.alc_rxtail = mp; 2497 } 2498 2499 if (count == nsegs - 1) { 2500 /* Last desc. for this frame. */ 2501 m = sc->alc_cdata.alc_rxhead; 2502 m->m_flags |= M_PKTHDR; 2503 /* 2504 * It seems that L1C/L2C controller has no way 2505 * to tell hardware to strip CRC bytes. 2506 */ 2507 m->m_pkthdr.len = 2508 sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN; 2509 if (nsegs > 1) { 2510 /* Set last mbuf size. */ 2511 mp->m_len = sc->alc_cdata.alc_rxlen - 2512 (nsegs - 1) * sc->alc_buf_size; 2513 /* Remove the CRC bytes in chained mbufs. */ 2514 if (mp->m_len <= ETHER_CRC_LEN) { 2515 sc->alc_cdata.alc_rxtail = 2516 sc->alc_cdata.alc_rxprev_tail; 2517 sc->alc_cdata.alc_rxtail->m_len -= 2518 (ETHER_CRC_LEN - mp->m_len); 2519 sc->alc_cdata.alc_rxtail->m_next = NULL; 2520 m_freem(mp); 2521 } else { 2522 mp->m_len -= ETHER_CRC_LEN; 2523 } 2524 } else 2525 m->m_len = m->m_pkthdr.len; 2526 /* 2527 * Due to hardware bugs, Rx checksum offloading 2528 * was intentionally disabled. 2529 */ 2530 #if NVLAN > 0 2531 if (status & RRD_VLAN_TAG) { 2532 u_int32_t vtag = RRD_VLAN(letoh32(rrd->vtag)); 2533 m->m_pkthdr.ether_vtag = ntohs(vtag); 2534 m->m_flags |= M_VLANTAG; 2535 } 2536 #endif 2537 2538 2539 ml_enqueue(&ml, m); 2540 } 2541 } 2542 if_input(ifp, &ml); 2543 2544 /* Reset mbuf chains. 
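ALC_RXCHAIN_RESET() clears the rxhead/rxtail pointers so the next frame starts with an empty chain.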
*/ 2545 ALC_RXCHAIN_RESET(sc); 2546 } 2547 2548 void 2549 alc_tick(void *xsc) 2550 { 2551 struct alc_softc *sc = xsc; 2552 struct mii_data *mii = &sc->sc_miibus; 2553 int s; 2554 2555 s = splnet(); 2556 mii_tick(mii); 2557 alc_stats_update(sc); 2558 2559 timeout_add_sec(&sc->alc_tick_ch, 1); 2560 splx(s); 2561 } 2562 2563 void 2564 alc_osc_reset(struct alc_softc *sc) 2565 { 2566 uint32_t reg; 2567 2568 reg = CSR_READ_4(sc, ALC_MISC3); 2569 reg &= ~MISC3_25M_BY_SW; 2570 reg |= MISC3_25M_NOTO_INTNL; 2571 CSR_WRITE_4(sc, ALC_MISC3, reg); 2572 reg = CSR_READ_4(sc, ALC_MISC); 2573 if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) { 2574 /* 2575 * Restore over-current protection default value. 2576 * This value could be reset by MAC reset. 2577 */ 2578 reg &= ~MISC_PSW_OCP_MASK; 2579 reg |= (MISC_PSW_OCP_DEFAULT << MISC_PSW_OCP_SHIFT); 2580 reg &= ~MISC_INTNLOSC_OPEN; 2581 CSR_WRITE_4(sc, ALC_MISC, reg); 2582 CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN); 2583 reg = CSR_READ_4(sc, ALC_MISC2); 2584 reg &= ~MISC2_CALB_START; 2585 CSR_WRITE_4(sc, ALC_MISC2, reg); 2586 CSR_WRITE_4(sc, ALC_MISC2, reg | MISC2_CALB_START); 2587 } else { 2588 reg &= ~MISC_INTNLOSC_OPEN; 2589 /* Disable isolate for revision A devices. */ 2590 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1) 2591 reg &= ~MISC_ISO_ENB; 2592 CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN); 2593 CSR_WRITE_4(sc, ALC_MISC, reg); 2594 } 2595 DELAY(20); 2596 } 2597 2598 void 2599 alc_reset(struct alc_softc *sc) 2600 { 2601 uint32_t reg, pmcfg = 0; 2602 int i; 2603 2604 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 2605 /* Reset workaround. */ 2606 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 1); 2607 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 && 2608 (sc->alc_rev & 0x01) != 0) { 2609 /* Disable L0s/L1s before reset. */ 2610 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 2611 if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | 2612 PM_CFG_ASPM_L1_ENB))!= 0) { 2613 pmcfg &= ~(PM_CFG_ASPM_L0S_ENB | 2614 PM_CFG_ASPM_L1_ENB); 2615 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 2616 } 2617 } 2618 } 2619 reg = CSR_READ_4(sc, ALC_MASTER_CFG); 2620 reg |= MASTER_OOB_DIS_OFF | MASTER_RESET; 2621 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 2622 2623 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 2624 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 2625 DELAY(10); 2626 if (CSR_READ_4(sc, ALC_MBOX_RD0_PROD_IDX) == 0) 2627 break; 2628 } 2629 if (i == 0) 2630 printf("MAC reset timeout!\n"); 2631 } 2632 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 2633 DELAY(10); 2634 if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0) 2635 break; 2636 } 2637 if (i == 0) 2638 printf("%s: master reset timeout!\n", sc->sc_dev.dv_xname); 2639 2640 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 2641 reg = CSR_READ_4(sc, ALC_IDLE_STATUS); 2642 if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC | 2643 IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0) 2644 break; 2645 DELAY(10); 2646 } 2647 2648 if (i == 0) 2649 printf("%s: reset timeout(0x%08x)!\n", sc->sc_dev.dv_xname, 2650 reg); 2651 2652 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 2653 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 && 2654 (sc->alc_rev & 0x01) != 0) { 2655 reg = CSR_READ_4(sc, ALC_MASTER_CFG); 2656 reg |= MASTER_CLK_SEL_DIS; 2657 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 2658 /* Restore L0s/L1s config. 
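pmcfg holds the ASPM bits that were saved and cleared before the reset workaround above.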
*/ 2659 if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | 2660 PM_CFG_ASPM_L1_ENB)) != 0) 2661 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 2662 } 2663 alc_osc_reset(sc); 2664 reg = CSR_READ_4(sc, ALC_MISC3); 2665 reg &= ~MISC3_25M_BY_SW; 2666 reg |= MISC3_25M_NOTO_INTNL; 2667 CSR_WRITE_4(sc, ALC_MISC3, reg); 2668 reg = CSR_READ_4(sc, ALC_MISC); 2669 reg &= ~MISC_INTNLOSC_OPEN; 2670 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1) 2671 reg &= ~MISC_ISO_ENB; 2672 CSR_WRITE_4(sc, ALC_MISC, reg); 2673 DELAY(20); 2674 } 2675 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 || 2676 sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 || 2677 sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2) 2678 CSR_WRITE_4(sc, ALC_SERDES_LOCK, 2679 CSR_READ_4(sc, ALC_SERDES_LOCK) | 2680 SERDES_MAC_CLK_SLOWDOWN | SERDES_PHY_CLK_SLOWDOWN); 2681 } 2682 2683 int 2684 alc_init(struct ifnet *ifp) 2685 { 2686 struct alc_softc *sc = ifp->if_softc; 2687 uint8_t eaddr[ETHER_ADDR_LEN]; 2688 bus_addr_t paddr; 2689 uint32_t reg, rxf_hi, rxf_lo; 2690 int error; 2691 2692 /* 2693 * Cancel any pending I/O. 2694 */ 2695 alc_stop(sc); 2696 /* 2697 * Reset the chip to a known state. 2698 */ 2699 alc_reset(sc); 2700 2701 /* Initialize Rx descriptors. */ 2702 error = alc_init_rx_ring(sc); 2703 if (error != 0) { 2704 printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname); 2705 alc_stop(sc); 2706 return (error); 2707 } 2708 alc_init_rr_ring(sc); 2709 alc_init_tx_ring(sc); 2710 alc_init_cmb(sc); 2711 alc_init_smb(sc); 2712 2713 /* Enable all clocks. */ 2714 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 2715 CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, CLK_GATING_DMAW_ENB | 2716 CLK_GATING_DMAR_ENB | CLK_GATING_TXQ_ENB | 2717 CLK_GATING_RXQ_ENB | CLK_GATING_TXMAC_ENB | 2718 CLK_GATING_RXMAC_ENB); 2719 if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) 2720 CSR_WRITE_4(sc, ALC_IDLE_DECISN_TIMER, 2721 IDLE_DECISN_TIMER_DEFAULT_1MS); 2722 } else 2723 CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0); 2724 2725 /* Reprogram the station address. */ 2726 bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN); 2727 CSR_WRITE_4(sc, ALC_PAR0, 2728 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 2729 CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]); 2730 /* 2731 * Clear WOL status and disable all WOL feature as WOL 2732 * would interfere Rx operation under normal environments. 2733 */ 2734 CSR_READ_4(sc, ALC_WOL_CFG); 2735 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 2736 /* Set Tx descriptor base addresses. */ 2737 paddr = sc->alc_rdata.alc_tx_ring_paddr; 2738 CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 2739 CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 2740 /* We don't use high priority ring. */ 2741 CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0); 2742 /* Set Tx descriptor counter. */ 2743 CSR_WRITE_4(sc, ALC_TD_RING_CNT, 2744 (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK); 2745 /* Set Rx descriptor base addresses. */ 2746 paddr = sc->alc_rdata.alc_rx_ring_paddr; 2747 CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 2748 CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 2749 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 2750 /* We use one Rx ring. */ 2751 CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0); 2752 CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0); 2753 CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0); 2754 } 2755 /* Set Rx descriptor counter. */ 2756 CSR_WRITE_4(sc, ALC_RD_RING_CNT, 2757 (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK); 2758 2759 /* 2760 * Let hardware split jumbo frames into alc_max_buf_sized chunks. 
2761 * if it do not fit the buffer size. Rx return descriptor holds 2762 * a counter that indicates how many fragments were made by the 2763 * hardware. The buffer size should be multiple of 8 bytes. 2764 * Since hardware has limit on the size of buffer size, always 2765 * use the maximum value. 2766 * For strict-alignment architectures make sure to reduce buffer 2767 * size by 8 bytes to make room for alignment fixup. 2768 */ 2769 sc->alc_buf_size = RX_BUF_SIZE_MAX; 2770 CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size); 2771 2772 paddr = sc->alc_rdata.alc_rr_ring_paddr; 2773 /* Set Rx return descriptor base addresses. */ 2774 CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 2775 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 2776 /* We use one Rx return ring. */ 2777 CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0); 2778 CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0); 2779 CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0); 2780 } 2781 /* Set Rx return descriptor counter. */ 2782 CSR_WRITE_4(sc, ALC_RRD_RING_CNT, 2783 (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK); 2784 paddr = sc->alc_rdata.alc_cmb_paddr; 2785 CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr)); 2786 paddr = sc->alc_rdata.alc_smb_paddr; 2787 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 2788 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr)); 2789 2790 if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1) { 2791 /* Reconfigure SRAM - Vendor magic. */ 2792 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0); 2793 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100); 2794 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000); 2795 CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0); 2796 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0); 2797 CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0); 2798 CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000); 2799 CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000); 2800 } 2801 2802 /* Tell hardware that we're ready to load DMA blocks. */ 2803 CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD); 2804 2805 /* Configure interrupt moderation timer. */ 2806 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT; 2807 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT; 2808 reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT; 2809 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) 2810 reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT; 2811 CSR_WRITE_4(sc, ALC_IM_TIMER, reg); 2812 /* 2813 * We don't want to automatic interrupt clear as task queue 2814 * for the interrupt should know interrupt status. 2815 */ 2816 reg = CSR_READ_4(sc, ALC_MASTER_CFG); 2817 reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB); 2818 reg |= MASTER_SA_TIMER_ENB; 2819 if (ALC_USECS(sc->alc_int_rx_mod) != 0) 2820 reg |= MASTER_IM_RX_TIMER_ENB; 2821 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0 && 2822 ALC_USECS(sc->alc_int_tx_mod) != 0) 2823 reg |= MASTER_IM_TX_TIMER_ENB; 2824 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 2825 /* 2826 * Disable interrupt re-trigger timer. We don't want automatic 2827 * re-triggering of un-ACKed interrupts. 2828 */ 2829 CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0)); 2830 /* Configure CMB. 
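The coalescing message block mirrors the Tx consumer index in host memory, letting alc_txeof() avoid a register read on chips without the CMB bug.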
*/ 2831 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 2832 CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, ALC_TX_RING_CNT / 3); 2833 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, 2834 ALC_USECS(sc->alc_int_tx_mod)); 2835 } else { 2836 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) { 2837 CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4); 2838 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000)); 2839 } else 2840 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0)); 2841 } 2842 /* 2843 * Hardware can be configured to issue SMB interrupt based 2844 * on programmed interval. Since there is a callout that is 2845 * invoked for every hz in driver we use that instead of 2846 * relying on periodic SMB interrupt. 2847 */ 2848 CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0)); 2849 /* Clear MAC statistics. */ 2850 alc_stats_clear(sc); 2851 2852 /* 2853 * Always use maximum frame size that controller can support. 2854 * Otherwise received frames that has larger frame length 2855 * than alc(4) MTU would be silently dropped in hardware. This 2856 * would make path-MTU discovery hard as sender wouldn't get 2857 * any responses from receiver. alc(4) supports 2858 * multi-fragmented frames on Rx path so it has no issue on 2859 * assembling fragmented frames. Using maximum frame size also 2860 * removes the need to reinitialize hardware when interface 2861 * MTU configuration was changed. 2862 * 2863 * Be conservative in what you do, be liberal in what you 2864 * accept from others - RFC 793. 2865 */ 2866 CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_max_framelen); 2867 2868 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 2869 /* Disable header split(?) */ 2870 CSR_WRITE_4(sc, ALC_HDS_CFG, 0); 2871 /* Configure IPG/IFG parameters. */ 2872 CSR_WRITE_4(sc, ALC_IPG_IFG_CFG, 2873 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & 2874 IPG_IFG_IPGT_MASK) | 2875 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & 2876 IPG_IFG_MIFG_MASK) | 2877 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & 2878 IPG_IFG_IPG1_MASK) | 2879 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & 2880 IPG_IFG_IPG2_MASK)); 2881 /* Set parameters for half-duplex media. */ 2882 CSR_WRITE_4(sc, ALC_HDPX_CFG, 2883 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) & 2884 HDPX_CFG_LCOL_MASK) | 2885 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) & 2886 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN | 2887 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) & 2888 HDPX_CFG_ABEBT_MASK) | 2889 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) & 2890 HDPX_CFG_JAMIPG_MASK)); 2891 } 2892 2893 /* 2894 * Set TSO/checksum offload threshold. For frames that is 2895 * larger than this threshold, hardware wouldn't do 2896 * TSO/checksum offloading. 2897 */ 2898 reg = (sc->alc_max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) & 2899 TSO_OFFLOAD_THRESH_MASK; 2900 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 2901 reg |= TSO_OFFLOAD_ERRLGPKT_DROP_ENB; 2902 CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH, reg); 2903 /* Configure TxQ. 
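alc_dma_burst[] maps the configured DMA read-burst index to a Tx FIFO burst size; the L2C variants use only half of that value.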
*/ 2904 reg = (alc_dma_burst[sc->alc_dma_rd_burst] << 2905 TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK; 2906 if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 || 2907 sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2) 2908 reg >>= 1; 2909 reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) & 2910 TXQ_CFG_TD_BURST_MASK; 2911 reg |= TXQ_CFG_IP_OPTION_ENB | TXQ_CFG_8023_ENB; 2912 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE); 2913 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 2914 reg = (TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q1_BURST_SHIFT | 2915 TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q2_BURST_SHIFT | 2916 TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q3_BURST_SHIFT | 2917 HQTD_CFG_BURST_ENB); 2918 CSR_WRITE_4(sc, ALC_HQTD_CFG, reg); 2919 reg = WRR_PRI_RESTRICT_NONE; 2920 reg |= (WRR_PRI_DEFAULT << WRR_PRI0_SHIFT | 2921 WRR_PRI_DEFAULT << WRR_PRI1_SHIFT | 2922 WRR_PRI_DEFAULT << WRR_PRI2_SHIFT | 2923 WRR_PRI_DEFAULT << WRR_PRI3_SHIFT); 2924 CSR_WRITE_4(sc, ALC_WRR, reg); 2925 } else { 2926 /* Configure Rx free descriptor pre-fetching. */ 2927 CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH, 2928 ((RX_RD_FREE_THRESH_HI_DEFAULT << 2929 RX_RD_FREE_THRESH_HI_SHIFT) & RX_RD_FREE_THRESH_HI_MASK) | 2930 ((RX_RD_FREE_THRESH_LO_DEFAULT << 2931 RX_RD_FREE_THRESH_LO_SHIFT) & RX_RD_FREE_THRESH_LO_MASK)); 2932 } 2933 2934 /* 2935 * Configure flow control parameters. 2936 * XON : 80% of Rx FIFO 2937 * XOFF : 30% of Rx FIFO 2938 */ 2939 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 2940 reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN); 2941 reg &= SRAM_RX_FIFO_LEN_MASK; 2942 reg *= 8; 2943 if (reg > 8 * 1024) 2944 reg -= RX_FIFO_PAUSE_816X_RSVD; 2945 else 2946 reg -= RX_BUF_SIZE_MAX; 2947 reg /= 8; 2948 CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH, 2949 ((reg << RX_FIFO_PAUSE_THRESH_LO_SHIFT) & 2950 RX_FIFO_PAUSE_THRESH_LO_MASK) | 2951 (((RX_FIFO_PAUSE_816X_RSVD / 8) << 2952 RX_FIFO_PAUSE_THRESH_HI_SHIFT) & 2953 RX_FIFO_PAUSE_THRESH_HI_MASK)); 2954 } else if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1C|| 2955 sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C) { 2956 reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN); 2957 rxf_hi = (reg * 8) / 10; 2958 rxf_lo = (reg * 3) / 10; 2959 CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH, 2960 ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) & 2961 RX_FIFO_PAUSE_THRESH_LO_MASK) | 2962 ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) & 2963 RX_FIFO_PAUSE_THRESH_HI_MASK)); 2964 } 2965 2966 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 2967 /* Disable RSS until I understand L1C/L2C's RSS logic. */ 2968 CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0); 2969 CSR_WRITE_4(sc, ALC_RSS_CPU, 0); 2970 } 2971 2972 /* Configure RxQ. */ 2973 reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) & 2974 RXQ_CFG_RD_BURST_MASK; 2975 reg |= RXQ_CFG_RSS_MODE_DIS; 2976 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 2977 reg |= (RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT << 2978 RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT) & 2979 RXQ_CFG_816X_IDT_TBL_SIZE_MASK; 2980 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) 2981 reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M; 2982 } else { 2983 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 && 2984 sc->sc_product != PCI_PRODUCT_ATTANSIC_L1D_1) 2985 reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M; 2986 } 2987 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 2988 2989 /* Configure DMA parameters. 
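CMB and SMB DMA are enabled only on parts without the corresponding silicon bugs; the read/write burst lengths reuse sc->alc_dma_rd_burst and sc->alc_dma_wr_burst.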
*/ 2990 reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI; 2991 reg |= sc->alc_rcb; 2992 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) 2993 reg |= DMA_CFG_CMB_ENB; 2994 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) 2995 reg |= DMA_CFG_SMB_ENB; 2996 else 2997 reg |= DMA_CFG_SMB_DIS; 2998 reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) << 2999 DMA_CFG_RD_BURST_SHIFT; 3000 reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) << 3001 DMA_CFG_WR_BURST_SHIFT; 3002 reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) & 3003 DMA_CFG_RD_DELAY_CNT_MASK; 3004 reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) & 3005 DMA_CFG_WR_DELAY_CNT_MASK; 3006 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 3007 switch (AR816X_REV(sc->alc_rev)) { 3008 case AR816X_REV_A0: 3009 case AR816X_REV_A1: 3010 reg |= DMA_CFG_RD_CHNL_SEL_2; 3011 break; 3012 case AR816X_REV_B0: 3013 /* FALLTHROUGH */ 3014 default: 3015 reg |= DMA_CFG_RD_CHNL_SEL_4; 3016 break; 3017 } 3018 } 3019 CSR_WRITE_4(sc, ALC_DMA_CFG, reg); 3020 3021 /* 3022 * Configure Tx/Rx MACs. 3023 * - Auto-padding for short frames. 3024 * - Enable CRC generation. 3025 * Actual reconfiguration of MAC for resolved speed/duplex 3026 * is followed after detection of link establishment. 3027 * AR813x/AR815x always does checksum computation regardless 3028 * of MAC_CFG_RXCSUM_ENB bit. Also the controller is known to 3029 * have bug in protocol field in Rx return structure so 3030 * these controllers can't handle fragmented frames. Disable 3031 * Rx checksum offloading until there is a newer controller 3032 * that has sane implementation. 3033 */ 3034 reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX | 3035 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & 3036 MAC_CFG_PREAMBLE_MASK); 3037 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 || 3038 sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D || 3039 sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 || 3040 sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2) 3041 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW; 3042 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0) 3043 reg |= MAC_CFG_SPEED_10_100; 3044 else 3045 reg |= MAC_CFG_SPEED_1000; 3046 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3047 3048 /* Set up the receive filter. */ 3049 alc_iff(sc); 3050 3051 alc_rxvlan(sc); 3052 3053 /* Acknowledge all pending interrupts and clear it. */ 3054 CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS); 3055 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 3056 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0); 3057 3058 ifp->if_flags |= IFF_RUNNING; 3059 ifq_clr_oactive(&ifp->if_snd); 3060 3061 sc->alc_flags &= ~ALC_FLAG_LINK; 3062 /* Switch to the current media. */ 3063 alc_mediachange(ifp); 3064 3065 timeout_add_sec(&sc->alc_tick_ch, 1); 3066 3067 return (0); 3068 } 3069 3070 void 3071 alc_stop(struct alc_softc *sc) 3072 { 3073 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 3074 struct alc_txdesc *txd; 3075 struct alc_rxdesc *rxd; 3076 uint32_t reg; 3077 int i; 3078 3079 /* 3080 * Mark the interface down and cancel the watchdog timer. 3081 */ 3082 ifp->if_flags &= ~IFF_RUNNING; 3083 ifq_clr_oactive(&ifp->if_snd); 3084 ifp->if_timer = 0; 3085 3086 timeout_del(&sc->alc_tick_ch); 3087 sc->alc_flags &= ~ALC_FLAG_LINK; 3088 3089 alc_stats_update(sc); 3090 3091 /* Disable interrupts. */ 3092 CSR_WRITE_4(sc, ALC_INTR_MASK, 0); 3093 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 3094 3095 /* Disable DMA. 
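The delay that follows gives any in-flight CMB/SMB transfers time to finish before the MACs are stopped.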
*/ 3096 reg = CSR_READ_4(sc, ALC_DMA_CFG); 3097 reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB); 3098 reg |= DMA_CFG_SMB_DIS; 3099 CSR_WRITE_4(sc, ALC_DMA_CFG, reg); 3100 DELAY(1000); 3101 3102 /* Stop Rx/Tx MACs. */ 3103 alc_stop_mac(sc); 3104 3105 /* Disable interrupts which might be touched in taskq handler. */ 3106 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 3107 3108 /* Disable L0s/L1s */ 3109 reg = CSR_READ_4(sc, ALC_PM_CFG); 3110 if ((reg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB))!= 0) { 3111 reg &= ~(PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB); 3112 CSR_WRITE_4(sc, ALC_PM_CFG, reg); 3113 } 3114 3115 /* Reclaim Rx buffers that have been processed. */ 3116 m_freem(sc->alc_cdata.alc_rxhead); 3117 ALC_RXCHAIN_RESET(sc); 3118 /* 3119 * Free Tx/Rx mbufs still in the queues. 3120 */ 3121 for (i = 0; i < ALC_RX_RING_CNT; i++) { 3122 rxd = &sc->alc_cdata.alc_rxdesc[i]; 3123 if (rxd->rx_m != NULL) { 3124 bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0, 3125 rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 3126 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); 3127 m_freem(rxd->rx_m); 3128 rxd->rx_m = NULL; 3129 } 3130 } 3131 for (i = 0; i < ALC_TX_RING_CNT; i++) { 3132 txd = &sc->alc_cdata.alc_txdesc[i]; 3133 if (txd->tx_m != NULL) { 3134 bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0, 3135 txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 3136 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); 3137 m_freem(txd->tx_m); 3138 txd->tx_m = NULL; 3139 } 3140 } 3141 } 3142 3143 void 3144 alc_stop_mac(struct alc_softc *sc) 3145 { 3146 uint32_t reg; 3147 int i; 3148 3149 alc_stop_queue(sc); 3150 /* Disable Rx/Tx MAC. */ 3151 reg = CSR_READ_4(sc, ALC_MAC_CFG); 3152 if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) { 3153 reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB); 3154 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3155 } 3156 for (i = ALC_TIMEOUT; i > 0; i--) { 3157 reg = CSR_READ_4(sc, ALC_IDLE_STATUS); 3158 if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC)) == 0) 3159 break; 3160 DELAY(10); 3161 } 3162 if (i == 0) 3163 printf("%s: could not disable Rx/Tx MAC(0x%08x)!\n", 3164 sc->sc_dev.dv_xname, reg); 3165 } 3166 3167 void 3168 alc_start_queue(struct alc_softc *sc) 3169 { 3170 uint32_t qcfg[] = { 3171 0, 3172 RXQ_CFG_QUEUE0_ENB, 3173 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB, 3174 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB, 3175 RXQ_CFG_ENB 3176 }; 3177 uint32_t cfg; 3178 3179 /* Enable RxQ. */ 3180 cfg = CSR_READ_4(sc, ALC_RXQ_CFG); 3181 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 3182 cfg &= ~RXQ_CFG_ENB; 3183 cfg |= qcfg[1]; 3184 } else 3185 cfg |= RXQ_CFG_QUEUE0_ENB; 3186 3187 CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg); 3188 /* Enable TxQ. */ 3189 cfg = CSR_READ_4(sc, ALC_TXQ_CFG); 3190 cfg |= TXQ_CFG_ENB; 3191 CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg); 3192 } 3193 3194 void 3195 alc_stop_queue(struct alc_softc *sc) 3196 { 3197 uint32_t reg; 3198 int i; 3199 3200 /* Disable RxQ. */ 3201 reg = CSR_READ_4(sc, ALC_RXQ_CFG); 3202 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 3203 if ((reg & RXQ_CFG_ENB) != 0) { 3204 reg &= ~RXQ_CFG_ENB; 3205 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 3206 } 3207 } else { 3208 if ((reg & RXQ_CFG_QUEUE0_ENB) != 0) { 3209 reg &= ~RXQ_CFG_QUEUE0_ENB; 3210 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 3211 } 3212 } 3213 /* Disable TxQ. 
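Once both queues are disabled, IDLE_STATUS is polled until the RxQ and TxQ engines report idle.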
*/ 3214 reg = CSR_READ_4(sc, ALC_TXQ_CFG); 3215 if ((reg & TXQ_CFG_ENB) != 0) { 3216 reg &= ~TXQ_CFG_ENB; 3217 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg); 3218 } 3219 DELAY(40); 3220 for (i = ALC_TIMEOUT; i > 0; i--) { 3221 reg = CSR_READ_4(sc, ALC_IDLE_STATUS); 3222 if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0) 3223 break; 3224 DELAY(10); 3225 } 3226 if (i == 0) 3227 printf("%s: could not disable RxQ/TxQ (0x%08x)!\n", 3228 sc->sc_dev.dv_xname, reg); 3229 } 3230 3231 void 3232 alc_init_tx_ring(struct alc_softc *sc) 3233 { 3234 struct alc_ring_data *rd; 3235 struct alc_txdesc *txd; 3236 int i; 3237 3238 sc->alc_cdata.alc_tx_prod = 0; 3239 sc->alc_cdata.alc_tx_cons = 0; 3240 sc->alc_cdata.alc_tx_cnt = 0; 3241 3242 rd = &sc->alc_rdata; 3243 bzero(rd->alc_tx_ring, ALC_TX_RING_SZ); 3244 for (i = 0; i < ALC_TX_RING_CNT; i++) { 3245 txd = &sc->alc_cdata.alc_txdesc[i]; 3246 txd->tx_m = NULL; 3247 } 3248 3249 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0, 3250 sc->alc_cdata.alc_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 3251 } 3252 3253 int 3254 alc_init_rx_ring(struct alc_softc *sc) 3255 { 3256 struct alc_ring_data *rd; 3257 struct alc_rxdesc *rxd; 3258 int i; 3259 3260 sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1; 3261 rd = &sc->alc_rdata; 3262 bzero(rd->alc_rx_ring, ALC_RX_RING_SZ); 3263 for (i = 0; i < ALC_RX_RING_CNT; i++) { 3264 rxd = &sc->alc_cdata.alc_rxdesc[i]; 3265 rxd->rx_m = NULL; 3266 rxd->rx_desc = &rd->alc_rx_ring[i]; 3267 if (alc_newbuf(sc, rxd) != 0) 3268 return (ENOBUFS); 3269 } 3270 3271 /* 3272 * Since controller does not update Rx descriptors, driver 3273 * does not have to read Rx descriptors back so BUS_DMASYNC_PREWRITE 3274 * is enough to ensure coherence. 3275 */ 3276 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0, 3277 sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 3278 /* Let controller know availability of new Rx buffers.
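Publishing the producer index hands all of the freshly loaded buffers to the chip in one go.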
*/ 3279 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons); 3280 3281 return (0); 3282 } 3283 3284 void 3285 alc_init_rr_ring(struct alc_softc *sc) 3286 { 3287 struct alc_ring_data *rd; 3288 3289 sc->alc_cdata.alc_rr_cons = 0; 3290 ALC_RXCHAIN_RESET(sc); 3291 3292 rd = &sc->alc_rdata; 3293 bzero(rd->alc_rr_ring, ALC_RR_RING_SZ); 3294 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0, 3295 sc->alc_cdata.alc_rr_ring_map->dm_mapsize, 3296 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3297 } 3298 3299 void 3300 alc_init_cmb(struct alc_softc *sc) 3301 { 3302 struct alc_ring_data *rd; 3303 3304 rd = &sc->alc_rdata; 3305 bzero(rd->alc_cmb, ALC_CMB_SZ); 3306 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0, 3307 sc->alc_cdata.alc_cmb_map->dm_mapsize, 3308 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3309 } 3310 3311 void 3312 alc_init_smb(struct alc_softc *sc) 3313 { 3314 struct alc_ring_data *rd; 3315 3316 rd = &sc->alc_rdata; 3317 bzero(rd->alc_smb, ALC_SMB_SZ); 3318 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0, 3319 sc->alc_cdata.alc_smb_map->dm_mapsize, 3320 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3321 } 3322 3323 void 3324 alc_rxvlan(struct alc_softc *sc) 3325 { 3326 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 3327 uint32_t reg; 3328 3329 reg = CSR_READ_4(sc, ALC_MAC_CFG); 3330 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) 3331 reg |= MAC_CFG_VLAN_TAG_STRIP; 3332 else 3333 reg &= ~MAC_CFG_VLAN_TAG_STRIP; 3334 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3335 } 3336 3337 void 3338 alc_iff(struct alc_softc *sc) 3339 { 3340 struct arpcom *ac = &sc->sc_arpcom; 3341 struct ifnet *ifp = &ac->ac_if; 3342 struct ether_multi *enm; 3343 struct ether_multistep step; 3344 uint32_t crc; 3345 uint32_t mchash[2]; 3346 uint32_t rxcfg; 3347 3348 rxcfg = CSR_READ_4(sc, ALC_MAC_CFG); 3349 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC); 3350 ifp->if_flags &= ~IFF_ALLMULTI; 3351 3352 /* 3353 * Always accept broadcast frames. 3354 */ 3355 rxcfg |= MAC_CFG_BCAST; 3356 3357 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) { 3358 ifp->if_flags |= IFF_ALLMULTI; 3359 if (ifp->if_flags & IFF_PROMISC) 3360 rxcfg |= MAC_CFG_PROMISC; 3361 else 3362 rxcfg |= MAC_CFG_ALLMULTI; 3363 mchash[0] = mchash[1] = 0xFFFFFFFF; 3364 } else { 3365 /* Program new filter. */ 3366 bzero(mchash, sizeof(mchash)); 3367 3368 ETHER_FIRST_MULTI(step, ac, enm); 3369 while (enm != NULL) { 3370 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN); 3371 3372 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 3373 3374 ETHER_NEXT_MULTI(step, enm); 3375 } 3376 } 3377 3378 CSR_WRITE_4(sc, ALC_MAR0, mchash[0]); 3379 CSR_WRITE_4(sc, ALC_MAR1, mchash[1]); 3380 CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg); 3381 } 3382