1 /*- 2 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * $FreeBSD: src/sys/dev/alc/if_alc.c,v 1.6 2009/09/29 23:03:16 yongari Exp $ 28 */ 29 30 /* Driver for Atheros AR8131/AR8132 PCIe Ethernet. 
*/ 31 32 #include <sys/param.h> 33 #include <sys/bitops.h> 34 #include <sys/endian.h> 35 #include <sys/kernel.h> 36 #include <sys/bus.h> 37 #include <sys/interrupt.h> 38 #include <sys/malloc.h> 39 #include <sys/proc.h> 40 #include <sys/rman.h> 41 #include <sys/serialize.h> 42 #include <sys/socket.h> 43 #include <sys/sockio.h> 44 #include <sys/sysctl.h> 45 #include <sys/in_cksum.h> 46 47 #include <net/ethernet.h> 48 #include <net/if.h> 49 #include <net/bpf.h> 50 #include <net/if_arp.h> 51 #include <net/if_dl.h> 52 #include <net/if_media.h> 53 #include <net/ifq_var.h> 54 #include <net/vlan/if_vlan_var.h> 55 #include <net/vlan/if_vlan_ether.h> 56 57 #include <netinet/ip.h> 58 #include <netinet/tcp.h> 59 60 #include <dev/netif/mii_layer/mii.h> 61 #include <dev/netif/mii_layer/miivar.h> 62 63 #include <bus/pci/pcireg.h> 64 #include <bus/pci/pcivar.h> 65 #include "pcidevs.h" 66 67 #include <dev/netif/alc/if_alcreg.h> 68 #include <dev/netif/alc/if_alcvar.h> 69 70 /* "device miibus" required. See GENERIC if you get errors here. */ 71 #include "miibus_if.h" 72 73 #undef ALC_USE_CUSTOM_CSUM 74 #ifdef ALC_USE_CUSTOM_CSUM 75 #define ALC_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 76 #else 77 #define ALC_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 78 #endif 79 80 #define ALC_LOCK(sc) 81 #define ALC_UNLOCK(sc) 82 #define ALC_LOCK_ASSERT(sc) 83 84 #define PCIER_LINK_CAP PCIER_LINKCAP 85 #define PCIEM_LINK_CAP_ASPM PCIEM_LNKCAP_ASPM_MASK 86 #define PCIER_LINK_CTL PCIER_LINKCTRL 87 #define PCIEM_LINK_CTL_RCB PCIEM_LNKCTL_RCB 88 #define PCIEM_LINK_CTL_ASPMC PCIEM_LNKCTL_ASPM_MASK 89 #define PCIEM_LINK_CTL_ASPMC_L0S PCIEM_LNKCTL_ASPM_L0S 90 #define PCIEM_LINK_CTL_ASPMC_L1 PCIEM_LNKCTL_ASPM_L1 91 #define PCIEM_LINK_CTL_EXTENDED_SYNC PCIEM_LNKCTL_EXTENDED_SYNC 92 #define PCIER_DEVICE_CTL PCIER_DEVCTRL 93 #define PCIEM_CTL_MAX_READ_REQUEST PCIEM_DEVCTL_MAX_READRQ_MASK 94 #define PCIEM_CTL_MAX_PAYLOAD PCIEM_DEVCTL_MAX_PAYLOAD_MASK 95 96 /* Tunables. 
*/ 97 static int alc_msi_enable = 1; 98 TUNABLE_INT("hw.alc.msi.enable", &alc_msi_enable); 99 100 /* 101 * Devices supported by this driver. 102 */ 103 104 static struct alc_ident alc_ident_table[] = { 105 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8131, 9 * 1024, 106 "Atheros AR8131 PCIe Gigabit Ethernet" }, 107 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8132, 9 * 1024, 108 "Atheros AR8132 PCIe Fast Ethernet" }, 109 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151, 6 * 1024, 110 "Atheros AR8151 v1.0 PCIe Gigabit Ethernet" }, 111 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151_V2, 6 * 1024, 112 "Atheros AR8151 v2.0 PCIe Gigabit Ethernet" }, 113 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B, 6 * 1024, 114 "Atheros AR8152 v1.1 PCIe Fast Ethernet" }, 115 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B2, 6 * 1024, 116 "Atheros AR8152 v2.0 PCIe Fast Ethernet" }, 117 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8161, 9 * 1024, 118 "Atheros AR8161 PCIe Gigabit Ethernet" }, 119 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8162, 9 * 1024, 120 "Atheros AR8162 PCIe Fast Ethernet" }, 121 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8171, 9 * 1024, 122 "Atheros AR8171 PCIe Gigabit Ethernet" }, 123 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8172, 9 * 1024, 124 "Atheros AR8172 PCIe Fast Ethernet" }, 125 { VENDORID_ATHEROS, DEVICEID_ATHEROS_E2200, 9 * 1024, 126 "Killer E2200 Gigabit Ethernet" }, 127 { VENDORID_ATHEROS, DEVICEID_ATHEROS_E2400, 9 * 1024, 128 "Killer E2400 Gigabit Ethernet" }, 129 { VENDORID_ATHEROS, DEVICEID_ATHEROS_E2500, 9 * 1024, 130 "Killer E2500 Gigabit Ethernet" }, 131 { 0, 0, 0, NULL} 132 }; 133 134 static int alc_attach(device_t); 135 static int alc_probe(device_t); 136 static int alc_detach(device_t); 137 static int alc_shutdown(device_t); 138 static int alc_suspend(device_t); 139 static int alc_resume(device_t); 140 static int alc_miibus_readreg(device_t, int, int); 141 static void alc_miibus_statchg(device_t); 142 static int alc_miibus_writereg(device_t, int, int, int); 143 static uint32_t 
alc_miidbg_readreg(struct alc_softc *, int); 144 static uint32_t alc_miidbg_writereg(struct alc_softc *, int, int); 145 static uint32_t alc_miiext_readreg(struct alc_softc *, int, int); 146 static uint32_t alc_miiext_writereg(struct alc_softc *, int, int, int); 147 static void alc_init(void *); 148 static void alc_start(struct ifnet *, struct ifaltq_subque *); 149 static void alc_watchdog(struct alc_softc *); 150 static int alc_mediachange(struct ifnet *); 151 static int alc_mediachange_locked(struct alc_softc *); 152 static void alc_mediastatus(struct ifnet *, struct ifmediareq *); 153 static int alc_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 154 155 static void alc_aspm(struct alc_softc *, int, int); 156 static void alc_aspm_813x(struct alc_softc *, int); 157 static void alc_aspm_816x(struct alc_softc *, int); 158 #ifdef foo 159 static int alc_check_boundary(struct alc_softc *); 160 #endif 161 static void alc_config_msi(struct alc_softc *); 162 static void alc_disable_l0s_l1(struct alc_softc *); 163 static int alc_dma_alloc(struct alc_softc *); 164 static void alc_dma_free(struct alc_softc *); 165 static void alc_dmamap_cb(void *, bus_dma_segment_t *, int, int); 166 static void alc_dsp_fixup(struct alc_softc *, int); 167 static int alc_encap(struct alc_softc *, struct mbuf **); 168 static struct alc_ident *alc_find_ident(device_t); 169 static void alc_get_macaddr(struct alc_softc *); 170 static void alc_get_macaddr_813x(struct alc_softc *); 171 static void alc_get_macaddr_816x(struct alc_softc *); 172 static void alc_get_macaddr_par(struct alc_softc *); 173 static void alc_init_cmb(struct alc_softc *); 174 static void alc_init_rr_ring(struct alc_softc *); 175 static int alc_init_rx_ring(struct alc_softc *); 176 static void alc_init_smb(struct alc_softc *); 177 static void alc_init_tx_ring(struct alc_softc *); 178 static void alc_intr(void *); 179 static void alc_mac_config(struct alc_softc *); 180 static uint32_t alc_mii_readreg_813x(struct alc_softc 
*, int, int); 181 static uint32_t alc_mii_readreg_816x(struct alc_softc *, int, int); 182 static uint32_t alc_mii_writereg_813x(struct alc_softc *, int, int, int); 183 static uint32_t alc_mii_writereg_816x(struct alc_softc *, int, int, int); 184 static int alc_newbuf(struct alc_softc *, struct alc_rxdesc *, boolean_t); 185 static void alc_osc_reset(struct alc_softc *); 186 static void alc_phy_down(struct alc_softc *); 187 static void alc_phy_reset(struct alc_softc *); 188 static void alc_phy_reset_813x(struct alc_softc *); 189 static void alc_phy_reset_816x(struct alc_softc *); 190 static void alc_reset(struct alc_softc *); 191 static void alc_rxeof(struct alc_softc *, struct rx_rdesc *); 192 static int alc_rxintr(struct alc_softc *); 193 static void alc_rxfilter(struct alc_softc *); 194 static void alc_rxvlan(struct alc_softc *); 195 #if 0 196 static void alc_setlinkspeed(struct alc_softc *); 197 /* XXX: WOL */ 198 static void alc_setwol(struct alc_softc *); 199 static void alc_setwol_813x(struct alc_softc *); 200 static void alc_setwol_816x(struct alc_softc *); 201 #endif 202 static void alc_start_queue(struct alc_softc *); 203 static void alc_stats_clear(struct alc_softc *); 204 static void alc_stats_update(struct alc_softc *); 205 static void alc_stop(struct alc_softc *); 206 static void alc_stop_mac(struct alc_softc *); 207 static void alc_stop_queue(struct alc_softc *); 208 static void alc_sysctl_node(struct alc_softc *); 209 static void alc_tick(void *); 210 static void alc_txeof(struct alc_softc *); 211 static int sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS); 212 static int sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS); 213 214 static device_method_t alc_methods[] = { 215 /* Device interface. 
*/
	DEVMETHOD(device_probe,		alc_probe),
	DEVMETHOD(device_attach,	alc_attach),
	DEVMETHOD(device_detach,	alc_detach),
	DEVMETHOD(device_shutdown,	alc_shutdown),
	DEVMETHOD(device_suspend,	alc_suspend),
	DEVMETHOD(device_resume,	alc_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	alc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	alc_miibus_writereg),
	DEVMETHOD(miibus_statchg,	alc_miibus_statchg),

	{ NULL, NULL }
};

static DEFINE_CLASS_0(alc, alc_driver, alc_methods, sizeof(struct alc_softc));
static devclass_t alc_devclass;

DECLARE_DUMMY_MODULE(if_alc);
DRIVER_MODULE(if_alc, pci, alc_driver, alc_devclass, NULL, NULL);
DRIVER_MODULE(miibus, alc, miibus_driver, miibus_devclass, NULL, NULL);

/* DMA burst sizes in bytes, indexed by the value programmed in DMA_CFG. */
static const uint32_t alc_dma_burst[] =
    { 128, 256, 512, 1024, 2048, 4096, 0, 0 };

/*
 * miibus read method: dispatch to the AR816x- or AR813x-family MDIO
 * access routine based on the controller family flag.
 */
static int
alc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct alc_softc *sc;
	int v;

	sc = device_get_softc(dev);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		v = alc_mii_readreg_816x(sc, phy, reg);
	else
		v = alc_mii_readreg_813x(sc, phy, reg);
	return (v);
}

/*
 * Read a PHY register on AR813x-family controllers via the MDIO
 * mailbox register, polling until the controller clears the busy bits.
 * Returns 0 on timeout (indistinguishable from a register value of 0).
 */
static uint32_t
alc_mii_readreg_813x(struct alc_softc *sc, int phy, int reg)
{
	uint32_t v;
	int i;

	/*
	 * For AR8132 fast ethernet controller, do not report 1000baseT
	 * capability to mii(4). Even though AR8132 uses the same
	 * model/revision number of F1 gigabit PHY, the PHY has no
	 * ability to establish 1000baseT link.
	 */
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 &&
	    reg == MII_EXTSR)
		return (0);

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

/*
 * Read a PHY register on AR816x-family controllers.  The MDIO clock
 * divisor selection depends on whether a link is currently up.
 */
static uint32_t
alc_mii_readreg_816x(struct alc_softc *sc, int phy, int reg)
{
	uint32_t clk, v;
	int i;

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | clk | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

/*
 * miibus write method: dispatch to the family-specific MDIO writer.
 */
static int
alc_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct alc_softc *sc;
	int v;

	sc = device_get_softc(dev);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		v = alc_mii_writereg_816x(sc, phy, reg, val);
	else
		v = alc_mii_writereg_813x(sc, phy, reg, val);
	return (v);
}

/*
 * Write a PHY register on AR813x-family controllers, polling for
 * completion.  Always returns 0; a timeout is only logged.
 */
static uint32_t
alc_mii_writereg_813x(struct alc_softc *sc, int phy, int reg, int val)
{
	uint32_t v;
	int i;

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);

	return (0);
}

/*
 * Write a PHY register on AR816x-family controllers; clock divisor
 * selection mirrors alc_mii_readreg_816x().  Always returns 0.
 */
static uint32_t
alc_mii_writereg_816x(struct alc_softc *sc, int phy, int reg, int val)
{
	uint32_t clk, v;
	int i;

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) | MDIO_REG_ADDR(reg) |
	    MDIO_SUP_PREAMBLE | clk);
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);

	return (0);
}

/*
 * miibus link-state change callback: recompute ALC_FLAG_LINK from the
 * resolved media, stop the MACs, and (if link is up) restart the queues
 * and reprogram the MAC for the new speed/duplex.
 */
static void
alc_miibus_statchg(device_t dev)
{
	struct alc_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t reg;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->alc_miibus);
	ifp = sc->alc_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->alc_flags &= ~ALC_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->alc_flags |= ALC_FLAG_LINK;
			break;
		case IFM_1000_T:
			/* Gigabit link is only valid on non-fast-ethernet parts. */
			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
				sc->alc_flags |= ALC_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		alc_start_queue(sc);
		alc_mac_config(sc);
		/* Re-enable Tx/Rx MACs.
*/
		reg = CSR_READ_4(sc, ALC_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
	alc_aspm(sc, 0, IFM_SUBTYPE(mii->mii_media_active));
	alc_dsp_fixup(sc, IFM_SUBTYPE(mii->mii_media_active));
}

/*
 * Read a PHY debug-port register: latch the register number through
 * ALC_MII_DBG_ADDR, then read the value back through ALC_MII_DBG_DATA.
 */
static uint32_t
alc_miidbg_readreg(struct alc_softc *sc, int reg)
{

	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    reg);
	return (alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA));
}

/*
 * Write a PHY debug-port register through the same two-step
 * address/data indirection as alc_miidbg_readreg().
 */
static uint32_t
alc_miidbg_writereg(struct alc_softc *sc, int reg, int val)
{

	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    reg);
	return (alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, val));
}

/*
 * Read an extended (Clause-45 style device/register addressed) PHY
 * register via ALC_EXT_MDIO + ALC_MDIO.  Returns 0 on timeout.
 */
static uint32_t
alc_miiext_readreg(struct alc_softc *sc, int devaddr, int reg)
{
	uint32_t clk, v;
	int i;

	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
	    EXT_MDIO_DEVADDR(devaddr));
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->alc_dev, "phy ext read timeout : %d, %d\n",
		    devaddr, reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

/*
 * Write an extended PHY register; companion to alc_miiext_readreg().
 * Always returns 0, a timeout is only logged.
 */
static uint32_t
alc_miiext_writereg(struct alc_softc *sc, int devaddr, int reg, int val)
{
	uint32_t clk, v;
	int i;

	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
	    EXT_MDIO_DEVADDR(devaddr));
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    ((val & MDIO_DATA_MASK) <<
MDIO_DATA_SHIFT) | 489 MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT); 490 for (i = ALC_PHY_TIMEOUT; i > 0; i--) { 491 DELAY(5); 492 v = CSR_READ_4(sc, ALC_MDIO); 493 if ((v & MDIO_OP_BUSY) == 0) 494 break; 495 } 496 497 if (i == 0) 498 device_printf(sc->alc_dev, "phy ext write timeout : %d, %d\n", 499 devaddr, reg); 500 501 return (0); 502 } 503 504 static void 505 alc_dsp_fixup(struct alc_softc *sc, int media) 506 { 507 uint16_t agc, len, val; 508 509 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 510 return; 511 if (AR816X_REV(sc->alc_rev) >= AR816X_REV_C0) 512 return; 513 514 /* 515 * Vendor PHY magic. 516 * 1000BT/AZ, wrong cable length 517 */ 518 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { 519 len = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL6); 520 len = (len >> EXT_CLDCTL6_CAB_LEN_SHIFT) & 521 EXT_CLDCTL6_CAB_LEN_MASK; 522 agc = alc_miidbg_readreg(sc, MII_DBG_AGC); 523 agc = (agc >> DBG_AGC_2_VGA_SHIFT) & DBG_AGC_2_VGA_MASK; 524 if ((media == IFM_1000_T && len > EXT_CLDCTL6_CAB_LEN_SHORT1G && 525 agc > DBG_AGC_LONG1G_LIMT) || 526 (media == IFM_100_TX && len > DBG_AGC_LONG100M_LIMT && 527 agc > DBG_AGC_LONG1G_LIMT)) { 528 alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT, 529 DBG_AZ_ANADECT_LONG); 530 val = alc_miiext_readreg(sc, MII_EXT_ANEG, 531 MII_EXT_ANEG_AFE); 532 val |= ANEG_AFEE_10BT_100M_TH; 533 alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, 534 val); 535 } else { 536 alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT, 537 DBG_AZ_ANADECT_DEFAULT); 538 val = alc_miiext_readreg(sc, MII_EXT_ANEG, 539 MII_EXT_ANEG_AFE); 540 val &= ~ANEG_AFEE_10BT_100M_TH; 541 alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, 542 val); 543 } 544 if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 && 545 AR816X_REV(sc->alc_rev) == AR816X_REV_B0) { 546 if (media == IFM_1000_T) { 547 /* 548 * Giga link threshold, raise the tolerance of 549 * noise 50%. 
550 */ 551 val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB); 552 val &= ~DBG_MSE20DB_TH_MASK; 553 val |= (DBG_MSE20DB_TH_HI << 554 DBG_MSE20DB_TH_SHIFT); 555 alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val); 556 } else if (media == IFM_100_TX) 557 alc_miidbg_writereg(sc, MII_DBG_MSE16DB, 558 DBG_MSE16DB_UP); 559 } 560 } else { 561 val = alc_miiext_readreg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE); 562 val &= ~ANEG_AFEE_10BT_100M_TH; 563 alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, val); 564 if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 && 565 AR816X_REV(sc->alc_rev) == AR816X_REV_B0) { 566 alc_miidbg_writereg(sc, MII_DBG_MSE16DB, 567 DBG_MSE16DB_DOWN); 568 val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB); 569 val &= ~DBG_MSE20DB_TH_MASK; 570 val |= (DBG_MSE20DB_TH_DEFAULT << DBG_MSE20DB_TH_SHIFT); 571 alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val); 572 } 573 } 574 } 575 576 static void 577 alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 578 { 579 struct alc_softc *sc; 580 struct mii_data *mii; 581 582 sc = ifp->if_softc; 583 if ((ifp->if_flags & IFF_UP) == 0) { 584 return; 585 } 586 mii = device_get_softc(sc->alc_miibus); 587 588 mii_pollstat(mii); 589 ifmr->ifm_status = mii->mii_media_status; 590 ifmr->ifm_active = mii->mii_media_active; 591 } 592 593 static int 594 alc_mediachange(struct ifnet *ifp) 595 { 596 struct alc_softc *sc; 597 int error; 598 599 sc = ifp->if_softc; 600 ALC_LOCK(sc); 601 error = alc_mediachange_locked(sc); 602 ALC_UNLOCK(sc); 603 604 return (error); 605 } 606 607 static int 608 alc_mediachange_locked(struct alc_softc *sc) 609 { 610 struct mii_data *mii; 611 struct mii_softc *miisc; 612 int error; 613 614 ALC_LOCK_ASSERT(sc); 615 616 mii = device_get_softc(sc->alc_miibus); 617 if (mii->mii_instance != 0) { 618 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 619 mii_phy_reset(miisc); 620 } 621 error = mii_mediachg(mii); 622 623 return (error); 624 } 625 626 static struct alc_ident * 627 alc_find_ident(device_t dev) 628 { 629 struct 
alc_ident *ident;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (ident = alc_ident_table; ident->name != NULL; ident++) {
		if (vendor == ident->vendorid && devid == ident->deviceid)
			return (ident);
	}

	return (NULL);
}

/*
 * Device probe method: match against the ident table and set the
 * device description on success.
 */
static int
alc_probe(device_t dev)
{
	struct alc_ident *ident;

	ident = alc_find_ident(dev);
	if (ident != NULL) {
		device_set_desc(dev, ident->name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

/*
 * Load the station address into sc->alc_eaddr, using the
 * family-specific reload sequence.
 */
static void
alc_get_macaddr(struct alc_softc *sc)
{

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_get_macaddr_816x(sc);
	else
		alc_get_macaddr_813x(sc);
}

/*
 * AR813x: if an EEPROM is present, trigger a TWSI reload of the EEPROM
 * configuration (which programs the controller's ethernet address),
 * bracketed by device-specific clock/PHY enable-disable magic, then
 * read the result from the PAR registers.
 */
static void
alc_get_macaddr_813x(struct alc_softc *sc)
{
	uint32_t opt;
	uint16_t val;
	int eeprom, i;

	eeprom = 0;
	opt = CSR_READ_4(sc, ALC_OPT_CFG);
	if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
	    (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
		/*
		 * EEPROM found, let TWSI reload EEPROM configuration.
		 * This will set ethernet address of controller.
		 */
		eeprom++;
		switch (sc->alc_ident->deviceid) {
		case DEVICEID_ATHEROS_AR8131:
		case DEVICEID_ATHEROS_AR8132:
			/* Enable the OTP clock for the duration of the reload. */
			if ((opt & OPT_CFG_CLK_ENB) == 0) {
				opt |= OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case DEVICEID_ATHEROS_AR8151:
		case DEVICEID_ATHEROS_AR8151_V2:
		case DEVICEID_ATHEROS_AR8152_B:
		case DEVICEID_ATHEROS_AR8152_B2:
			/* Vendor PHY debug-register magic (undocumented). */
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFF7F);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0008);
			DELAY(20);
			break;
		}

		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		CSR_READ_4(sc, ALC_WOL_CFG);

		/* Kick off the TWSI software load and poll for completion. */
		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
		    TWSI_CFG_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
			    TWSI_CFG_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			device_printf(sc->alc_dev,
			    "reloading EEPROM timeout!\n");
	} else {
		if (bootverbose)
			device_printf(sc->alc_dev, "EEPROM not found!\n");
	}
	if (eeprom != 0) {
		/* Undo the device-specific magic applied above. */
		switch (sc->alc_ident->deviceid) {
		case DEVICEID_ATHEROS_AR8131:
		case DEVICEID_ATHEROS_AR8132:
			if ((opt & OPT_CFG_CLK_ENB) != 0) {
				opt &= ~OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case DEVICEID_ATHEROS_AR8151:
		case DEVICEID_ATHEROS_AR8151_V2:
		case DEVICEID_ATHEROS_AR8152_B:
		case DEVICEID_ATHEROS_AR8152_B2:
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0080);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFFF7);
			DELAY(20);
			break;
		}
	}

	alc_get_macaddr_par(sc);
}

/*
 * AR816x: try a TWSI station-address reload first, falling back to an
 * EEPROM/FLASH reload, then read the address from the PAR registers.
 */
static void
alc_get_macaddr_816x(struct alc_softc *sc)
{
	uint32_t reg;
	int i, reloaded;

	reloaded = 0;
	/* Try to reload station address via TWSI. */
	for (i = 100; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_SLD);
		if ((reg & (SLD_PROGRESS | SLD_START)) == 0)
			break;
		DELAY(1000);
	}
	if (i != 0) {
		CSR_WRITE_4(sc, ALC_SLD, reg | SLD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, ALC_SLD);
			if ((reg & SLD_START) == 0)
				break;
		}
		if (i != 0)
			reloaded++;
		else if (bootverbose)
			device_printf(sc->alc_dev,
			    "reloading station address via TWSI timed out!\n");
	}

	/* Try to reload station address from EEPROM or FLASH. */
	if (reloaded == 0) {
		reg = CSR_READ_4(sc, ALC_EEPROM_LD);
		if ((reg & (EEPROM_LD_EEPROM_EXIST |
		    EEPROM_LD_FLASH_EXIST)) != 0) {
			for (i = 100; i > 0; i--) {
				reg = CSR_READ_4(sc, ALC_EEPROM_LD);
				if ((reg & (EEPROM_LD_PROGRESS |
				    EEPROM_LD_START)) == 0)
					break;
				DELAY(1000);
			}
			if (i != 0) {
				CSR_WRITE_4(sc, ALC_EEPROM_LD, reg |
				    EEPROM_LD_START);
				for (i = 100; i > 0; i--) {
					DELAY(1000);
					reg = CSR_READ_4(sc, ALC_EEPROM_LD);
					if ((reg & EEPROM_LD_START) == 0)
						break;
				}
			} else if (bootverbose)
				device_printf(sc->alc_dev,
				    "reloading EEPROM/FLASH timed out!\n");
		}
	}

	alc_get_macaddr_par(sc);
}

/*
 * Extract the 6-byte station address from the PAR0/PAR1 registers
 * (stored big-endian across the two 32-bit registers).
 */
static void
alc_get_macaddr_par(struct alc_softc *sc)
{
	uint32_t ea[2];

	ea[0] = CSR_READ_4(sc, ALC_PAR0);
	ea[1] = CSR_READ_4(sc, ALC_PAR1);
	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

/*
 * Disable PCIe ASPM L0s/L1 entry on AR813x-family parts via the PM
 * configuration register.  No-op on AR816x.
 */
static void
alc_disable_l0s_l1(struct alc_softc *sc)
{
	uint32_t pmcfg;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* Another magic from vendor.
*/
		pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
		pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
		    PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
		    PM_CFG_MAC_ASPM_CHK | PM_CFG_SERDES_PD_EX_L1);
		pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB | PM_CFG_SERDES_L1_ENB;
		CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
	}
}

/*
 * Reset the PHY using the family-specific sequence.
 */
static void
alc_phy_reset(struct alc_softc *sc)
{

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_phy_reset_816x(sc);
	else
		alc_phy_reset_813x(sc);
}

/*
 * AR813x PHY reset: analog/external reset pulses followed by a long
 * sequence of vendor DSP programming through the PHY debug port, then
 * disable PHY hibernation.
 */
static void
alc_phy_reset_813x(struct alc_softc *sc)
{
	uint16_t data;

	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
	    GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	/* DSP fixup, Vendor magic. */
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x000A);
		data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xDFFF);
	}
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x003B);
		data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xFFF7);
		DELAY(20 * 1000);
	}
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0x929D);
	}
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0xB6DD);
	}

	/* Load DSP codes, vendor magic. */
	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
	    ANA_SERDES_EN_LCKDT;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	/*
	 * NOTE(review): the second mask below is
	 * "ANA_SHORT_CABLE_TH_100_SHIFT" where one would expect
	 * "ANA_SHORT_CABLE_TH_100_MASK" -- verify against the vendor
	 * reference code before changing; kept as-is here.
	 */
	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
	    ANA_LONG_CABLE_TH_100_MASK) |
	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
	    ANA_SHORT_CABLE_TH_100_SHIFT) |
	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	/*
	 * NOTE(review): "(7 & ANA_MANUL_SWICH_ON_SHIFT)" uses "&" where
	 * "<<" would be expected -- confirm against vendor code before
	 * touching; kept as-is here.
	 */
	data = ((7 & ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
	    ANA_OEN_125M;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);
	DELAY(1000);

	/* Disable hibernation. */
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    0x0029);
	data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA);
	data &= ~0x8000;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    data);

	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    0x000B);
	data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA);
	data &= ~0x8000;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    data);
}

/*
 * AR816x PHY reset: GPHY_CFG reset pulse, vendor PHY magic through the
 * debug/extended MDIO ports, EEE disable and power-saving programming,
 * plus workarounds gated by ALC_FLAG_LINK_WAR.
 */
static void
alc_phy_reset_816x(struct alc_softc *sc)
{
	uint32_t val;

	val = CSR_READ_4(sc, ALC_GPHY_CFG);
	val &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
	    GPHY_CFG_GATE_25M_ENB | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PHY_PLL_ON |
	    GPHY_CFG_PWDOWN_HW | GPHY_CFG_100AB_ENB);
	val |= GPHY_CFG_SEL_ANA_RESET;
#ifdef notyet
	val |= GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN | GPHY_CFG_SEL_ANA_RESET;
#else
	/* Disable PHY hibernation. */
	val &= ~(GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN);
#endif
	CSR_WRITE_4(sc, ALC_GPHY_CFG, val);
	DELAY(10);
	CSR_WRITE_4(sc, ALC_GPHY_CFG, val | GPHY_CFG_EXT_RESET);
	DELAY(800);

	/* Vendor PHY magic. */
#ifdef notyet
	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS, DBG_LEGCYPS_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_SYSMODCTL, DBG_SYSMODCTL_DEFAULT);
	alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_VDRVBIAS,
	    EXT_VDRVBIAS_DEFAULT);
#else
	/* Disable PHY hibernation. */
	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS,
	    DBG_LEGCYPS_DEFAULT & ~DBG_LEGCYPS_ENB);
	alc_miidbg_writereg(sc, MII_DBG_HIBNEG,
	    DBG_HIBNEG_DEFAULT & ~(DBG_HIBNEG_PSHIB_EN | DBG_HIBNEG_HIB_PULSE));
	alc_miidbg_writereg(sc, MII_DBG_GREENCFG, DBG_GREENCFG_DEFAULT);
#endif

	/* XXX Disable EEE. */
	val = CSR_READ_4(sc, ALC_LPI_CTL);
	val &= ~LPI_CTL_ENB;
	CSR_WRITE_4(sc, ALC_LPI_CTL, val);
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_LOCAL_EEEADV, 0);

	/* PHY power saving. */
	alc_miidbg_writereg(sc, MII_DBG_TST10BTCFG, DBG_TST10BTCFG_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_SRDSYSMOD, DBG_SRDSYSMOD_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_TST100BTCFG, DBG_TST100BTCFG_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_ANACTL, DBG_ANACTL_DEFAULT);
	val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
	val &= ~DBG_GREENCFG2_GATE_DFSE_EN;
	alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);

	/* RTL8139C, 120m issue. */
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_NLP78,
	    ANEG_NLP78_120M_DEFAULT);
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10,
	    ANEG_S3DIG10_DEFAULT);

	if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0) {
		/* Turn off half amplitude. */
		val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3);
		val |= EXT_CLDCTL3_BP_CABLE1TH_DET_GT;
		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3, val);
		/* Turn off Green feature. */
		val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
		val |= DBG_GREENCFG2_BP_GREEN;
		alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);
		/* Turn off half bias. */
		val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5);
		val |= EXT_CLDCTL5_BP_VD_HLFBIAS;
		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5, val);
	}
}

/*
 * Put the PHY into its lowest-power state for the device at hand;
 * exact mechanism (GPHY_CFG power-down vs. BMCR isolate/power-down)
 * is device-specific.
 */
static void
alc_phy_down(struct alc_softc *sc)
{
	uint32_t gphy;

	switch (sc->alc_ident->deviceid) {
	case DEVICEID_ATHEROS_AR8161:
	case DEVICEID_ATHEROS_E2200:
	case DEVICEID_ATHEROS_E2400:
	case DEVICEID_ATHEROS_E2500:
	case DEVICEID_ATHEROS_AR8162:
	case DEVICEID_ATHEROS_AR8171:
	case DEVICEID_ATHEROS_AR8172:
		gphy = CSR_READ_4(sc, ALC_GPHY_CFG);
		gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
		    GPHY_CFG_100AB_ENB | GPHY_CFG_PHY_PLL_ON);
		gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
		    GPHY_CFG_SEL_ANA_RESET;
		gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW;
		CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy);
		break;
	case DEVICEID_ATHEROS_AR8151:
	case DEVICEID_ATHEROS_AR8151_V2:
	case DEVICEID_ATHEROS_AR8152_B:
	case DEVICEID_ATHEROS_AR8152_B2:
		/*
		 * GPHY power down caused more problems on AR8151 v2.0.
		 * When driver is reloaded after GPHY power down,
		 * accesses to PHY/MAC registers hung the system. Only
		 * cold boot recovered from it. I'm not sure whether
		 * AR8151 v1.0 also requires this one though. I don't
		 * have AR8151 v1.0 controller in hand.
		 * The only option left is to isolate the PHY and
		 * initiates power down the PHY which in turn saves
		 * more power when driver is unloaded.
		 */
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    MII_BMCR, BMCR_ISO | BMCR_PDOWN);
		break;
	default:
		/* Force PHY down.
*/ 1096 CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET | 1097 GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ | 1098 GPHY_CFG_PWDOWN_HW); 1099 DELAY(1000); 1100 break; 1101 } 1102 } 1103 1104 static void 1105 alc_aspm(struct alc_softc *sc, int init, int media) 1106 { 1107 1108 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 1109 alc_aspm_816x(sc, init); 1110 else 1111 alc_aspm_813x(sc, media); 1112 } 1113 1114 static void 1115 alc_aspm_813x(struct alc_softc *sc, int media) 1116 { 1117 uint32_t pmcfg; 1118 uint16_t linkcfg; 1119 1120 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) 1121 return; 1122 1123 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 1124 if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) == 1125 (ALC_FLAG_APS | ALC_FLAG_PCIE)) 1126 linkcfg = CSR_READ_2(sc, sc->alc_expcap + 1127 PCIR_EXPRESS_LINK_CTL); 1128 else 1129 linkcfg = 0; 1130 pmcfg &= ~PM_CFG_SERDES_PD_EX_L1; 1131 pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK); 1132 pmcfg |= PM_CFG_MAC_ASPM_CHK; 1133 pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT); 1134 pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB); 1135 1136 if ((sc->alc_flags & ALC_FLAG_APS) != 0) { 1137 /* Disable extended sync except AR8152 B v1.0 */ 1138 linkcfg &= ~PCIEM_LINK_CTL_EXTENDED_SYNC; 1139 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B && 1140 sc->alc_rev == ATHEROS_AR8152_B_V10) 1141 linkcfg |= PCIEM_LINK_CTL_EXTENDED_SYNC; 1142 CSR_WRITE_2(sc, sc->alc_expcap + PCIER_LINK_CTL, 1143 linkcfg); 1144 pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB | 1145 PM_CFG_HOTRST); 1146 pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT << 1147 PM_CFG_L1_ENTRY_TIMER_SHIFT); 1148 pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK; 1149 pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT << 1150 PM_CFG_PM_REQ_TIMER_SHIFT); 1151 pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV; 1152 } 1153 1154 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { 1155 if ((sc->alc_flags & ALC_FLAG_L0S) != 0) 1156 pmcfg |= PM_CFG_ASPM_L0S_ENB; 1157 if 
((sc->alc_flags & ALC_FLAG_L1S) != 0) 1158 pmcfg |= PM_CFG_ASPM_L1_ENB; 1159 if ((sc->alc_flags & ALC_FLAG_APS) != 0) { 1160 if (sc->alc_ident->deviceid == 1161 DEVICEID_ATHEROS_AR8152_B) 1162 pmcfg &= ~PM_CFG_ASPM_L0S_ENB; 1163 pmcfg &= ~(PM_CFG_SERDES_L1_ENB | 1164 PM_CFG_SERDES_PLL_L1_ENB | 1165 PM_CFG_SERDES_BUDS_RX_L1_ENB); 1166 pmcfg |= PM_CFG_CLK_SWH_L1; 1167 if (media == IFM_100_TX || media == IFM_1000_T) { 1168 pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK; 1169 switch (sc->alc_ident->deviceid) { 1170 case DEVICEID_ATHEROS_AR8152_B: 1171 pmcfg |= (7 << 1172 PM_CFG_L1_ENTRY_TIMER_SHIFT); 1173 break; 1174 case DEVICEID_ATHEROS_AR8152_B2: 1175 case DEVICEID_ATHEROS_AR8151_V2: 1176 pmcfg |= (4 << 1177 PM_CFG_L1_ENTRY_TIMER_SHIFT); 1178 break; 1179 default: 1180 pmcfg |= (15 << 1181 PM_CFG_L1_ENTRY_TIMER_SHIFT); 1182 break; 1183 } 1184 } 1185 } else { 1186 pmcfg |= PM_CFG_SERDES_L1_ENB | 1187 PM_CFG_SERDES_PLL_L1_ENB | 1188 PM_CFG_SERDES_BUDS_RX_L1_ENB; 1189 pmcfg &= ~(PM_CFG_CLK_SWH_L1 | 1190 PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB); 1191 } 1192 } else { 1193 pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB | 1194 PM_CFG_SERDES_PLL_L1_ENB); 1195 pmcfg |= PM_CFG_CLK_SWH_L1; 1196 if ((sc->alc_flags & ALC_FLAG_L1S) != 0) 1197 pmcfg |= PM_CFG_ASPM_L1_ENB; 1198 } 1199 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 1200 } 1201 1202 static void 1203 alc_aspm_816x(struct alc_softc *sc, int init) 1204 { 1205 uint32_t pmcfg; 1206 1207 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 1208 pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_816X_MASK; 1209 pmcfg |= PM_CFG_L1_ENTRY_TIMER_816X_DEFAULT; 1210 pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK; 1211 pmcfg |= PM_CFG_PM_REQ_TIMER_816X_DEFAULT; 1212 pmcfg &= ~PM_CFG_LCKDET_TIMER_MASK; 1213 pmcfg |= PM_CFG_LCKDET_TIMER_DEFAULT; 1214 pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_CLK_SWH_L1 | PM_CFG_PCIE_RECV; 1215 pmcfg &= ~(PM_CFG_RX_L1_AFTER_L0S | PM_CFG_TX_L1_AFTER_L0S | 1216 PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB | 1217 PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB 
| 1218 PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SA_DLY_ENB | 1219 PM_CFG_MAC_ASPM_CHK | PM_CFG_HOTRST); 1220 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 && 1221 (sc->alc_rev & 0x01) != 0) 1222 pmcfg |= PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB; 1223 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { 1224 /* Link up, enable both L0s, L1s. */ 1225 pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | 1226 PM_CFG_MAC_ASPM_CHK; 1227 } else { 1228 if (init != 0) 1229 pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | 1230 PM_CFG_MAC_ASPM_CHK; 1231 else if ((sc->alc_ifp->if_flags & IFF_RUNNING) != 0) 1232 pmcfg |= PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK; 1233 } 1234 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 1235 } 1236 1237 static void 1238 alc_init_pcie(struct alc_softc *sc) 1239 { 1240 const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" }; 1241 uint32_t cap, ctl, val; 1242 int state; 1243 1244 /* Clear data link and flow-control protocol error. */ 1245 val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV); 1246 val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP); 1247 CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val); 1248 1249 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 1250 CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG, 1251 CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB); 1252 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, 1253 CSR_READ_4(sc, ALC_PCIE_PHYMISC) | 1254 PCIE_PHYMISC_FORCE_RCV_DET); 1255 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B && 1256 sc->alc_rev == ATHEROS_AR8152_B_V10) { 1257 val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2); 1258 val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK | 1259 PCIE_PHYMISC2_SERDES_TH_MASK); 1260 val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT; 1261 val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT; 1262 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val); 1263 } 1264 /* Disable ASPM L0S and L1. 
*/ 1265 cap = CSR_READ_2(sc, sc->alc_expcap + PCIER_LINK_CAP); 1266 if ((cap & PCIEM_LINK_CAP_ASPM) != 0) { 1267 ctl = CSR_READ_2(sc, sc->alc_expcap + PCIER_LINK_CTL); 1268 if ((ctl & PCIEM_LINK_CTL_RCB) != 0) 1269 sc->alc_rcb = DMA_CFG_RCB_128; 1270 if (bootverbose) 1271 device_printf(sc->alc_dev, "RCB %u bytes\n", 1272 sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128); 1273 state = ctl & PCIEM_LINK_CTL_ASPMC; 1274 if (state & PCIEM_LINK_CTL_ASPMC_L0S) 1275 sc->alc_flags |= ALC_FLAG_L0S; 1276 if (state & PCIEM_LINK_CTL_ASPMC_L1) 1277 sc->alc_flags |= ALC_FLAG_L1S; 1278 if (bootverbose) 1279 device_printf(sc->alc_dev, "ASPM %s %s\n", 1280 aspm_state[state], 1281 state == 0 ? "disabled" : "enabled"); 1282 alc_disable_l0s_l1(sc); 1283 } else { 1284 if (bootverbose) 1285 device_printf(sc->alc_dev, 1286 "no ASPM support\n"); 1287 } 1288 } else { 1289 val = CSR_READ_4(sc, ALC_PDLL_TRNS1); 1290 val &= ~PDLL_TRNS1_D3PLLOFF_ENB; 1291 CSR_WRITE_4(sc, ALC_PDLL_TRNS1, val); 1292 val = CSR_READ_4(sc, ALC_MASTER_CFG); 1293 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 && 1294 (sc->alc_rev & 0x01) != 0) { 1295 if ((val & MASTER_WAKEN_25M) == 0 || 1296 (val & MASTER_CLK_SEL_DIS) == 0) { 1297 val |= MASTER_WAKEN_25M | MASTER_CLK_SEL_DIS; 1298 CSR_WRITE_4(sc, ALC_MASTER_CFG, val); 1299 } 1300 } else { 1301 if ((val & MASTER_WAKEN_25M) == 0 || 1302 (val & MASTER_CLK_SEL_DIS) != 0) { 1303 val |= MASTER_WAKEN_25M; 1304 val &= ~MASTER_CLK_SEL_DIS; 1305 CSR_WRITE_4(sc, ALC_MASTER_CFG, val); 1306 } 1307 } 1308 } 1309 alc_aspm(sc, 1, IFM_UNKNOWN); 1310 } 1311 1312 static void 1313 alc_config_msi(struct alc_softc *sc) 1314 { 1315 uint32_t ctl, mod; 1316 1317 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 1318 /* 1319 * It seems interrupt moderation is controlled by 1320 * ALC_MSI_RETRANS_TIMER register if MSI/MSIX is active. 1321 * Driver uses RX interrupt moderation parameter to 1322 * program ALC_MSI_RETRANS_TIMER register. 
1323 */ 1324 ctl = CSR_READ_4(sc, ALC_MSI_RETRANS_TIMER); 1325 ctl &= ~MSI_RETRANS_TIMER_MASK; 1326 ctl &= ~MSI_RETRANS_MASK_SEL_LINE; 1327 mod = ALC_USECS(sc->alc_int_rx_mod); 1328 if (mod == 0) 1329 mod = 1; 1330 ctl |= mod; 1331 if (sc->alc_irq_type == PCI_INTR_TYPE_MSI) 1332 CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, ctl | 1333 MSI_RETRANS_MASK_SEL_LINE); 1334 else 1335 CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, 0); 1336 } 1337 } 1338 1339 static int 1340 alc_attach(device_t dev) 1341 { 1342 struct alc_softc *sc; 1343 struct ifnet *ifp; 1344 uint16_t burst; 1345 int base, error; 1346 u_int intr_flags; 1347 1348 error = 0; 1349 sc = device_get_softc(dev); 1350 sc->alc_dev = dev; 1351 sc->alc_rev = pci_get_revid(dev); 1352 1353 callout_init_mp(&sc->alc_tick_ch); 1354 sc->alc_ident = alc_find_ident(dev); 1355 1356 /* Enable bus mastering */ 1357 pci_enable_busmaster(dev); 1358 1359 /* Map the device. */ 1360 sc->alc_res_rid = PCIR_BAR(0); 1361 sc->alc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 1362 &sc->alc_res_rid, RF_ACTIVE); 1363 if (error != 0) { 1364 device_printf(dev, "cannot allocate memory resources.\n"); 1365 goto fail; 1366 } 1367 sc->alc_res_btag = rman_get_bustag(sc->alc_res); 1368 sc->alc_res_bhand = rman_get_bushandle(sc->alc_res); 1369 1370 /* Set PHY address. */ 1371 sc->alc_phyaddr = ALC_PHY_ADDR; 1372 1373 /* 1374 * One odd thing is AR8132 uses the same PHY hardware(F1 1375 * gigabit PHY) of AR8131. So atphy(4) of AR8132 reports 1376 * the PHY supports 1000Mbps but that's not true. The PHY 1377 * used in AR8132 can't establish gigabit link even if it 1378 * shows the same PHY model/revision number of AR8131. 
1379 */ 1380 switch (sc->alc_ident->deviceid) { 1381 case DEVICEID_ATHEROS_E2200: 1382 case DEVICEID_ATHEROS_E2400: 1383 case DEVICEID_ATHEROS_E2500: 1384 sc->alc_flags |= ALC_FLAG_E2X00; 1385 /* FALLTHROUGH */ 1386 case DEVICEID_ATHEROS_AR8161: 1387 if (pci_get_subvendor(dev) == VENDORID_ATHEROS && 1388 pci_get_subdevice(dev) == 0x0091 && sc->alc_rev == 0) 1389 sc->alc_flags |= ALC_FLAG_LINK_WAR; 1390 /* FALLTHROUGH */ 1391 case DEVICEID_ATHEROS_AR8171: 1392 sc->alc_flags |= ALC_FLAG_AR816X_FAMILY; 1393 break; 1394 case DEVICEID_ATHEROS_AR8162: 1395 case DEVICEID_ATHEROS_AR8172: 1396 sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_AR816X_FAMILY; 1397 break; 1398 case DEVICEID_ATHEROS_AR8152_B: 1399 case DEVICEID_ATHEROS_AR8152_B2: 1400 sc->alc_flags |= ALC_FLAG_APS; 1401 /* FALLTHROUGH */ 1402 case DEVICEID_ATHEROS_AR8132: 1403 sc->alc_flags |= ALC_FLAG_FASTETHER; 1404 break; 1405 case DEVICEID_ATHEROS_AR8151: 1406 case DEVICEID_ATHEROS_AR8151_V2: 1407 sc->alc_flags |= ALC_FLAG_APS; 1408 /* FALLTHROUGH */ 1409 default: 1410 break; 1411 } 1412 sc->alc_flags |= ALC_FLAG_JUMBO; 1413 1414 /* 1415 * It seems that AR813x/AR815x has silicon bug for SMB. In 1416 * addition, Atheros said that enabling SMB wouldn't improve 1417 * performance. However I think it's bad to access lots of 1418 * registers to extract MAC statistics. 1419 */ 1420 sc->alc_flags |= ALC_FLAG_SMB_BUG; 1421 1422 /* 1423 * Don't use Tx CMB. It is known to have silicon bug. 
1424 */ 1425 sc->alc_flags |= ALC_FLAG_CMB_BUG; 1426 sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >> 1427 MASTER_CHIP_REV_SHIFT; 1428 if (bootverbose) { 1429 device_printf(dev, "PCI device revision : 0x%04x\n", 1430 sc->alc_rev); 1431 device_printf(dev, "Chip id/revision : 0x%04x\n", 1432 sc->alc_chip_rev); 1433 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 1434 device_printf(dev, "AR816x revision : 0x%x\n", 1435 AR816X_REV(sc->alc_rev)); 1436 } 1437 device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n", 1438 CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8, 1439 CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8); 1440 1441 /* Initialize DMA parameters. */ 1442 sc->alc_dma_rd_burst = 0; 1443 sc->alc_dma_wr_burst = 0; 1444 sc->alc_rcb = DMA_CFG_RCB_64; 1445 if (pci_find_extcap(dev, PCIY_EXPRESS, &base) == 0) { 1446 sc->alc_flags |= ALC_FLAG_PCIE; 1447 sc->alc_expcap = base; 1448 burst = CSR_READ_2(sc, base + PCIER_DEVICE_CTL); 1449 sc->alc_dma_rd_burst = 1450 (burst & PCIEM_CTL_MAX_READ_REQUEST) >> 12; 1451 sc->alc_dma_wr_burst = (burst & PCIEM_CTL_MAX_PAYLOAD) >> 5; 1452 if (bootverbose) { 1453 device_printf(dev, "Read request size : %u bytes.\n", 1454 alc_dma_burst[sc->alc_dma_rd_burst]); 1455 device_printf(dev, "TLP payload size : %u bytes.\n", 1456 alc_dma_burst[sc->alc_dma_wr_burst]); 1457 } 1458 if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024) 1459 sc->alc_dma_rd_burst = 3; 1460 if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024) 1461 sc->alc_dma_wr_burst = 3; 1462 /* 1463 * Force maximum payload size to 128 bytes for E2200/E2400. 1464 * Otherwise it triggers DMA write error. 1465 */ 1466 if ((sc->alc_flags & ALC_FLAG_E2X00) != 0) 1467 sc->alc_dma_wr_burst = 0; 1468 alc_init_pcie(sc); 1469 } 1470 1471 /* Reset PHY. */ 1472 alc_phy_reset(sc); 1473 1474 /* Reset the ethernet controller. */ 1475 alc_stop_mac(sc); 1476 alc_reset(sc); 1477 1478 sc->alc_irq_type = pci_alloc_1intr(dev, alc_msi_enable, 1479 &sc->alc_irq_rid, &intr_flags); 1480 1481 /* Allocate IRQ resources. 
*/ 1482 sc->alc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 1483 &sc->alc_irq_rid, intr_flags); 1484 if (error != 0) { 1485 device_printf(dev, "cannot allocate IRQ resources.\n"); 1486 goto fail; 1487 } 1488 1489 /* Create device sysctl node. */ 1490 alc_sysctl_node(sc); 1491 1492 if ((error = alc_dma_alloc(sc)) != 0) 1493 goto fail; 1494 1495 /* Load station address. */ 1496 alc_get_macaddr(sc); 1497 1498 ifp = sc->alc_ifp = &sc->arpcom.ac_if; 1499 ifp->if_softc = sc; 1500 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1501 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1502 ifp->if_ioctl = alc_ioctl; 1503 ifp->if_start = alc_start; 1504 ifp->if_init = alc_init; 1505 ifq_set_maxlen(&ifp->if_snd, ALC_TX_RING_CNT - 1); 1506 ifq_set_ready(&ifp->if_snd); 1507 ifp->if_capabilities = IFCAP_TXCSUM; 1508 ifp->if_hwassist = ALC_CSUM_FEATURES; 1509 #if 0 1510 /* XXX: WOL */ 1511 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) { 1512 ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST; 1513 sc->alc_flags |= ALC_FLAG_PM; 1514 sc->alc_pmcap = base; 1515 } 1516 #endif 1517 ifp->if_capenable = ifp->if_capabilities; 1518 1519 /* VLAN capability setup. */ 1520 ifp->if_capabilities |= IFCAP_VLAN_MTU; 1521 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; 1522 ifp->if_capenable = ifp->if_capabilities; 1523 1524 /* 1525 * XXX 1526 * It seems enabling Tx checksum offloading makes more trouble. 1527 * Sometimes the controller does not receive any frames when 1528 * Tx checksum offloading is enabled. I'm not sure whether this 1529 * is a bug in Tx checksum offloading logic or I got broken 1530 * sample boards. To safety, don't enable Tx checksum offloading 1531 * by default but give chance to users to toggle it if they know 1532 * their controllers work without problems. 1533 * Fortunately, Tx checksum offloading for AR816x family 1534 * seems to work. 
1535 */ 1536 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 1537 ifp->if_capenable &= ~IFCAP_TXCSUM; 1538 ifp->if_hwassist &= ~ALC_CSUM_FEATURES; 1539 } 1540 1541 /* Set up MII bus. */ 1542 if ((error = mii_phy_probe(dev, &sc->alc_miibus, alc_mediachange, 1543 alc_mediastatus)) != 0) { 1544 device_printf(dev, "no PHY found!\n"); 1545 goto fail; 1546 } 1547 1548 ether_ifattach(ifp, sc->alc_eaddr, NULL); 1549 1550 /* Tell the upper layer(s) we support long frames. */ 1551 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1552 1553 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->alc_irq)); 1554 #if 0 1555 /* Create local taskq. */ 1556 TASK_INIT(&sc->alc_tx_task, 1, alc_tx_task, ifp); 1557 sc->alc_tq = taskqueue_create("alc_taskq", M_WAITOK, 1558 taskqueue_thread_enqueue, &sc->alc_tq); 1559 if (sc->alc_tq == NULL) { 1560 device_printf(dev, "could not create taskqueue.\n"); 1561 ether_ifdetach(ifp); 1562 error = ENXIO; 1563 goto fail; 1564 } 1565 taskqueue_start_threads(&sc->alc_tq, 1, TDPRI_KERN_DAEMON, -1, "%s taskq", 1566 device_get_nameunit(sc->alc_dev)); 1567 1568 alc_config_msi(sc); 1569 if ((sc->alc_flags & ALC_FLAG_MSIX) != 0) 1570 msic = ALC_MSIX_MESSAGES; 1571 else if ((sc->alc_flags & ALC_FLAG_MSI) != 0) 1572 msic = ALC_MSI_MESSAGES; 1573 else 1574 msic = 1; 1575 for (i = 0; i < msic; i++) { 1576 error = bus_setup_intr(dev, sc->alc_irq[i], INTR_MPSAFE, 1577 alc_intr, sc, 1578 &sc->alc_intrhand[i], NULL); 1579 if (error != 0) 1580 break; 1581 } 1582 if (error != 0) { 1583 device_printf(dev, "could not set up interrupt handler.\n"); 1584 taskqueue_free(sc->alc_tq); 1585 sc->alc_tq = NULL; 1586 ether_ifdetach(ifp); 1587 goto fail; 1588 } 1589 #else 1590 alc_config_msi(sc); 1591 error = bus_setup_intr(dev, sc->alc_irq, INTR_MPSAFE, alc_intr, sc, 1592 &sc->alc_intrhand, ifp->if_serializer); 1593 if (error) { 1594 device_printf(dev, "could not set up interrupt handler.\n"); 1595 ether_ifdetach(ifp); 1596 goto fail; 1597 } 1598 #endif 1599 1600 fail: 
1601 if (error != 0) 1602 alc_detach(dev); 1603 1604 return (error); 1605 } 1606 1607 static int 1608 alc_detach(device_t dev) 1609 { 1610 struct alc_softc *sc = device_get_softc(dev); 1611 1612 if (device_is_attached(dev)) { 1613 struct ifnet *ifp = sc->alc_ifp; 1614 1615 lwkt_serialize_enter(ifp->if_serializer); 1616 alc_stop(sc); 1617 bus_teardown_intr(dev, sc->alc_irq, sc->alc_intrhand); 1618 lwkt_serialize_exit(ifp->if_serializer); 1619 1620 ether_ifdetach(ifp); 1621 } 1622 1623 if (sc->alc_miibus != NULL) 1624 device_delete_child(dev, sc->alc_miibus); 1625 bus_generic_detach(dev); 1626 1627 if (sc->alc_res != NULL) 1628 alc_phy_down(sc); 1629 1630 if (sc->alc_irq != NULL) { 1631 bus_release_resource(dev, SYS_RES_IRQ, sc->alc_irq_rid, 1632 sc->alc_irq); 1633 } 1634 if (sc->alc_irq_type == PCI_INTR_TYPE_MSI) 1635 pci_release_msi(dev); 1636 1637 if (sc->alc_res != NULL) { 1638 bus_release_resource(dev, SYS_RES_MEMORY, sc->alc_res_rid, 1639 sc->alc_res); 1640 } 1641 1642 alc_dma_free(sc); 1643 1644 return (0); 1645 } 1646 1647 #define ALC_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 1648 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 1649 #define ALC_SYSCTL_STAT_ADD64(c, h, n, p, d) \ 1650 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 1651 1652 static void 1653 alc_sysctl_node(struct alc_softc *sc) 1654 { 1655 struct sysctl_ctx_list *ctx; 1656 struct sysctl_oid *tree; 1657 struct sysctl_oid_list *child, *parent; 1658 struct alc_hw_stats *stats; 1659 int error; 1660 1661 stats = &sc->alc_stats; 1662 ctx = device_get_sysctl_ctx(sc->alc_dev); 1663 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->alc_dev)); 1664 1665 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod", 1666 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_rx_mod, 0, 1667 sysctl_hw_alc_int_mod, "I", "alc Rx interrupt moderation"); 1668 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod", 1669 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_tx_mod, 0, 1670 sysctl_hw_alc_int_mod, "I", "alc Tx interrupt 
moderation"); 1671 /* Pull in device tunables. */ 1672 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT; 1673 error = resource_int_value(device_get_name(sc->alc_dev), 1674 device_get_unit(sc->alc_dev), "int_rx_mod", &sc->alc_int_rx_mod); 1675 if (error == 0) { 1676 if (sc->alc_int_rx_mod < ALC_IM_TIMER_MIN || 1677 sc->alc_int_rx_mod > ALC_IM_TIMER_MAX) { 1678 device_printf(sc->alc_dev, "int_rx_mod value out of " 1679 "range; using default: %d\n", 1680 ALC_IM_RX_TIMER_DEFAULT); 1681 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT; 1682 } 1683 } 1684 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT; 1685 error = resource_int_value(device_get_name(sc->alc_dev), 1686 device_get_unit(sc->alc_dev), "int_tx_mod", &sc->alc_int_tx_mod); 1687 if (error == 0) { 1688 if (sc->alc_int_tx_mod < ALC_IM_TIMER_MIN || 1689 sc->alc_int_tx_mod > ALC_IM_TIMER_MAX) { 1690 device_printf(sc->alc_dev, "int_tx_mod value out of " 1691 "range; using default: %d\n", 1692 ALC_IM_TX_TIMER_DEFAULT); 1693 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT; 1694 } 1695 } 1696 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit", 1697 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0, 1698 sysctl_hw_alc_proc_limit, "I", 1699 "max number of Rx events to process"); 1700 /* Pull in device tunables. */ 1701 sc->alc_process_limit = ALC_PROC_DEFAULT; 1702 error = resource_int_value(device_get_name(sc->alc_dev), 1703 device_get_unit(sc->alc_dev), "process_limit", 1704 &sc->alc_process_limit); 1705 if (error == 0) { 1706 if (sc->alc_process_limit < ALC_PROC_MIN || 1707 sc->alc_process_limit > ALC_PROC_MAX) { 1708 device_printf(sc->alc_dev, 1709 "process_limit value out of range; " 1710 "using default: %d\n", ALC_PROC_DEFAULT); 1711 sc->alc_process_limit = ALC_PROC_DEFAULT; 1712 } 1713 } 1714 1715 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 1716 NULL, "ALC statistics"); 1717 parent = SYSCTL_CHILDREN(tree); 1718 1719 /* Rx statistics. 
*/ 1720 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 1721 NULL, "Rx MAC statistics"); 1722 child = SYSCTL_CHILDREN(tree); 1723 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 1724 &stats->rx_frames, "Good frames"); 1725 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", 1726 &stats->rx_bcast_frames, "Good broadcast frames"); 1727 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", 1728 &stats->rx_mcast_frames, "Good multicast frames"); 1729 ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 1730 &stats->rx_pause_frames, "Pause control frames"); 1731 ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames", 1732 &stats->rx_control_frames, "Control frames"); 1733 ALC_SYSCTL_STAT_ADD32(ctx, child, "crc_errs", 1734 &stats->rx_crcerrs, "CRC errors"); 1735 ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 1736 &stats->rx_lenerrs, "Frames with length mismatched"); 1737 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets", 1738 &stats->rx_bytes, "Good octets"); 1739 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets", 1740 &stats->rx_bcast_bytes, "Good broadcast octets"); 1741 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets", 1742 &stats->rx_mcast_bytes, "Good multicast octets"); 1743 ALC_SYSCTL_STAT_ADD32(ctx, child, "runts", 1744 &stats->rx_runts, "Too short frames"); 1745 ALC_SYSCTL_STAT_ADD32(ctx, child, "fragments", 1746 &stats->rx_fragments, "Fragmented frames"); 1747 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 1748 &stats->rx_pkts_64, "64 bytes frames"); 1749 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 1750 &stats->rx_pkts_65_127, "65 to 127 bytes frames"); 1751 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 1752 &stats->rx_pkts_128_255, "128 to 255 bytes frames"); 1753 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 1754 &stats->rx_pkts_256_511, "256 to 511 bytes frames"); 1755 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 1756 &stats->rx_pkts_512_1023, "512 to 1023 bytes frames"); 1757 ALC_SYSCTL_STAT_ADD32(ctx, child, 
"frames_1024_1518", 1758 &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames"); 1759 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", 1760 &stats->rx_pkts_1519_max, "1519 to max frames"); 1761 ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs", 1762 &stats->rx_pkts_truncated, "Truncated frames due to MTU size"); 1763 ALC_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", 1764 &stats->rx_fifo_oflows, "FIFO overflows"); 1765 ALC_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs", 1766 &stats->rx_rrs_errs, "Return status write-back errors"); 1767 ALC_SYSCTL_STAT_ADD32(ctx, child, "align_errs", 1768 &stats->rx_alignerrs, "Alignment errors"); 1769 ALC_SYSCTL_STAT_ADD32(ctx, child, "filtered", 1770 &stats->rx_pkts_filtered, 1771 "Frames dropped due to address filtering"); 1772 1773 /* Tx statistics. */ 1774 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 1775 NULL, "Tx MAC statistics"); 1776 child = SYSCTL_CHILDREN(tree); 1777 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 1778 &stats->tx_frames, "Good frames"); 1779 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", 1780 &stats->tx_bcast_frames, "Good broadcast frames"); 1781 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", 1782 &stats->tx_mcast_frames, "Good multicast frames"); 1783 ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 1784 &stats->tx_pause_frames, "Pause control frames"); 1785 ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames", 1786 &stats->tx_control_frames, "Control frames"); 1787 ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_defers", 1788 &stats->tx_excess_defer, "Frames with excessive derferrals"); 1789 ALC_SYSCTL_STAT_ADD32(ctx, child, "defers", 1790 &stats->tx_excess_defer, "Frames with derferrals"); 1791 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets", 1792 &stats->tx_bytes, "Good octets"); 1793 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets", 1794 &stats->tx_bcast_bytes, "Good broadcast octets"); 1795 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets", 1796 
&stats->tx_mcast_bytes, "Good multicast octets"); 1797 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 1798 &stats->tx_pkts_64, "64 bytes frames"); 1799 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 1800 &stats->tx_pkts_65_127, "65 to 127 bytes frames"); 1801 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 1802 &stats->tx_pkts_128_255, "128 to 255 bytes frames"); 1803 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 1804 &stats->tx_pkts_256_511, "256 to 511 bytes frames"); 1805 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 1806 &stats->tx_pkts_512_1023, "512 to 1023 bytes frames"); 1807 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 1808 &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames"); 1809 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", 1810 &stats->tx_pkts_1519_max, "1519 to max frames"); 1811 ALC_SYSCTL_STAT_ADD32(ctx, child, "single_colls", 1812 &stats->tx_single_colls, "Single collisions"); 1813 ALC_SYSCTL_STAT_ADD32(ctx, child, "multi_colls", 1814 &stats->tx_multi_colls, "Multiple collisions"); 1815 ALC_SYSCTL_STAT_ADD32(ctx, child, "late_colls", 1816 &stats->tx_late_colls, "Late collisions"); 1817 ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls", 1818 &stats->tx_excess_colls, "Excessive collisions"); 1819 ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns", 1820 &stats->tx_underrun, "FIFO underruns"); 1821 ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns", 1822 &stats->tx_desc_underrun, "Descriptor write-back errors"); 1823 ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 1824 &stats->tx_lenerrs, "Frames with length mismatched"); 1825 ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs", 1826 &stats->tx_pkts_truncated, "Truncated frames due to MTU size"); 1827 } 1828 1829 #undef ALC_SYSCTL_STAT_ADD32 1830 #undef ALC_SYSCTL_STAT_ADD64 1831 1832 struct alc_dmamap_arg { 1833 bus_addr_t alc_busaddr; 1834 }; 1835 1836 static void 1837 alc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1838 { 1839 struct alc_dmamap_arg 
	*ctx;

	/* Bail out on mapping error; caller checks ctx->alc_busaddr. */
	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct alc_dmamap_arg *)arg;
	ctx->alc_busaddr = segs[0].ds_addr;
}

#ifdef foo
/*
 * Normal and high Tx descriptors shares single Tx high address.
 * Four Rx descriptor/return rings and CMB shares the same Rx
 * high address.
 */
static int
alc_check_boundary(struct alc_softc *sc)
{
	bus_addr_t cmb_end, rx_ring_end, rr_ring_end, tx_ring_end;

	rx_ring_end = sc->alc_rdata.alc_rx_ring_paddr + ALC_RX_RING_SZ;
	rr_ring_end = sc->alc_rdata.alc_rr_ring_paddr + ALC_RR_RING_SZ;
	cmb_end = sc->alc_rdata.alc_cmb_paddr + ALC_CMB_SZ;
	tx_ring_end = sc->alc_rdata.alc_tx_ring_paddr + ALC_TX_RING_SZ;

	/* 4GB boundary crossing is not allowed. */
	if ((ALC_ADDR_HI(rx_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr)) ||
	    (ALC_ADDR_HI(rr_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr)) ||
	    (ALC_ADDR_HI(cmb_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_cmb_paddr)) ||
	    (ALC_ADDR_HI(tx_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_tx_ring_paddr)))
		return (EFBIG);
	/*
	 * Make sure Rx return descriptor/Rx descriptor/CMB use
	 * the same high address.
	 */
	if ((ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(rr_ring_end)) ||
	    (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(cmb_end)))
		return (EFBIG);

	return (0);
}
#endif

/*
 * Allocate DMA tags and DMA'able memory for the Tx/Rx descriptor
 * rings, the Rx return ring, the CMB and the SMB, and load the
 * maps to obtain their bus addresses.
 */
static int
alc_dma_alloc(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	struct alc_dmamap_arg ctx;
	int error, i;

	/*
	 * Create parent DMA tag.
	 * NOTE(review): alc_parent_tag is passed as its own parent
	 * here; the softc is zero-initialized at this point, so this
	 * is effectively a NULL parent — confirm this is intentional.
	 */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_parent_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx descriptor ring. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    ALC_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALC_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx free descriptor ring. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    ALC_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALC_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create Rx ring DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Rx return descriptor ring. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_RR_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    ALC_RR_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALC_RR_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_rr_ring_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create Rx return ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for coalescing message block. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_CMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    ALC_CMB_SZ,			/* maxsize */
	    1,				/* nsegments */
	    ALC_CMB_SZ,			/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_cmb_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create CMB DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for status message block. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_SMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    ALC_SMB_SZ,			/* maxsize */
	    1,				/* nsegments */
	    ALC_SMB_SZ,			/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_smb_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create SMB DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_tx_ring_tag,
	    (void **)&sc->alc_rdata.alc_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_tx_ring_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_tx_ring_tag,
	    sc->alc_cdata.alc_tx_ring_map, sc->alc_rdata.alc_tx_ring,
	    ALC_TX_RING_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->alc_rdata.alc_tx_ring_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_rx_ring_tag,
	    (void **)&sc->alc_rdata.alc_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_rx_ring_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_rx_ring_tag,
	    sc->alc_cdata.alc_rx_ring_map, sc->alc_rdata.alc_rx_ring,
	    ALC_RX_RING_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->alc_rdata.alc_rx_ring_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx return ring.
*/ 2039 error = bus_dmamem_alloc(sc->alc_cdata.alc_rr_ring_tag, 2040 (void **)&sc->alc_rdata.alc_rr_ring, 2041 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 2042 &sc->alc_cdata.alc_rr_ring_map); 2043 if (error != 0) { 2044 device_printf(sc->alc_dev, 2045 "could not allocate DMA'able memory for Rx return ring.\n"); 2046 goto fail; 2047 } 2048 ctx.alc_busaddr = 0; 2049 error = bus_dmamap_load(sc->alc_cdata.alc_rr_ring_tag, 2050 sc->alc_cdata.alc_rr_ring_map, sc->alc_rdata.alc_rr_ring, 2051 ALC_RR_RING_SZ, alc_dmamap_cb, &ctx, 0); 2052 if (error != 0 || ctx.alc_busaddr == 0) { 2053 device_printf(sc->alc_dev, 2054 "could not load DMA'able memory for Tx ring.\n"); 2055 goto fail; 2056 } 2057 sc->alc_rdata.alc_rr_ring_paddr = ctx.alc_busaddr; 2058 2059 /* Allocate DMA'able memory and load the DMA map for CMB. */ 2060 error = bus_dmamem_alloc(sc->alc_cdata.alc_cmb_tag, 2061 (void **)&sc->alc_rdata.alc_cmb, 2062 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 2063 &sc->alc_cdata.alc_cmb_map); 2064 if (error != 0) { 2065 device_printf(sc->alc_dev, 2066 "could not allocate DMA'able memory for CMB.\n"); 2067 goto fail; 2068 } 2069 ctx.alc_busaddr = 0; 2070 error = bus_dmamap_load(sc->alc_cdata.alc_cmb_tag, 2071 sc->alc_cdata.alc_cmb_map, sc->alc_rdata.alc_cmb, 2072 ALC_CMB_SZ, alc_dmamap_cb, &ctx, 0); 2073 if (error != 0 || ctx.alc_busaddr == 0) { 2074 device_printf(sc->alc_dev, 2075 "could not load DMA'able memory for CMB.\n"); 2076 goto fail; 2077 } 2078 sc->alc_rdata.alc_cmb_paddr = ctx.alc_busaddr; 2079 2080 /* Allocate DMA'able memory and load the DMA map for SMB. 
*/ 2081 error = bus_dmamem_alloc(sc->alc_cdata.alc_smb_tag, 2082 (void **)&sc->alc_rdata.alc_smb, 2083 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 2084 &sc->alc_cdata.alc_smb_map); 2085 if (error != 0) { 2086 device_printf(sc->alc_dev, 2087 "could not allocate DMA'able memory for SMB.\n"); 2088 goto fail; 2089 } 2090 ctx.alc_busaddr = 0; 2091 error = bus_dmamap_load(sc->alc_cdata.alc_smb_tag, 2092 sc->alc_cdata.alc_smb_map, sc->alc_rdata.alc_smb, 2093 ALC_SMB_SZ, alc_dmamap_cb, &ctx, 0); 2094 if (error != 0 || ctx.alc_busaddr == 0) { 2095 device_printf(sc->alc_dev, 2096 "could not load DMA'able memory for CMB.\n"); 2097 goto fail; 2098 } 2099 sc->alc_rdata.alc_smb_paddr = ctx.alc_busaddr; 2100 2101 #ifdef foo 2102 /* 2103 * All of the status blocks and descriptor rings are 2104 * allocated at lower 4GB, their addresses high 32bits 2105 * part are same (all 0). 2106 */ 2107 2108 /* Make sure we've not crossed 4GB boundary. */ 2109 if ((error = alc_check_boundary(sc)) != 0) { 2110 device_printf(sc->alc_dev, "4GB boundary crossed, " 2111 "switching to 32bit DMA addressing mode.\n"); 2112 alc_dma_free(sc); 2113 /* 2114 * Limit max allowable DMA address space to 32bit 2115 * and try again. 2116 */ 2117 lowaddr = BUS_SPACE_MAXADDR_32BIT; 2118 goto again; 2119 } 2120 #endif 2121 2122 /* 2123 * Create Tx buffer parent tag. 2124 * AR81[3567]x allows 64bit DMA addressing of Tx/Rx buffers 2125 * so it needs separate parent DMA tag as parent DMA address 2126 * space could be restricted to be within 32bit address space 2127 * by 4GB boundary crossing. 
2128 */ 2129 error = bus_dma_tag_create( 2130 sc->alc_cdata.alc_parent_tag, /* parent */ 2131 1, 0, /* alignment, boundary */ 2132 BUS_SPACE_MAXADDR, /* lowaddr */ 2133 BUS_SPACE_MAXADDR, /* highaddr */ 2134 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 2135 0, /* nsegments */ 2136 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 2137 0, /* flags */ 2138 &sc->alc_cdata.alc_buffer_tag); 2139 if (error != 0) { 2140 device_printf(sc->alc_dev, 2141 "could not create parent buffer DMA tag.\n"); 2142 goto fail; 2143 } 2144 2145 /* Create DMA tag for Tx buffers. */ 2146 error = bus_dma_tag_create( 2147 sc->alc_cdata.alc_buffer_tag, /* parent */ 2148 1, 0, /* alignment, boundary */ 2149 BUS_SPACE_MAXADDR, /* lowaddr */ 2150 BUS_SPACE_MAXADDR, /* highaddr */ 2151 ALC_TSO_MAXSIZE, /* maxsize */ 2152 ALC_MAXTXSEGS, /* nsegments */ 2153 ALC_TSO_MAXSEGSIZE, /* maxsegsize */ 2154 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, /* flags */ 2155 &sc->alc_cdata.alc_tx_tag); 2156 if (error != 0) { 2157 device_printf(sc->alc_dev, "could not create Tx DMA tag.\n"); 2158 goto fail; 2159 } 2160 2161 /* Create DMA tag for Rx buffers. */ 2162 error = bus_dma_tag_create( 2163 sc->alc_cdata.alc_buffer_tag, /* parent */ 2164 ALC_RX_BUF_ALIGN, 0, /* alignment, boundary */ 2165 BUS_SPACE_MAXADDR, /* lowaddr */ 2166 BUS_SPACE_MAXADDR, /* highaddr */ 2167 MCLBYTES, /* maxsize */ 2168 1, /* nsegments */ 2169 MCLBYTES, /* maxsegsize */ 2170 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED, /* flags */ 2171 &sc->alc_cdata.alc_rx_tag); 2172 if (error != 0) { 2173 device_printf(sc->alc_dev, "could not create Rx DMA tag.\n"); 2174 goto fail; 2175 } 2176 /* Create DMA maps for Tx buffers. 
*/ 2177 for (i = 0; i < ALC_TX_RING_CNT; i++) { 2178 txd = &sc->alc_cdata.alc_txdesc[i]; 2179 txd->tx_m = NULL; 2180 txd->tx_dmamap = NULL; 2181 error = bus_dmamap_create(sc->alc_cdata.alc_tx_tag, 2182 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2183 &txd->tx_dmamap); 2184 if (error != 0) { 2185 device_printf(sc->alc_dev, 2186 "could not create Tx dmamap.\n"); 2187 goto fail; 2188 } 2189 } 2190 /* Create DMA maps for Rx buffers. */ 2191 error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 2192 BUS_DMA_WAITOK, 2193 &sc->alc_cdata.alc_rx_sparemap); 2194 if (error) { 2195 device_printf(sc->alc_dev, 2196 "could not create spare Rx dmamap.\n"); 2197 goto fail; 2198 } 2199 for (i = 0; i < ALC_RX_RING_CNT; i++) { 2200 rxd = &sc->alc_cdata.alc_rxdesc[i]; 2201 rxd->rx_m = NULL; 2202 rxd->rx_dmamap = NULL; 2203 error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 2204 BUS_DMA_WAITOK, 2205 &rxd->rx_dmamap); 2206 if (error != 0) { 2207 device_printf(sc->alc_dev, 2208 "could not create Rx dmamap.\n"); 2209 goto fail; 2210 } 2211 } 2212 2213 fail: 2214 return (error); 2215 } 2216 2217 static void 2218 alc_dma_free(struct alc_softc *sc) 2219 { 2220 struct alc_txdesc *txd; 2221 struct alc_rxdesc *rxd; 2222 int i; 2223 2224 /* Tx buffers. 
 */
	if (sc->alc_cdata.alc_tx_tag != NULL) {
		for (i = 0; i < ALC_TX_RING_CNT; i++) {
			txd = &sc->alc_cdata.alc_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->alc_cdata.alc_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_tag);
		sc->alc_cdata.alc_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->alc_cdata.alc_rx_tag != NULL) {
		for (i = 0; i < ALC_RX_RING_CNT; i++) {
			rxd = &sc->alc_cdata.alc_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->alc_cdata.alc_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
			    sc->alc_cdata.alc_rx_sparemap);
			sc->alc_cdata.alc_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_tag);
		sc->alc_cdata.alc_rx_tag = NULL;
	}
	/*
	 * For each ring/block below the teardown order is:
	 * unload the map (if it was loaded), free the DMA memory
	 * (if allocated), then destroy the tag.
	 */
	/* Tx descriptor ring. */
	if (sc->alc_cdata.alc_tx_ring_tag != NULL) {
		if (sc->alc_rdata.alc_tx_ring_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_tx_ring_tag,
			    sc->alc_cdata.alc_tx_ring_map);
		if (sc->alc_rdata.alc_tx_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_tx_ring_tag,
			    sc->alc_rdata.alc_tx_ring,
			    sc->alc_cdata.alc_tx_ring_map);
		sc->alc_rdata.alc_tx_ring_paddr = 0;
		sc->alc_rdata.alc_tx_ring = NULL;
		sc->alc_cdata.alc_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_ring_tag);
		sc->alc_cdata.alc_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->alc_cdata.alc_rx_ring_tag != NULL) {
		if (sc->alc_rdata.alc_rx_ring_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_rx_ring_tag,
			    sc->alc_cdata.alc_rx_ring_map);
		if (sc->alc_rdata.alc_rx_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_rx_ring_tag,
			    sc->alc_rdata.alc_rx_ring,
			    sc->alc_cdata.alc_rx_ring_map);
		sc->alc_rdata.alc_rx_ring_paddr = 0;
		sc->alc_rdata.alc_rx_ring = NULL;
		sc->alc_cdata.alc_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_ring_tag);
		sc->alc_cdata.alc_rx_ring_tag = NULL;
	}
	/* Rx return ring. */
	if (sc->alc_cdata.alc_rr_ring_tag != NULL) {
		if (sc->alc_rdata.alc_rr_ring_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_rr_ring_tag,
			    sc->alc_cdata.alc_rr_ring_map);
		if (sc->alc_rdata.alc_rr_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_rr_ring_tag,
			    sc->alc_rdata.alc_rr_ring,
			    sc->alc_cdata.alc_rr_ring_map);
		sc->alc_rdata.alc_rr_ring_paddr = 0;
		sc->alc_rdata.alc_rr_ring = NULL;
		sc->alc_cdata.alc_rr_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_rr_ring_tag);
		sc->alc_cdata.alc_rr_ring_tag = NULL;
	}
	/* CMB block */
	if (sc->alc_cdata.alc_cmb_tag != NULL) {
		if (sc->alc_rdata.alc_cmb_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_cmb_tag,
			    sc->alc_cdata.alc_cmb_map);
		if (sc->alc_rdata.alc_cmb != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_cmb_tag,
			    sc->alc_rdata.alc_cmb,
			    sc->alc_cdata.alc_cmb_map);
		sc->alc_rdata.alc_cmb_paddr = 0;
		sc->alc_rdata.alc_cmb = NULL;
		sc->alc_cdata.alc_cmb_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_cmb_tag);
		sc->alc_cdata.alc_cmb_tag = NULL;
	}
	/* SMB block */
	if (sc->alc_cdata.alc_smb_tag != NULL) {
		if (sc->alc_rdata.alc_smb_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_smb_tag,
			    sc->alc_cdata.alc_smb_map);
		if (sc->alc_rdata.alc_smb != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_smb_tag,
			    sc->alc_rdata.alc_smb,
			    sc->alc_cdata.alc_smb_map);
		sc->alc_rdata.alc_smb_paddr = 0;
		sc->alc_rdata.alc_smb = NULL;
		sc->alc_cdata.alc_smb_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_smb_tag);
		sc->alc_cdata.alc_smb_tag = NULL;
	}
	/* Buffer parent tag last among children, then the root parent tag. */
	if (sc->alc_cdata.alc_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->alc_cdata.alc_buffer_tag);
		sc->alc_cdata.alc_buffer_tag = NULL;
	}
	if (sc->alc_cdata.alc_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->alc_cdata.alc_parent_tag);
		sc->alc_cdata.alc_parent_tag = NULL;
	}
}

/* Shutdown hook: identical to suspend (stop the chip, arm nothing). */
static int
alc_shutdown(device_t dev)
{

	return (alc_suspend(dev));
}

#if 0
/* XXX: LINK SPEED */
/*
 * Note, this driver resets the link speed to 10/100Mbps by
 * restarting auto-negotiation in suspend/shutdown phase but we
 * don't know whether that auto-negotiation would succeed or not
 * as driver has no control after powering off/suspend operation.
 * If the renegotiation fail WOL may not work. Running at 1Gbps
 * will draw more power than 375mA at 3.3V which is specified in
 * PCI specification and that would result in complete
 * shutdowning power to ethernet controller.
 *
 * TODO
 * Save current negotiated media speed/duplex/flow-control to
 * softc and restore the same link again after resuming. PHY
 * handling such as power down/resetting to 100Mbps may be better
 * handled in suspend method in phy driver.
2364 */ 2365 static void 2366 alc_setlinkspeed(struct alc_softc *sc) 2367 { 2368 struct mii_data *mii; 2369 int aneg, i; 2370 2371 mii = device_get_softc(sc->alc_miibus); 2372 mii_pollstat(mii); 2373 aneg = 0; 2374 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 2375 (IFM_ACTIVE | IFM_AVALID)) { 2376 switch IFM_SUBTYPE(mii->mii_media_active) { 2377 case IFM_10_T: 2378 case IFM_100_TX: 2379 return; 2380 case IFM_1000_T: 2381 aneg++; 2382 break; 2383 default: 2384 break; 2385 } 2386 } 2387 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_100T2CR, 0); 2388 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 2389 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 2390 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 2391 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); 2392 DELAY(1000); 2393 if (aneg != 0) { 2394 /* 2395 * Poll link state until alc(4) get a 10/100Mbps link. 2396 */ 2397 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 2398 mii_pollstat(mii); 2399 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 2400 == (IFM_ACTIVE | IFM_AVALID)) { 2401 switch (IFM_SUBTYPE( 2402 mii->mii_media_active)) { 2403 case IFM_10_T: 2404 case IFM_100_TX: 2405 alc_mac_config(sc); 2406 return; 2407 default: 2408 break; 2409 } 2410 } 2411 ALC_UNLOCK(sc); 2412 pause("alclnk", hz); 2413 ALC_LOCK(sc); 2414 } 2415 if (i == MII_ANEGTICKS_GIGE) 2416 device_printf(sc->alc_dev, 2417 "establishing a link failed, WOL may not work!"); 2418 } 2419 /* 2420 * No link, force MAC to have 100Mbps, full-duplex link. 2421 * This is the last resort and may/may not work. 
2422 */ 2423 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 2424 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 2425 alc_mac_config(sc); 2426 } 2427 #endif 2428 2429 #if 0 2430 /* XXX: WOL */ 2431 static void 2432 alc_setwol(struct alc_softc *sc) 2433 { 2434 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 2435 alc_setwol_816x(sc); 2436 else 2437 alc_setwol_813x(sc); 2438 } 2439 2440 static void 2441 alc_setwol_813x(struct alc_softc *sc) 2442 { 2443 struct ifnet *ifp; 2444 uint32_t reg, pmcs; 2445 uint16_t pmstat; 2446 2447 ALC_LOCK_ASSERT(sc); 2448 2449 alc_disable_l0s_l1(sc); 2450 ifp = sc->alc_ifp; 2451 if ((sc->alc_flags & ALC_FLAG_PM) == 0) { 2452 /* Disable WOL. */ 2453 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 2454 reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC); 2455 reg |= PCIE_PHYMISC_FORCE_RCV_DET; 2456 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg); 2457 /* Force PHY power down. */ 2458 alc_phy_down(sc); 2459 CSR_WRITE_4(sc, ALC_MASTER_CFG, 2460 CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS); 2461 return; 2462 } 2463 2464 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 2465 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) 2466 alc_setlinkspeed(sc); 2467 CSR_WRITE_4(sc, ALC_MASTER_CFG, 2468 CSR_READ_4(sc, ALC_MASTER_CFG) & ~MASTER_CLK_SEL_DIS); 2469 } 2470 2471 pmcs = 0; 2472 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2473 pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB; 2474 CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs); 2475 reg = CSR_READ_4(sc, ALC_MAC_CFG); 2476 reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI | 2477 MAC_CFG_BCAST); 2478 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2479 reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST; 2480 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2481 reg |= MAC_CFG_RX_ENB; 2482 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 2483 2484 reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC); 2485 reg |= PCIE_PHYMISC_FORCE_RCV_DET; 2486 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg); 2487 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 2488 /* WOL disabled, PHY power down. 
*/ 2489 alc_phy_down(sc); 2490 CSR_WRITE_4(sc, ALC_MASTER_CFG, 2491 CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS); 2492 2493 } 2494 /* Request PME. */ 2495 pmstat = pci_read_config(sc->alc_dev, 2496 sc->alc_pmcap + PCIR_POWER_STATUS, 2); 2497 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2498 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2499 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2500 pci_write_config(sc->alc_dev, 2501 sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2); 2502 } 2503 2504 static void 2505 alc_setwol_816x(struct alc_softc *sc) 2506 { 2507 struct ifnet *ifp; 2508 uint32_t gphy, mac, master, pmcs, reg; 2509 uint16_t pmstat; 2510 2511 ALC_LOCK_ASSERT(sc); 2512 2513 ifp = sc->alc_ifp; 2514 master = CSR_READ_4(sc, ALC_MASTER_CFG); 2515 master &= ~MASTER_CLK_SEL_DIS; 2516 gphy = CSR_READ_4(sc, ALC_GPHY_CFG); 2517 gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE | GPHY_CFG_100AB_ENB | 2518 GPHY_CFG_PHY_PLL_ON); 2519 gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET; 2520 if ((sc->alc_flags & ALC_FLAG_PM) == 0) { 2521 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 2522 gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW; 2523 mac = CSR_READ_4(sc, ALC_MAC_CFG); 2524 } else { 2525 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 2526 gphy |= GPHY_CFG_EXT_RESET; 2527 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) 2528 alc_setlinkspeed(sc); 2529 } 2530 pmcs = 0; 2531 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2532 pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB; 2533 CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs); 2534 mac = CSR_READ_4(sc, ALC_MAC_CFG); 2535 mac &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI | 2536 MAC_CFG_BCAST); 2537 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2538 mac |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST; 2539 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2540 mac |= MAC_CFG_RX_ENB; 2541 alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10, 2542 ANEG_S3DIG10_SL); 2543 } 2544 2545 /* Enable OSC. 
*/ 2546 reg = CSR_READ_4(sc, ALC_MISC); 2547 reg &= ~MISC_INTNLOSC_OPEN; 2548 CSR_WRITE_4(sc, ALC_MISC, reg); 2549 reg |= MISC_INTNLOSC_OPEN; 2550 CSR_WRITE_4(sc, ALC_MISC, reg); 2551 CSR_WRITE_4(sc, ALC_MASTER_CFG, master); 2552 CSR_WRITE_4(sc, ALC_MAC_CFG, mac); 2553 CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy); 2554 reg = CSR_READ_4(sc, ALC_PDLL_TRNS1); 2555 reg |= PDLL_TRNS1_D3PLLOFF_ENB; 2556 CSR_WRITE_4(sc, ALC_PDLL_TRNS1, reg); 2557 2558 if ((sc->alc_flags & ALC_FLAG_PM) != 0) { 2559 /* Request PME. */ 2560 pmstat = pci_read_config(sc->alc_dev, 2561 sc->alc_pmcap + PCIR_POWER_STATUS, 2); 2562 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2563 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2564 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2565 pci_write_config(sc->alc_dev, 2566 sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2); 2567 } 2568 } 2569 2570 #endif 2571 2572 static int 2573 alc_suspend(device_t dev) 2574 { 2575 struct alc_softc *sc = device_get_softc(dev); 2576 struct ifnet *ifp = &sc->arpcom.ac_if; 2577 2578 lwkt_serialize_enter(ifp->if_serializer); 2579 alc_stop(sc); 2580 #if 0 2581 /* XXX: WOL */ 2582 alc_setwol(sc); 2583 #endif 2584 lwkt_serialize_exit(ifp->if_serializer); 2585 2586 return (0); 2587 } 2588 2589 static int 2590 alc_resume(device_t dev) 2591 { 2592 struct alc_softc *sc = device_get_softc(dev); 2593 struct ifnet *ifp = &sc->arpcom.ac_if; 2594 uint16_t pmstat; 2595 2596 lwkt_serialize_enter(ifp->if_serializer); 2597 2598 if ((sc->alc_flags & ALC_FLAG_PM) != 0) { 2599 /* Disable PME and clear PME status. */ 2600 pmstat = pci_read_config(sc->alc_dev, 2601 sc->alc_pmcap + PCIR_POWER_STATUS, 2); 2602 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 2603 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2604 pci_write_config(sc->alc_dev, 2605 sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2); 2606 } 2607 } 2608 2609 /* Reset PHY. 
 */
	alc_phy_reset(sc);
	if (ifp->if_flags & IFF_UP)
		alc_init(sc);

	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

/*
 * Build Tx descriptors for one outgoing mbuf chain.  Performs the
 * header pullups required for checksum offload / TSO, loads the chain
 * into DMA segments, and fills the Tx ring.  On load failure the mbuf
 * is freed and *m_head set to NULL; on a full ring, ENOBUFS is
 * returned with the mbuf intact so alc_start() can requeue it.
 */
static int
alc_encap(struct alc_softc *sc, struct mbuf **m_head)
{
	struct alc_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *tcp;
	bus_dma_segment_t txsegs[ALC_MAXTXSEGS];
	bus_dmamap_t map;
	uint32_t cflags, hdrlen, ip_off, poff, vtag;
	int error, idx, nsegs, prod;

	M_ASSERTPKTHDR((*m_head));

	m = *m_head;
	ip = NULL;
	tcp = NULL;
	ip_off = poff = 0;
	if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) {
		/*
		 * AR81[3567]x requires offset of TCP/UDP header in its
		 * Tx descriptor to perform Tx checksum offloading. TSO
		 * also requires TCP header offset and modification of
		 * IP/TCP header. This kind of operation takes many CPU
		 * cycles on FreeBSD so fast host CPU is required to get
		 * smooth TSO performance.
		 */
		struct ether_header *eh;

		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			/* Release original mbufs. */
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}

		ip_off = sizeof(struct ether_header);
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/*
		 * Check if hardware VLAN insertion is off.
		 * Additional check for LLC/SNAP frame?
		 */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		poff = ip_off + (ip->ip_hl << 2);
		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
			m = m_pullup(m, poff + sizeof(struct tcphdr));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
			m = m_pullup(m, poff + (tcp->th_off << 2));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			/*
			 * Due to strict adherence of Microsoft NDIS
			 * Large Send specification, hardware expects
			 * a pseudo TCP checksum inserted by upper
			 * stack. Unfortunately the pseudo TCP
			 * checksum that NDIS refers to does not include
			 * TCP payload length so driver should recompute
			 * the pseudo checksum here. Hopefully this
			 * wouldn't be much burden on modern CPUs.
			 *
			 * Reset IP checksum and recompute TCP pseudo
			 * checksum as NDIS specification said.
			 */
			/* Re-fetch header pointers: m_pullup may have moved the data. */
			ip = (struct ip *)(mtod(m, char *) + ip_off);
			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
			ip->ip_sum = 0;
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		}
		*m_head = m;
	}

	prod = sc->alc_cdata.alc_tx_prod;
	txd = &sc->alc_cdata.alc_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf_defrag(
	    sc->alc_cdata.alc_tx_tag, map, m_head,
	    txsegs, ALC_MAXTXSEGS, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(*m_head);
		*m_head = NULL;
		return (error);
	}
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check descriptor overrun. */
	if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) {
		bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, map);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, map, BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = TD_ETHERNET;
	vtag = 0;
	desc = NULL;
	idx = 0;
	/* Configure VLAN hardware tag insertion. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		vtag = htons(m->m_pkthdr.ether_vlantag);
		vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
		cflags |= TD_INS_VLAN_TAG;
	}
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/* Request TSO and set MSS. */
		cflags |= TD_TSO | TD_TSO_DESCV1;
		cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << TD_MSS_SHIFT) &
		    TD_MSS_MASK;
		/* Set TCP header offset. */
		cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) &
		    TD_TCPHDR_OFFSET_MASK;
		/*
		 * AR81[3567]x requires the first buffer should
		 * only hold IP/TCP header data. Payload should
		 * be handled in other descriptors.
		 */
		hdrlen = poff + (tcp->th_off << 2);
		desc = &sc->alc_rdata.alc_tx_ring[prod];
		/*
		 * NOTE(review): here and below, vtag is folded inside
		 * TX_BYTES(), while the non-TSO loop later uses
		 * TX_BYTES(len) | vtag.  Verify which form matches the
		 * TD_BUF_LEN/TD_VLAN field layout in if_alcreg.h.
		 */
		desc->len = htole32(TX_BYTES(hdrlen | vtag));
		desc->flags = htole32(cflags);
		desc->addr = htole64(txsegs[0].ds_addr);
		sc->alc_cdata.alc_tx_cnt++;
		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
		if (m->m_len - hdrlen > 0) {
			/* Handle remaining payload of the first fragment. */
			desc = &sc->alc_rdata.alc_tx_ring[prod];
			desc->len = htole32(TX_BYTES((m->m_len - hdrlen) |
			    vtag));
			desc->flags = htole32(cflags);
			desc->addr = htole64(txsegs[0].ds_addr + hdrlen);
			sc->alc_cdata.alc_tx_cnt++;
			ALC_DESC_INC(prod, ALC_TX_RING_CNT);
		}
		/* Handle remaining fragments. */
		idx = 1;
	} else if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
		/* Configure Tx checksum offload. */
#ifdef ALC_USE_CUSTOM_CSUM
		cflags |= TD_CUSTOM_CSUM;
		/* Set checksum start offset. */
		cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
		    TD_PLOAD_OFFSET_MASK;
		/* Set checksum insertion position of TCP/UDP. */
		cflags |= (((poff + m->m_pkthdr.csum_data) >> 1) <<
		    TD_CUSTOM_CSUM_OFFSET_SHIFT) & TD_CUSTOM_CSUM_OFFSET_MASK;
#else
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			cflags |= TD_IPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			cflags |= TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			cflags |= TD_UDPCSUM;
		/* Set TCP/UDP header offset. */
		cflags |= (poff << TD_L4HDR_OFFSET_SHIFT) &
		    TD_L4HDR_OFFSET_MASK;
#endif
	}

	for (; idx < nsegs; idx++) {
		desc = &sc->alc_rdata.alc_tx_ring[prod];
		desc->len = htole32(TX_BYTES(txsegs[idx].ds_len) | vtag);
		desc->flags = htole32(cflags);
		desc->addr = htole64(txsegs[idx].ds_addr);
		sc->alc_cdata.alc_tx_cnt++;
		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
	}
	/* Update producer index.
*/ 2822 sc->alc_cdata.alc_tx_prod = prod; 2823 2824 /* Finally set EOP on the last descriptor. */ 2825 prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT; 2826 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2827 desc->flags |= htole32(TD_EOP); 2828 2829 /* Swap dmamap of the first and the last. */ 2830 txd = &sc->alc_cdata.alc_txdesc[prod]; 2831 map = txd_last->tx_dmamap; 2832 txd_last->tx_dmamap = txd->tx_dmamap; 2833 txd->tx_dmamap = map; 2834 txd->tx_m = m; 2835 2836 return (0); 2837 } 2838 2839 static void 2840 alc_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 2841 { 2842 struct alc_softc *sc = ifp->if_softc; 2843 struct mbuf *m_head; 2844 int enq; 2845 2846 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 2847 ASSERT_SERIALIZED(ifp->if_serializer); 2848 2849 /* Reclaim transmitted frames. */ 2850 if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT) 2851 alc_txeof(sc); 2852 2853 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd)) 2854 return; 2855 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) { 2856 ifq_purge(&ifp->if_snd); 2857 return; 2858 } 2859 2860 for (enq = 0; !ifq_is_empty(&ifp->if_snd); ) { 2861 m_head = ifq_dequeue(&ifp->if_snd); 2862 if (m_head == NULL) 2863 break; 2864 /* 2865 * Pack the data into the transmit ring. If we 2866 * don't have room, set the OACTIVE flag and wait 2867 * for the NIC to drain the ring. 2868 */ 2869 if (alc_encap(sc, &m_head)) { 2870 if (m_head == NULL) 2871 break; 2872 ifq_prepend(&ifp->if_snd, m_head); 2873 ifq_set_oactive(&ifp->if_snd); 2874 break; 2875 } 2876 2877 enq++; 2878 /* 2879 * If there's a BPF listener, bounce a copy of this frame 2880 * to him. 2881 */ 2882 ETHER_BPF_MTAP(ifp, m_head); 2883 } 2884 2885 if (enq > 0) { 2886 /* Sync descriptors. */ 2887 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, 2888 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE); 2889 /* Kick. Assume we're using normal Tx priority queue. 
*/ 2890 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 2891 CSR_WRITE_2(sc, ALC_MBOX_TD_PRI0_PROD_IDX, 2892 (uint16_t)sc->alc_cdata.alc_tx_prod); 2893 else 2894 CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX, 2895 (sc->alc_cdata.alc_tx_prod << 2896 MBOX_TD_PROD_LO_IDX_SHIFT) & 2897 MBOX_TD_PROD_LO_IDX_MASK); 2898 /* Set a timeout in case the chip goes out to lunch. */ 2899 sc->alc_watchdog_timer = ALC_TX_TIMEOUT; 2900 } 2901 } 2902 2903 static void 2904 alc_watchdog(struct alc_softc *sc) 2905 { 2906 struct ifnet *ifp = &sc->arpcom.ac_if; 2907 2908 ASSERT_SERIALIZED(ifp->if_serializer); 2909 2910 if (sc->alc_watchdog_timer == 0 || --sc->alc_watchdog_timer) 2911 return; 2912 2913 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) { 2914 if_printf(sc->alc_ifp, "watchdog timeout (lost link)\n"); 2915 IFNET_STAT_INC(ifp, oerrors, 1); 2916 alc_init(sc); 2917 return; 2918 } 2919 if_printf(sc->alc_ifp, "watchdog timeout -- resetting\n"); 2920 IFNET_STAT_INC(ifp, oerrors, 1); 2921 alc_init(sc); 2922 if (!ifq_is_empty(&ifp->if_snd)) 2923 if_devstart(ifp); 2924 } 2925 2926 static int 2927 alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr) 2928 { 2929 struct alc_softc *sc; 2930 struct ifreq *ifr; 2931 struct mii_data *mii; 2932 int error, mask; 2933 2934 ASSERT_SERIALIZED(ifp->if_serializer); 2935 2936 sc = ifp->if_softc; 2937 ifr = (struct ifreq *)data; 2938 error = 0; 2939 switch (cmd) { 2940 case SIOCSIFMTU: 2941 if (ifr->ifr_mtu < ETHERMIN || 2942 ifr->ifr_mtu > (sc->alc_ident->max_framelen - 2943 sizeof(struct ether_vlan_header) - ETHER_CRC_LEN) || 2944 ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 && 2945 ifr->ifr_mtu > ETHERMTU)) { 2946 error = EINVAL; 2947 } else if (ifp->if_mtu != ifr->ifr_mtu) { 2948 ifp->if_mtu = ifr->ifr_mtu; 2949 #if 0 2950 /* AR81[3567]x has 13 bits MSS field. 
		 */
			if (ifp->if_mtu > ALC_TSO_MTU &&
			    (ifp->if_capenable & IFCAP_TSO4) != 0) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				ifp->if_hwassist &= ~CSUM_TSO;
			}
#endif
		}
		break;
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Only PROMISC/ALLMULTI changes need a filter
			 * reprogram; anything else while not running
			 * requires a full init.
			 */
			if ((ifp->if_flags & IFF_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->alc_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				alc_rxfilter(sc);
			else if ((ifp->if_flags & IFF_RUNNING) == 0)
				alc_init(sc);
		} else if ((ifp->if_flags & IFF_RUNNING) != 0)
			alc_stop(sc);
		/* Remember flags for the next delta computation. */
		sc->alc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			alc_rxfilter(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->alc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		/* mask holds the capabilities the caller wants toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= ALC_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
		}
#if 0
		/* XXX: WOL */
		if ((mask & IFCAP_WOL_MCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MCAST;
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
#endif
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			alc_rxvlan(sc);
		}
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

		/*
		 * VLAN hardware tagging is required to do checksum
		 * offload or TSO on VLAN interface. Checksum offload
		 * on VLAN interface also requires hardware checksum
		 * offload of parent interface.
		 */
		if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
		// XXX VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/*
 * Program the MAC configuration register to match the speed, duplex
 * and flow-control state resolved by the MII layer.
 */
static void
alc_mac_config(struct alc_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	mii = device_get_softc(sc->alc_miibus);
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	/* Clear the fields we are about to re-derive below. */
	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
	    MAC_CFG_SPEED_MASK);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
	}
	/* Reprogram MAC with resolved speed/duplex.
 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		/* Flow control is only meaningful in full duplex. */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
}

/*
 * Zero the hardware statistics.  Without the SMB bug the DMA'd
 * statistics block is simply marked consumed; with the bug the MIB
 * counter registers are read (and thereby cleared) directly, with the
 * values discarded.
 */
static void
alc_stats_clear(struct alc_softc *sc)
{
	struct smb sb, *smb;
	uint32_t *reg;
	int i;

	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
		    sc->alc_cdata.alc_smb_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		smb = sc->alc_rdata.alc_smb;
		/* Update done, clear. */
		smb->updated = 0;
		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
		    sc->alc_cdata.alc_smb_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	} else {
		/*
		 * Walk the Rx MIB register block; reads clear the
		 * counters, values are intentionally dropped.
		 * The struct smb member order mirrors the register
		 * layout, hence the pointer-walk over &sb.
		 */
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics.
		 */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}
}

/*
 * Fold the hardware statistics (from the DMA'd SMB block, or from the
 * MIB registers on chips with the SMB bug) into the softc accumulators
 * and the ifnet counters.
 */
static void
alc_stats_update(struct alc_softc *sc)
{
	struct alc_hw_stats *stat;
	struct smb sb, *smb;
	struct ifnet *ifp;
	uint32_t *reg;
	int i;

	ifp = sc->alc_ifp;
	stat = &sc->alc_stats;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
		    sc->alc_cdata.alc_smb_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		smb = sc->alc_rdata.alc_smb;
		/* Nothing new from the hardware yet. */
		if (smb->updated == 0)
			return;
	} else {
		smb = &sb;
		/* Read Rx statistics. */
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics. */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}

	/* Rx stats.
 */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_rrs_errs += smb->rx_rrs_errs;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	IFNET_STAT_INC(ifp, opackets, smb->tx_frames);

	/*
	 * Multi-collision frames are counted twice; excess-collision
	 * frames are weighted by the configured retry limit.
	 */
	IFNET_STAT_INC(ifp, collisions, smb->tx_single_colls +
	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT);

	IFNET_STAT_INC(ifp, oerrors,
	    smb->tx_excess_colls + smb->tx_late_colls + smb->tx_underrun);

	IFNET_STAT_INC(ifp, ipackets, smb->rx_frames);

	IFNET_STAT_INC(ifp, ierrors, smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
	    smb->rx_alignerrs);

	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		/* Update done, clear. */
		smb->updated = 0;
		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
		    sc->alc_cdata.alc_smb_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

/*
 * Interrupt handler.  Acknowledges and masks interrupts, services Rx
 * and fatal DMA/TxQ error conditions (the latter by reinitializing the
 * chip), restarts transmission if frames are pending, and finally
 * re-enables interrupts.
 */
static void
alc_intr(void *arg)
{
	struct alc_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	ASSERT_SERIALIZED(ifp->if_serializer);

	status = CSR_READ_4(sc, ALC_INTR_STATUS);
	/* Not our interrupt. */
	if ((status & ALC_INTRS) == 0)
		return;

	/* Acknowledge interrupts and disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & INTR_RX_PKT) {
			/* Nonzero return means the chip needs a reinit. */
			if (alc_rxintr(sc)) {
				alc_init(sc);
				return;
			}
		}
		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
		    INTR_TXQ_TO_RST)) {
			if (status & INTR_DMA_RD_TO_RST) {
				if_printf(ifp,
				    "DMA read error! -- resetting\n");
			}
			if (status & INTR_DMA_WR_TO_RST) {
				if_printf(ifp,
				    "DMA write error! -- resetting\n");
			}
			if (status & INTR_TXQ_TO_RST)
				if_printf(ifp, "TxQ reset! -- resetting\n");
			alc_init(sc);
			return;
		}
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);

		/* Re-enable interrupts */
		CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
	}
}

/*
 * Reclaim Tx descriptors and mbufs for frames the hardware has
 * completed, advancing the software consumer index up to the hardware
 * consumer index (taken from the CMB when usable, otherwise from the
 * mailbox registers).
 */
static void
alc_txeof(struct alc_softc *sc)
{
	struct ifnet *ifp;
	struct alc_txdesc *txd;
	uint32_t cons, prod;
	int prog;

	ifp = sc->alc_ifp;

	if (sc->alc_cdata.alc_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
	    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_POSTWRITE);
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
		bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
		    sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD);
		prod = sc->alc_rdata.alc_cmb->cons;
	} else {
		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
			prod = CSR_READ_2(sc, ALC_MBOX_TD_PRI0_CONS_IDX);
		else {
			prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
			/* Assume we're using normal Tx priority queue. */
			prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
			    MBOX_TD_CONS_LO_IDX_SHIFT;
		}
	}
	cons = sc->alc_cdata.alc_tx_cons;
	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; cons != prod; prog++,
	    ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
		if (sc->alc_cdata.alc_tx_cnt <= 0)
			break;
		/*
		 * NOTE(review): prog is also incremented in the loop
		 * header and is never read afterwards; this second
		 * increment is vestigial.
		 */
		prog++;
		ifq_clr_oactive(&ifp->if_snd);
		sc->alc_cdata.alc_tx_cnt--;
		txd = &sc->alc_cdata.alc_txdesc[cons];
		if (txd->tx_m != NULL) {
			/* Reclaim transmitted mbufs.
 */
			bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
		    sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_PREREAD);
	sc->alc_cdata.alc_tx_cons = cons;
	/*
	 * Unarm watchdog timer only when there is no pending
	 * frames in Tx queue.
	 */
	if (sc->alc_cdata.alc_tx_cnt == 0)
		sc->alc_watchdog_timer = 0;
}

/*
 * Allocate and DMA-map a fresh Rx cluster mbuf for descriptor 'rxd',
 * replacing (and unloading) the buffer currently attached to it.
 * Returns 0 on success or ENOBUFS if the allocation or mapping failed,
 * in which case the descriptor is left untouched.
 */
static int
alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd, boolean_t wait)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;
	int error;

	m = m_getcl(wait ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
#ifdef foo
	/* Hardware require 4 bytes align */
	m_adj(m, ETHER_ALIGN);
#endif

	/* Load into the spare map first so the old mapping stays valid. */
	error = bus_dmamap_load_mbuf_segment(
	    sc->alc_cdata.alc_rx_tag,
	    sc->alc_cdata.alc_rx_sparemap,
	    m, segs, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the freshly loaded spare map into the descriptor. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
	sc->alc_cdata.alc_rx_sparemap = map;
	bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->addr = htole64(segs[0].ds_addr);
	return (0);
}

/*
 * Drain the Rx return ring, handing each completed (possibly
 * multi-segment) frame to alc_rxeof().  Returns 0 normally, or EIO
 * when the hardware reports a zero segment count, in which case the
 * caller is expected to reset the chip.
 */
static int
alc_rxintr(struct alc_softc *sc)
{
	struct ifnet *ifp;
	struct rx_rdesc *rrd;
	uint32_t nsegs, status;
	int rr_cons, prog;

	bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
	    sc->alc_cdata.alc_rr_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
	    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_POSTWRITE);
	rr_cons = sc->alc_cdata.alc_rr_cons;
	ifp = sc->alc_ifp;
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) {
		rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
		status = le32toh(rrd->status);
		if ((status & RRD_VALID) == 0)
			break;
		nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
		if (nsegs == 0) {
			/* This should not happen! */
			device_printf(sc->alc_dev,
			    "unexpected segment count -- resetting\n");
			return (EIO);
		}
		alc_rxeof(sc, rrd);
		/* Clear Rx return status. */
		rrd->status = 0;
		ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
		sc->alc_cdata.alc_rx_cons += nsegs;
		sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
		prog += nsegs;
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->alc_cdata.alc_rr_cons = rr_cons;
		/* Sync Rx return descriptors. */
		bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
		    sc->alc_cdata.alc_rr_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Sync updated Rx descriptors such that controller see
		 * modified buffer addresses.
		 */
		bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
		    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
		/*
		 * Let controller know availability of new Rx buffers.
		 * Since alc(4) use RXQ_CFG_RD_BURST_DEFAULT descriptors
		 * it may be possible to update ALC_MBOX_RD0_PROD_IDX
		 * only when Rx buffer pre-fetching is required. In
		 * addition we already set ALC_RX_RD_FREE_THRESH to
		 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However
		 * it still seems that pre-fetching needs more
		 * experimentation.
		 */
		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
			CSR_WRITE_2(sc, ALC_MBOX_RD0_PROD_IDX,
			    (uint16_t)sc->alc_cdata.alc_rx_cons);
		else
			CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
			    sc->alc_cdata.alc_rx_cons);
	}

	return 0;
}

/* Receive a frame. */
static void
alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
{
	struct alc_rxdesc *rxd;
	struct ifnet *ifp;
	struct mbuf *mp, *m;
	uint32_t rdinfo, status, vtag;
	int count, nsegs, rx_cons;

	ifp = sc->alc_ifp;
	status = le32toh(rrd->status);
	rdinfo = le32toh(rrd->rdinfo);
	rx_cons = RRD_RD_IDX(rdinfo);
	nsegs = RRD_RD_CNT(rdinfo);

	sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
	if ((status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) != 0) {
		/*
		 * We want to pass the following frames to upper
		 * layer regardless of error status of Rx return
		 * ring.
		 *
		 * o IP/TCP/UDP checksum is bad.
		 * o frame length and protocol specific length
		 *   does not match.
		 *
		 * Force network stack compute checksum for
		 * errored frames.
		 */
		status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
		if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
		    RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
			return;
	}

	for (count = 0; count < nsegs; count++,
	    ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
		mp = rxd->rx_m;
		/* Add a new receive buffer to the ring. */
		if (alc_newbuf(sc, rxd, FALSE) != 0) {
			IFNET_STAT_INC(ifp, iqdrops, 1);
			/* Reuse Rx buffers. */
			if (sc->alc_cdata.alc_rxhead != NULL)
				m_freem(sc->alc_cdata.alc_rxhead);
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = sc->alc_buf_size;

		/* Chain received mbufs.
 */
		if (sc->alc_cdata.alc_rxhead == NULL) {
			sc->alc_cdata.alc_rxhead = mp;
			sc->alc_cdata.alc_rxtail = mp;
		} else {
			/*mp->m_flags &= ~M_PKTHDR;*/
			sc->alc_cdata.alc_rxprev_tail =
			    sc->alc_cdata.alc_rxtail;
			sc->alc_cdata.alc_rxtail->m_next = mp;
			sc->alc_cdata.alc_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->alc_cdata.alc_rxhead;
			/*m->m_flags |= M_PKTHDR;*/

			/*
			 * It seems that L1C/L2C controller has no way
			 * to tell hardware to strip CRC bytes.
			 */
			m->m_pkthdr.len =
			    sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Set last mbuf size. */
				mp->m_len = sc->alc_cdata.alc_rxlen -
				    (nsegs - 1) * sc->alc_buf_size;
				/* Remove the CRC bytes in chained mbufs. */
				if (mp->m_len <= ETHER_CRC_LEN) {
					/*
					 * The whole trailing mbuf is CRC;
					 * drop it and trim the remainder
					 * from the previous tail.
					 */
					sc->alc_cdata.alc_rxtail =
					    sc->alc_cdata.alc_rxprev_tail;
					sc->alc_cdata.alc_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->alc_cdata.alc_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			} else
				m->m_len = m->m_pkthdr.len;
			m->m_pkthdr.rcvif = ifp;
			/*
			 * Due to hardware bugs, Rx checksum offloading
			 * was intentionally disabled.
			 */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
			    (status & RRD_VLAN_TAG) != 0) {
				vtag = RRD_VLAN(le32toh(rrd->vtag));
				m->m_pkthdr.ether_vlantag = ntohs(vtag);
				m->m_flags |= M_VLANTAG;
			}

			/* Pass it on. */
			ifp->if_input(ifp, m, NULL, -1);
		}
	}
	/* Reset mbuf chains. */
	ALC_RXCHAIN_RESET(sc);
}

/*
 * Periodic (once per hz) callout: drives the MII state machine,
 * accumulates hardware statistics, reclaims completed Tx buffers and
 * runs the Tx watchdog, then reschedules itself.
 */
static void
alc_tick(void *arg)
{
	struct alc_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	lwkt_serialize_enter(ifp->if_serializer);

	mii = device_get_softc(sc->alc_miibus);
	mii_tick(mii);
	alc_stats_update(sc);
	/*
	 * alc(4) does not rely on Tx completion interrupts to reclaim
	 * transferred buffers. Instead Tx completion interrupts are
	 * used to hint for scheduling Tx task. So it's necessary to
	 * release transmitted buffers by kicking Tx completion
	 * handler. This limits the maximum reclamation delay to a hz.
	 */
	alc_txeof(sc);
	alc_watchdog(sc);
	callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);

#if 0
	/* poll for debugging */
	alc_intr(sc);
#endif

	lwkt_serialize_exit(ifp->if_serializer);
}

/*
 * Reset the internal 25MHz oscillator on AR816x parts, with the
 * register sequence differing between revision B0+ and earlier
 * revisions.  Called from alc_reset().
 */
static void
alc_osc_reset(struct alc_softc *sc)
{
	uint32_t reg;

	reg = CSR_READ_4(sc, ALC_MISC3);
	reg &= ~MISC3_25M_BY_SW;
	reg |= MISC3_25M_NOTO_INTNL;
	CSR_WRITE_4(sc, ALC_MISC3, reg);

	reg = CSR_READ_4(sc, ALC_MISC);
	if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) {
		/*
		 * Restore over-current protection default value.
		 * This value could be reset by MAC reset.
		 */
		reg &= ~MISC_PSW_OCP_MASK;
		reg |= (MISC_PSW_OCP_DEFAULT << MISC_PSW_OCP_SHIFT);
		reg &= ~MISC_INTNLOSC_OPEN;
		CSR_WRITE_4(sc, ALC_MISC, reg);
		CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
		/* Restart the calibration by toggling CALB_START. */
		reg = CSR_READ_4(sc, ALC_MISC2);
		reg &= ~MISC2_CALB_START;
		CSR_WRITE_4(sc, ALC_MISC2, reg);
		CSR_WRITE_4(sc, ALC_MISC2, reg | MISC2_CALB_START);

	} else {
		reg &= ~MISC_INTNLOSC_OPEN;
		/* Disable isolate for revision A devices.
 */
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
			reg &= ~MISC_ISO_ENB;
		CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
		CSR_WRITE_4(sc, ALC_MISC, reg);
	}

	DELAY(20);
}

/*
 * Perform a full MAC reset: apply AR816x-specific pre-reset
 * workarounds, assert MASTER_RESET, then poll until the reset bit
 * clears and the Rx/Tx MAC and queues report idle.  Finishes with
 * AR816x oscillator/clock restoration and SERDES clock slow-down for
 * the chips that need it.
 */
static void
alc_reset(struct alc_softc *sc)
{
	uint32_t pmcfg, reg;
	int i;

	pmcfg = 0;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		/* Reset workaround. */
		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 1);
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
		    (sc->alc_rev & 0x01) != 0) {
			/* Disable L0s/L1s before reset. */
			pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
			if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB))
			    != 0) {
				pmcfg &= ~(PM_CFG_ASPM_L0S_ENB |
				    PM_CFG_ASPM_L1_ENB);
				CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
			}
		}
	}
	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
	reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		/* Wait for the workaround mailbox write to be consumed. */
		for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
			DELAY(10);
			if (CSR_READ_4(sc, ALC_MBOX_RD0_PROD_IDX) == 0)
				break;
		}
		if (i == 0)
			device_printf(sc->alc_dev, "MAC reset timeout!\n");
	}

	/* Wait for the self-clearing MASTER_RESET bit. */
	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->alc_dev, "master reset timeout!\n");

	/* Wait for the MAC and queue engines to go idle. */
	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC |
		    IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
		    (sc->alc_rev & 0x01) != 0) {
			reg = CSR_READ_4(sc, ALC_MASTER_CFG);
			reg |= MASTER_CLK_SEL_DIS;
			CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
			/* Restore L0s/L1s config. */
			if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB))
			    != 0)
				CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
		}

		alc_osc_reset(sc);
		reg = CSR_READ_4(sc, ALC_MISC3);
		reg &= ~MISC3_25M_BY_SW;
		reg |= MISC3_25M_NOTO_INTNL;
		CSR_WRITE_4(sc, ALC_MISC3, reg);
		reg = CSR_READ_4(sc, ALC_MISC);
		reg &= ~MISC_INTNLOSC_OPEN;
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
			reg &= ~MISC_ISO_ENB;
		CSR_WRITE_4(sc, ALC_MISC, reg);
		DELAY(20);
	}
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2)
		CSR_WRITE_4(sc, ALC_SERDES_LOCK,
		    CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN |
		    SERDES_PHY_CLK_SLOWDOWN);
}

/*
 * (Re)initialize the hardware: stop and reset the chip, set up all
 * descriptor rings, then program the MAC/DMA configuration registers.
 */
static void
alc_init(void *xsc)
{
	struct alc_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, rxf_hi, rxf_lo;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc->alc_miibus);

	/*
	 * Cancel any pending I/O.
	 */
	alc_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	alc_reset(sc);

	/* Initialize Rx descriptors. */
	if (alc_init_rx_ring(sc) != 0) {
		device_printf(sc->alc_dev, "no memory for Rx buffers.\n");
		alc_stop(sc);
		return;
	}
	alc_init_rr_ring(sc);
	alc_init_tx_ring(sc);
	alc_init_cmb(sc);
	alc_init_smb(sc);

	/* Enable all clocks.
*/ 3740 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 3741 CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, CLK_GATING_DMAW_ENB | 3742 CLK_GATING_DMAR_ENB | CLK_GATING_TXQ_ENB | 3743 CLK_GATING_RXQ_ENB | CLK_GATING_TXMAC_ENB | 3744 CLK_GATING_RXMAC_ENB); 3745 if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) 3746 CSR_WRITE_4(sc, ALC_IDLE_DECISN_TIMER, 3747 IDLE_DECISN_TIMER_DEFAULT_1MS); 3748 } else 3749 CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0); 3750 3751 /* Reprogram the station address. */ 3752 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 3753 CSR_WRITE_4(sc, ALC_PAR0, 3754 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 3755 CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]); 3756 /* 3757 * Clear WOL status and disable all WOL feature as WOL 3758 * would interfere Rx operation under normal environments. 3759 */ 3760 CSR_READ_4(sc, ALC_WOL_CFG); 3761 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 3762 /* Set Tx descriptor base addresses. */ 3763 paddr = sc->alc_rdata.alc_tx_ring_paddr; 3764 CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 3765 CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 3766 /* We don't use high priority ring. */ 3767 CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0); 3768 /* Set Tx descriptor counter. */ 3769 CSR_WRITE_4(sc, ALC_TD_RING_CNT, 3770 (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK); 3771 /* Set Rx descriptor base addresses. */ 3772 paddr = sc->alc_rdata.alc_rx_ring_paddr; 3773 CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 3774 CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 3775 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 3776 /* We use one Rx ring. */ 3777 CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0); 3778 CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0); 3779 CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0); 3780 } 3781 /* Set Rx descriptor counter. 
*/ 3782 CSR_WRITE_4(sc, ALC_RD_RING_CNT, 3783 (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK); 3784 3785 /* 3786 * Let hardware split jumbo frames into alc_max_buf_sized chunks. 3787 * if it do not fit the buffer size. Rx return descriptor holds 3788 * a counter that indicates how many fragments were made by the 3789 * hardware. The buffer size should be multiple of 8 bytes. 3790 * Since hardware has limit on the size of buffer size, always 3791 * use the maximum value. 3792 * For strict-alignment architectures make sure to reduce buffer 3793 * size by 8 bytes to make room for alignment fixup. 3794 */ 3795 sc->alc_buf_size = RX_BUF_SIZE_MAX; 3796 CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size); 3797 3798 paddr = sc->alc_rdata.alc_rr_ring_paddr; 3799 /* Set Rx return descriptor base addresses. */ 3800 CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 3801 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 3802 /* We use one Rx return ring. */ 3803 CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0); 3804 CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0); 3805 CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0); 3806 } 3807 /* Set Rx return descriptor counter. */ 3808 CSR_WRITE_4(sc, ALC_RRD_RING_CNT, 3809 (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK); 3810 paddr = sc->alc_rdata.alc_cmb_paddr; 3811 CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr)); 3812 paddr = sc->alc_rdata.alc_smb_paddr; 3813 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 3814 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr)); 3815 3816 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) { 3817 /* Reconfigure SRAM - Vendor magic. 
*/ 3818 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0); 3819 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100); 3820 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000); 3821 CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0); 3822 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0); 3823 CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0); 3824 CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000); 3825 CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000); 3826 } 3827 3828 /* Tell hardware that we're ready to load DMA blocks. */ 3829 CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD); 3830 3831 /* Configure interrupt moderation timer. */ 3832 reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT; 3833 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) 3834 reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT; 3835 CSR_WRITE_4(sc, ALC_IM_TIMER, reg); 3836 /* 3837 * We don't want to automatic interrupt clear as task queue 3838 * for the interrupt should know interrupt status. 3839 */ 3840 reg = CSR_READ_4(sc, ALC_MASTER_CFG); 3841 reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB); 3842 reg |= MASTER_SA_TIMER_ENB; 3843 if (ALC_USECS(sc->alc_int_rx_mod) != 0) 3844 reg |= MASTER_IM_RX_TIMER_ENB; 3845 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0 && 3846 ALC_USECS(sc->alc_int_tx_mod) != 0) 3847 reg |= MASTER_IM_TX_TIMER_ENB; 3848 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 3849 /* 3850 * Disable interrupt re-trigger timer. We don't want automatic 3851 * re-triggering of un-ACKed interrupts. 3852 */ 3853 CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0)); 3854 /* Configure CMB. 
 */
	/*
	 * Coalescing Message Block (CMB): on AR816x parts the Tx
	 * descriptor threshold and Tx timer are programmed from the
	 * tunable interrupt moderation value; on older parts a fixed
	 * threshold/5ms timer is used unless the chip has the CMB
	 * erratum, in which case the timer is simply disabled.
	 */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, ALC_TX_RING_CNT / 3);
		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER,
		    ALC_USECS(sc->alc_int_tx_mod));
	} else {
		if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
			CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
		} else
			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
	}

	/*
	 * Hardware can be configured to issue SMB interrupt based
	 * on programmed interval. Since there is a callout that is
	 * invoked for every hz in driver we use that instead of
	 * relying on periodic SMB interrupt.
	 */
	CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
	/* Clear MAC statistics. */
	alc_stats_clear(sc);

	/*
	 * Always use maximum frame size that controller can support.
	 * Otherwise received frames that has larger frame length
	 * than alc(4) MTU would be silently dropped in hardware. This
	 * would make path-MTU discovery hard as sender wouldn't get
	 * any responses from receiver. alc(4) supports
	 * multi-fragmented frames on Rx path so it has no issue on
	 * assembling fragmented frames. Using maximum frame size also
	 * removes the need to reinitialize hardware when interface
	 * MTU configuration was changed.
	 *
	 * Be conservative in what you do, be liberal in what you
	 * accept from others - RFC 793.
	 */
	CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* Disable header split(?) */
		CSR_WRITE_4(sc, ALC_HDS_CFG, 0);

		/* Configure IPG/IFG parameters. */
		CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
		    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) &
		    IPG_IFG_IPGT_MASK) |
		    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) &
		    IPG_IFG_MIFG_MASK) |
		    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) &
		    IPG_IFG_IPG1_MASK) |
		    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) &
		    IPG_IFG_IPG2_MASK));
		/* Set parameters for half-duplex media. */
		CSR_WRITE_4(sc, ALC_HDPX_CFG,
		    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
		    HDPX_CFG_LCOL_MASK) |
		    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
		    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
		    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
		    HDPX_CFG_ABEBT_MASK) |
		    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
		    HDPX_CFG_JAMIPG_MASK));
	}

	/*
	 * Set TSO/checksum offload threshold. For frames that is
	 * larger than this threshold, hardware wouldn't do
	 * TSO/checksum offloading.
	 */
	reg = (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
	    TSO_OFFLOAD_THRESH_MASK;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		reg |= TSO_OFFLOAD_ERRLGPKT_DROP_ENB;
	CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH, reg);
	/* Configure TxQ. */
	reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
	    TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
	/* AR8152 B/B2 use a halved Tx FIFO burst value. */
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		reg >>= 1;
	}
	reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
	    TXQ_CFG_TD_BURST_MASK;
	reg |= TXQ_CFG_IP_OPTION_ENB | TXQ_CFG_8023_ENB;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		/* Program per-queue Tx burst and WRR priorities. */
		reg = (TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q1_BURST_SHIFT |
		    TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q2_BURST_SHIFT |
		    TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q3_BURST_SHIFT |
		    HQTD_CFG_BURST_ENB);
		CSR_WRITE_4(sc, ALC_HQTD_CFG, reg);
		reg = WRR_PRI_RESTRICT_NONE;
		reg |= (WRR_PRI_DEFAULT << WRR_PRI0_SHIFT |
		    WRR_PRI_DEFAULT << WRR_PRI1_SHIFT |
		    WRR_PRI_DEFAULT << WRR_PRI2_SHIFT |
		    WRR_PRI_DEFAULT << WRR_PRI3_SHIFT);
		CSR_WRITE_4(sc, ALC_WRR, reg);
	} else {
		/* Configure Rx free descriptor pre-fetching. */
		CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
		    ((RX_RD_FREE_THRESH_HI_DEFAULT <<
		    RX_RD_FREE_THRESH_HI_SHIFT) & RX_RD_FREE_THRESH_HI_MASK) |
		    ((RX_RD_FREE_THRESH_LO_DEFAULT <<
		    RX_RD_FREE_THRESH_LO_SHIFT) & RX_RD_FREE_THRESH_LO_MASK));
	}

	/*
	 * Configure flow control parameters.
	 * XON  : 80% of Rx FIFO
	 * XOFF : 30% of Rx FIFO
	 *
	 * NOTE(review): the AR816x branch derives the LO threshold from
	 * the SRAM Rx FIFO length (in 8-byte units) minus a reserved
	 * region, not the 80%/30% split used by AR8131/AR8132 below.
	 */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
		reg &= SRAM_RX_FIFO_LEN_MASK;
		reg *= 8;	/* FIFO length is in 8-byte units. */
		if (reg > 8 * 1024)
			reg -= RX_FIFO_PAUSE_816X_RSVD;
		else
			reg -= RX_BUF_SIZE_MAX;
		reg /= 8;
		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
		    ((reg << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    (((RX_FIFO_PAUSE_816X_RSVD / 8) <<
		    RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_HI_MASK));
	} else if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132) {
		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
		rxf_hi = (reg * 8) / 10;
		rxf_lo = (reg * 3) / 10;
		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_HI_MASK));
	}

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* Disable RSS until I understand L1C/L2C's RSS logic. */
		CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
		CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
	}

	/* Configure RxQ. */
	reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK;
	reg |= RXQ_CFG_RSS_MODE_DIS;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		reg |= (RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT <<
		    RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT) &
		    RXQ_CFG_816X_IDT_TBL_SIZE_MASK;
		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
			reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
	} else {
		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 &&
		    sc->alc_ident->deviceid != DEVICEID_ATHEROS_AR8151_V2)
			reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
	}
	CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);

	/* Configure DMA parameters. */
	reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
	reg |= sc->alc_rcb;
	/* Only enable CMB/SMB DMA on chips without the respective errata. */
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		reg |= DMA_CFG_CMB_ENB;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
		reg |= DMA_CFG_SMB_ENB;
	else
		reg |= DMA_CFG_SMB_DIS;
	reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
	    DMA_CFG_RD_BURST_SHIFT;
	reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
	    DMA_CFG_WR_BURST_SHIFT;
	reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
	    DMA_CFG_RD_DELAY_CNT_MASK;
	reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
	    DMA_CFG_WR_DELAY_CNT_MASK;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		/* Read-channel selection depends on AR816x silicon rev. */
		switch (AR816X_REV(sc->alc_rev)) {
		case AR816X_REV_A0:
		case AR816X_REV_A1:
			reg |= DMA_CFG_RD_CHNL_SEL_2;
			break;
		case AR816X_REV_B0:
			/* FALLTHROUGH */
		default:
			reg |= DMA_CFG_RD_CHNL_SEL_4;
			break;
		}
	}
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 *  Actual reconfiguration of MAC for resolved speed/duplex
	 *  is followed after detection of link establishment.
	 *  AR813x/AR815x always does checksum computation regardless
	 *  of MAC_CFG_RXCSUM_ENB bit. Also the controller is known to
	 *  have bug in protocol field in Rx return structure so
	 *  these controllers can't handle fragmented frames. Disable
	 *  Rx checksum offloading until there is a newer controller
	 *  that has sane implementation.
	 */
	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
	}
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
		reg |= MAC_CFG_SPEED_10_100;
	else
		reg |= MAC_CFG_SPEED_1000;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);

	/* Set up the receive filter. */
	alc_rxfilter(sc);
	alc_rxvlan(sc);

	/* Acknowledge all pending interrupts and clear it. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	sc->alc_flags &= ~ALC_FLAG_LINK;
	/* Switch to the current media. */
	/*mii_mediachg(mii);*/
	alc_mediachange_locked(sc);

	callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);

}

/*
 * Stop the controller: mark the interface down, quiesce DMA and the
 * MAC/queue engines, then unload and free any mbufs still attached to
 * the Tx/Rx descriptor rings.  Caller must hold the ifnet serializer.
 */
static void
alc_stop(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	uint32_t reg;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	sc->alc_flags &= ~ALC_FLAG_LINK;
	callout_stop(&sc->alc_tick_ch);
	sc->alc_watchdog_timer = 0;
	/* Snapshot MAC counters before the hardware is reset. */
	alc_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	/* Disable DMA. */
	reg = CSR_READ_4(sc, ALC_DMA_CFG);
	reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
	reg |= DMA_CFG_SMB_DIS;
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
	DELAY(1000);
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);
	/* Disable interrupts which might be touched in taskq handler. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);

	/* Disable L0s/L1s */
	alc_aspm(sc, 0, IFM_UNKNOWN);
	/* Reclaim Rx buffers that have been processed. */
	if (sc->alc_cdata.alc_rxhead != NULL)
		m_freem(sc->alc_cdata.alc_rxhead);
	ALC_RXCHAIN_RESET(sc);
	/*
	 * Free Tx/Rx mbufs still in the queues.
	 */
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->alc_cdata.alc_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->alc_cdata.alc_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

/*
 * Disable the Rx/Tx MAC engines (after stopping the queues) and poll
 * the idle-status register until both report idle, or time out.
 */
static void
alc_stop_mac(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	alc_stop_queue(sc);
	/* Disable Rx/Tx MAC. */
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
	for (i = ALC_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->alc_dev,
		    "could not disable Rx/Tx MAC(0x%08x)!\n", reg);
}

/*
 * Enable the Rx and Tx DMA queues.  On pre-AR816x chips only Rx
 * queue 0 is used; AR816x sets the per-queue enable bit instead.
 */
static void
alc_start_queue(struct alc_softc *sc)
{
	/* Cumulative per-count Rx queue enable masks (index = #queues). */
	uint32_t qcfg[] = {
		0,
		RXQ_CFG_QUEUE0_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
		RXQ_CFG_ENB
	};
	uint32_t cfg;

	/* Enable RxQ. */
	cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		cfg &= ~RXQ_CFG_ENB;
		cfg |= qcfg[1];
	} else
		cfg |= RXQ_CFG_QUEUE0_ENB;
	CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
	/* Enable TxQ. */
	cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
	cfg |= TXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
}

/*
 * Disable the Rx and Tx DMA queues and wait for the queue engines to
 * report idle, warning on timeout.
 */
static void
alc_stop_queue(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	/* Disable RxQ. */
	reg = CSR_READ_4(sc, ALC_RXQ_CFG);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		if ((reg & RXQ_CFG_ENB) != 0) {
			reg &= ~RXQ_CFG_ENB;
			CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
		}
	} else {
		if ((reg & RXQ_CFG_QUEUE0_ENB) != 0) {
			reg &= ~RXQ_CFG_QUEUE0_ENB;
			CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
		}
	}
	/* Disable TxQ. */
	reg = CSR_READ_4(sc, ALC_TXQ_CFG);
	if ((reg & TXQ_CFG_ENB) != 0) {
		reg &= ~TXQ_CFG_ENB;
		CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
	}
	DELAY(40);
	for (i = ALC_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->alc_dev,
		    "could not disable RxQ/TxQ (0x%08x)!\n", reg);
}

/*
 * Reset the Tx descriptor ring to an empty state: clear producer/
 * consumer indices, zero the descriptors and detach any mbuf pointers.
 */
static void
alc_init_tx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	struct alc_txdesc *txd;
	int i;

	sc->alc_cdata.alc_tx_prod = 0;
	sc->alc_cdata.alc_tx_cons = 0;
	sc->alc_cdata.alc_tx_cnt = 0;

	rd = &sc->alc_rdata;
	bzero(rd->alc_tx_ring, ALC_TX_RING_SZ);
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
	    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
}

/*
 * Initialize the Rx descriptor ring, attaching a fresh mbuf to every
 * slot.  Returns 0 on success or ENOBUFS if buffer allocation fails.
 */
static int
alc_init_rx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	struct alc_rxdesc *rxd;
	int i;

	sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
	rd = &sc->alc_rdata;
	bzero(rd->alc_rx_ring, ALC_RX_RING_SZ);
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->alc_rx_ring[i];
		if (alc_newbuf(sc, rxd, TRUE) != 0)
			return (ENOBUFS);
	}

	/*
	 * Since controller does not update Rx descriptors, driver
	 * does have to read Rx descriptors back so BUS_DMASYNC_PREWRITE
	 * is enough to ensure coherence.
	 */
	bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
	    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
	/* Let controller know availability of new Rx buffers. */
	CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);

	return (0);
}

/*
 * Reset the Rx return ring (where the controller posts completed
 * receive descriptors) and the partial-frame reassembly chain.
 */
static void
alc_init_rr_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;

	sc->alc_cdata.alc_rr_cons = 0;
	ALC_RXCHAIN_RESET(sc);

	rd = &sc->alc_rdata;
	bzero(rd->alc_rr_ring, ALC_RR_RING_SZ);
	bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
	    sc->alc_cdata.alc_rr_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/* Zero the Coalescing Message Block shared with the controller. */
static void
alc_init_cmb(struct alc_softc *sc)
{
	struct alc_ring_data *rd;

	rd = &sc->alc_rdata;
	bzero(rd->alc_cmb, ALC_CMB_SZ);
	bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/* Zero the Statistics Message Block shared with the controller. */
static void
alc_init_smb(struct alc_softc *sc)
{
	struct alc_ring_data *rd;

	rd = &sc->alc_rdata;
	bzero(rd->alc_smb, ALC_SMB_SZ);
	bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Program hardware VLAN tag stripping to match the interface's
 * IFCAP_VLAN_HWTAGGING capability setting.
 */
static void
alc_rxvlan(struct alc_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;

	ifp = sc->alc_ifp;
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	else
		reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
}

/*
 * Program the receive filter: broadcast/promiscuous/allmulti mode bits
 * and the 64-bit multicast hash table built from the interface's
 * multicast address list (CRC32, top 6 bits select the hash bit).
 */
static void
alc_rxfilter(struct alc_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	ifp = sc->alc_ifp;

	bzero(mchash, sizeof(mchash));
	rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxcfg |= MAC_CFG_BCAST;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		/* Promisc/allmulti: accept everything, hash table all-ones. */
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxcfg |= MAC_CFG_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			rxcfg |= MAC_CFG_ALLMULTI;
		mchash[0] = 0xFFFFFFFF;
		mchash[1] = 0xFFFFFFFF;
		goto chipit;
	}

#if 0
	/* XXX */
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &sc->alc_ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		/* CRC bit 31 picks the word, bits 30-26 the bit within it. */
		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
	}
#if 0
	/* XXX */
	if_maddr_runlock(ifp);
#endif

chipit:
	CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
	CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
	CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
}

/* Sysctl handler: clamp Rx processing limit to [ALC_PROC_MIN, ALC_PROC_MAX]. */
static int
sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    ALC_PROC_MIN, ALC_PROC_MAX));
}

/*
 * Sysctl handler: clamp interrupt moderation timer to
 * [ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX].
 */
static int
sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX));
}