1 /*- 2 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * $FreeBSD: src/sys/dev/alc/if_alc.c,v 1.6 2009/09/29 23:03:16 yongari Exp $ 28 */ 29 30 /* Driver for Atheros AR8131/AR8132 PCIe Ethernet. 
*/ 31 32 #include <sys/param.h> 33 #include <sys/bitops.h> 34 #include <sys/endian.h> 35 #include <sys/kernel.h> 36 #include <sys/bus.h> 37 #include <sys/interrupt.h> 38 #include <sys/malloc.h> 39 #include <sys/proc.h> 40 #include <sys/rman.h> 41 #include <sys/serialize.h> 42 #include <sys/socket.h> 43 #include <sys/sockio.h> 44 #include <sys/sysctl.h> 45 #include <sys/in_cksum.h> 46 47 #include <net/ethernet.h> 48 #include <net/if.h> 49 #include <net/bpf.h> 50 #include <net/if_arp.h> 51 #include <net/if_dl.h> 52 #include <net/if_media.h> 53 #include <net/ifq_var.h> 54 #include <net/vlan/if_vlan_var.h> 55 #include <net/vlan/if_vlan_ether.h> 56 57 #include <netinet/ip.h> 58 #include <netinet/tcp.h> 59 60 #include <dev/netif/mii_layer/mii.h> 61 #include <dev/netif/mii_layer/miivar.h> 62 63 #include <bus/pci/pcireg.h> 64 #include <bus/pci/pcivar.h> 65 #include "pcidevs.h" 66 67 #include <dev/netif/alc/if_alcreg.h> 68 #include <dev/netif/alc/if_alcvar.h> 69 70 /* "device miibus" required. See GENERIC if you get errors here. */ 71 #include "miibus_if.h" 72 73 #undef ALC_USE_CUSTOM_CSUM 74 #ifdef ALC_USE_CUSTOM_CSUM 75 #define ALC_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 76 #else 77 #define ALC_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 78 #endif 79 80 #define ALC_LOCK(sc) 81 #define ALC_UNLOCK(sc) 82 #define ALC_LOCK_ASSERT(sc) 83 84 #define PCIER_LINK_CAP PCIER_LINKCAP 85 #define PCIEM_LINK_CAP_ASPM PCIEM_LNKCAP_ASPM_MASK 86 #define PCIER_LINK_CTL PCIER_LINKCTRL 87 #define PCIEM_LINK_CTL_RCB PCIEM_LNKCTL_RCB 88 #define PCIEM_LINK_CTL_ASPMC PCIEM_LNKCTL_ASPM_MASK 89 #define PCIEM_LINK_CTL_ASPMC_L0S PCIEM_LNKCTL_ASPM_L0S 90 #define PCIEM_LINK_CTL_ASPMC_L1 PCIEM_LNKCTL_ASPM_L1 91 #define PCIEM_LINK_CTL_EXTENDED_SYNC PCIEM_LNKCTL_EXTENDED_SYNC 92 #define PCIER_DEVICE_CTL PCIER_DEVCTRL 93 #define PCIEM_CTL_MAX_READ_REQUEST PCIEM_DEVCTL_MAX_READRQ_MASK 94 #define PCIEM_CTL_MAX_PAYLOAD PCIEM_DEVCTL_MAX_PAYLOAD_MASK 95 96 /* Tunables. 
 */
static int alc_msi_enable = 1;
TUNABLE_INT("hw.alc.msi.enable", &alc_msi_enable);

/*
 * Devices supported by this driver.
 * Third field is the controller's maximum frame size in bytes.
 */

static struct alc_ident alc_ident_table[] = {
    { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8131, 9 * 1024,
        "Atheros AR8131 PCIe Gigabit Ethernet" },
    { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8132, 9 * 1024,
        "Atheros AR8132 PCIe Fast Ethernet" },
    { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151, 6 * 1024,
        "Atheros AR8151 v1.0 PCIe Gigabit Ethernet" },
    { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151_V2, 6 * 1024,
        "Atheros AR8151 v2.0 PCIe Gigabit Ethernet" },
    { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B, 6 * 1024,
        "Atheros AR8152 v1.1 PCIe Fast Ethernet" },
    { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B2, 6 * 1024,
        "Atheros AR8152 v2.0 PCIe Fast Ethernet" },
    { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8161, 9 * 1024,
        "Atheros AR8161 PCIe Gigabit Ethernet" },
    { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8162, 9 * 1024,
        "Atheros AR8162 PCIe Fast Ethernet" },
    { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8171, 9 * 1024,
        "Atheros AR8171 PCIe Gigabit Ethernet" },
    { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8172, 9 * 1024,
        "Atheros AR8172 PCIe Fast Ethernet" },
    { VENDORID_ATHEROS, DEVICEID_ATHEROS_E2200, 9 * 1024,
        "Killer E2200 Gigabit Ethernet" },
    { VENDORID_ATHEROS, DEVICEID_ATHEROS_E2400, 9 * 1024,
        "Killer E2400 Gigabit Ethernet" },
    { VENDORID_ATHEROS, DEVICEID_ATHEROS_E2500, 9 * 1024,
        "Killer E2500 Gigabit Ethernet" },
    { 0, 0, 0, NULL}    /* Sentinel: terminates alc_find_ident() scan. */
};

/* Function prototypes. */
static int  alc_attach(device_t);
static int  alc_probe(device_t);
static int  alc_detach(device_t);
static int  alc_shutdown(device_t);
static int  alc_suspend(device_t);
static int  alc_resume(device_t);
static int  alc_miibus_readreg(device_t, int, int);
static void alc_miibus_statchg(device_t);
static int  alc_miibus_writereg(device_t, int, int, int);
static uint32_t alc_miidbg_readreg(struct alc_softc *, int);
static uint32_t alc_miidbg_writereg(struct alc_softc *, int, int);
static uint32_t alc_miiext_readreg(struct alc_softc *, int, int);
static uint32_t alc_miiext_writereg(struct alc_softc *, int, int, int);
static void alc_init(void *);
static void alc_start(struct ifnet *, struct ifaltq_subque *);
static void alc_watchdog(struct alc_softc *);
static int  alc_mediachange(struct ifnet *);
static int  alc_mediachange_locked(struct alc_softc *);
static void alc_mediastatus(struct ifnet *, struct ifmediareq *);
static int  alc_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);

static void alc_aspm(struct alc_softc *, int, int);
static void alc_aspm_813x(struct alc_softc *, int);
static void alc_aspm_816x(struct alc_softc *, int);
#ifdef foo
static int  alc_check_boundary(struct alc_softc *);
#endif
static void alc_config_msi(struct alc_softc *);
static void alc_disable_l0s_l1(struct alc_softc *);
static int  alc_dma_alloc(struct alc_softc *);
static void alc_dma_free(struct alc_softc *);
static void alc_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void alc_dsp_fixup(struct alc_softc *, int);
static int  alc_encap(struct alc_softc *, struct mbuf **);
static struct alc_ident *alc_find_ident(device_t);
static void alc_get_macaddr(struct alc_softc *);
static void alc_get_macaddr_813x(struct alc_softc *);
static void alc_get_macaddr_816x(struct alc_softc *);
static void alc_get_macaddr_par(struct alc_softc *);
static void alc_init_cmb(struct alc_softc *);
static void alc_init_rr_ring(struct alc_softc *);
static int  alc_init_rx_ring(struct alc_softc *);
static void alc_init_smb(struct alc_softc *);
static void alc_init_tx_ring(struct alc_softc *);
static void alc_intr(void *);
static void alc_mac_config(struct alc_softc *);
static uint32_t alc_mii_readreg_813x(struct alc_softc *, int, int);
static uint32_t alc_mii_readreg_816x(struct alc_softc *, int, int);
static uint32_t alc_mii_writereg_813x(struct alc_softc *, int, int, int);
static uint32_t alc_mii_writereg_816x(struct alc_softc *, int, int, int);
static int  alc_newbuf(struct alc_softc *, struct alc_rxdesc *, boolean_t);
static void alc_osc_reset(struct alc_softc *);
static void alc_phy_down(struct alc_softc *);
static void alc_phy_reset(struct alc_softc *);
static void alc_phy_reset_813x(struct alc_softc *);
static void alc_phy_reset_816x(struct alc_softc *);
static void alc_reset(struct alc_softc *);
static void alc_rxeof(struct alc_softc *, struct rx_rdesc *);
static int  alc_rxintr(struct alc_softc *);
static void alc_rxfilter(struct alc_softc *);
static void alc_rxvlan(struct alc_softc *);
#if 0
static void alc_setlinkspeed(struct alc_softc *);
/* XXX: WOL */
static void alc_setwol(struct alc_softc *);
static void alc_setwol_813x(struct alc_softc *);
static void alc_setwol_816x(struct alc_softc *);
#endif
static void alc_start_queue(struct alc_softc *);
static void alc_stats_clear(struct alc_softc *);
static void alc_stats_update(struct alc_softc *);
static void alc_stop(struct alc_softc *);
static void alc_stop_mac(struct alc_softc *);
static void alc_stop_queue(struct alc_softc *);
static void alc_sysctl_node(struct alc_softc *);
static void alc_tick(void *);
static void alc_txeof(struct alc_softc *);
static int  sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS);
static int  sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t alc_methods[] = {
    /* Device interface.
 */
    DEVMETHOD(device_probe,     alc_probe),
    DEVMETHOD(device_attach,    alc_attach),
    DEVMETHOD(device_detach,    alc_detach),
    DEVMETHOD(device_shutdown,  alc_shutdown),
    DEVMETHOD(device_suspend,   alc_suspend),
    DEVMETHOD(device_resume,    alc_resume),

    /* MII interface. */
    DEVMETHOD(miibus_readreg,   alc_miibus_readreg),
    DEVMETHOD(miibus_writereg,  alc_miibus_writereg),
    DEVMETHOD(miibus_statchg,   alc_miibus_statchg),

    { NULL, NULL }
};

static DEFINE_CLASS_0(alc, alc_driver, alc_methods, sizeof(struct alc_softc));
static devclass_t alc_devclass;

DECLARE_DUMMY_MODULE(if_alc);
DRIVER_MODULE(if_alc, pci, alc_driver, alc_devclass, NULL, NULL);
DRIVER_MODULE(miibus, alc, miibus_driver, miibus_devclass, NULL, NULL);

/*
 * DMA burst length in bytes, indexed by the hardware's burst-length
 * register encoding; 0 entries are unused encodings.
 */
static const uint32_t alc_dma_burst[] =
    { 128, 256, 512, 1024, 2048, 4096, 0, 0 };

/*
 * miibus read-register method: dispatch to the AR816x or AR813x
 * MDIO access routine based on the controller family.
 */
static int
alc_miibus_readreg(device_t dev, int phy, int reg)
{
    struct alc_softc *sc;
    int v;

    sc = device_get_softc(dev);
    if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
        v = alc_mii_readreg_816x(sc, phy, reg);
    else
        v = alc_mii_readreg_813x(sc, phy, reg);
    return (v);
}

/*
 * Read a PHY register on AR813x parts via the MDIO mailbox register.
 * Returns 0 on timeout.
 */
static uint32_t
alc_mii_readreg_813x(struct alc_softc *sc, int phy, int reg)
{
    uint32_t v;
    int i;

    /*
     * For AR8132 fast ethernet controller, do not report 1000baseT
     * capability to mii(4). Even though AR8132 uses the same
     * model/revision number of F1 gigabit PHY, the PHY has no
     * ability to establish 1000baseT link.
     */
    if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 &&
        reg == MII_EXTSR)
        return (0);

    CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
        MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
    /* Poll until the controller clears the EXECUTE/BUSY bits. */
    for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
        DELAY(5);
        v = CSR_READ_4(sc, ALC_MDIO);
        if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
            break;
    }

    if (i == 0) {
        device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
        return (0);
    }

    return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

/*
 * Read a PHY register on AR816x parts.  A faster MDIO clock is used
 * once link is up.  Returns 0 on timeout.
 */
static uint32_t
alc_mii_readreg_816x(struct alc_softc *sc, int phy, int reg)
{
    uint32_t clk, v;
    int i;

    if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
        clk = MDIO_CLK_25_128;
    else
        clk = MDIO_CLK_25_4;
    CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
        MDIO_SUP_PREAMBLE | clk | MDIO_REG_ADDR(reg));
    for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
        DELAY(5);
        v = CSR_READ_4(sc, ALC_MDIO);
        if ((v & MDIO_OP_BUSY) == 0)
            break;
    }

    if (i == 0) {
        device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
        return (0);
    }

    return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

/*
 * miibus write-register method: dispatch to the family-specific
 * MDIO write routine.
 */
static int
alc_miibus_writereg(device_t dev, int phy, int reg, int val)
{
    struct alc_softc *sc;
    int v;

    sc = device_get_softc(dev);
    if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
        v = alc_mii_writereg_816x(sc, phy, reg, val);
    else
        v = alc_mii_writereg_813x(sc, phy, reg, val);
    return (v);
}

/*
 * Write a PHY register on AR813x parts.  Always returns 0; a timeout
 * is only reported via device_printf().
 */
static uint32_t
alc_mii_writereg_813x(struct alc_softc *sc, int phy, int reg, int val)
{
    uint32_t v;
    int i;

    CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
        (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
        MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
    for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
        DELAY(5);
        v = CSR_READ_4(sc, ALC_MDIO);
        if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
            break;
    }

    if (i == 0)
        device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);

    return (0);
}

/*
 * Write a PHY register on AR816x parts.  Always returns 0; a timeout
 * is only reported via device_printf().
 */
static uint32_t
alc_mii_writereg_816x(struct alc_softc *sc, int phy, int reg, int val)
{
    uint32_t clk, v;
    int i;

    if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
        clk = MDIO_CLK_25_128;
    else
        clk = MDIO_CLK_25_4;
    CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
        ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) | MDIO_REG_ADDR(reg) |
        MDIO_SUP_PREAMBLE | clk);
    for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
        DELAY(5);
        v = CSR_READ_4(sc, ALC_MDIO);
        if ((v & MDIO_OP_BUSY) == 0)
            break;
    }

    if (i == 0)
        device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);

    return (0);
}

/*
 * miibus link-state change callback: recompute ALC_FLAG_LINK from the
 * resolved media and reprogram the Rx/Tx MACs accordingly.
 */
static void
alc_miibus_statchg(device_t dev)
{
    struct alc_softc *sc;
    struct mii_data *mii;
    struct ifnet *ifp;
    uint32_t reg;

    sc = device_get_softc(dev);

    mii = device_get_softc(sc->alc_miibus);
    ifp = sc->alc_ifp;
    if (mii == NULL || ifp == NULL ||
        (ifp->if_flags & IFF_RUNNING) == 0)
        return;

    sc->alc_flags &= ~ALC_FLAG_LINK;
    if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
        (IFM_ACTIVE | IFM_AVALID)) {
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_10_T:
        case IFM_100_TX:
            sc->alc_flags |= ALC_FLAG_LINK;
            break;
        case IFM_1000_T:
            /* Fast-ethernet parts cannot do 1000baseT. */
            if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
                sc->alc_flags |= ALC_FLAG_LINK;
            break;
        default:
            break;
        }
    }
    /* Stop Rx/Tx MACs. */
    alc_stop_mac(sc);

    /* Program MACs with resolved speed/duplex/flow-control. */
    if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
        alc_start_queue(sc);
        alc_mac_config(sc);
        /* Re-enable Tx/Rx MACs.
*/ 417 reg = CSR_READ_4(sc, ALC_MAC_CFG); 418 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; 419 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 420 } 421 alc_aspm(sc, 0, IFM_SUBTYPE(mii->mii_media_active)); 422 alc_dsp_fixup(sc, IFM_SUBTYPE(mii->mii_media_active)); 423 } 424 425 static uint32_t 426 alc_miidbg_readreg(struct alc_softc *sc, int reg) 427 { 428 429 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, 430 reg); 431 return (alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, 432 ALC_MII_DBG_DATA)); 433 } 434 435 static uint32_t 436 alc_miidbg_writereg(struct alc_softc *sc, int reg, int val) 437 { 438 439 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, 440 reg); 441 return (alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 442 ALC_MII_DBG_DATA, val)); 443 } 444 445 static uint32_t 446 alc_miiext_readreg(struct alc_softc *sc, int devaddr, int reg) 447 { 448 uint32_t clk, v; 449 int i; 450 451 CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) | 452 EXT_MDIO_DEVADDR(devaddr)); 453 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) 454 clk = MDIO_CLK_25_128; 455 else 456 clk = MDIO_CLK_25_4; 457 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | 458 MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT); 459 for (i = ALC_PHY_TIMEOUT; i > 0; i--) { 460 DELAY(5); 461 v = CSR_READ_4(sc, ALC_MDIO); 462 if ((v & MDIO_OP_BUSY) == 0) 463 break; 464 } 465 466 if (i == 0) { 467 device_printf(sc->alc_dev, "phy ext read timeout : %d, %d\n", 468 devaddr, reg); 469 return (0); 470 } 471 472 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); 473 } 474 475 static uint32_t 476 alc_miiext_writereg(struct alc_softc *sc, int devaddr, int reg, int val) 477 { 478 uint32_t clk, v; 479 int i; 480 481 CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) | 482 EXT_MDIO_DEVADDR(devaddr)); 483 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) 484 clk = MDIO_CLK_25_128; 485 else 486 clk = MDIO_CLK_25_4; 487 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | 488 ((val & MDIO_DATA_MASK) << 
MDIO_DATA_SHIFT) | 489 MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT); 490 for (i = ALC_PHY_TIMEOUT; i > 0; i--) { 491 DELAY(5); 492 v = CSR_READ_4(sc, ALC_MDIO); 493 if ((v & MDIO_OP_BUSY) == 0) 494 break; 495 } 496 497 if (i == 0) 498 device_printf(sc->alc_dev, "phy ext write timeout : %d, %d\n", 499 devaddr, reg); 500 501 return (0); 502 } 503 504 static void 505 alc_dsp_fixup(struct alc_softc *sc, int media) 506 { 507 uint16_t agc, len, val; 508 509 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 510 return; 511 if (AR816X_REV(sc->alc_rev) >= AR816X_REV_C0) 512 return; 513 514 /* 515 * Vendor PHY magic. 516 * 1000BT/AZ, wrong cable length 517 */ 518 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { 519 len = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL6); 520 len = (len >> EXT_CLDCTL6_CAB_LEN_SHIFT) & 521 EXT_CLDCTL6_CAB_LEN_MASK; 522 agc = alc_miidbg_readreg(sc, MII_DBG_AGC); 523 agc = (agc >> DBG_AGC_2_VGA_SHIFT) & DBG_AGC_2_VGA_MASK; 524 if ((media == IFM_1000_T && len > EXT_CLDCTL6_CAB_LEN_SHORT1G && 525 agc > DBG_AGC_LONG1G_LIMT) || 526 (media == IFM_100_TX && len > DBG_AGC_LONG100M_LIMT && 527 agc > DBG_AGC_LONG1G_LIMT)) { 528 alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT, 529 DBG_AZ_ANADECT_LONG); 530 val = alc_miiext_readreg(sc, MII_EXT_ANEG, 531 MII_EXT_ANEG_AFE); 532 val |= ANEG_AFEE_10BT_100M_TH; 533 alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, 534 val); 535 } else { 536 alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT, 537 DBG_AZ_ANADECT_DEFAULT); 538 val = alc_miiext_readreg(sc, MII_EXT_ANEG, 539 MII_EXT_ANEG_AFE); 540 val &= ~ANEG_AFEE_10BT_100M_TH; 541 alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, 542 val); 543 } 544 if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 && 545 AR816X_REV(sc->alc_rev) == AR816X_REV_B0) { 546 if (media == IFM_1000_T) { 547 /* 548 * Giga link threshold, raise the tolerance of 549 * noise 50%. 
 */
                val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB);
                val &= ~DBG_MSE20DB_TH_MASK;
                val |= (DBG_MSE20DB_TH_HI <<
                    DBG_MSE20DB_TH_SHIFT);
                alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
            } else if (media == IFM_100_TX)
                alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
                    DBG_MSE16DB_UP);
        }
    } else {
        /* No link: restore default thresholds. */
        val = alc_miiext_readreg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE);
        val &= ~ANEG_AFEE_10BT_100M_TH;
        alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, val);
        if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
            AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
            alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
                DBG_MSE16DB_DOWN);
            val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB);
            val &= ~DBG_MSE20DB_TH_MASK;
            val |= (DBG_MSE20DB_TH_DEFAULT << DBG_MSE20DB_TH_SHIFT);
            alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
        }
    }
}

/*
 * ifmedia status callback: report current link status/active media
 * from mii(4).
 */
static void
alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct alc_softc *sc;
    struct mii_data *mii;

    sc = ifp->if_softc;
    if ((ifp->if_flags & IFF_UP) == 0) {
        return;
    }
    mii = device_get_softc(sc->alc_miibus);

    mii_pollstat(mii);
    ifmr->ifm_status = mii->mii_media_status;
    ifmr->ifm_active = mii->mii_media_active;
}

/* ifmedia change callback. */
static int
alc_mediachange(struct ifnet *ifp)
{
    struct alc_softc *sc;
    int error;

    sc = ifp->if_softc;
    ALC_LOCK(sc);
    error = alc_mediachange_locked(sc);
    ALC_UNLOCK(sc);

    return (error);
}

/*
 * Set media, with the softc lock held.  All PHYs are reset first when
 * more than one PHY instance is attached.
 */
static int
alc_mediachange_locked(struct alc_softc *sc)
{
    struct mii_data *mii;
    struct mii_softc *miisc;
    int error;

    ALC_LOCK_ASSERT(sc);

    mii = device_get_softc(sc->alc_miibus);
    if (mii->mii_instance != 0) {
        LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
            mii_phy_reset(miisc);
    }
    error = mii_mediachg(mii);

    return (error);
}

/*
 * Look up the PCI vendor/device ID in alc_ident_table.
 * Returns NULL when the device is not supported.
 */
static struct alc_ident *
alc_find_ident(device_t dev)
{
    struct alc_ident *ident;
    uint16_t vendor, devid;

    vendor = pci_get_vendor(dev);
    devid = pci_get_device(dev);
    for (ident = alc_ident_table; ident->name != NULL; ident++) {
        if (vendor == ident->vendorid && devid == ident->deviceid)
            return (ident);
    }

    return (NULL);
}

/* Device probe method. */
static int
alc_probe(device_t dev)
{
    struct alc_ident *ident;

    ident = alc_find_ident(dev);
    if (ident != NULL) {
        device_set_desc(dev, ident->name);
        return (BUS_PROBE_DEFAULT);
    }

    return (ENXIO);
}

/* Fetch the station address, dispatching on controller family. */
static void
alc_get_macaddr(struct alc_softc *sc)
{

    if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
        alc_get_macaddr_816x(sc);
    else
        alc_get_macaddr_813x(sc);
}

/*
 * AR813x: if an EEPROM is present, have the TWSI block reload the
 * EEPROM configuration (which programs the station address), then
 * read the address from the PAR registers.
 */
static void
alc_get_macaddr_813x(struct alc_softc *sc)
{
    uint32_t opt;
    uint16_t val;
    int eeprom, i;

    eeprom = 0;
    opt = CSR_READ_4(sc, ALC_OPT_CFG);
    if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
        (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
        /*
         * EEPROM found, let TWSI reload EEPROM configuration.
         * This will set ethernet address of controller.
 */
        eeprom++;
        switch (sc->alc_ident->deviceid) {
        case DEVICEID_ATHEROS_AR8131:
        case DEVICEID_ATHEROS_AR8132:
            /* Enable OTP clock for the duration of the reload. */
            if ((opt & OPT_CFG_CLK_ENB) == 0) {
                opt |= OPT_CFG_CLK_ENB;
                CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
                CSR_READ_4(sc, ALC_OPT_CFG);
                DELAY(1000);
            }
            break;
        case DEVICEID_ATHEROS_AR8151:
        case DEVICEID_ATHEROS_AR8151_V2:
        case DEVICEID_ATHEROS_AR8152_B:
        case DEVICEID_ATHEROS_AR8152_B2:
            /* Vendor magic via the PHY debug port. */
            alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
                ALC_MII_DBG_ADDR, 0x00);
            val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
                ALC_MII_DBG_DATA);
            alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
                ALC_MII_DBG_DATA, val & 0xFF7F);
            alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
                ALC_MII_DBG_ADDR, 0x3B);
            val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
                ALC_MII_DBG_DATA);
            alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
                ALC_MII_DBG_DATA, val | 0x0008);
            DELAY(20);
            break;
        }

        CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
            CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
        CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
        CSR_READ_4(sc, ALC_WOL_CFG);

        /* Kick off the TWSI software load and wait for completion. */
        CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
            TWSI_CFG_SW_LD_START);
        for (i = 100; i > 0; i--) {
            DELAY(1000);
            if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
                TWSI_CFG_SW_LD_START) == 0)
                break;
        }
        if (i == 0)
            device_printf(sc->alc_dev,
                "reloading EEPROM timeout!\n");
    } else {
        if (bootverbose)
            device_printf(sc->alc_dev, "EEPROM not found!\n");
    }
    if (eeprom != 0) {
        /* Undo the pre-reload magic applied above. */
        switch (sc->alc_ident->deviceid) {
        case DEVICEID_ATHEROS_AR8131:
        case DEVICEID_ATHEROS_AR8132:
            if ((opt & OPT_CFG_CLK_ENB) != 0) {
                opt &= ~OPT_CFG_CLK_ENB;
                CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
                CSR_READ_4(sc, ALC_OPT_CFG);
                DELAY(1000);
            }
            break;
        case DEVICEID_ATHEROS_AR8151:
        case DEVICEID_ATHEROS_AR8151_V2:
        case DEVICEID_ATHEROS_AR8152_B:
        case DEVICEID_ATHEROS_AR8152_B2:
            alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
                ALC_MII_DBG_ADDR, 0x00);
            val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
                ALC_MII_DBG_DATA);
            alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
                ALC_MII_DBG_DATA, val | 0x0080);
            alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
                ALC_MII_DBG_ADDR, 0x3B);
            val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
                ALC_MII_DBG_DATA);
            alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
                ALC_MII_DBG_DATA, val & 0xFFF7);
            DELAY(20);
            break;
        }
    }

    alc_get_macaddr_par(sc);
}

/*
 * AR816x: reload the station address via TWSI, falling back to
 * EEPROM/FLASH, then read the result from the PAR registers.
 */
static void
alc_get_macaddr_816x(struct alc_softc *sc)
{
    uint32_t reg;
    int i, reloaded;

    reloaded = 0;
    /* Try to reload station address via TWSI. */
    for (i = 100; i > 0; i--) {
        reg = CSR_READ_4(sc, ALC_SLD);
        if ((reg & (SLD_PROGRESS | SLD_START)) == 0)
            break;
        DELAY(1000);
    }
    if (i != 0) {
        CSR_WRITE_4(sc, ALC_SLD, reg | SLD_START);
        for (i = 100; i > 0; i--) {
            DELAY(1000);
            reg = CSR_READ_4(sc, ALC_SLD);
            if ((reg & SLD_START) == 0)
                break;
        }
        if (i != 0)
            reloaded++;
        else if (bootverbose)
            device_printf(sc->alc_dev,
                "reloading station address via TWSI timed out!\n");
    }

    /* Try to reload station address from EEPROM or FLASH. */
    if (reloaded == 0) {
        reg = CSR_READ_4(sc, ALC_EEPROM_LD);
        if ((reg & (EEPROM_LD_EEPROM_EXIST |
            EEPROM_LD_FLASH_EXIST)) != 0) {
            for (i = 100; i > 0; i--) {
                reg = CSR_READ_4(sc, ALC_EEPROM_LD);
                if ((reg & (EEPROM_LD_PROGRESS |
                    EEPROM_LD_START)) == 0)
                    break;
                DELAY(1000);
            }
            if (i != 0) {
                CSR_WRITE_4(sc, ALC_EEPROM_LD, reg |
                    EEPROM_LD_START);
                for (i = 100; i > 0; i--) {
                    DELAY(1000);
                    reg = CSR_READ_4(sc, ALC_EEPROM_LD);
                    if ((reg & EEPROM_LD_START) == 0)
                        break;
                }
            } else if (bootverbose)
                device_printf(sc->alc_dev,
                    "reloading EEPROM/FLASH timed out!\n");
        }
    }

    alc_get_macaddr_par(sc);
}

/*
 * Read the station address out of the PAR0/PAR1 registers (byte
 * order per hardware layout).
 */
static void
alc_get_macaddr_par(struct alc_softc *sc)
{
    uint32_t ea[2];

    ea[0] = CSR_READ_4(sc, ALC_PAR0);
    ea[1] = CSR_READ_4(sc, ALC_PAR1);
    sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
    sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
    sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
    sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
    sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
    sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

/* Disable PCIe ASPM L0s/L1 entry on AR813x parts. */
static void
alc_disable_l0s_l1(struct alc_softc *sc)
{
    uint32_t pmcfg;

    if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
        /* Another magic from vendor.
*/ 848 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 849 pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 | 850 PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | 851 PM_CFG_MAC_ASPM_CHK | PM_CFG_SERDES_PD_EX_L1); 852 pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | 853 PM_CFG_SERDES_PLL_L1_ENB | PM_CFG_SERDES_L1_ENB; 854 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 855 } 856 } 857 858 static void 859 alc_phy_reset(struct alc_softc *sc) 860 { 861 862 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 863 alc_phy_reset_816x(sc); 864 else 865 alc_phy_reset_813x(sc); 866 } 867 868 static void 869 alc_phy_reset_813x(struct alc_softc *sc) 870 { 871 uint16_t data; 872 873 /* Reset magic from Linux. */ 874 CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_SEL_ANA_RESET); 875 CSR_READ_2(sc, ALC_GPHY_CFG); 876 DELAY(10 * 1000); 877 878 CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET | 879 GPHY_CFG_SEL_ANA_RESET); 880 CSR_READ_2(sc, ALC_GPHY_CFG); 881 DELAY(10 * 1000); 882 883 /* DSP fixup, Vendor magic. */ 884 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) { 885 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 886 ALC_MII_DBG_ADDR, 0x000A); 887 data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, 888 ALC_MII_DBG_DATA); 889 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 890 ALC_MII_DBG_DATA, data & 0xDFFF); 891 } 892 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 || 893 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || 894 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B || 895 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) { 896 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 897 ALC_MII_DBG_ADDR, 0x003B); 898 data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, 899 ALC_MII_DBG_DATA); 900 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 901 ALC_MII_DBG_DATA, data & 0xFFF7); 902 DELAY(20 * 1000); 903 } 904 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151) { 905 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 906 ALC_MII_DBG_ADDR, 0x0029); 907 
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 908 ALC_MII_DBG_DATA, 0x929D); 909 } 910 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 || 911 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132 || 912 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || 913 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) { 914 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 915 ALC_MII_DBG_ADDR, 0x0029); 916 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 917 ALC_MII_DBG_DATA, 0xB6DD); 918 } 919 920 /* Load DSP codes, vendor magic. */ 921 data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE | 922 ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK); 923 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 924 ALC_MII_DBG_ADDR, MII_ANA_CFG18); 925 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 926 ALC_MII_DBG_DATA, data); 927 928 data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) | 929 ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL | 930 ANA_SERDES_EN_LCKDT; 931 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 932 ALC_MII_DBG_ADDR, MII_ANA_CFG5); 933 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 934 ALC_MII_DBG_DATA, data); 935 936 data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) & 937 ANA_LONG_CABLE_TH_100_MASK) | 938 ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) & 939 ANA_SHORT_CABLE_TH_100_SHIFT) | 940 ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW; 941 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 942 ALC_MII_DBG_ADDR, MII_ANA_CFG54); 943 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 944 ALC_MII_DBG_DATA, data); 945 946 data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) | 947 ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) | 948 ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) | 949 ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK); 950 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 951 ALC_MII_DBG_ADDR, MII_ANA_CFG4); 952 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 953 
/*
 * Reset and reconfigure the PHY on AR816x-family controllers.
 *
 * Sequence: pulse the analog reset through GPHY_CFG, then apply vendor
 * PHY-debug/extended register magic, disable EEE, enable the PHY power
 * saving knobs and, on parts needing the link workaround
 * (ALC_FLAG_LINK_WAR), turn off half amplitude/bias and the Green
 * feature.  The register values are vendor magic; do not reorder.
 */
static void
alc_phy_reset_816x(struct alc_softc *sc)
{
	uint32_t val;

	val = CSR_READ_4(sc, ALC_GPHY_CFG);
	val &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
	    GPHY_CFG_GATE_25M_ENB | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PHY_PLL_ON |
	    GPHY_CFG_PWDOWN_HW | GPHY_CFG_100AB_ENB);
	val |= GPHY_CFG_SEL_ANA_RESET;
#ifdef notyet
	val |= GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN | GPHY_CFG_SEL_ANA_RESET;
#else
	/* Disable PHY hibernation. */
	val &= ~(GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN);
#endif
	/* Assert then release the external reset with settle delays. */
	CSR_WRITE_4(sc, ALC_GPHY_CFG, val);
	DELAY(10);
	CSR_WRITE_4(sc, ALC_GPHY_CFG, val | GPHY_CFG_EXT_RESET);
	DELAY(800);

	/* Vendor PHY magic. */
#ifdef notyet
	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS, DBG_LEGCYPS_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_SYSMODCTL, DBG_SYSMODCTL_DEFAULT);
	alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_VDRVBIAS,
	    EXT_VDRVBIAS_DEFAULT);
#else
	/* Disable PHY hibernation. */
	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS,
	    DBG_LEGCYPS_DEFAULT & ~DBG_LEGCYPS_ENB);
	alc_miidbg_writereg(sc, MII_DBG_HIBNEG,
	    DBG_HIBNEG_DEFAULT & ~(DBG_HIBNEG_PSHIB_EN | DBG_HIBNEG_HIB_PULSE));
	alc_miidbg_writereg(sc, MII_DBG_GREENCFG, DBG_GREENCFG_DEFAULT);
#endif

	/* XXX Disable EEE. */
	val = CSR_READ_4(sc, ALC_LPI_CTL);
	val &= ~LPI_CTL_ENB;
	CSR_WRITE_4(sc, ALC_LPI_CTL, val);
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_LOCAL_EEEADV, 0);

	/* PHY power saving. */
	alc_miidbg_writereg(sc, MII_DBG_TST10BTCFG, DBG_TST10BTCFG_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_SRDSYSMOD, DBG_SRDSYSMOD_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_TST100BTCFG, DBG_TST100BTCFG_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_ANACTL, DBG_ANACTL_DEFAULT);
	val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
	val &= ~DBG_GREENCFG2_GATE_DFSE_EN;
	alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);

	/* RTL8139C, 120m issue. */
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_NLP78,
	    ANEG_NLP78_120M_DEFAULT);
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10,
	    ANEG_S3DIG10_DEFAULT);

	if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0) {
		/* Turn off half amplitude. */
		val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3);
		val |= EXT_CLDCTL3_BP_CABLE1TH_DET_GT;
		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3, val);
		/* Turn off Green feature. */
		val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
		val |= DBG_GREENCFG2_BP_GREEN;
		alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);
		/* Turn off half bias. */
		val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5);
		val |= EXT_CLDCTL5_BP_VD_HLFBIAS;
		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5, val);
	}
}
/*
 * Power the PHY down for suspend/detach.
 *
 * AR816x-family parts go through GPHY_CFG (hibernate + IDDQ + hardware
 * power-down).  AR8151/AR8152 instead isolate and power down via the MII
 * BMCR, because a GPHY power-down hangs register access on those chips
 * (see the inline comment).  Everything else uses the legacy GPHY write.
 */
static void
alc_phy_down(struct alc_softc *sc)
{
	uint32_t gphy;

	switch (sc->alc_ident->deviceid) {
	case DEVICEID_ATHEROS_AR8161:
	case DEVICEID_ATHEROS_E2200:
	case DEVICEID_ATHEROS_E2400:
	case DEVICEID_ATHEROS_E2500:
	case DEVICEID_ATHEROS_AR8162:
	case DEVICEID_ATHEROS_AR8171:
	case DEVICEID_ATHEROS_AR8172:
		/* AR816x family: enable hibernation and hardware power-down. */
		gphy = CSR_READ_4(sc, ALC_GPHY_CFG);
		gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
		    GPHY_CFG_100AB_ENB | GPHY_CFG_PHY_PLL_ON);
		gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
		    GPHY_CFG_SEL_ANA_RESET;
		gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW;
		CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy);
		break;
	case DEVICEID_ATHEROS_AR8151:
	case DEVICEID_ATHEROS_AR8151_V2:
	case DEVICEID_ATHEROS_AR8152_B:
	case DEVICEID_ATHEROS_AR8152_B2:
		/*
		 * GPHY power down caused more problems on AR8151 v2.0.
		 * When driver is reloaded after GPHY power down,
		 * accesses to PHY/MAC registers hung the system. Only
		 * cold boot recovered from it. I'm not sure whether
		 * AR8151 v1.0 also requires this one though. I don't
		 * have AR8151 v1.0 controller in hand.
		 * The only option left is to isolate the PHY and
		 * initiates power down the PHY which in turn saves
		 * more power when driver is unloaded.
		 */
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    MII_BMCR, BMCR_ISO | BMCR_PDOWN);
		break;
	default:
		/* Force PHY down. */
		CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
		    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
		    GPHY_CFG_PWDOWN_HW);
		DELAY(1000);
		break;
	}
}
*/ 1096 CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET | 1097 GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ | 1098 GPHY_CFG_PWDOWN_HW); 1099 DELAY(1000); 1100 break; 1101 } 1102 } 1103 1104 static void 1105 alc_aspm(struct alc_softc *sc, int init, int media) 1106 { 1107 1108 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 1109 alc_aspm_816x(sc, init); 1110 else 1111 alc_aspm_813x(sc, media); 1112 } 1113 1114 static void 1115 alc_aspm_813x(struct alc_softc *sc, int media) 1116 { 1117 uint32_t pmcfg; 1118 uint16_t linkcfg; 1119 1120 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) 1121 return; 1122 1123 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 1124 if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) == 1125 (ALC_FLAG_APS | ALC_FLAG_PCIE)) 1126 linkcfg = CSR_READ_2(sc, sc->alc_expcap + 1127 PCIR_EXPRESS_LINK_CTL); 1128 else 1129 linkcfg = 0; 1130 pmcfg &= ~PM_CFG_SERDES_PD_EX_L1; 1131 pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK); 1132 pmcfg |= PM_CFG_MAC_ASPM_CHK; 1133 pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT); 1134 pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB); 1135 1136 if ((sc->alc_flags & ALC_FLAG_APS) != 0) { 1137 /* Disable extended sync except AR8152 B v1.0 */ 1138 linkcfg &= ~PCIEM_LINK_CTL_EXTENDED_SYNC; 1139 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B && 1140 sc->alc_rev == ATHEROS_AR8152_B_V10) 1141 linkcfg |= PCIEM_LINK_CTL_EXTENDED_SYNC; 1142 CSR_WRITE_2(sc, sc->alc_expcap + PCIER_LINK_CTL, 1143 linkcfg); 1144 pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB | 1145 PM_CFG_HOTRST); 1146 pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT << 1147 PM_CFG_L1_ENTRY_TIMER_SHIFT); 1148 pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK; 1149 pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT << 1150 PM_CFG_PM_REQ_TIMER_SHIFT); 1151 pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV; 1152 } 1153 1154 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { 1155 if ((sc->alc_flags & ALC_FLAG_L0S) != 0) 1156 pmcfg |= PM_CFG_ASPM_L0S_ENB; 1157 if 
((sc->alc_flags & ALC_FLAG_L1S) != 0) 1158 pmcfg |= PM_CFG_ASPM_L1_ENB; 1159 if ((sc->alc_flags & ALC_FLAG_APS) != 0) { 1160 if (sc->alc_ident->deviceid == 1161 DEVICEID_ATHEROS_AR8152_B) 1162 pmcfg &= ~PM_CFG_ASPM_L0S_ENB; 1163 pmcfg &= ~(PM_CFG_SERDES_L1_ENB | 1164 PM_CFG_SERDES_PLL_L1_ENB | 1165 PM_CFG_SERDES_BUDS_RX_L1_ENB); 1166 pmcfg |= PM_CFG_CLK_SWH_L1; 1167 if (media == IFM_100_TX || media == IFM_1000_T) { 1168 pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK; 1169 switch (sc->alc_ident->deviceid) { 1170 case DEVICEID_ATHEROS_AR8152_B: 1171 pmcfg |= (7 << 1172 PM_CFG_L1_ENTRY_TIMER_SHIFT); 1173 break; 1174 case DEVICEID_ATHEROS_AR8152_B2: 1175 case DEVICEID_ATHEROS_AR8151_V2: 1176 pmcfg |= (4 << 1177 PM_CFG_L1_ENTRY_TIMER_SHIFT); 1178 break; 1179 default: 1180 pmcfg |= (15 << 1181 PM_CFG_L1_ENTRY_TIMER_SHIFT); 1182 break; 1183 } 1184 } 1185 } else { 1186 pmcfg |= PM_CFG_SERDES_L1_ENB | 1187 PM_CFG_SERDES_PLL_L1_ENB | 1188 PM_CFG_SERDES_BUDS_RX_L1_ENB; 1189 pmcfg &= ~(PM_CFG_CLK_SWH_L1 | 1190 PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB); 1191 } 1192 } else { 1193 pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB | 1194 PM_CFG_SERDES_PLL_L1_ENB); 1195 pmcfg |= PM_CFG_CLK_SWH_L1; 1196 if ((sc->alc_flags & ALC_FLAG_L1S) != 0) 1197 pmcfg |= PM_CFG_ASPM_L1_ENB; 1198 } 1199 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 1200 } 1201 1202 static void 1203 alc_aspm_816x(struct alc_softc *sc, int init) 1204 { 1205 uint32_t pmcfg; 1206 1207 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 1208 pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_816X_MASK; 1209 pmcfg |= PM_CFG_L1_ENTRY_TIMER_816X_DEFAULT; 1210 pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK; 1211 pmcfg |= PM_CFG_PM_REQ_TIMER_816X_DEFAULT; 1212 pmcfg &= ~PM_CFG_LCKDET_TIMER_MASK; 1213 pmcfg |= PM_CFG_LCKDET_TIMER_DEFAULT; 1214 pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_CLK_SWH_L1 | PM_CFG_PCIE_RECV; 1215 pmcfg &= ~(PM_CFG_RX_L1_AFTER_L0S | PM_CFG_TX_L1_AFTER_L0S | 1216 PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB | 1217 PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB 
/*
 * Configure ASPM/power management for AR816x-family controllers.
 *
 * 'init' is nonzero for the attach-time call; in that case L0s/L1 are
 * enabled even without a link.  With a link up, both L0s and L1 are
 * enabled; otherwise only L1 and only when the interface is running.
 */
static void
alc_aspm_816x(struct alc_softc *sc, int init)
{
	uint32_t pmcfg;

	/* Establish baseline timers and strip all ASPM/SerDes enables. */
	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_816X_MASK;
	pmcfg |= PM_CFG_L1_ENTRY_TIMER_816X_DEFAULT;
	pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
	pmcfg |= PM_CFG_PM_REQ_TIMER_816X_DEFAULT;
	pmcfg &= ~PM_CFG_LCKDET_TIMER_MASK;
	pmcfg |= PM_CFG_LCKDET_TIMER_DEFAULT;
	pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_CLK_SWH_L1 | PM_CFG_PCIE_RECV;
	pmcfg &= ~(PM_CFG_RX_L1_AFTER_L0S | PM_CFG_TX_L1_AFTER_L0S |
	    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB |
	    PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
	    PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SA_DLY_ENB |
	    PM_CFG_MAC_ASPM_CHK | PM_CFG_HOTRST);
	/* Early (<= A1, odd-revision) silicon needs the SerDes L1 enables. */
	if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
	    (sc->alc_rev & 0x01) != 0)
		pmcfg |= PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB;
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		/* Link up, enable both L0s, L1s. */
		pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
		    PM_CFG_MAC_ASPM_CHK;
	} else {
		if (init != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
			    PM_CFG_MAC_ASPM_CHK;
		else if ((sc->alc_ifp->if_flags & IFF_RUNNING) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}
/*
 * One-time PCIe bring-up, called from attach when the device is PCIe.
 *
 * Clears severity bits for DLP/FCP errors, applies per-chip PCIe PHY
 * workarounds, probes the link ASPM capability into ALC_FLAG_L0S/L1S
 * (AR813x/815x), or fixes the 25MHz wake clock selection (AR816x),
 * then hands off to alc_aspm() for the initial ASPM programming.
 */
static void
alc_init_pcie(struct alc_softc *sc)
{
	/* Indexed by Link Control ASPM field (0..3). */
	const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
	uint32_t cap, ctl, val;
	int state;

	/* Clear data link and flow-control protocol error. */
	val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
	val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
	CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
		    CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
		    PCIE_PHYMISC_FORCE_RCV_DET);
		/* AR8152 B v1.0 needs SerDes CDR/threshold tuning. */
		if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10) {
			val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
			val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
			    PCIE_PHYMISC2_SERDES_TH_MASK);
			val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
			val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
			CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
		}
		/* Disable ASPM L0S and L1. */
		cap = CSR_READ_2(sc, sc->alc_expcap + PCIER_LINK_CAP);
		if ((cap & PCIEM_LINK_CAP_ASPM) != 0) {
			ctl = CSR_READ_2(sc, sc->alc_expcap + PCIER_LINK_CTL);
			/* Honor the Read Completion Boundary setting. */
			if ((ctl & PCIEM_LINK_CTL_RCB) != 0)
				sc->alc_rcb = DMA_CFG_RCB_128;
			if (bootverbose)
				device_printf(sc->alc_dev, "RCB %u bytes\n",
				    sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
			state = ctl & PCIEM_LINK_CTL_ASPMC;
			if (state & PCIEM_LINK_CTL_ASPMC_L0S)
				sc->alc_flags |= ALC_FLAG_L0S;
			if (state & PCIEM_LINK_CTL_ASPMC_L1)
				sc->alc_flags |= ALC_FLAG_L1S;
			if (bootverbose)
				device_printf(sc->alc_dev, "ASPM %s %s\n",
				    aspm_state[state],
				    state == 0 ? "disabled" : "enabled");
			alc_disable_l0s_l1(sc);
		} else {
			if (bootverbose)
				device_printf(sc->alc_dev,
				    "no ASPM support\n");
		}
	} else {
		/* AR816x: keep the PLL on in D3 and fix 25M clock gating. */
		val = CSR_READ_4(sc, ALC_PDLL_TRNS1);
		val &= ~PDLL_TRNS1_D3PLLOFF_ENB;
		CSR_WRITE_4(sc, ALC_PDLL_TRNS1, val);
		val = CSR_READ_4(sc, ALC_MASTER_CFG);
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
		    (sc->alc_rev & 0x01) != 0) {
			if ((val & MASTER_WAKEN_25M) == 0 ||
			    (val & MASTER_CLK_SEL_DIS) == 0) {
				val |= MASTER_WAKEN_25M | MASTER_CLK_SEL_DIS;
				CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
			}
		} else {
			if ((val & MASTER_WAKEN_25M) == 0 ||
			    (val & MASTER_CLK_SEL_DIS) != 0) {
				val |= MASTER_WAKEN_25M;
				val &= ~MASTER_CLK_SEL_DIS;
				CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
			}
		}
	}
	alc_aspm(sc, 1, IFM_UNKNOWN);
}
1323 */ 1324 ctl = CSR_READ_4(sc, ALC_MSI_RETRANS_TIMER); 1325 ctl &= ~MSI_RETRANS_TIMER_MASK; 1326 ctl &= ~MSI_RETRANS_MASK_SEL_LINE; 1327 mod = ALC_USECS(sc->alc_int_rx_mod); 1328 if (mod == 0) 1329 mod = 1; 1330 ctl |= mod; 1331 if (sc->alc_irq_type == PCI_INTR_TYPE_MSI) 1332 CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, ctl | 1333 MSI_RETRANS_MASK_SEL_LINE); 1334 else 1335 CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, 0); 1336 } 1337 } 1338 1339 static int 1340 alc_attach(device_t dev) 1341 { 1342 struct alc_softc *sc; 1343 struct ifnet *ifp; 1344 uint16_t burst; 1345 int base, error; 1346 u_int intr_flags; 1347 1348 error = 0; 1349 sc = device_get_softc(dev); 1350 sc->alc_dev = dev; 1351 sc->alc_rev = pci_get_revid(dev); 1352 1353 callout_init_mp(&sc->alc_tick_ch); 1354 sc->alc_ident = alc_find_ident(dev); 1355 1356 /* Enable bus mastering */ 1357 pci_enable_busmaster(dev); 1358 1359 /* Map the device. */ 1360 sc->alc_res_rid = PCIR_BAR(0); 1361 sc->alc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 1362 &sc->alc_res_rid, RF_ACTIVE); 1363 if (error != 0) { 1364 device_printf(dev, "cannot allocate memory resources.\n"); 1365 goto fail; 1366 } 1367 sc->alc_res_btag = rman_get_bustag(sc->alc_res); 1368 sc->alc_res_bhand = rman_get_bushandle(sc->alc_res); 1369 1370 /* Set PHY address. */ 1371 sc->alc_phyaddr = ALC_PHY_ADDR; 1372 1373 /* 1374 * One odd thing is AR8132 uses the same PHY hardware(F1 1375 * gigabit PHY) of AR8131. So atphy(4) of AR8132 reports 1376 * the PHY supports 1000Mbps but that's not true. The PHY 1377 * used in AR8132 can't establish gigabit link even if it 1378 * shows the same PHY model/revision number of AR8131. 
1379 */ 1380 switch (sc->alc_ident->deviceid) { 1381 case DEVICEID_ATHEROS_E2200: 1382 case DEVICEID_ATHEROS_E2400: 1383 case DEVICEID_ATHEROS_E2500: 1384 sc->alc_flags |= ALC_FLAG_E2X00; 1385 /* FALLTHROUGH */ 1386 case DEVICEID_ATHEROS_AR8161: 1387 if (pci_get_subvendor(dev) == VENDORID_ATHEROS && 1388 pci_get_subdevice(dev) == 0x0091 && sc->alc_rev == 0) 1389 sc->alc_flags |= ALC_FLAG_LINK_WAR; 1390 /* FALLTHROUGH */ 1391 case DEVICEID_ATHEROS_AR8171: 1392 sc->alc_flags |= ALC_FLAG_AR816X_FAMILY; 1393 break; 1394 case DEVICEID_ATHEROS_AR8162: 1395 case DEVICEID_ATHEROS_AR8172: 1396 sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_AR816X_FAMILY; 1397 break; 1398 case DEVICEID_ATHEROS_AR8152_B: 1399 case DEVICEID_ATHEROS_AR8152_B2: 1400 sc->alc_flags |= ALC_FLAG_APS; 1401 /* FALLTHROUGH */ 1402 case DEVICEID_ATHEROS_AR8132: 1403 sc->alc_flags |= ALC_FLAG_FASTETHER; 1404 break; 1405 case DEVICEID_ATHEROS_AR8151: 1406 case DEVICEID_ATHEROS_AR8151_V2: 1407 sc->alc_flags |= ALC_FLAG_APS; 1408 /* FALLTHROUGH */ 1409 default: 1410 break; 1411 } 1412 sc->alc_flags |= ALC_FLAG_JUMBO; 1413 1414 /* 1415 * It seems that AR813x/AR815x has silicon bug for SMB. In 1416 * addition, Atheros said that enabling SMB wouldn't improve 1417 * performance. However I think it's bad to access lots of 1418 * registers to extract MAC statistics. 1419 */ 1420 sc->alc_flags |= ALC_FLAG_SMB_BUG; 1421 1422 /* 1423 * Don't use Tx CMB. It is known to have silicon bug. 
1424 */ 1425 sc->alc_flags |= ALC_FLAG_CMB_BUG; 1426 sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >> 1427 MASTER_CHIP_REV_SHIFT; 1428 if (bootverbose) { 1429 device_printf(dev, "PCI device revision : 0x%04x\n", 1430 sc->alc_rev); 1431 device_printf(dev, "Chip id/revision : 0x%04x\n", 1432 sc->alc_chip_rev); 1433 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 1434 device_printf(dev, "AR816x revision : 0x%x\n", 1435 AR816X_REV(sc->alc_rev)); 1436 } 1437 device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n", 1438 CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8, 1439 CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8); 1440 1441 /* Initialize DMA parameters. */ 1442 sc->alc_dma_rd_burst = 0; 1443 sc->alc_dma_wr_burst = 0; 1444 sc->alc_rcb = DMA_CFG_RCB_64; 1445 if (pci_find_extcap(dev, PCIY_EXPRESS, &base) == 0) { 1446 sc->alc_flags |= ALC_FLAG_PCIE; 1447 sc->alc_expcap = base; 1448 burst = CSR_READ_2(sc, base + PCIER_DEVICE_CTL); 1449 sc->alc_dma_rd_burst = 1450 (burst & PCIEM_CTL_MAX_READ_REQUEST) >> 12; 1451 sc->alc_dma_wr_burst = (burst & PCIEM_CTL_MAX_PAYLOAD) >> 5; 1452 if (bootverbose) { 1453 device_printf(dev, "Read request size : %u bytes.\n", 1454 alc_dma_burst[sc->alc_dma_rd_burst]); 1455 device_printf(dev, "TLP payload size : %u bytes.\n", 1456 alc_dma_burst[sc->alc_dma_wr_burst]); 1457 } 1458 if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024) 1459 sc->alc_dma_rd_burst = 3; 1460 if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024) 1461 sc->alc_dma_wr_burst = 3; 1462 /* 1463 * Force maximum payload size to 128 bytes for E2200/E2400. 1464 * Otherwise it triggers DMA write error. 1465 */ 1466 if ((sc->alc_flags & ALC_FLAG_E2X00) != 0) 1467 sc->alc_dma_wr_burst = 0; 1468 alc_init_pcie(sc); 1469 } 1470 1471 /* Reset PHY. */ 1472 alc_phy_reset(sc); 1473 1474 /* Reset the ethernet controller. */ 1475 alc_stop_mac(sc); 1476 alc_reset(sc); 1477 1478 sc->alc_irq_type = pci_alloc_1intr(dev, alc_msi_enable, 1479 &sc->alc_irq_rid, &intr_flags); 1480 1481 /* Allocate IRQ resources. 
*/ 1482 sc->alc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 1483 &sc->alc_irq_rid, intr_flags); 1484 if (error != 0) { 1485 device_printf(dev, "cannot allocate IRQ resources.\n"); 1486 goto fail; 1487 } 1488 1489 /* Create device sysctl node. */ 1490 alc_sysctl_node(sc); 1491 1492 if ((error = alc_dma_alloc(sc)) != 0) 1493 goto fail; 1494 1495 /* Load station address. */ 1496 alc_get_macaddr(sc); 1497 1498 ifp = sc->alc_ifp = &sc->arpcom.ac_if; 1499 ifp->if_softc = sc; 1500 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1501 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1502 ifp->if_ioctl = alc_ioctl; 1503 ifp->if_start = alc_start; 1504 ifp->if_init = alc_init; 1505 ifq_set_maxlen(&ifp->if_snd, ALC_TX_RING_CNT - 1); 1506 ifq_set_ready(&ifp->if_snd); 1507 ifp->if_capabilities = IFCAP_TXCSUM; 1508 ifp->if_hwassist = ALC_CSUM_FEATURES; 1509 #if 0 1510 /* XXX: WOL */ 1511 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) { 1512 ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST; 1513 sc->alc_flags |= ALC_FLAG_PM; 1514 sc->alc_pmcap = base; 1515 } 1516 #endif 1517 ifp->if_capenable = ifp->if_capabilities; 1518 1519 /* VLAN capability setup. */ 1520 ifp->if_capabilities |= IFCAP_VLAN_MTU; 1521 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; 1522 ifp->if_capenable = ifp->if_capabilities; 1523 1524 /* 1525 * XXX 1526 * It seems enabling Tx checksum offloading makes more trouble. 1527 * Sometimes the controller does not receive any frames when 1528 * Tx checksum offloading is enabled. I'm not sure whether this 1529 * is a bug in Tx checksum offloading logic or I got broken 1530 * sample boards. To safety, don't enable Tx checksum offloading 1531 * by default but give chance to users to toggle it if they know 1532 * their controllers work without problems. 1533 * Fortunately, Tx checksum offloading for AR816x family 1534 * seems to work. 
1535 */ 1536 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 1537 ifp->if_capenable &= ~IFCAP_TXCSUM; 1538 ifp->if_hwassist &= ~ALC_CSUM_FEATURES; 1539 } 1540 1541 /* Set up MII bus. */ 1542 if ((error = mii_phy_probe(dev, &sc->alc_miibus, alc_mediachange, 1543 alc_mediastatus)) != 0) { 1544 device_printf(dev, "no PHY found!\n"); 1545 goto fail; 1546 } 1547 1548 ether_ifattach(ifp, sc->alc_eaddr, NULL); 1549 1550 /* Tell the upper layer(s) we support long frames. */ 1551 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1552 1553 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->alc_irq)); 1554 #if 0 1555 /* Create local taskq. */ 1556 TASK_INIT(&sc->alc_tx_task, 1, alc_tx_task, ifp); 1557 sc->alc_tq = taskqueue_create("alc_taskq", M_WAITOK, 1558 taskqueue_thread_enqueue, &sc->alc_tq); 1559 if (sc->alc_tq == NULL) { 1560 device_printf(dev, "could not create taskqueue.\n"); 1561 ether_ifdetach(ifp); 1562 error = ENXIO; 1563 goto fail; 1564 } 1565 taskqueue_start_threads(&sc->alc_tq, 1, TDPRI_KERN_DAEMON, -1, "%s taskq", 1566 device_get_nameunit(sc->alc_dev)); 1567 1568 alc_config_msi(sc); 1569 if ((sc->alc_flags & ALC_FLAG_MSIX) != 0) 1570 msic = ALC_MSIX_MESSAGES; 1571 else if ((sc->alc_flags & ALC_FLAG_MSI) != 0) 1572 msic = ALC_MSI_MESSAGES; 1573 else 1574 msic = 1; 1575 for (i = 0; i < msic; i++) { 1576 error = bus_setup_intr(dev, sc->alc_irq[i], INTR_MPSAFE, 1577 alc_intr, sc, 1578 &sc->alc_intrhand[i], NULL); 1579 if (error != 0) 1580 break; 1581 } 1582 if (error != 0) { 1583 device_printf(dev, "could not set up interrupt handler.\n"); 1584 taskqueue_free(sc->alc_tq); 1585 sc->alc_tq = NULL; 1586 ether_ifdetach(ifp); 1587 goto fail; 1588 } 1589 #else 1590 alc_config_msi(sc); 1591 error = bus_setup_intr(dev, sc->alc_irq, INTR_MPSAFE, alc_intr, sc, 1592 &sc->alc_intrhand, ifp->if_serializer); 1593 if (error) { 1594 device_printf(dev, "could not set up interrupt handler.\n"); 1595 ether_ifdetach(ifp); 1596 goto fail; 1597 } 1598 #endif 1599 1600 fail: 
/*
 * Device detach: tear down in reverse attach order.  Also used as the
 * attach failure path, so every resource is checked before release.
 */
static int
alc_detach(device_t dev)
{
	struct alc_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = sc->alc_ifp;

		/* Stop the MAC and remove the interrupt under the serializer. */
		lwkt_serialize_enter(ifp->if_serializer);
		alc_stop(sc);
		bus_teardown_intr(dev, sc->alc_irq, sc->alc_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->alc_miibus != NULL)
		device_delete_child(dev, sc->alc_miibus);
	bus_generic_detach(dev);

	/* Power the PHY down only while registers are still mapped. */
	if (sc->alc_res != NULL)
		alc_phy_down(sc);

	if (sc->alc_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->alc_irq_rid,
		    sc->alc_irq);
	}
	if (sc->alc_irq_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->alc_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->alc_res_rid,
		    sc->alc_res);
	}

	alc_dma_free(sc);

	return (0);
}
moderation"); 1671 /* Pull in device tunables. */ 1672 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT; 1673 error = resource_int_value(device_get_name(sc->alc_dev), 1674 device_get_unit(sc->alc_dev), "int_rx_mod", &sc->alc_int_rx_mod); 1675 if (error == 0) { 1676 if (sc->alc_int_rx_mod < ALC_IM_TIMER_MIN || 1677 sc->alc_int_rx_mod > ALC_IM_TIMER_MAX) { 1678 device_printf(sc->alc_dev, "int_rx_mod value out of " 1679 "range; using default: %d\n", 1680 ALC_IM_RX_TIMER_DEFAULT); 1681 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT; 1682 } 1683 } 1684 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT; 1685 error = resource_int_value(device_get_name(sc->alc_dev), 1686 device_get_unit(sc->alc_dev), "int_tx_mod", &sc->alc_int_tx_mod); 1687 if (error == 0) { 1688 if (sc->alc_int_tx_mod < ALC_IM_TIMER_MIN || 1689 sc->alc_int_tx_mod > ALC_IM_TIMER_MAX) { 1690 device_printf(sc->alc_dev, "int_tx_mod value out of " 1691 "range; using default: %d\n", 1692 ALC_IM_TX_TIMER_DEFAULT); 1693 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT; 1694 } 1695 } 1696 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit", 1697 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0, 1698 sysctl_hw_alc_proc_limit, "I", 1699 "max number of Rx events to process"); 1700 /* Pull in device tunables. */ 1701 sc->alc_process_limit = ALC_PROC_DEFAULT; 1702 error = resource_int_value(device_get_name(sc->alc_dev), 1703 device_get_unit(sc->alc_dev), "process_limit", 1704 &sc->alc_process_limit); 1705 if (error == 0) { 1706 if (sc->alc_process_limit < ALC_PROC_MIN || 1707 sc->alc_process_limit > ALC_PROC_MAX) { 1708 device_printf(sc->alc_dev, 1709 "process_limit value out of range; " 1710 "using default: %d\n", ALC_PROC_DEFAULT); 1711 sc->alc_process_limit = ALC_PROC_DEFAULT; 1712 } 1713 } 1714 1715 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 1716 NULL, "ALC statistics"); 1717 parent = SYSCTL_CHILDREN(tree); 1718 1719 /* Rx statistics. 
*/ 1720 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 1721 NULL, "Rx MAC statistics"); 1722 child = SYSCTL_CHILDREN(tree); 1723 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 1724 &stats->rx_frames, "Good frames"); 1725 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", 1726 &stats->rx_bcast_frames, "Good broadcast frames"); 1727 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", 1728 &stats->rx_mcast_frames, "Good multicast frames"); 1729 ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 1730 &stats->rx_pause_frames, "Pause control frames"); 1731 ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames", 1732 &stats->rx_control_frames, "Control frames"); 1733 ALC_SYSCTL_STAT_ADD32(ctx, child, "crc_errs", 1734 &stats->rx_crcerrs, "CRC errors"); 1735 ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 1736 &stats->rx_lenerrs, "Frames with length mismatched"); 1737 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets", 1738 &stats->rx_bytes, "Good octets"); 1739 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets", 1740 &stats->rx_bcast_bytes, "Good broadcast octets"); 1741 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets", 1742 &stats->rx_mcast_bytes, "Good multicast octets"); 1743 ALC_SYSCTL_STAT_ADD32(ctx, child, "runts", 1744 &stats->rx_runts, "Too short frames"); 1745 ALC_SYSCTL_STAT_ADD32(ctx, child, "fragments", 1746 &stats->rx_fragments, "Fragmented frames"); 1747 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 1748 &stats->rx_pkts_64, "64 bytes frames"); 1749 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 1750 &stats->rx_pkts_65_127, "65 to 127 bytes frames"); 1751 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 1752 &stats->rx_pkts_128_255, "128 to 255 bytes frames"); 1753 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 1754 &stats->rx_pkts_256_511, "256 to 511 bytes frames"); 1755 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 1756 &stats->rx_pkts_512_1023, "512 to 1023 bytes frames"); 1757 ALC_SYSCTL_STAT_ADD32(ctx, child, 
"frames_1024_1518", 1758 &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames"); 1759 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", 1760 &stats->rx_pkts_1519_max, "1519 to max frames"); 1761 ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs", 1762 &stats->rx_pkts_truncated, "Truncated frames due to MTU size"); 1763 ALC_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", 1764 &stats->rx_fifo_oflows, "FIFO overflows"); 1765 ALC_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs", 1766 &stats->rx_rrs_errs, "Return status write-back errors"); 1767 ALC_SYSCTL_STAT_ADD32(ctx, child, "align_errs", 1768 &stats->rx_alignerrs, "Alignment errors"); 1769 ALC_SYSCTL_STAT_ADD32(ctx, child, "filtered", 1770 &stats->rx_pkts_filtered, 1771 "Frames dropped due to address filtering"); 1772 1773 /* Tx statistics. */ 1774 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 1775 NULL, "Tx MAC statistics"); 1776 child = SYSCTL_CHILDREN(tree); 1777 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 1778 &stats->tx_frames, "Good frames"); 1779 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", 1780 &stats->tx_bcast_frames, "Good broadcast frames"); 1781 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", 1782 &stats->tx_mcast_frames, "Good multicast frames"); 1783 ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 1784 &stats->tx_pause_frames, "Pause control frames"); 1785 ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames", 1786 &stats->tx_control_frames, "Control frames"); 1787 ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_defers", 1788 &stats->tx_excess_defer, "Frames with excessive derferrals"); 1789 ALC_SYSCTL_STAT_ADD32(ctx, child, "defers", 1790 &stats->tx_excess_defer, "Frames with derferrals"); 1791 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets", 1792 &stats->tx_bytes, "Good octets"); 1793 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets", 1794 &stats->tx_bcast_bytes, "Good broadcast octets"); 1795 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets", 1796 
&stats->tx_mcast_bytes, "Good multicast octets"); 1797 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 1798 &stats->tx_pkts_64, "64 bytes frames"); 1799 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 1800 &stats->tx_pkts_65_127, "65 to 127 bytes frames"); 1801 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 1802 &stats->tx_pkts_128_255, "128 to 255 bytes frames"); 1803 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 1804 &stats->tx_pkts_256_511, "256 to 511 bytes frames"); 1805 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 1806 &stats->tx_pkts_512_1023, "512 to 1023 bytes frames"); 1807 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 1808 &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames"); 1809 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", 1810 &stats->tx_pkts_1519_max, "1519 to max frames"); 1811 ALC_SYSCTL_STAT_ADD32(ctx, child, "single_colls", 1812 &stats->tx_single_colls, "Single collisions"); 1813 ALC_SYSCTL_STAT_ADD32(ctx, child, "multi_colls", 1814 &stats->tx_multi_colls, "Multiple collisions"); 1815 ALC_SYSCTL_STAT_ADD32(ctx, child, "late_colls", 1816 &stats->tx_late_colls, "Late collisions"); 1817 ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls", 1818 &stats->tx_excess_colls, "Excessive collisions"); 1819 ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns", 1820 &stats->tx_underrun, "FIFO underruns"); 1821 ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns", 1822 &stats->tx_desc_underrun, "Descriptor write-back errors"); 1823 ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 1824 &stats->tx_lenerrs, "Frames with length mismatched"); 1825 ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs", 1826 &stats->tx_pkts_truncated, "Truncated frames due to MTU size"); 1827 } 1828 1829 #undef ALC_SYSCTL_STAT_ADD32 1830 #undef ALC_SYSCTL_STAT_ADD64 1831 1832 struct alc_dmamap_arg { 1833 bus_addr_t alc_busaddr; 1834 }; 1835 1836 static void 1837 alc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1838 { 1839 struct alc_dmamap_arg 
*ctx; 1840 1841 if (error != 0) 1842 return; 1843 1844 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1845 1846 ctx = (struct alc_dmamap_arg *)arg; 1847 ctx->alc_busaddr = segs[0].ds_addr; 1848 } 1849 1850 #ifdef foo 1851 /* 1852 * Normal and high Tx descriptors shares single Tx high address. 1853 * Four Rx descriptor/return rings and CMB shares the same Rx 1854 * high address. 1855 */ 1856 static int 1857 alc_check_boundary(struct alc_softc *sc) 1858 { 1859 bus_addr_t cmb_end, rx_ring_end, rr_ring_end, tx_ring_end; 1860 1861 rx_ring_end = sc->alc_rdata.alc_rx_ring_paddr + ALC_RX_RING_SZ; 1862 rr_ring_end = sc->alc_rdata.alc_rr_ring_paddr + ALC_RR_RING_SZ; 1863 cmb_end = sc->alc_rdata.alc_cmb_paddr + ALC_CMB_SZ; 1864 tx_ring_end = sc->alc_rdata.alc_tx_ring_paddr + ALC_TX_RING_SZ; 1865 1866 /* 4GB boundary crossing is not allowed. */ 1867 if ((ALC_ADDR_HI(rx_ring_end) != 1868 ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr)) || 1869 (ALC_ADDR_HI(rr_ring_end) != 1870 ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr)) || 1871 (ALC_ADDR_HI(cmb_end) != 1872 ALC_ADDR_HI(sc->alc_rdata.alc_cmb_paddr)) || 1873 (ALC_ADDR_HI(tx_ring_end) != 1874 ALC_ADDR_HI(sc->alc_rdata.alc_tx_ring_paddr))) 1875 return (EFBIG); 1876 /* 1877 * Make sure Rx return descriptor/Rx descriptor/CMB use 1878 * the same high address. 1879 */ 1880 if ((ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(rr_ring_end)) || 1881 (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(cmb_end))) 1882 return (EFBIG); 1883 1884 return (0); 1885 } 1886 #endif 1887 1888 static int 1889 alc_dma_alloc(struct alc_softc *sc) 1890 { 1891 struct alc_txdesc *txd; 1892 struct alc_rxdesc *rxd; 1893 struct alc_dmamap_arg ctx; 1894 int error, i; 1895 1896 /* Create parent DMA tag. 
*/ 1897 error = bus_dma_tag_create( 1898 sc->alc_cdata.alc_parent_tag, /* parent */ 1899 1, 0, /* alignment, boundary */ 1900 BUS_SPACE_MAXADDR, /* lowaddr */ 1901 BUS_SPACE_MAXADDR, /* highaddr */ 1902 NULL, NULL, /* filter, filterarg */ 1903 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1904 0, /* nsegments */ 1905 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1906 0, /* flags */ 1907 &sc->alc_cdata.alc_parent_tag); 1908 if (error != 0) { 1909 device_printf(sc->alc_dev, 1910 "could not create parent DMA tag.\n"); 1911 goto fail; 1912 } 1913 1914 /* Create DMA tag for Tx descriptor ring. */ 1915 error = bus_dma_tag_create( 1916 sc->alc_cdata.alc_parent_tag, /* parent */ 1917 ALC_TX_RING_ALIGN, 0, /* alignment, boundary */ 1918 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1919 BUS_SPACE_MAXADDR, /* highaddr */ 1920 NULL, NULL, /* filter, filterarg */ 1921 ALC_TX_RING_SZ, /* maxsize */ 1922 1, /* nsegments */ 1923 ALC_TX_RING_SZ, /* maxsegsize */ 1924 0, /* flags */ 1925 &sc->alc_cdata.alc_tx_ring_tag); 1926 if (error != 0) { 1927 device_printf(sc->alc_dev, 1928 "could not create Tx ring DMA tag.\n"); 1929 goto fail; 1930 } 1931 1932 /* Create DMA tag for Rx free descriptor ring. */ 1933 error = bus_dma_tag_create( 1934 sc->alc_cdata.alc_parent_tag, /* parent */ 1935 ALC_RX_RING_ALIGN, 0, /* alignment, boundary */ 1936 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1937 BUS_SPACE_MAXADDR, /* highaddr */ 1938 NULL, NULL, /* filter, filterarg */ 1939 ALC_RX_RING_SZ, /* maxsize */ 1940 1, /* nsegments */ 1941 ALC_RX_RING_SZ, /* maxsegsize */ 1942 0, /* flags */ 1943 &sc->alc_cdata.alc_rx_ring_tag); 1944 if (error != 0) { 1945 device_printf(sc->alc_dev, 1946 "could not create Rx ring DMA tag.\n"); 1947 goto fail; 1948 } 1949 /* Create DMA tag for Rx return descriptor ring. 
*/ 1950 error = bus_dma_tag_create( 1951 sc->alc_cdata.alc_parent_tag, /* parent */ 1952 ALC_RR_RING_ALIGN, 0, /* alignment, boundary */ 1953 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1954 BUS_SPACE_MAXADDR, /* highaddr */ 1955 NULL, NULL, /* filter, filterarg */ 1956 ALC_RR_RING_SZ, /* maxsize */ 1957 1, /* nsegments */ 1958 ALC_RR_RING_SZ, /* maxsegsize */ 1959 0, /* flags */ 1960 &sc->alc_cdata.alc_rr_ring_tag); 1961 if (error != 0) { 1962 device_printf(sc->alc_dev, 1963 "could not create Rx return ring DMA tag.\n"); 1964 goto fail; 1965 } 1966 1967 /* Create DMA tag for coalescing message block. */ 1968 error = bus_dma_tag_create( 1969 sc->alc_cdata.alc_parent_tag, /* parent */ 1970 ALC_CMB_ALIGN, 0, /* alignment, boundary */ 1971 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1972 BUS_SPACE_MAXADDR, /* highaddr */ 1973 NULL, NULL, /* filter, filterarg */ 1974 ALC_CMB_SZ, /* maxsize */ 1975 1, /* nsegments */ 1976 ALC_CMB_SZ, /* maxsegsize */ 1977 0, /* flags */ 1978 &sc->alc_cdata.alc_cmb_tag); 1979 if (error != 0) { 1980 device_printf(sc->alc_dev, 1981 "could not create CMB DMA tag.\n"); 1982 goto fail; 1983 } 1984 /* Create DMA tag for status message block. */ 1985 error = bus_dma_tag_create( 1986 sc->alc_cdata.alc_parent_tag, /* parent */ 1987 ALC_SMB_ALIGN, 0, /* alignment, boundary */ 1988 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1989 BUS_SPACE_MAXADDR, /* highaddr */ 1990 NULL, NULL, /* filter, filterarg */ 1991 ALC_SMB_SZ, /* maxsize */ 1992 1, /* nsegments */ 1993 ALC_SMB_SZ, /* maxsegsize */ 1994 0, /* flags */ 1995 &sc->alc_cdata.alc_smb_tag); 1996 if (error != 0) { 1997 device_printf(sc->alc_dev, 1998 "could not create SMB DMA tag.\n"); 1999 goto fail; 2000 } 2001 2002 /* Allocate DMA'able memory and load the DMA map for Tx ring. 
*/ 2003 error = bus_dmamem_alloc(sc->alc_cdata.alc_tx_ring_tag, 2004 (void **)&sc->alc_rdata.alc_tx_ring, 2005 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 2006 &sc->alc_cdata.alc_tx_ring_map); 2007 if (error != 0) { 2008 device_printf(sc->alc_dev, 2009 "could not allocate DMA'able memory for Tx ring.\n"); 2010 goto fail; 2011 } 2012 ctx.alc_busaddr = 0; 2013 error = bus_dmamap_load(sc->alc_cdata.alc_tx_ring_tag, 2014 sc->alc_cdata.alc_tx_ring_map, sc->alc_rdata.alc_tx_ring, 2015 ALC_TX_RING_SZ, alc_dmamap_cb, &ctx, 0); 2016 if (error != 0 || ctx.alc_busaddr == 0) { 2017 device_printf(sc->alc_dev, 2018 "could not load DMA'able memory for Tx ring.\n"); 2019 goto fail; 2020 } 2021 sc->alc_rdata.alc_tx_ring_paddr = ctx.alc_busaddr; 2022 2023 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 2024 error = bus_dmamem_alloc(sc->alc_cdata.alc_rx_ring_tag, 2025 (void **)&sc->alc_rdata.alc_rx_ring, 2026 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 2027 &sc->alc_cdata.alc_rx_ring_map); 2028 if (error != 0) { 2029 device_printf(sc->alc_dev, 2030 "could not allocate DMA'able memory for Rx ring.\n"); 2031 goto fail; 2032 } 2033 ctx.alc_busaddr = 0; 2034 error = bus_dmamap_load(sc->alc_cdata.alc_rx_ring_tag, 2035 sc->alc_cdata.alc_rx_ring_map, sc->alc_rdata.alc_rx_ring, 2036 ALC_RX_RING_SZ, alc_dmamap_cb, &ctx, 0); 2037 if (error != 0 || ctx.alc_busaddr == 0) { 2038 device_printf(sc->alc_dev, 2039 "could not load DMA'able memory for Rx ring.\n"); 2040 goto fail; 2041 } 2042 sc->alc_rdata.alc_rx_ring_paddr = ctx.alc_busaddr; 2043 2044 /* Allocate DMA'able memory and load the DMA map for Rx return ring. 
*/ 2045 error = bus_dmamem_alloc(sc->alc_cdata.alc_rr_ring_tag, 2046 (void **)&sc->alc_rdata.alc_rr_ring, 2047 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 2048 &sc->alc_cdata.alc_rr_ring_map); 2049 if (error != 0) { 2050 device_printf(sc->alc_dev, 2051 "could not allocate DMA'able memory for Rx return ring.\n"); 2052 goto fail; 2053 } 2054 ctx.alc_busaddr = 0; 2055 error = bus_dmamap_load(sc->alc_cdata.alc_rr_ring_tag, 2056 sc->alc_cdata.alc_rr_ring_map, sc->alc_rdata.alc_rr_ring, 2057 ALC_RR_RING_SZ, alc_dmamap_cb, &ctx, 0); 2058 if (error != 0 || ctx.alc_busaddr == 0) { 2059 device_printf(sc->alc_dev, 2060 "could not load DMA'able memory for Tx ring.\n"); 2061 goto fail; 2062 } 2063 sc->alc_rdata.alc_rr_ring_paddr = ctx.alc_busaddr; 2064 2065 /* Allocate DMA'able memory and load the DMA map for CMB. */ 2066 error = bus_dmamem_alloc(sc->alc_cdata.alc_cmb_tag, 2067 (void **)&sc->alc_rdata.alc_cmb, 2068 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 2069 &sc->alc_cdata.alc_cmb_map); 2070 if (error != 0) { 2071 device_printf(sc->alc_dev, 2072 "could not allocate DMA'able memory for CMB.\n"); 2073 goto fail; 2074 } 2075 ctx.alc_busaddr = 0; 2076 error = bus_dmamap_load(sc->alc_cdata.alc_cmb_tag, 2077 sc->alc_cdata.alc_cmb_map, sc->alc_rdata.alc_cmb, 2078 ALC_CMB_SZ, alc_dmamap_cb, &ctx, 0); 2079 if (error != 0 || ctx.alc_busaddr == 0) { 2080 device_printf(sc->alc_dev, 2081 "could not load DMA'able memory for CMB.\n"); 2082 goto fail; 2083 } 2084 sc->alc_rdata.alc_cmb_paddr = ctx.alc_busaddr; 2085 2086 /* Allocate DMA'able memory and load the DMA map for SMB. 
*/ 2087 error = bus_dmamem_alloc(sc->alc_cdata.alc_smb_tag, 2088 (void **)&sc->alc_rdata.alc_smb, 2089 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 2090 &sc->alc_cdata.alc_smb_map); 2091 if (error != 0) { 2092 device_printf(sc->alc_dev, 2093 "could not allocate DMA'able memory for SMB.\n"); 2094 goto fail; 2095 } 2096 ctx.alc_busaddr = 0; 2097 error = bus_dmamap_load(sc->alc_cdata.alc_smb_tag, 2098 sc->alc_cdata.alc_smb_map, sc->alc_rdata.alc_smb, 2099 ALC_SMB_SZ, alc_dmamap_cb, &ctx, 0); 2100 if (error != 0 || ctx.alc_busaddr == 0) { 2101 device_printf(sc->alc_dev, 2102 "could not load DMA'able memory for CMB.\n"); 2103 goto fail; 2104 } 2105 sc->alc_rdata.alc_smb_paddr = ctx.alc_busaddr; 2106 2107 #ifdef foo 2108 /* 2109 * All of the status blocks and descriptor rings are 2110 * allocated at lower 4GB, their addresses high 32bits 2111 * part are same (all 0). 2112 */ 2113 2114 /* Make sure we've not crossed 4GB boundary. */ 2115 if ((error = alc_check_boundary(sc)) != 0) { 2116 device_printf(sc->alc_dev, "4GB boundary crossed, " 2117 "switching to 32bit DMA addressing mode.\n"); 2118 alc_dma_free(sc); 2119 /* 2120 * Limit max allowable DMA address space to 32bit 2121 * and try again. 2122 */ 2123 lowaddr = BUS_SPACE_MAXADDR_32BIT; 2124 goto again; 2125 } 2126 #endif 2127 2128 /* 2129 * Create Tx buffer parent tag. 2130 * AR81[3567]x allows 64bit DMA addressing of Tx/Rx buffers 2131 * so it needs separate parent DMA tag as parent DMA address 2132 * space could be restricted to be within 32bit address space 2133 * by 4GB boundary crossing. 
2134 */ 2135 error = bus_dma_tag_create( 2136 sc->alc_cdata.alc_parent_tag, /* parent */ 2137 1, 0, /* alignment, boundary */ 2138 BUS_SPACE_MAXADDR, /* lowaddr */ 2139 BUS_SPACE_MAXADDR, /* highaddr */ 2140 NULL, NULL, /* filter, filterarg */ 2141 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 2142 0, /* nsegments */ 2143 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 2144 0, /* flags */ 2145 &sc->alc_cdata.alc_buffer_tag); 2146 if (error != 0) { 2147 device_printf(sc->alc_dev, 2148 "could not create parent buffer DMA tag.\n"); 2149 goto fail; 2150 } 2151 2152 /* Create DMA tag for Tx buffers. */ 2153 error = bus_dma_tag_create( 2154 sc->alc_cdata.alc_buffer_tag, /* parent */ 2155 1, 0, /* alignment, boundary */ 2156 BUS_SPACE_MAXADDR, /* lowaddr */ 2157 BUS_SPACE_MAXADDR, /* highaddr */ 2158 NULL, NULL, /* filter, filterarg */ 2159 ALC_TSO_MAXSIZE, /* maxsize */ 2160 ALC_MAXTXSEGS, /* nsegments */ 2161 ALC_TSO_MAXSEGSIZE, /* maxsegsize */ 2162 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, /* flags */ 2163 &sc->alc_cdata.alc_tx_tag); 2164 if (error != 0) { 2165 device_printf(sc->alc_dev, "could not create Tx DMA tag.\n"); 2166 goto fail; 2167 } 2168 2169 /* Create DMA tag for Rx buffers. */ 2170 error = bus_dma_tag_create( 2171 sc->alc_cdata.alc_buffer_tag, /* parent */ 2172 ALC_RX_BUF_ALIGN, 0, /* alignment, boundary */ 2173 BUS_SPACE_MAXADDR, /* lowaddr */ 2174 BUS_SPACE_MAXADDR, /* highaddr */ 2175 NULL, NULL, /* filter, filterarg */ 2176 MCLBYTES, /* maxsize */ 2177 1, /* nsegments */ 2178 MCLBYTES, /* maxsegsize */ 2179 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED, /* flags */ 2180 &sc->alc_cdata.alc_rx_tag); 2181 if (error != 0) { 2182 device_printf(sc->alc_dev, "could not create Rx DMA tag.\n"); 2183 goto fail; 2184 } 2185 /* Create DMA maps for Tx buffers. 
*/ 2186 for (i = 0; i < ALC_TX_RING_CNT; i++) { 2187 txd = &sc->alc_cdata.alc_txdesc[i]; 2188 txd->tx_m = NULL; 2189 txd->tx_dmamap = NULL; 2190 error = bus_dmamap_create(sc->alc_cdata.alc_tx_tag, 2191 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2192 &txd->tx_dmamap); 2193 if (error != 0) { 2194 device_printf(sc->alc_dev, 2195 "could not create Tx dmamap.\n"); 2196 goto fail; 2197 } 2198 } 2199 /* Create DMA maps for Rx buffers. */ 2200 error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 2201 BUS_DMA_WAITOK, 2202 &sc->alc_cdata.alc_rx_sparemap); 2203 if (error) { 2204 device_printf(sc->alc_dev, 2205 "could not create spare Rx dmamap.\n"); 2206 goto fail; 2207 } 2208 for (i = 0; i < ALC_RX_RING_CNT; i++) { 2209 rxd = &sc->alc_cdata.alc_rxdesc[i]; 2210 rxd->rx_m = NULL; 2211 rxd->rx_dmamap = NULL; 2212 error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 2213 BUS_DMA_WAITOK, 2214 &rxd->rx_dmamap); 2215 if (error != 0) { 2216 device_printf(sc->alc_dev, 2217 "could not create Rx dmamap.\n"); 2218 goto fail; 2219 } 2220 } 2221 2222 fail: 2223 return (error); 2224 } 2225 2226 static void 2227 alc_dma_free(struct alc_softc *sc) 2228 { 2229 struct alc_txdesc *txd; 2230 struct alc_rxdesc *rxd; 2231 int i; 2232 2233 /* Tx buffers. 
 */
	if (sc->alc_cdata.alc_tx_tag != NULL) {
		for (i = 0; i < ALC_TX_RING_CNT; i++) {
			txd = &sc->alc_cdata.alc_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->alc_cdata.alc_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_tag);
		sc->alc_cdata.alc_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->alc_cdata.alc_rx_tag != NULL) {
		for (i = 0; i < ALC_RX_RING_CNT; i++) {
			rxd = &sc->alc_cdata.alc_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		/* The spare map is destroyed together with the Rx maps. */
		if (sc->alc_cdata.alc_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
			    sc->alc_cdata.alc_rx_sparemap);
			sc->alc_cdata.alc_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_tag);
		sc->alc_cdata.alc_rx_tag = NULL;
	}
	/* Tx descriptor ring. */
	if (sc->alc_cdata.alc_tx_ring_tag != NULL) {
		/* A non-zero paddr means the map was successfully loaded. */
		if (sc->alc_rdata.alc_tx_ring_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_tx_ring_tag,
			    sc->alc_cdata.alc_tx_ring_map);
		if (sc->alc_rdata.alc_tx_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_tx_ring_tag,
			    sc->alc_rdata.alc_tx_ring,
			    sc->alc_cdata.alc_tx_ring_map);
		sc->alc_rdata.alc_tx_ring_paddr = 0;
		sc->alc_rdata.alc_tx_ring = NULL;
		sc->alc_cdata.alc_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_ring_tag);
		sc->alc_cdata.alc_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->alc_cdata.alc_rx_ring_tag != NULL) {
		if (sc->alc_rdata.alc_rx_ring_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_rx_ring_tag,
			    sc->alc_cdata.alc_rx_ring_map);
		if (sc->alc_rdata.alc_rx_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_rx_ring_tag,
			    sc->alc_rdata.alc_rx_ring,
			    sc->alc_cdata.alc_rx_ring_map);
		sc->alc_rdata.alc_rx_ring_paddr = 0;
		sc->alc_rdata.alc_rx_ring = NULL;
		sc->alc_cdata.alc_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_ring_tag);
		sc->alc_cdata.alc_rx_ring_tag = NULL;
	}
	/* Rx return ring. */
	if (sc->alc_cdata.alc_rr_ring_tag != NULL) {
		if (sc->alc_rdata.alc_rr_ring_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_rr_ring_tag,
			    sc->alc_cdata.alc_rr_ring_map);
		if (sc->alc_rdata.alc_rr_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_rr_ring_tag,
			    sc->alc_rdata.alc_rr_ring,
			    sc->alc_cdata.alc_rr_ring_map);
		sc->alc_rdata.alc_rr_ring_paddr = 0;
		sc->alc_rdata.alc_rr_ring = NULL;
		sc->alc_cdata.alc_rr_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_rr_ring_tag);
		sc->alc_cdata.alc_rr_ring_tag = NULL;
	}
	/* CMB block */
	if (sc->alc_cdata.alc_cmb_tag != NULL) {
		if (sc->alc_rdata.alc_cmb_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_cmb_tag,
			    sc->alc_cdata.alc_cmb_map);
		if (sc->alc_rdata.alc_cmb != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_cmb_tag,
			    sc->alc_rdata.alc_cmb,
			    sc->alc_cdata.alc_cmb_map);
		sc->alc_rdata.alc_cmb_paddr = 0;
		sc->alc_rdata.alc_cmb = NULL;
		sc->alc_cdata.alc_cmb_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_cmb_tag);
		sc->alc_cdata.alc_cmb_tag = NULL;
	}
	/* SMB block */
	if (sc->alc_cdata.alc_smb_tag != NULL) {
		if (sc->alc_rdata.alc_smb_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_smb_tag,
			    sc->alc_cdata.alc_smb_map);
		if (sc->alc_rdata.alc_smb != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_smb_tag,
			    sc->alc_rdata.alc_smb,
			    sc->alc_cdata.alc_smb_map);
		sc->alc_rdata.alc_smb_paddr = 0;
		sc->alc_rdata.alc_smb = NULL;
		sc->alc_cdata.alc_smb_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_smb_tag);
		sc->alc_cdata.alc_smb_tag = NULL;
	}
	/* Parent tags are destroyed last, after all child tags. */
	if (sc->alc_cdata.alc_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->alc_cdata.alc_buffer_tag);
		sc->alc_cdata.alc_buffer_tag = NULL;
	}
	if (sc->alc_cdata.alc_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->alc_cdata.alc_parent_tag);
		sc->alc_cdata.alc_parent_tag = NULL;
	}
}

/*
 * Shutdown is implemented as a suspend: alc_suspend() stops the
 * chip under the interface serializer.
 */
static int
alc_shutdown(device_t dev)
{

	return (alc_suspend(dev));
}

#if 0
/* XXX: LINK SPEED */
/*
 * Note, this driver resets the link speed to 10/100Mbps by
 * restarting auto-negotiation in suspend/shutdown phase but we
 * don't know whether that auto-negotiation would succeed or not
 * as driver has no control after powering off/suspend operation.
 * If the renegotiation fail WOL may not work. Running at 1Gbps
 * will draw more power than 375mA at 3.3V which is specified in
 * PCI specification and that would result in complete
 * shutdowning power to ethernet controller.
 *
 * TODO
 * Save current negotiated media speed/duplex/flow-control to
 * softc and restore the same link again after resuming. PHY
 * handling such as power down/resetting to 100Mbps may be better
 * handled in suspend method in phy driver.
2373 */ 2374 static void 2375 alc_setlinkspeed(struct alc_softc *sc) 2376 { 2377 struct mii_data *mii; 2378 int aneg, i; 2379 2380 mii = device_get_softc(sc->alc_miibus); 2381 mii_pollstat(mii); 2382 aneg = 0; 2383 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 2384 (IFM_ACTIVE | IFM_AVALID)) { 2385 switch IFM_SUBTYPE(mii->mii_media_active) { 2386 case IFM_10_T: 2387 case IFM_100_TX: 2388 return; 2389 case IFM_1000_T: 2390 aneg++; 2391 break; 2392 default: 2393 break; 2394 } 2395 } 2396 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_100T2CR, 0); 2397 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 2398 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 2399 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 2400 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); 2401 DELAY(1000); 2402 if (aneg != 0) { 2403 /* 2404 * Poll link state until alc(4) get a 10/100Mbps link. 2405 */ 2406 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 2407 mii_pollstat(mii); 2408 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 2409 == (IFM_ACTIVE | IFM_AVALID)) { 2410 switch (IFM_SUBTYPE( 2411 mii->mii_media_active)) { 2412 case IFM_10_T: 2413 case IFM_100_TX: 2414 alc_mac_config(sc); 2415 return; 2416 default: 2417 break; 2418 } 2419 } 2420 ALC_UNLOCK(sc); 2421 pause("alclnk", hz); 2422 ALC_LOCK(sc); 2423 } 2424 if (i == MII_ANEGTICKS_GIGE) 2425 device_printf(sc->alc_dev, 2426 "establishing a link failed, WOL may not work!"); 2427 } 2428 /* 2429 * No link, force MAC to have 100Mbps, full-duplex link. 2430 * This is the last resort and may/may not work. 
2431 */ 2432 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 2433 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 2434 alc_mac_config(sc); 2435 } 2436 #endif 2437 2438 #if 0 2439 /* XXX: WOL */ 2440 static void 2441 alc_setwol(struct alc_softc *sc) 2442 { 2443 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 2444 alc_setwol_816x(sc); 2445 else 2446 alc_setwol_813x(sc); 2447 } 2448 2449 static void 2450 alc_setwol_813x(struct alc_softc *sc) 2451 { 2452 struct ifnet *ifp; 2453 uint32_t reg, pmcs; 2454 uint16_t pmstat; 2455 2456 ALC_LOCK_ASSERT(sc); 2457 2458 alc_disable_l0s_l1(sc); 2459 ifp = sc->alc_ifp; 2460 if ((sc->alc_flags & ALC_FLAG_PM) == 0) { 2461 /* Disable WOL. */ 2462 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 2463 reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC); 2464 reg |= PCIE_PHYMISC_FORCE_RCV_DET; 2465 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg); 2466 /* Force PHY power down. */ 2467 alc_phy_down(sc); 2468 CSR_WRITE_4(sc, ALC_MASTER_CFG, 2469 CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS); 2470 return; 2471 } 2472 2473 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 2474 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) 2475 alc_setlinkspeed(sc); 2476 CSR_WRITE_4(sc, ALC_MASTER_CFG, 2477 CSR_READ_4(sc, ALC_MASTER_CFG) & ~MASTER_CLK_SEL_DIS); 2478 } 2479 2480 pmcs = 0; 2481 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2482 pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB; 2483 CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs); 2484 reg = CSR_READ_4(sc, ALC_MAC_CFG); 2485 reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI | 2486 MAC_CFG_BCAST); 2487 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2488 reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST; 2489 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2490 reg |= MAC_CFG_RX_ENB; 2491 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 2492 2493 reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC); 2494 reg |= PCIE_PHYMISC_FORCE_RCV_DET; 2495 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg); 2496 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 2497 /* WOL disabled, PHY power down. 
*/ 2498 alc_phy_down(sc); 2499 CSR_WRITE_4(sc, ALC_MASTER_CFG, 2500 CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS); 2501 2502 } 2503 /* Request PME. */ 2504 pmstat = pci_read_config(sc->alc_dev, 2505 sc->alc_pmcap + PCIR_POWER_STATUS, 2); 2506 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2507 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2508 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2509 pci_write_config(sc->alc_dev, 2510 sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2); 2511 } 2512 2513 static void 2514 alc_setwol_816x(struct alc_softc *sc) 2515 { 2516 struct ifnet *ifp; 2517 uint32_t gphy, mac, master, pmcs, reg; 2518 uint16_t pmstat; 2519 2520 ALC_LOCK_ASSERT(sc); 2521 2522 ifp = sc->alc_ifp; 2523 master = CSR_READ_4(sc, ALC_MASTER_CFG); 2524 master &= ~MASTER_CLK_SEL_DIS; 2525 gphy = CSR_READ_4(sc, ALC_GPHY_CFG); 2526 gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE | GPHY_CFG_100AB_ENB | 2527 GPHY_CFG_PHY_PLL_ON); 2528 gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET; 2529 if ((sc->alc_flags & ALC_FLAG_PM) == 0) { 2530 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 2531 gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW; 2532 mac = CSR_READ_4(sc, ALC_MAC_CFG); 2533 } else { 2534 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 2535 gphy |= GPHY_CFG_EXT_RESET; 2536 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) 2537 alc_setlinkspeed(sc); 2538 } 2539 pmcs = 0; 2540 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2541 pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB; 2542 CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs); 2543 mac = CSR_READ_4(sc, ALC_MAC_CFG); 2544 mac &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI | 2545 MAC_CFG_BCAST); 2546 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2547 mac |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST; 2548 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2549 mac |= MAC_CFG_RX_ENB; 2550 alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10, 2551 ANEG_S3DIG10_SL); 2552 } 2553 2554 /* Enable OSC. 
 */
	reg = CSR_READ_4(sc, ALC_MISC);
	reg &= ~MISC_INTNLOSC_OPEN;
	CSR_WRITE_4(sc, ALC_MISC, reg);
	reg |= MISC_INTNLOSC_OPEN;
	CSR_WRITE_4(sc, ALC_MISC, reg);
	CSR_WRITE_4(sc, ALC_MASTER_CFG, master);
	CSR_WRITE_4(sc, ALC_MAC_CFG, mac);
	CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy);
	reg = CSR_READ_4(sc, ALC_PDLL_TRNS1);
	reg |= PDLL_TRNS1_D3PLLOFF_ENB;
	CSR_WRITE_4(sc, ALC_PDLL_TRNS1, reg);

	if ((sc->alc_flags & ALC_FLAG_PM) != 0) {
		/* Request PME. */
		pmstat = pci_read_config(sc->alc_dev,
		    sc->alc_pmcap + PCIR_POWER_STATUS, 2);
		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
		if ((ifp->if_capenable & IFCAP_WOL) != 0)
			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->alc_dev,
		    sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
	}
}

#endif

/*
 * Suspend method: stop the chip under the interface serializer.
 * WOL programming is compiled out (#if 0) in this port.
 */
static int
alc_suspend(device_t dev)
{
	struct alc_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	alc_stop(sc);
#if 0
	/* XXX: WOL */
	alc_setwol(sc);
#endif
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

/*
 * Resume method: clear any pending PME, reset the PHY and, if the
 * interface was up, reinitialize the chip.
 */
static int
alc_resume(device_t dev)
{
	struct alc_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t pmstat;

	lwkt_serialize_enter(ifp->if_serializer);

	if ((sc->alc_flags & ALC_FLAG_PM) != 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->alc_dev,
		    sc->alc_pmcap + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->alc_dev,
			    sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
		}
	}

	/* Reset PHY. */
	alc_phy_reset(sc);
	if (ifp->if_flags & IFF_UP)
		alc_init(sc);

	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

/*
 * Map *m_head for DMA and fill Tx descriptors for it, handling VLAN
 * tag insertion, checksum offload and TSO fixups along the way.
 *
 * On success returns 0 with the producer index advanced; the caller
 * still has to set TD_EOP handling aside (done at the end here) and
 * kick the hardware.  On failure *m_head is either freed and set to
 * NULL (fatal mbuf errors) or left intact (ENOBUFS on ring-full, so
 * the caller may requeue it).
 */
static int
alc_encap(struct alc_softc *sc, struct mbuf **m_head)
{
	struct alc_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *tcp;
	bus_dma_segment_t txsegs[ALC_MAXTXSEGS];
	bus_dmamap_t map;
	uint32_t cflags, hdrlen, ip_off, poff, vtag;
	int error, idx, nsegs, prod;

	M_ASSERTPKTHDR((*m_head));

	m = *m_head;
	ip = NULL;
	tcp = NULL;
	ip_off = poff = 0;
	if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) {
		/*
		 * AR81[3567]x requires offset of TCP/UDP header in its
		 * Tx descriptor to perform Tx checksum offloading. TSO
		 * also requires TCP header offset and modification of
		 * IP/TCP header. This kind of operation takes many CPU
		 * cycles on FreeBSD so fast host CPU is required to get
		 * smooth TSO performance.
		 */
		struct ether_header *eh;

		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			/* Release original mbufs. */
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}

		ip_off = sizeof(struct ether_header);
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/*
		 * Check if hardware VLAN insertion is off.
		 * Additional check for LLC/SNAP frame?
		 */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			/* Software-tagged frame: IP starts after the
			 * VLAN header. */
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		/* poff = offset of the L4 (TCP/UDP) header. */
		poff = ip_off + (ip->ip_hl << 2);
		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
			m = m_pullup(m, poff + sizeof(struct tcphdr));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
			m = m_pullup(m, poff + (tcp->th_off << 2));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			/*
			 * Due to strict adherence of Microsoft NDIS
			 * Large Send specification, hardware expects
			 * a pseudo TCP checksum inserted by upper
			 * stack. Unfortunately the pseudo TCP
			 * checksum that NDIS refers to does not include
			 * TCP payload length so driver should recompute
			 * the pseudo checksum here. Hopefully this
			 * wouldn't be much burden on modern CPUs.
			 *
			 * Reset IP checksum and recompute TCP pseudo
			 * checksum as NDIS specification said.
			 */
			/* Re-fetch pointers: m_pullup may have moved data. */
			ip = (struct ip *)(mtod(m, char *) + ip_off);
			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
			ip->ip_sum = 0;
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		}
		*m_head = m;
	}

	prod = sc->alc_cdata.alc_tx_prod;
	txd = &sc->alc_cdata.alc_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf_defrag(
	    sc->alc_cdata.alc_tx_tag, map, m_head,
	    txsegs, ALC_MAXTXSEGS, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(*m_head);
		*m_head = NULL;
		return (error);
	}
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check descriptor overrun. */
	if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) {
		/* Ring full: unload but keep the mbuf for requeueing. */
		bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, map);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, map, BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = TD_ETHERNET;
	vtag = 0;
	desc = NULL;
	idx = 0;
	/* Configure VLAN hardware tag insertion. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		vtag = htons(m->m_pkthdr.ether_vlantag);
		vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
		cflags |= TD_INS_VLAN_TAG;
	}
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/* Request TSO and set MSS. */
		cflags |= TD_TSO | TD_TSO_DESCV1;
		cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << TD_MSS_SHIFT) &
		    TD_MSS_MASK;
		/* Set TCP header offset. */
		cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) &
		    TD_TCPHDR_OFFSET_MASK;
		/*
		 * AR81[3567]x requires the first buffer should
		 * only hold IP/TCP header data. Payload should
		 * be handled in other descriptors.
		 */
		hdrlen = poff + (tcp->th_off << 2);
		desc = &sc->alc_rdata.alc_tx_ring[prod];
		desc->len = htole32(TX_BYTES(hdrlen | vtag));
		desc->flags = htole32(cflags);
		desc->addr = htole64(txsegs[0].ds_addr);
		sc->alc_cdata.alc_tx_cnt++;
		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
		if (m->m_len - hdrlen > 0) {
			/* Handle remaining payload of the first fragment. */
			desc = &sc->alc_rdata.alc_tx_ring[prod];
			desc->len = htole32(TX_BYTES((m->m_len - hdrlen) |
			    vtag));
			desc->flags = htole32(cflags);
			desc->addr = htole64(txsegs[0].ds_addr + hdrlen);
			sc->alc_cdata.alc_tx_cnt++;
			ALC_DESC_INC(prod, ALC_TX_RING_CNT);
		}
		/* Handle remaining fragments. */
		idx = 1;
	} else if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
		/* Configure Tx checksum offload. */
#ifdef ALC_USE_CUSTOM_CSUM
		cflags |= TD_CUSTOM_CSUM;
		/* Set checksum start offset. */
		cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
		    TD_PLOAD_OFFSET_MASK;
		/* Set checksum insertion position of TCP/UDP. */
		cflags |= (((poff + m->m_pkthdr.csum_data) >> 1) <<
		    TD_CUSTOM_CSUM_OFFSET_SHIFT) & TD_CUSTOM_CSUM_OFFSET_MASK;
#else
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			cflags |= TD_IPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			cflags |= TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			cflags |= TD_UDPCSUM;
		/* Set TCP/UDP header offset. */
		cflags |= (poff << TD_L4HDR_OFFSET_SHIFT) &
		    TD_L4HDR_OFFSET_MASK;
#endif
	}

	/* One descriptor per remaining DMA segment. */
	for (; idx < nsegs; idx++) {
		desc = &sc->alc_rdata.alc_tx_ring[prod];
		desc->len = htole32(TX_BYTES(txsegs[idx].ds_len) | vtag);
		desc->flags = htole32(cflags);
		desc->addr = htole64(txsegs[idx].ds_addr);
		sc->alc_cdata.alc_tx_cnt++;
		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
	}
	/* Update producer index.
 */
	sc->alc_cdata.alc_tx_prod = prod;

	/* Finally set EOP on the last descriptor. */
	prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
	desc = &sc->alc_rdata.alc_tx_ring[prod];
	desc->flags |= htole32(TD_EOP);

	/* Swap dmamap of the first and the last. */
	txd = &sc->alc_cdata.alc_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	return (0);
}

/*
 * Interface if_start handler: drain the send queue into the Tx
 * ring via alc_encap() and kick the hardware's producer index.
 * Runs with the interface serializer held.
 */
static void
alc_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Reclaim transmitted frames. */
	if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
		alc_txeof(sc);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;
	if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
		/* No link: drop everything queued. */
		ifq_purge(&ifp->if_snd);
		return;
	}

	for (enq = 0; !ifq_is_empty(&ifp->if_snd); ) {
		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (alc_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			ifq_prepend(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Sync descriptors. */
		bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
		    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
		/* Kick. Assume we're using normal Tx priority queue. */
		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
			CSR_WRITE_2(sc, ALC_MBOX_TD_PRI0_PROD_IDX,
			    (uint16_t)sc->alc_cdata.alc_tx_prod);
		else
			CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
			    (sc->alc_cdata.alc_tx_prod <<
			    MBOX_TD_PROD_LO_IDX_SHIFT) &
			    MBOX_TD_PROD_LO_IDX_MASK);
		/* Set a timeout in case the chip goes out to lunch. */
		sc->alc_watchdog_timer = ALC_TX_TIMEOUT;
	}
}

/*
 * Tx watchdog: counts down alc_watchdog_timer (armed by alc_start())
 * and reinitializes the chip if it expires.  NOTE(review): presumably
 * driven from the driver's periodic tick callout — confirm at caller.
 */
static void
alc_watchdog(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Not armed, or armed and not yet expired. */
	if (sc->alc_watchdog_timer == 0 || --sc->alc_watchdog_timer)
		return;

	if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
		if_printf(sc->alc_ifp, "watchdog timeout (lost link)\n");
		IFNET_STAT_INC(ifp, oerrors, 1);
		alc_init(sc);
		return;
	}
	if_printf(sc->alc_ifp, "watchdog timeout -- resetting\n");
	IFNET_STAT_INC(ifp, oerrors, 1);
	alc_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

/*
 * Interface ioctl handler: MTU, interface flags, multicast filter,
 * media and capability changes.  Runs with the serializer held.
 */
static int
alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct alc_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		/* Jumbo MTU is only accepted on ALC_FLAG_JUMBO parts. */
		if (ifr->ifr_mtu < ETHERMIN ||
		    ifr->ifr_mtu > (sc->alc_ident->max_framelen -
		    sizeof(struct ether_vlan_header) - ETHER_CRC_LEN) ||
		    ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 &&
		    ifr->ifr_mtu > ETHERMTU)) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
#if 0
			/* AR81[3567]x has 13 bits MSS field.
/*
 * alc_ioctl: ifnet ioctl handler.
 *
 * Handles MTU changes, interface flag changes, multicast filter updates,
 * media selection, and capability toggles; everything else is passed to
 * ether_ioctl().  Runs under the interface serializer.
 *
 * Returns 0 on success or an errno value (EINVAL for bad MTU, or whatever
 * ifmedia_ioctl()/ether_ioctl() return).
 */
static int
alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct alc_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		/*
		 * Reject MTUs below the Ethernet minimum, above what the
		 * chip's maximum frame length can carry (minus VLAN header
		 * and CRC), or above standard Ethernet when the device
		 * lacks jumbo support.
		 */
		if (ifr->ifr_mtu < ETHERMIN ||
		    ifr->ifr_mtu > (sc->alc_ident->max_framelen -
		    sizeof(struct ether_vlan_header) - ETHER_CRC_LEN) ||
		    ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 &&
		    ifr->ifr_mtu > ETHERMTU)) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
#if 0
			/* AR81[3567]x has 13 bits MSS field. */
			if (ifp->if_mtu > ALC_TSO_MTU &&
			    (ifp->if_capenable & IFCAP_TSO4) != 0) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				ifp->if_hwassist &= ~CSUM_TSO;
			}
#endif
		}
		break;
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Only PROMISC/ALLMULTI changes require a filter
			 * reprogram on a running interface; otherwise
			 * bring the interface up if it isn't already.
			 */
			if ((ifp->if_flags & IFF_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->alc_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				alc_rxfilter(sc);
			else if ((ifp->if_flags & IFF_RUNNING) == 0)
				alc_init(sc);
		} else if ((ifp->if_flags & IFF_RUNNING) != 0)
			alc_stop(sc);
		/* Remember flags so the next delta can be computed. */
		sc->alc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			alc_rxfilter(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->alc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		/* mask holds the capabilities the caller wants toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= ALC_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
		}
#if 0
		/* XXX: WOL */
		if ((mask & IFCAP_WOL_MCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MCAST;
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
#endif
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			alc_rxvlan(sc);
		}
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

		/*
		 * VLAN hardware tagging is required to do checksum
		 * offload or TSO on VLAN interface. Checksum offload
		 * on VLAN interface also requires hardware checksum
		 * offload of parent interface.
		 */
		if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
		// XXX VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
VLAN interface. Checksum offload 3021 * on VLAN interface also requires hardware checksum 3022 * offload of parent interface. 3023 */ 3024 if ((ifp->if_capenable & IFCAP_TXCSUM) == 0) 3025 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 3026 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 3027 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 3028 // XXX VLAN_CAPABILITIES(ifp); 3029 break; 3030 default: 3031 error = ether_ioctl(ifp, cmd, data); 3032 break; 3033 } 3034 3035 return (error); 3036 } 3037 3038 static void 3039 alc_mac_config(struct alc_softc *sc) 3040 { 3041 struct mii_data *mii; 3042 uint32_t reg; 3043 3044 mii = device_get_softc(sc->alc_miibus); 3045 reg = CSR_READ_4(sc, ALC_MAC_CFG); 3046 reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC | 3047 MAC_CFG_SPEED_MASK); 3048 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 || 3049 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 || 3050 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || 3051 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) { 3052 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW; 3053 } 3054 /* Reprogram MAC with resolved speed/duplex. 
/*
 * alc_stats_clear: reset the hardware MAC statistics.
 *
 * On chips without the SMB bug the statistics block is DMA'd into host
 * memory, so clearing is just resetting the "updated" flag.  On buggy
 * chips the MIB counters are clear-on-read registers, so each one is
 * read once and the value discarded.
 */
static void
alc_stats_clear(struct alc_softc *sc)
{
	struct smb sb, *smb;
	uint32_t *reg;
	int i;

	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
		    sc->alc_cdata.alc_smb_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		smb = sc->alc_rdata.alc_smb;
		/* Update done, clear. */
		smb->updated = 0;
		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
		    sc->alc_cdata.alc_smb_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	} else {
		/*
		 * Walk the Rx counter span of struct smb purely to count
		 * registers; reg is a cursor, i the register offset.
		 * Assumes the rx_frames..rx_pkts_filtered fields are
		 * contiguous uint32_t's matching the MIB layout --
		 * NOTE(review): verify against if_alcreg.h.
		 */
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics (values intentionally discarded). */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}
}
/*
 * alc_stats_update: accumulate hardware MAC statistics into the softc
 * and mirror the interesting ones into the ifnet counters.
 *
 * Source of the snapshot depends on the chip: the DMA'd SMB block when
 * available, otherwise the clear-on-read MIB registers (read into a
 * local struct smb on the stack).
 */
static void
alc_stats_update(struct alc_softc *sc)
{
	struct alc_hw_stats *stat;
	struct smb sb, *smb;
	struct ifnet *ifp;
	uint32_t *reg;
	int i;

	ifp = sc->alc_ifp;
	stat = &sc->alc_stats;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
		    sc->alc_cdata.alc_smb_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		smb = sc->alc_rdata.alc_smb;
		/* Hardware hasn't produced a new snapshot yet. */
		if (smb->updated == 0)
			return;
	} else {
		smb = &sb;
		/*
		 * Read Rx statistics.  reg walks consecutive uint32_t
		 * fields of struct smb while i tracks the register
		 * offset; assumes field order matches the MIB layout --
		 * NOTE(review): verify against if_alcreg.h.
		 */
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics. */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_rrs_errs += smb->rx_rrs_errs;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	IFNET_STAT_INC(ifp, opackets, smb->tx_frames);

	/*
	 * Multi-collision frames are weighted by 2 and excess collisions
	 * by the retry limit to approximate the total collision count.
	 */
	IFNET_STAT_INC(ifp, collisions, smb->tx_single_colls +
	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT);

	IFNET_STAT_INC(ifp, oerrors,
	    smb->tx_excess_colls + smb->tx_late_colls + smb->tx_underrun);

	IFNET_STAT_INC(ifp, ipackets, smb->rx_frames);

	IFNET_STAT_INC(ifp, ierrors, smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
	    smb->rx_alignerrs);

	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		/* Update done, clear; hand the block back to hardware. */
		smb->updated = 0;
		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
		    sc->alc_cdata.alc_smb_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
/*
 * alc_intr: interrupt handler.
 *
 * Reads and acknowledges the interrupt status, processes Rx completions,
 * reinitializes the chip on fatal DMA/TxQ errors, kicks pending Tx work,
 * and finally re-enables interrupts.  Runs under the interface serializer.
 */
static void
alc_intr(void *arg)
{
	struct alc_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	ASSERT_SERIALIZED(ifp->if_serializer);

	status = CSR_READ_4(sc, ALC_INTR_STATUS);
	/* Not our interrupt (shared line). */
	if ((status & ALC_INTRS) == 0)
		return;

	/* Acknowledge interrupts and disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & INTR_RX_PKT) {
			/* Non-zero return means unrecoverable Rx state. */
			if (alc_rxintr(sc)) {
				alc_init(sc);
				/* alc_init() re-enables interrupts. */
				return;
			}
		}
		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
		    INTR_TXQ_TO_RST)) {
			if (status & INTR_DMA_RD_TO_RST) {
				if_printf(ifp,
				    "DMA read error! -- resetting\n");
			}
			if (status & INTR_DMA_WR_TO_RST) {
				if_printf(ifp,
				    "DMA write error! -- resetting\n");
			}
			if (status & INTR_TXQ_TO_RST)
				if_printf(ifp, "TxQ reset! -- resetting\n");
			alc_init(sc);
			return;
		}
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);

		/* Re-enable interrupts */
		CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
	}
}
-- resetting\n"); 3254 alc_init(sc); 3255 return; 3256 } 3257 if (!ifq_is_empty(&ifp->if_snd)) 3258 if_devstart(ifp); 3259 3260 /* Re-enable interrupts */ 3261 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF); 3262 } 3263 } 3264 3265 static void 3266 alc_txeof(struct alc_softc *sc) 3267 { 3268 struct ifnet *ifp; 3269 struct alc_txdesc *txd; 3270 uint32_t cons, prod; 3271 int prog; 3272 3273 ifp = sc->alc_ifp; 3274 3275 if (sc->alc_cdata.alc_tx_cnt == 0) 3276 return; 3277 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, 3278 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_POSTWRITE); 3279 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) { 3280 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, 3281 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD); 3282 prod = sc->alc_rdata.alc_cmb->cons; 3283 } else { 3284 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 3285 prod = CSR_READ_2(sc, ALC_MBOX_TD_PRI0_CONS_IDX); 3286 else { 3287 prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX); 3288 /* Assume we're using normal Tx priority queue. */ 3289 prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >> 3290 MBOX_TD_CONS_LO_IDX_SHIFT; 3291 } 3292 } 3293 cons = sc->alc_cdata.alc_tx_cons; 3294 /* 3295 * Go through our Tx list and free mbufs for those 3296 * frames which have been transmitted. 3297 */ 3298 for (prog = 0; cons != prod; prog++, 3299 ALC_DESC_INC(cons, ALC_TX_RING_CNT)) { 3300 if (sc->alc_cdata.alc_tx_cnt <= 0) 3301 break; 3302 prog++; 3303 ifq_clr_oactive(&ifp->if_snd); 3304 sc->alc_cdata.alc_tx_cnt--; 3305 txd = &sc->alc_cdata.alc_txdesc[cons]; 3306 if (txd->tx_m != NULL) { 3307 /* Reclaim transmitted mbufs. 
/*
 * alc_newbuf: attach a fresh mbuf cluster to an Rx descriptor.
 *
 * Loads the new cluster into the spare DMA map first; only if that
 * succeeds is the old buffer unloaded and the maps swapped, so on
 * failure the descriptor keeps its previous (still valid) buffer.
 *
 * Returns 0 on success, ENOBUFS if allocation or DMA load fails.
 */
static int
alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd, boolean_t wait)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;
	int error;

	m = m_getcl(wait ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
#ifdef foo
	/* Hardware require 4 bytes align */
	m_adj(m, ETHER_ALIGN);
#endif

	/* Load into the spare map; the old map stays intact on failure. */
	error = bus_dmamap_load_mbuf_segment(
	    sc->alc_cdata.alc_rx_tag,
	    sc->alc_cdata.alc_rx_sparemap,
	    m, segs, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	/* Tear down the mapping of the buffer being replaced, if any. */
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the descriptor's map with the (now loaded) spare map. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
	sc->alc_cdata.alc_rx_sparemap = map;
	bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->addr = htole64(segs[0].ds_addr);
	return (0);
}
/*
 * alc_rxintr: drain the Rx return ring.
 *
 * Walks valid return descriptors, hands each frame to alc_rxeof(),
 * advances the free-descriptor consumer by the segment count, and
 * finally tells the chip about the newly available Rx buffers.
 *
 * Returns 0 on success, EIO if the hardware reports a zero segment
 * count (caller is expected to reset the chip).
 */
static int
alc_rxintr(struct alc_softc *sc)
{
	struct ifnet *ifp;
	struct rx_rdesc *rrd;
	uint32_t nsegs, status;
	int rr_cons, prog;

	bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
	    sc->alc_cdata.alc_rr_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
	    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_POSTWRITE);
	rr_cons = sc->alc_cdata.alc_rr_cons;
	ifp = sc->alc_ifp;
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) {
		rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
		status = le32toh(rrd->status);
		/* Stop at the first descriptor hardware hasn't filled. */
		if ((status & RRD_VALID) == 0)
			break;
		nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
		if (nsegs == 0) {
			/* This should not happen! */
			device_printf(sc->alc_dev,
			    "unexpected segment count -- resetting\n");
			return (EIO);
		}
		alc_rxeof(sc, rrd);
		/* Clear Rx return status. */
		rrd->status = 0;
		ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
		/* Each frame may have consumed several Rx descriptors. */
		sc->alc_cdata.alc_rx_cons += nsegs;
		sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
		prog += nsegs;
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->alc_cdata.alc_rr_cons = rr_cons;
		/* Sync Rx return descriptors. */
		bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
		    sc->alc_cdata.alc_rr_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Sync updated Rx descriptors such that controller see
		 * modified buffer addresses.
		 */
		bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
		    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
		/*
		 * Let controller know availability of new Rx buffers.
		 * Since alc(4) use RXQ_CFG_RD_BURST_DEFAULT descriptors
		 * it may be possible to update ALC_MBOX_RD0_PROD_IDX
		 * only when Rx buffer pre-fetching is required. In
		 * addition we already set ALC_RX_RD_FREE_THRESH to
		 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However
		 * it still seems that pre-fetching needs more
		 * experimentation.
		 */
		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
			CSR_WRITE_2(sc, ALC_MBOX_RD0_PROD_IDX,
			    (uint16_t)sc->alc_cdata.alc_rx_cons);
		else
			CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
			    sc->alc_cdata.alc_rx_cons);
	}

	return 0;
}
/* Receive a frame. */
/*
 * alc_rxeof: reassemble one (possibly multi-segment) received frame
 * described by the given Rx return descriptor and pass it up the stack.
 *
 * Each segment's mbuf is replaced in the ring via alc_newbuf(); the old
 * mbufs are chained together.  On the last segment the chain length is
 * fixed up (the chip cannot strip the CRC, so 4 bytes are trimmed here),
 * the VLAN tag is attached if present, and the frame is input.
 */
static void
alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
{
	struct alc_rxdesc *rxd;
	struct ifnet *ifp;
	struct mbuf *mp, *m;
	uint32_t rdinfo, status, vtag;
	int count, nsegs, rx_cons;

	ifp = sc->alc_ifp;
	status = le32toh(rrd->status);
	rdinfo = le32toh(rrd->rdinfo);
	rx_cons = RRD_RD_IDX(rdinfo);
	nsegs = RRD_RD_CNT(rdinfo);

	sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
	if ((status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) != 0) {
		/*
		 * We want to pass the following frames to upper
		 * layer regardless of error status of Rx return
		 * ring.
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length and protocol specific length
		 *    does not match.
		 *
		 * Force network stack compute checksum for
		 * errored frames.
		 */
		status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
		/* Hard errors: drop the frame entirely. */
		if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
		    RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
			return;
	}

	for (count = 0; count < nsegs; count++,
	    ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
		mp = rxd->rx_m;
		/* Add a new receive buffer to the ring. */
		if (alc_newbuf(sc, rxd, FALSE) != 0) {
			IFNET_STAT_INC(ifp, iqdrops, 1);
			/* Reuse Rx buffers. */
			if (sc->alc_cdata.alc_rxhead != NULL)
				m_freem(sc->alc_cdata.alc_rxhead);
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = sc->alc_buf_size;

		/* Chain received mbufs. */
		if (sc->alc_cdata.alc_rxhead == NULL) {
			sc->alc_cdata.alc_rxhead = mp;
			sc->alc_cdata.alc_rxtail = mp;
		} else {
			/*mp->m_flags &= ~M_PKTHDR;*/
			sc->alc_cdata.alc_rxprev_tail =
			    sc->alc_cdata.alc_rxtail;
			sc->alc_cdata.alc_rxtail->m_next = mp;
			sc->alc_cdata.alc_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->alc_cdata.alc_rxhead;
			/*m->m_flags |= M_PKTHDR;*/

			/*
			 * It seems that L1C/L2C controller has no way
			 * to tell hardware to strip CRC bytes.
			 */
			m->m_pkthdr.len =
			    sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Set last mbuf size. */
				mp->m_len = sc->alc_cdata.alc_rxlen -
				    (nsegs - 1) * sc->alc_buf_size;
				/* Remove the CRC bytes in chained mbufs. */
				if (mp->m_len <= ETHER_CRC_LEN) {
					/*
					 * CRC spills into the previous
					 * mbuf; drop the last one and trim
					 * the remainder from its
					 * predecessor.
					 */
					sc->alc_cdata.alc_rxtail =
					    sc->alc_cdata.alc_rxprev_tail;
					sc->alc_cdata.alc_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->alc_cdata.alc_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			} else
				m->m_len = m->m_pkthdr.len;
			m->m_pkthdr.rcvif = ifp;
			/*
			 * Due to hardware bugs, Rx checksum offloading
			 * was intentionally disabled.
			 */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
			    (status & RRD_VLAN_TAG) != 0) {
				vtag = RRD_VLAN(le32toh(rrd->vtag));
				m->m_pkthdr.ether_vlantag = ntohs(vtag);
				m->m_flags |= M_VLANTAG;
			}

			/* Pass it on. */
			ifp->if_input(ifp, m, NULL, -1);
		}
	}
	/* Reset mbuf chains. */
	ALC_RXCHAIN_RESET(sc);
}
*/ 3555 ALC_RXCHAIN_RESET(sc); 3556 } 3557 3558 static void 3559 alc_tick(void *arg) 3560 { 3561 struct alc_softc *sc = arg; 3562 struct ifnet *ifp = &sc->arpcom.ac_if; 3563 struct mii_data *mii; 3564 3565 lwkt_serialize_enter(ifp->if_serializer); 3566 3567 mii = device_get_softc(sc->alc_miibus); 3568 mii_tick(mii); 3569 alc_stats_update(sc); 3570 /* 3571 * alc(4) does not rely on Tx completion interrupts to reclaim 3572 * transferred buffers. Instead Tx completion interrupts are 3573 * used to hint for scheduling Tx task. So it's necessary to 3574 * release transmitted buffers by kicking Tx completion 3575 * handler. This limits the maximum reclamation delay to a hz. 3576 */ 3577 alc_txeof(sc); 3578 alc_watchdog(sc); 3579 callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc); 3580 3581 #if 0 3582 /* poll for debugging */ 3583 alc_intr(sc); 3584 #endif 3585 3586 lwkt_serialize_exit(ifp->if_serializer); 3587 } 3588 3589 static void 3590 alc_osc_reset(struct alc_softc *sc) 3591 { 3592 uint32_t reg; 3593 3594 reg = CSR_READ_4(sc, ALC_MISC3); 3595 reg &= ~MISC3_25M_BY_SW; 3596 reg |= MISC3_25M_NOTO_INTNL; 3597 CSR_WRITE_4(sc, ALC_MISC3, reg); 3598 3599 reg = CSR_READ_4(sc, ALC_MISC); 3600 if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) { 3601 /* 3602 * Restore over-current protection default value. 3603 * This value could be reset by MAC reset. 3604 */ 3605 reg &= ~MISC_PSW_OCP_MASK; 3606 reg |= (MISC_PSW_OCP_DEFAULT << MISC_PSW_OCP_SHIFT); 3607 reg &= ~MISC_INTNLOSC_OPEN; 3608 CSR_WRITE_4(sc, ALC_MISC, reg); 3609 CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN); 3610 reg = CSR_READ_4(sc, ALC_MISC2); 3611 reg &= ~MISC2_CALB_START; 3612 CSR_WRITE_4(sc, ALC_MISC2, reg); 3613 CSR_WRITE_4(sc, ALC_MISC2, reg | MISC2_CALB_START); 3614 3615 } else { 3616 reg &= ~MISC_INTNLOSC_OPEN; 3617 /* Disable isolate for revision A devices. 
/*
 * alc_osc_reset: reset the internal 25MHz oscillator (AR816x family).
 *
 * The exact register sequence differs between revision B0+ parts and
 * earlier silicon; the B0+ path also restores the over-current
 * protection value and restarts calibration.
 */
static void
alc_osc_reset(struct alc_softc *sc)
{
	uint32_t reg;

	reg = CSR_READ_4(sc, ALC_MISC3);
	reg &= ~MISC3_25M_BY_SW;
	reg |= MISC3_25M_NOTO_INTNL;
	CSR_WRITE_4(sc, ALC_MISC3, reg);

	reg = CSR_READ_4(sc, ALC_MISC);
	if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) {
		/*
		 * Restore over-current protection default value.
		 * This value could be reset by MAC reset.
		 */
		reg &= ~MISC_PSW_OCP_MASK;
		reg |= (MISC_PSW_OCP_DEFAULT << MISC_PSW_OCP_SHIFT);
		reg &= ~MISC_INTNLOSC_OPEN;
		/* Pulse INTNLOSC_OPEN: write low, then high. */
		CSR_WRITE_4(sc, ALC_MISC, reg);
		CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
		/* Restart oscillator calibration. */
		reg = CSR_READ_4(sc, ALC_MISC2);
		reg &= ~MISC2_CALB_START;
		CSR_WRITE_4(sc, ALC_MISC2, reg);
		CSR_WRITE_4(sc, ALC_MISC2, reg | MISC2_CALB_START);

	} else {
		reg &= ~MISC_INTNLOSC_OPEN;
		/* Disable isolate for revision A devices. */
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
			reg &= ~MISC_ISO_ENB;
		/* Note: opposite pulse order from the B0+ path. */
		CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
		CSR_WRITE_4(sc, ALC_MISC, reg);
	}

	/* Let the oscillator settle. */
	DELAY(20);
}
/*
 * alc_reset: soft-reset the MAC and wait for it to go idle.
 *
 * Includes the AR816x reset workaround (pre-reset mailbox poke and
 * temporary ASPM L0s/L1 disable on early revisions), waits for the
 * MASTER_RESET bit to self-clear and the Rx/Tx MAC/queue engines to
 * go idle, then restores clocks/oscillator and applies SERDES clock
 * slow-down where required.
 */
static void
alc_reset(struct alc_softc *sc)
{
	uint32_t pmcfg, reg;
	int i;

	pmcfg = 0;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		/* Reset workaround. */
		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 1);
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
		    (sc->alc_rev & 0x01) != 0) {
			/* Disable L0s/L1s before reset. */
			pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
			if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB))
			    != 0) {
				pmcfg &= ~(PM_CFG_ASPM_L0S_ENB |
				    PM_CFG_ASPM_L1_ENB);
				CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
			}
		}
	}
	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
	reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		/* Workaround completion: wait for the mailbox to clear. */
		for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
			DELAY(10);
			if (CSR_READ_4(sc, ALC_MBOX_RD0_PROD_IDX) == 0)
				break;
		}
		if (i == 0)
			device_printf(sc->alc_dev, "MAC reset timeout!\n");
	}

	/* Wait for the self-clearing MASTER_RESET bit. */
	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->alc_dev, "master reset timeout!\n");

	/* Wait for the DMA/queue engines to go idle. */
	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC |
		    IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
		    (sc->alc_rev & 0x01) != 0) {
			reg = CSR_READ_4(sc, ALC_MASTER_CFG);
			reg |= MASTER_CLK_SEL_DIS;
			CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
			/* Restore L0s/L1s config. */
			if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB))
			    != 0)
				CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
		}

		alc_osc_reset(sc);
		reg = CSR_READ_4(sc, ALC_MISC3);
		reg &= ~MISC3_25M_BY_SW;
		reg |= MISC3_25M_NOTO_INTNL;
		CSR_WRITE_4(sc, ALC_MISC3, reg);
		reg = CSR_READ_4(sc, ALC_MISC);
		reg &= ~MISC_INTNLOSC_OPEN;
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
			reg &= ~MISC_ISO_ENB;
		CSR_WRITE_4(sc, ALC_MISC, reg);
		DELAY(20);
	}
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2)
		CSR_WRITE_4(sc, ALC_SERDES_LOCK,
		    CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN |
		    SERDES_PHY_CLK_SLOWDOWN);
}
*/ 3749 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 3750 CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, CLK_GATING_DMAW_ENB | 3751 CLK_GATING_DMAR_ENB | CLK_GATING_TXQ_ENB | 3752 CLK_GATING_RXQ_ENB | CLK_GATING_TXMAC_ENB | 3753 CLK_GATING_RXMAC_ENB); 3754 if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) 3755 CSR_WRITE_4(sc, ALC_IDLE_DECISN_TIMER, 3756 IDLE_DECISN_TIMER_DEFAULT_1MS); 3757 } else 3758 CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0); 3759 3760 /* Reprogram the station address. */ 3761 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 3762 CSR_WRITE_4(sc, ALC_PAR0, 3763 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 3764 CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]); 3765 /* 3766 * Clear WOL status and disable all WOL feature as WOL 3767 * would interfere Rx operation under normal environments. 3768 */ 3769 CSR_READ_4(sc, ALC_WOL_CFG); 3770 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 3771 /* Set Tx descriptor base addresses. */ 3772 paddr = sc->alc_rdata.alc_tx_ring_paddr; 3773 CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 3774 CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 3775 /* We don't use high priority ring. */ 3776 CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0); 3777 /* Set Tx descriptor counter. */ 3778 CSR_WRITE_4(sc, ALC_TD_RING_CNT, 3779 (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK); 3780 /* Set Rx descriptor base addresses. */ 3781 paddr = sc->alc_rdata.alc_rx_ring_paddr; 3782 CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 3783 CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 3784 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 3785 /* We use one Rx ring. */ 3786 CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0); 3787 CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0); 3788 CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0); 3789 } 3790 /* Set Rx descriptor counter. 
*/ 3791 CSR_WRITE_4(sc, ALC_RD_RING_CNT, 3792 (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK); 3793 3794 /* 3795 * Let hardware split jumbo frames into alc_max_buf_sized chunks. 3796 * if it do not fit the buffer size. Rx return descriptor holds 3797 * a counter that indicates how many fragments were made by the 3798 * hardware. The buffer size should be multiple of 8 bytes. 3799 * Since hardware has limit on the size of buffer size, always 3800 * use the maximum value. 3801 * For strict-alignment architectures make sure to reduce buffer 3802 * size by 8 bytes to make room for alignment fixup. 3803 */ 3804 sc->alc_buf_size = RX_BUF_SIZE_MAX; 3805 CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size); 3806 3807 paddr = sc->alc_rdata.alc_rr_ring_paddr; 3808 /* Set Rx return descriptor base addresses. */ 3809 CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 3810 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 3811 /* We use one Rx return ring. */ 3812 CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0); 3813 CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0); 3814 CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0); 3815 } 3816 /* Set Rx return descriptor counter. */ 3817 CSR_WRITE_4(sc, ALC_RRD_RING_CNT, 3818 (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK); 3819 paddr = sc->alc_rdata.alc_cmb_paddr; 3820 CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr)); 3821 paddr = sc->alc_rdata.alc_smb_paddr; 3822 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 3823 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr)); 3824 3825 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) { 3826 /* Reconfigure SRAM - Vendor magic. 
*/ 3827 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0); 3828 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100); 3829 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000); 3830 CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0); 3831 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0); 3832 CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0); 3833 CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000); 3834 CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000); 3835 } 3836 3837 /* Tell hardware that we're ready to load DMA blocks. */ 3838 CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD); 3839 3840 /* Configure interrupt moderation timer. */ 3841 reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT; 3842 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) 3843 reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT; 3844 CSR_WRITE_4(sc, ALC_IM_TIMER, reg); 3845 /* 3846 * We don't want to automatic interrupt clear as task queue 3847 * for the interrupt should know interrupt status. 3848 */ 3849 reg = CSR_READ_4(sc, ALC_MASTER_CFG); 3850 reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB); 3851 reg |= MASTER_SA_TIMER_ENB; 3852 if (ALC_USECS(sc->alc_int_rx_mod) != 0) 3853 reg |= MASTER_IM_RX_TIMER_ENB; 3854 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0 && 3855 ALC_USECS(sc->alc_int_tx_mod) != 0) 3856 reg |= MASTER_IM_TX_TIMER_ENB; 3857 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 3858 /* 3859 * Disable interrupt re-trigger timer. We don't want automatic 3860 * re-triggering of un-ACKed interrupts. 3861 */ 3862 CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0)); 3863 /* Configure CMB. 
 */
	/*
	 * Program the CMB Tx-done threshold and Tx timer registers.
	 * AR816x parts use a threshold of a third of the Tx ring and
	 * the tunable sc->alc_int_tx_mod interval; older parts get
	 * fixed values, or a zero timer when the CMB is known buggy
	 * (ALC_FLAG_CMB_BUG).
	 */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, ALC_TX_RING_CNT / 3);
		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER,
		    ALC_USECS(sc->alc_int_tx_mod));
	} else {
		if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
			CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
		} else
			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
	}

	/*
	 * Hardware can be configured to issue SMB interrupt based
	 * on programmed interval. Since there is a callout that is
	 * invoked for every hz in driver we use that instead of
	 * relying on periodic SMB interrupt.
	 */
	CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
	/* Clear MAC statistics. */
	alc_stats_clear(sc);

	/*
	 * Always use maximum frame size that controller can support.
	 * Otherwise received frames that has larger frame length
	 * than alc(4) MTU would be silently dropped in hardware. This
	 * would make path-MTU discovery hard as sender wouldn't get
	 * any responses from receiver. alc(4) supports
	 * multi-fragmented frames on Rx path so it has no issue on
	 * assembling fragmented frames. Using maximum frame size also
	 * removes the need to reinitialize hardware when interface
	 * MTU configuration was changed.
	 *
	 * Be conservative in what you do, be liberal in what you
	 * accept from others - RFC 793.
	 */
	CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* Disable header split(?) */
		CSR_WRITE_4(sc, ALC_HDS_CFG, 0);

		/* Configure IPG/IFG parameters. */
		CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
		    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) &
		    IPG_IFG_IPGT_MASK) |
		    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) &
		    IPG_IFG_MIFG_MASK) |
		    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) &
		    IPG_IFG_IPG1_MASK) |
		    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) &
		    IPG_IFG_IPG2_MASK));
		/* Set parameters for half-duplex media. */
		CSR_WRITE_4(sc, ALC_HDPX_CFG,
		    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
		    HDPX_CFG_LCOL_MASK) |
		    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
		    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
		    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
		    HDPX_CFG_ABEBT_MASK) |
		    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
		    HDPX_CFG_JAMIPG_MASK));
	}

	/*
	 * Set TSO/checksum offload threshold. For frames that is
	 * larger than this threshold, hardware wouldn't do
	 * TSO/checksum offloading.
	 */
	reg = (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
	    TSO_OFFLOAD_THRESH_MASK;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		reg |= TSO_OFFLOAD_ERRLGPKT_DROP_ENB;
	CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH, reg);
	/* Configure TxQ. */
	reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
	    TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		/* AR8152 B/B2: halve the Tx FIFO burst value. */
		reg >>= 1;
	}
	reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
	    TXQ_CFG_TD_BURST_MASK;
	reg |= TXQ_CFG_IP_OPTION_ENB | TXQ_CFG_8023_ENB;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		/* AR816x: per-queue Tx descriptor burst and WRR priorities. */
		reg = (TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q1_BURST_SHIFT |
		    TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q2_BURST_SHIFT |
		    TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q3_BURST_SHIFT |
		    HQTD_CFG_BURST_ENB);
		CSR_WRITE_4(sc, ALC_HQTD_CFG, reg);
		reg = WRR_PRI_RESTRICT_NONE;
		reg |= (WRR_PRI_DEFAULT << WRR_PRI0_SHIFT |
		    WRR_PRI_DEFAULT << WRR_PRI1_SHIFT |
		    WRR_PRI_DEFAULT << WRR_PRI2_SHIFT |
		    WRR_PRI_DEFAULT << WRR_PRI3_SHIFT);
		CSR_WRITE_4(sc, ALC_WRR, reg);
	} else {
		/* Configure Rx free descriptor pre-fetching. */
		CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
		    ((RX_RD_FREE_THRESH_HI_DEFAULT <<
		    RX_RD_FREE_THRESH_HI_SHIFT) & RX_RD_FREE_THRESH_HI_MASK) |
		    ((RX_RD_FREE_THRESH_LO_DEFAULT <<
		    RX_RD_FREE_THRESH_LO_SHIFT) & RX_RD_FREE_THRESH_LO_MASK));
	}

	/*
	 * Configure flow control parameters.
	 * XON  : 80% of Rx FIFO
	 * XOFF : 30% of Rx FIFO
	 */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		/*
		 * AR816x: derive the pause-low threshold from the SRAM
		 * Rx FIFO length (in 8-byte units), reserving either
		 * the fixed 816x amount or one max-sized Rx buffer.
		 */
		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
		reg &= SRAM_RX_FIFO_LEN_MASK;
		reg *= 8;
		if (reg > 8 * 1024)
			reg -= RX_FIFO_PAUSE_816X_RSVD;
		else
			reg -= RX_BUF_SIZE_MAX;
		reg /= 8;
		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
		    ((reg << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    (((RX_FIFO_PAUSE_816X_RSVD / 8) <<
		    RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_HI_MASK));
	} else if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132) {
		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
		rxf_hi = (reg * 8) / 10;
		rxf_lo = (reg * 3) / 10;
		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_HI_MASK));
	}

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* Disable RSS until I understand L1C/L2C's RSS logic. */
		CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
		CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
	}

	/* Configure RxQ. */
	reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK;
	reg |= RXQ_CFG_RSS_MODE_DIS;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		reg |= (RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT <<
		    RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT) &
		    RXQ_CFG_816X_IDT_TBL_SIZE_MASK;
		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
			reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
	} else {
		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 &&
		    sc->alc_ident->deviceid != DEVICEID_ATHEROS_AR8151_V2)
			reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
	}
	CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);

	/* Configure DMA parameters. */
	reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
	reg |= sc->alc_rcb;
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		reg |= DMA_CFG_CMB_ENB;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
		reg |= DMA_CFG_SMB_ENB;
	else
		reg |= DMA_CFG_SMB_DIS;
	reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
	    DMA_CFG_RD_BURST_SHIFT;
	reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
	    DMA_CFG_WR_BURST_SHIFT;
	reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
	    DMA_CFG_RD_DELAY_CNT_MASK;
	reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
	    DMA_CFG_WR_DELAY_CNT_MASK;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		/* Read-channel selection depends on AR816x silicon rev. */
		switch (AR816X_REV(sc->alc_rev)) {
		case AR816X_REV_A0:
		case AR816X_REV_A1:
			reg |= DMA_CFG_RD_CHNL_SEL_2;
			break;
		case AR816X_REV_B0:
			/* FALLTHROUGH */
		default:
			reg |= DMA_CFG_RD_CHNL_SEL_4;
			break;
		}
	}
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 *  Actual reconfiguration of MAC for resolved speed/duplex
	 *  is followed after detection of link establishment.
	 *  AR813x/AR815x always does checksum computation regardless
	 *  of MAC_CFG_RXCSUM_ENB bit. Also the controller is known to
	 *  have bug in protocol field in Rx return structure so
	 *  these controllers can't handle fragmented frames. Disable
	 *  Rx checksum offloading until there is a newer controller
	 *  that has sane implementation.
	 */
	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
	}
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
		reg |= MAC_CFG_SPEED_10_100;
	else
		reg |= MAC_CFG_SPEED_1000;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);

	/* Set up the receive filter. */
	alc_rxfilter(sc);
	alc_rxvlan(sc);

	/* Acknowledge all pending interrupts and clear it. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	sc->alc_flags &= ~ALC_FLAG_LINK;
	/* Switch to the current media. */
	/*mii_mediachg(mii);*/
	alc_mediachange_locked(sc);

	callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);

}

/*
 * Stop the controller: mark the interface down, cancel the tick
 * callout/watchdog, disable interrupts and DMA engines, stop the
 * Rx/Tx MACs, and unload/free any mbufs still attached to the Rx
 * and Tx descriptor rings.  Must be called with the interface
 * serializer held (asserted below).
 */
static void
alc_stop(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	uint32_t reg;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	sc->alc_flags &= ~ALC_FLAG_LINK;
	callout_stop(&sc->alc_tick_ch);
	sc->alc_watchdog_timer = 0;
	alc_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	/* Disable DMA. */
	reg = CSR_READ_4(sc, ALC_DMA_CFG);
	reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
	reg |= DMA_CFG_SMB_DIS;
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
	DELAY(1000);
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);
	/* Disable interrupts which might be touched in taskq handler. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);

	/* Disable L0s/L1s */
	alc_aspm(sc, 0, IFM_UNKNOWN);
	/* Reclaim Rx buffers that have been processed. */
	if (sc->alc_cdata.alc_rxhead != NULL)
		m_freem(sc->alc_cdata.alc_rxhead);
	ALC_RXCHAIN_RESET(sc);
	/*
	 * Free Tx/Rx mbufs still in the queues.
	 */
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->alc_cdata.alc_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->alc_cdata.alc_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

/*
 * Disable the Rx/Tx MACs (after stopping the queues) and poll
 * ALC_IDLE_STATUS until both report idle.  Prints a diagnostic if
 * the MACs do not go idle within ALC_TIMEOUT polls.
 */
static void
alc_stop_mac(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	alc_stop_queue(sc);
	/* Disable Rx/Tx MAC. */
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
	for (i = ALC_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->alc_dev,
		    "could not disable Rx/Tx MAC(0x%08x)!\n", reg);
}

/*
 * Enable the Rx and Tx queues.  qcfg[] enumerates cumulative
 * queue-enable masks; only qcfg[1] (queue 0) is used today on
 * non-AR816x parts, while AR816x enables queue 0 via its own bit.
 */
static void
alc_start_queue(struct alc_softc *sc)
{
	uint32_t qcfg[] = {
		0,
		RXQ_CFG_QUEUE0_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
		RXQ_CFG_ENB
	};
	uint32_t cfg;

	/* Enable RxQ. */
	cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		cfg &= ~RXQ_CFG_ENB;
		cfg |= qcfg[1];
	} else
		cfg |= RXQ_CFG_QUEUE0_ENB;
	CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
	/* Enable TxQ. */
	cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
	cfg |= TXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
}

/*
 * Disable the Rx and Tx queues and poll ALC_IDLE_STATUS until both
 * report idle; warn if they do not within ALC_TIMEOUT polls.
 */
static void
alc_stop_queue(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	/* Disable RxQ. */
	reg = CSR_READ_4(sc, ALC_RXQ_CFG);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		if ((reg & RXQ_CFG_ENB) != 0) {
			reg &= ~RXQ_CFG_ENB;
			CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
		}
	} else {
		if ((reg & RXQ_CFG_QUEUE0_ENB) != 0) {
			reg &= ~RXQ_CFG_QUEUE0_ENB;
			CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
		}
	}
	/* Disable TxQ. */
	reg = CSR_READ_4(sc, ALC_TXQ_CFG);
	if ((reg & TXQ_CFG_ENB) != 0) {
		reg &= ~TXQ_CFG_ENB;
		CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
	}
	DELAY(40);
	for (i = ALC_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->alc_dev,
		    "could not disable RxQ/TxQ (0x%08x)!\n", reg);
}

/*
 * Reset Tx ring bookkeeping (producer/consumer/count), zero the
 * descriptor ring, drop any stale mbuf pointers, and sync the ring
 * for the device.
 */
static void
alc_init_tx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	struct alc_txdesc *txd;
	int i;

	sc->alc_cdata.alc_tx_prod = 0;
	sc->alc_cdata.alc_tx_cons = 0;
	sc->alc_cdata.alc_tx_cnt = 0;

	rd = &sc->alc_rdata;
	bzero(rd->alc_tx_ring, ALC_TX_RING_SZ);
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
	    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
}

/*
 * Initialize the Rx descriptor ring: zero it, attach a fresh mbuf
 * to every slot via alc_newbuf(), sync the ring, and publish the
 * producer index to the chip.  Returns 0 on success or ENOBUFS if
 * an mbuf could not be allocated.
 */
static int
alc_init_rx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	struct alc_rxdesc *rxd;
	int i;

	sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
	rd = &sc->alc_rdata;
	bzero(rd->alc_rx_ring, ALC_RX_RING_SZ);
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->alc_rx_ring[i];
		if (alc_newbuf(sc, rxd, TRUE) != 0)
			return (ENOBUFS);
	}

	/*
	 * Since controller does not update Rx descriptors, driver
	 * does have to read Rx descriptors back so BUS_DMASYNC_PREWRITE
	 * is enough to ensure coherence.
	 */
	bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
	    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
	/* Let controller know availability of new Rx buffers. */
	CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);

	return (0);
}

/*
 * Reset the Rx return ring: zero the consumer index and the Rx mbuf
 * chain state, clear the ring, and sync it for bidirectional DMA.
 */
static void
alc_init_rr_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;

	sc->alc_cdata.alc_rr_cons = 0;
	ALC_RXCHAIN_RESET(sc);

	rd = &sc->alc_rdata;
	bzero(rd->alc_rr_ring, ALC_RR_RING_SZ);
	bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
	    sc->alc_cdata.alc_rr_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/* Zero the CMB area and sync it for bidirectional DMA. */
static void
alc_init_cmb(struct alc_softc *sc)
{
	struct alc_ring_data *rd;

	rd = &sc->alc_rdata;
	bzero(rd->alc_cmb, ALC_CMB_SZ);
	bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/* Zero the SMB (statistics) area and sync it for bidirectional DMA. */
static void
alc_init_smb(struct alc_softc *sc)
{
	struct alc_ring_data *rd;

	rd = &sc->alc_rdata;
	bzero(rd->alc_smb, ALC_SMB_SZ);
	bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Program hardware VLAN tag stripping in MAC_CFG according to the
 * interface's IFCAP_VLAN_HWTAGGING capability setting.
 */
static void
alc_rxvlan(struct alc_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;

	ifp = sc->alc_ifp;
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	else
		reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
}

/*
 * Program the Rx filter: broadcast/promiscuous/all-multicast bits in
 * MAC_CFG and the 64-bit multicast hash filter (MAR0/MAR1).  For
 * each multicast address, the big-endian CRC32's top bit selects the
 * MAR word and bits 30-26 select the bit within it.  Promiscuous or
 * all-multicast mode opens the hash filter completely.
 */
static void
alc_rxfilter(struct alc_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	ifp = sc->alc_ifp;

	bzero(mchash, sizeof(mchash));
	rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxcfg |= MAC_CFG_BCAST;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxcfg |= MAC_CFG_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			rxcfg |= MAC_CFG_ALLMULTI;
		mchash[0] = 0xFFFFFFFF;
		mchash[1] = 0xFFFFFFFF;
		goto chipit;
	}

#if 0
	/* XXX */
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &sc->alc_ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
	}
#if 0
	/* XXX */
	if_maddr_runlock(ifp);
#endif

chipit:
	CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
	CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
	CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
}

/* Sysctl handler: clamp Rx processing limit to [ALC_PROC_MIN, ALC_PROC_MAX]. */
static int
sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    ALC_PROC_MIN, ALC_PROC_MAX));
}

/* Sysctl handler: clamp moderation timer to [ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX]. */
static int
sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX));
}