/*	$OpenBSD: if_nfe.c,v 1.117 2016/04/13 10:34:32 mpi Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
int	nfe_activate(struct device *, int);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_iff(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
#ifndef SMALL_KERNEL
int	nfe_wol(struct ifnet*, int);
#endif

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL,
	nfe_activate
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

int
nfe_activate(struct device *self, int act)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_stop(ifp, 0);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			nfe_init(ifp);
		break;
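	/* Forward any other action to attached child devices. */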
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}


void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &memsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	if (sc->sc_flags & NFE_PWR_MGMT) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = nfe_wol;
	nfe_wol(ifp, 0);
#endif

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			nfe_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}

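/*
 * Descriptor ring sync helpers.  The *_sync() functions sync a single
 * 32-bit or 64-bit descriptor; the *_rsync() functions sync the range
 * [start, end) and issue two bus_dmamap_sync() calls when that range
 * wraps around the end of the ring.
 */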
void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		mnew = MCLGETI(NULL, MCLBYTES, NULL, M_DONTWAIT);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		mnew->m_pkthdr.len = mnew->m_len = MCLBYTES;

		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->map);

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, mnew,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map,
			    m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely that it will fail.. */
				panic("%s: could not load old rx mbuf",
				    sc->sc_dev.dv_xname);
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->map->dm_segs[0].ds_addr;

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
	if_input(ifp, &ml);
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifq_clr_oactive(&ifp->if_snd);
		nfe_start(ifp);
	}
}

int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = htole32(vtag);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first fragment
			 * only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
			vtag = 0;

			/*
			 * Setting of the valid bit in the first descriptor is
			 * deferred until the whole chain is fully setup.
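			 * This keeps the chip from starting on a chain that
			 * is not yet completely built.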
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		m0 = ifq_deq_begin(&ifp->if_snd);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifq_deq_rollback(&ifp->if_snd, m0);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		ifq_deq_commit(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
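	 * nfe_watchdog() reinitializes the interface if it expires.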
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* program promiscuous mode and multicast filters */
	nfe_iff(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

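	/* disarm the watchdog timer and mark the interface as stopped */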
	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	if ((sc->sc_flags & NFE_WOL) == 0) {
		/* disable Rx */
		NFE_WRITE(sc, NFE_RX_CTL, 0);

		/* disable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	}

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
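	 * Each slot gets its own DMA map with a single cluster mbuf loaded,
	 * and its descriptor is marked NFE_RX_READY for the chip.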
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		data->m = MCLGETI(NULL, MCLBYTES, NULL, M_DONTWAIT);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}
		data->m->m_pkthdr.len = data->m->m_len = MCLBYTES;

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, data->m,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
		physaddr = data->map->dm_segs[0].ds_addr;

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

void
nfe_iff(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter;
	int i;

	filter = NFE_RXFILTER_MAGIC;
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			filter |= NFE_PROMISC;
		else
			filter |= NFE_U2M;
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
	} else {
		filter |= NFE_U2M;

		bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
		bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				addr[i] &= enm->enm_addrlo[i];
				mask[i] &= ~enm->enm_addrlo[i];
			}

			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < ETHER_ADDR_LEN; i++)
			mask[i] |= addr[i];
	}

	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >> 8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >> 8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}

#ifndef SMALL_KERNEL
int
nfe_wol(struct ifnet *ifp, int enable)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (enable) {
		sc->sc_flags |= NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);
	} else {
		sc->sc_flags &= ~NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, 0);
	}

	return 0;
}
#endif