1 /* $OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $ */ 2 /* $DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.46 2008/10/28 07:30:49 sephe Exp $ */ 3 4 /* 5 * Copyright (c) 2006 The DragonFly Project. All rights reserved. 6 * 7 * This code is derived from software contributed to The DragonFly Project 8 * by Sepherosa Ziehau <sepherosa@gmail.com> and 9 * Matthew Dillon <dillon@apollo.backplane.com> 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in 19 * the documentation and/or other materials provided with the 20 * distribution. 21 * 3. Neither the name of The DragonFly Project nor the names of its 22 * contributors may be used to endorse or promote products derived 23 * from this software without specific, prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 28 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 29 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 30 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 31 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 32 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 33 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 34 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 35 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 
37 */ 38 39 /* 40 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr> 41 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org> 42 * 43 * Permission to use, copy, modify, and distribute this software for any 44 * purpose with or without fee is hereby granted, provided that the above 45 * copyright notice and this permission notice appear in all copies. 46 * 47 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 48 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 49 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 50 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 51 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 52 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 53 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 54 */ 55 56 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ 57 58 #include "opt_polling.h" 59 60 #include <sys/param.h> 61 #include <sys/endian.h> 62 #include <sys/kernel.h> 63 #include <sys/bus.h> 64 #include <sys/interrupt.h> 65 #include <sys/proc.h> 66 #include <sys/rman.h> 67 #include <sys/serialize.h> 68 #include <sys/socket.h> 69 #include <sys/sockio.h> 70 #include <sys/sysctl.h> 71 72 #include <net/ethernet.h> 73 #include <net/if.h> 74 #include <net/bpf.h> 75 #include <net/if_arp.h> 76 #include <net/if_dl.h> 77 #include <net/if_media.h> 78 #include <net/ifq_var.h> 79 #include <net/if_types.h> 80 #include <net/if_var.h> 81 #include <net/vlan/if_vlan_var.h> 82 #include <net/vlan/if_vlan_ether.h> 83 84 #include <bus/pci/pcireg.h> 85 #include <bus/pci/pcivar.h> 86 #include <bus/pci/pcidevs.h> 87 88 #include <dev/netif/mii_layer/mii.h> 89 #include <dev/netif/mii_layer/miivar.h> 90 91 #include "miibus_if.h" 92 93 #include <dev/netif/nfe/if_nfereg.h> 94 #include <dev/netif/nfe/if_nfevar.h> 95 96 #define NFE_CSUM 97 #define NFE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | 
CSUM_UDP) 98 99 static int nfe_probe(device_t); 100 static int nfe_attach(device_t); 101 static int nfe_detach(device_t); 102 static void nfe_shutdown(device_t); 103 static int nfe_resume(device_t); 104 static int nfe_suspend(device_t); 105 106 static int nfe_miibus_readreg(device_t, int, int); 107 static void nfe_miibus_writereg(device_t, int, int, int); 108 static void nfe_miibus_statchg(device_t); 109 110 #ifdef DEVICE_POLLING 111 static void nfe_poll(struct ifnet *, enum poll_cmd, int); 112 #endif 113 static void nfe_intr(void *); 114 static int nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 115 static int nfe_rxeof(struct nfe_softc *); 116 static int nfe_txeof(struct nfe_softc *, int); 117 static int nfe_encap(struct nfe_softc *, struct nfe_tx_ring *, 118 struct mbuf *); 119 static void nfe_start(struct ifnet *); 120 static void nfe_watchdog(struct ifnet *); 121 static void nfe_init(void *); 122 static void nfe_stop(struct nfe_softc *); 123 static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *); 124 static void nfe_jfree(void *); 125 static void nfe_jref(void *); 126 static int nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *); 127 static void nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *); 128 static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 129 static void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 130 static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 131 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 132 static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 133 static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 134 static int nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 135 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 136 static int nfe_ifmedia_upd(struct ifnet *); 137 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *); 138 static void 
nfe_setmulti(struct nfe_softc *);
static void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
static void	nfe_powerup(device_t);
static void	nfe_mac_reset(struct nfe_softc *);
static void	nfe_tick(void *);
static void	nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int, bus_addr_t);
static void	nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int);
static int	nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
			       int);
static int	nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
				 int);
static void	nfe_enable_intrs(struct nfe_softc *);
static void	nfe_disable_intrs(struct nfe_softc *);

static int	nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS);

#define NFE_DEBUG
#ifdef NFE_DEBUG

/*
 * Boot-time knobs; copied into the softc once at attach time
 * (see nfe_attach), so per-device values may diverge afterwards.
 */
static int	nfe_debug = 0;
static int	nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT;
static int	nfe_tx_ring_count = NFE_TX_RING_DEF_COUNT;

/* hw timer simulated interrupt moderation @8000Hz */
/* negative value selects dynamic interrupt moderation (NFE_F_DYN_IM) */
static int	nfe_imtime = -125;

TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count);
TUNABLE_INT("hw.nfe.tx_ring_count", &nfe_tx_ring_count);
TUNABLE_INT("hw.nfe.imtimer", &nfe_imtime);
TUNABLE_INT("hw.nfe.debug", &nfe_debug);

/* Debug printf, emitted only when the per-device debug level is non-zero */
#define DPRINTF(sc, fmt, ...) do { \
	if ((sc)->sc_debug) { \
		if_printf(&(sc)->arpcom.ac_if, \
			  fmt, __VA_ARGS__); \
	} \
} while (0)

/* As DPRINTF, but gated on a minimum debug level 'lv' */
#define DPRINTFN(sc, lv, fmt, ...) do { \
	if ((sc)->sc_debug >= (lv)) { \
		if_printf(&(sc)->arpcom.ac_if, \
			  fmt, __VA_ARGS__); \
	} \
} while (0)

#else	/* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)
#endif	/* NFE_DEBUG */

/*
 * PCI match table: every NVIDIA MCP/nForce Ethernet variant this driver
 * attaches to, plus the description string reported at probe time.
 * Terminated by an all-zero sentinel entry.
 */
static const struct nfe_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	  "NVIDIA nForce Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	  "NVIDIA nForce2 Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	/* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
	   chipset, and possibly also the 400R; it might be both nForce2- and
	   nForce3-based boards can use the same MCPs (= southbridges) */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ 0, 0, NULL }
};

static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ 0, 0 }
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t	nfe_devclass;

DECLARE_DUMMY_MODULE(if_nfe);
MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);

/*
 * Probe for a supported device and record its hardware capability bits
 * (jumbo frames, 40-bit DMA, h/w checksum, VLAN tagging, power control,
 * byte-swapped MAC address) in the softc for later use by nfe_attach.
 * Returns 0 on match, ENXIO otherwise.
 */
static int
nfe_probe(device_t dev)
{
	const struct nfe_dev *n;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (n = nfe_devices; n->desc != NULL; ++n) {
		if (vid == n->vid && did == n->did) {
			struct nfe_softc *sc = device_get_softc(dev);

			switch (did) {
			case PCI_PRODUCT_NVIDIA_NFORCE_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE2_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN1:
				sc->sc_caps = NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_HW_CSUM |
					      NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
				sc->sc_caps = NFE_FIX_EADDR;
				/* FALL THROUGH */
			case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
				/* |= so MCP51's fall-through keeps FIX_EADDR */
				sc->sc_caps |= NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_CK804_LAN1:
			case PCI_PRODUCT_NVIDIA_CK804_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_HW_VLAN |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
				sc->sc_caps = NFE_40BIT_ADDR |
					      NFE_HW_CSUM;
				break;
			}

			device_set_desc(dev, n->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

/*
 * Attach: bring the device up far enough to register a network interface.
 * Acquires PCI memory/IRQ resources, creates the parent DMA tag and the
 * TX/RX rings, sets up the sysctl tree, probes the PHY and hooks up the
 * interrupt.  On any failure, falls through to nfe_detach() for cleanup.
 * Returns 0 on success or an errno.
 */
static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t lowaddr;
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	lwkt_serialize_init(&sc->sc_jbuf_serializer);

	/*
	 * Initialize sysctl variables
	 */
	sc->sc_rx_ring_count = nfe_rx_ring_count;
	sc->sc_tx_ring_count = nfe_tx_ring_count;
	sc->sc_debug = nfe_debug;
	if (nfe_imtime < 0) {
		/* Negative tunable selects dynamic interrupt moderation */
		sc->sc_flags |= NFE_F_DYN_IM;
		sc->sc_imtime = -nfe_imtime;
	} else {
		sc->sc_imtime = nfe_imtime;
	}
	sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

	sc->sc_mem_rid = PCIR_BAR(0);

	/* Descriptor format depends on capabilities found by nfe_probe() */
	if (sc->sc_caps & NFE_40BIT_ADDR)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V3;
	else if (sc->sc_caps & NFE_JUMBO_SUP)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V2;

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, irq;

		/* Save BAR/INTLINE, which may be lost across the D-state
		 * transition, and restore them after powering up to D0. */
		mem = pci_read_config(dev, sc->sc_mem_rid, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		/* XXX(review): typo "cound" in message text */
		device_printf(dev, "cound not allocate io memory\n");
		return ENXIO;
	}
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	/* Allocate IRQ */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/* Disable WOL */
	NFE_WRITE(sc, NFE_WOL_CTL, 0);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_powerup(dev);

	nfe_get_macaddr(sc, eaddr);

	/*
	 * Allocate top level DMA tag
	 */
	if (sc->sc_caps & NFE_40BIT_ADDR)
		lowaddr = NFE_BUS_SPACE_MAXADDR;
	else
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	error = bus_dma_tag_create(NULL,	/* parent */
			1, 0,			/* alignment, boundary */
			lowaddr,		/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
			0,			/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			0,			/* flags */
			&sc->sc_dtag);
	if (error) {
		device_printf(dev, "could not allocate parent dma tag\n");
		goto fail;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
					     SYSCTL_STATIC_CHILDREN(_hw),
					     OID_AUTO,
					     device_get_nameunit(dev),
					     CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}
	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
			OID_AUTO, "imtimer", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, nfe_sysctl_imtime, "I",
			"Interrupt moderation time (usec).  "
			"0 to disable interrupt moderation.");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "rx_ring_count", CTLFLAG_RD, &sc->sc_rx_ring_count,
		       0, "RX ring count");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "tx_ring_count", CTLFLAG_RD, &sc->sc_tx_ring_count,
		       0, "TX ring count");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "debug", CTLFLAG_RW, &sc->sc_debug,
		       0, "control debugging printfs");

	error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
			      nfe_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = nfe_poll;
#endif
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_caps & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

#ifdef NFE_CSUM
	if (sc->sc_caps & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = NFE_CSUM_FEATURES;
	}
#else
	sc->sc_caps &= ~NFE_HW_CSUM;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	callout_init(&sc->sc_tick_ch);

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not setup intr\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return 0;
fail:
	nfe_detach(dev);
	return error;
}

/*
 * Detach: tear down everything nfe_attach set up, in reverse order.
 * Safe to call on a partially attached device (used as the attach
 * failure path), hence the NULL checks on each resource.
 */
static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		/* Stop the chip and unhook the interrupt under serializer */
		lwkt_serialize_enter(ifp->if_serializer);
		nfe_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sc_sysctl_ctx);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);

	return 0;
}

/* Shutdown hook: quiesce the hardware before the system goes down. */
static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

/* Suspend hook: stop the chip; state is rebuilt by nfe_resume. */
static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

/* Resume hook: reinitialize only if the interface was administratively up. */
static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP)
		nfe_init(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

/*
 * MII status-change callback: program the MAC's PHY interface, misc and
 * link-speed registers to match the media the PHY negotiated.
 */
static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	ASSERT_SERIALIZED(sc->arpcom.ac_if.if_serializer);

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

/*
 * MII read: issue a read on the chip's MDIO interface and busy-wait
 * (up to 1000 * 100us) for completion.  Returns the register value, or
 * 0 on timeout/PHY error — 0 doubles as "no PHY here" for the MII layer.
 */
static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* Kick a stuck previous transaction before starting a new one */
	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY %s\n", "");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	/* Remember the address of the last PHY that answered */
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;
	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return val;
}

/*
 * MII write: mirror of nfe_miibus_readreg for register writes; failures
 * are only reported via debug printf, the MII layer expects no status.
 */
static void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* Kick a stuck previous transaction before starting a new one */
	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

#ifdef NFE_DEBUG
	if (ntries == 1000)
		DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
#endif
}

#ifdef DEVICE_POLLING

/*
 * Polling entry point: interrupts are masked while polling is registered;
 * RX/TX rings are serviced from the poll loop instead.
 */
static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch(cmd) {
	case POLL_REGISTER:
		nfe_disable_intrs(sc);
		break;

	case POLL_DEREGISTER:
		nfe_enable_intrs(sc);
		break;

	case POLL_AND_CHECK_STATUS:
		/* fall through */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			nfe_rxeof(sc);
			nfe_txeof(sc, 1);
		}
		break;
	}
}

#endif

/*
 * Interrupt handler: ack the IRQ status, service link-state changes and
 * the RX/TX rings, and — in dynamic interrupt moderation mode — switch
 * between the h/w timer interrupt and plain RX/TX interrupts depending
 * on whether the rings had work pending.
 */
static void
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t r;

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	if (r == 0)
		return;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);	/* ack before processing */

	DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed %s\n", "");
	}

	if (ifp->if_flags & IFF_RUNNING) {
		int ret;

		/* check Rx ring */
		ret = nfe_rxeof(sc);

		/* check Tx ring */
		ret |= nfe_txeof(sc, 1);

		if (sc->sc_flags & NFE_F_DYN_IM) {
			if (ret && (sc->sc_flags & NFE_F_IRQ_TIMER) == 0) {
				/*
				 * Assume that using hardware timer could reduce
				 * the interrupt rate.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_IMTIMER);
				sc->sc_flags |= NFE_F_IRQ_TIMER;
			} else if (!ret && (sc->sc_flags & NFE_F_IRQ_TIMER)) {
				/*
				 * Nothing needs to be processed, fall back to
				 * use TX/RX interrupts.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_NOIMTIMER);
				sc->sc_flags &= ~NFE_F_IRQ_TIMER;

				/*
				 * Recollect, mainly to avoid the possible race
				 * introduced by changing interrupt masks.
				 */
				nfe_rxeof(sc);
				nfe_txeof(sc, 1);
			}
		}
	}
}

/*
 * ioctl handler: MTU changes (validated against jumbo capability),
 * up/down and RX-filter-only flag changes, multicast list updates,
 * media selection, and checksum-offload capability toggling.
 * Returns 0 or an errno.
 */
static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask, jumbo_cap;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		/* Jumbo MTU needs both chip support and allocated jbufs */
		if ((sc->sc_caps & NFE_JUMBO_SUP) && sc->rxq.jbuf != NULL)
			jumbo_cap = 1;
		else
			jumbo_cap = 0;

		if ((jumbo_cap && ifr->ifr_mtu > NFE_JUMBO_MTU) ||
		    (!jumbo_cap && ifr->ifr_mtu > ETHERMTU)) {
			return EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM;
		if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) {
			ifp->if_capenable ^= mask;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist = NFE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;

			/* Re-init so the chip picks up the new csum setup */
			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

/*
 * RX completion: walk the RX ring from 'cur', replacing the mbuf of each
 * completed descriptor (standard or jumbo) and handing received frames
 * to the network stack in a chain.  The descriptor is re-armed even on
 * error paths (skip:), so the ring never loses slots.
 * Returns non-zero if any descriptor was reaped.
 */
static int
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_rx_ring *ring = &sc->rxq;
	int reap;
	struct mbuf_chain chain[MAXCPU];

	reap = 0;
	ether_input_chain_init(chain);

	for (;;) {
		struct nfe_rx_data *data = &ring->data[ring->cur];
		struct mbuf *m;
		uint16_t flags;
		int len, error;

		if (sc->sc_caps & NFE_40BIT_ADDR) {
			struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		/* READY still set means the chip hasn't filled this one yet */
		if (flags & NFE_RX_READY)
			break;

		reap = 1;

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			/* v1 descriptor format */
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			/* v2/v3 descriptor format */
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		m = data->m;

		/* Refill the slot first; only then hand 'm' up the stack */
		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
		else
			error = nfe_newbuf_std(sc, ring, ring->cur, 0);
		if (error) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) &&
		    (flags & NFE_RX_CSUMOK)) {
			if (flags & NFE_RX_IP_CSUMOK_V2) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
							  CSUM_IP_VALID;
			}

			if (flags &
			    (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
							  CSUM_PSEUDO_HDR |
							  CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		ifp->if_ipackets++;
		ether_input_chain(ifp, m, NULL, chain);
skip:
		nfe_set_ready_rxdesc(sc, ring, ring->cur);
		sc->rxq.cur = (sc->rxq.cur + 1) % sc->sc_rx_ring_count;
	}

	if (reap)
		ether_input_dispatch(chain);
	return reap;
}

/*
 * TX completion: reclaim descriptors the chip has finished with, free
 * their mbufs (on the last fragment), count errors, clear OACTIVE when
 * enough slots are free again, and optionally restart transmission.
 * Returns non-zero if any descriptor was reclaimed.
 */
static int
nfe_txeof(struct nfe_softc *sc, int start)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_tx_ring *ring = &sc->txq;
	struct nfe_tx_data *data = NULL;

	while (ring->next != ring->cur) {
		uint16_t flags;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			flags = le16toh(ring->desc64[ring->next].flags);
		else
			flags = le16toh(ring->desc32[ring->next].flags);

		/* VALID still set means the chip still owns this one */
		if (flags & NFE_TX_VALID)
			break;

		data = &ring->data[ring->next];

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				if_printf(ifp, "tx v1 error 0x%4b\n", flags,
					  NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				if_printf(ifp, "tx v2 error 0x%4b\n", flags,
					  NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		}

		if (data->m == NULL) {	/* should not get there */
			if_printf(ifp,
				  "last fragment bit w/o associated mbuf!\n");
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_unload(ring->data_tag, data->map);
		m_freem(data->m);
		data->m = NULL;
skip:
		ring->queued--;
		KKASSERT(ring->queued >= 0);
		ring->next = (ring->next + 1) % sc->sc_tx_ring_count;
	}

	/* Enough room for a worst-case packet plus reserve?  Un-stall. */
	if (sc->sc_tx_ring_count - ring->queued >=
	    sc->sc_tx_spare + NFE_NSEG_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	if (ring->queued == 0)
		ifp->if_timer = 0;

	if (start && !ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);

	if (data != NULL)
		return 1;
	else
		return 0;
}

/*
 * Encapsulate an mbuf chain into TX descriptors.  Loads (and defrags if
 * necessary) the chain into DMA segments, then fills the descriptors —
 * note the VALID-bit ordering constraint described in the comment below.
 * (Definition continues beyond this excerpt.)
 */
static int
nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
{
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	struct nfe_tx_data *data, *data_map;
	bus_dmamap_t map;
	struct nfe_desc64 *desc64 = NULL;
	struct nfe_desc32 *desc32 = NULL;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, j, maxsegs, nsegs;

	data = &ring->data[ring->cur];
	map = data->map;
	data_map = data;	/* Remember who owns the DMA map */

	maxsegs = (sc->sc_tx_ring_count - ring->queued) - NFE_NSEG_RSVD;
	if (maxsegs > NFE_MAX_SCATTER)
		maxsegs = NFE_MAX_SCATTER;
	KASSERT(maxsegs >= sc->sc_tx_spare,
		("no enough segments %d,%d\n", maxsegs, sc->sc_tx_spare));

	error = bus_dmamap_load_mbuf_defrag(ring->data_tag, map, &m0,
			segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);

	error = 0;

	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = m0->m_pkthdr.ether_vlantag;

	if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= NFE_TX_TCP_CSUM;
	}

	/*
	 * XXX urm. somebody is unaware of how hardware works.  You
	 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
	 * the ring until the entire chain is actually *VALID*.  Otherwise
	 * the hardware may encounter a partially initialized chain that
	 * is marked as being ready to go when it in fact is not ready to
	 * go.
1235 */ 1236 1237 for (i = 0; i < nsegs; i++) { 1238 j = (ring->cur + i) % sc->sc_tx_ring_count; 1239 data = &ring->data[j]; 1240 1241 if (sc->sc_caps & NFE_40BIT_ADDR) { 1242 desc64 = &ring->desc64[j]; 1243 desc64->physaddr[0] = 1244 htole32(NFE_ADDR_HI(segs[i].ds_addr)); 1245 desc64->physaddr[1] = 1246 htole32(NFE_ADDR_LO(segs[i].ds_addr)); 1247 desc64->length = htole16(segs[i].ds_len - 1); 1248 desc64->vtag = htole32(vtag); 1249 desc64->flags = htole16(flags); 1250 } else { 1251 desc32 = &ring->desc32[j]; 1252 desc32->physaddr = htole32(segs[i].ds_addr); 1253 desc32->length = htole16(segs[i].ds_len - 1); 1254 desc32->flags = htole16(flags); 1255 } 1256 1257 /* csum flags and vtag belong to the first fragment only */ 1258 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM); 1259 vtag = 0; 1260 1261 ring->queued++; 1262 KKASSERT(ring->queued <= sc->sc_tx_ring_count); 1263 } 1264 1265 /* the whole mbuf chain has been DMA mapped, fix last descriptor */ 1266 if (sc->sc_caps & NFE_40BIT_ADDR) { 1267 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2); 1268 } else { 1269 if (sc->sc_caps & NFE_JUMBO_SUP) 1270 flags = NFE_TX_LASTFRAG_V2; 1271 else 1272 flags = NFE_TX_LASTFRAG_V1; 1273 desc32->flags |= htole16(flags); 1274 } 1275 1276 /* 1277 * Set NFE_TX_VALID backwards so the hardware doesn't see the 1278 * whole mess until the first descriptor in the map is flagged. 
1279 */ 1280 for (i = nsegs - 1; i >= 0; --i) { 1281 j = (ring->cur + i) % sc->sc_tx_ring_count; 1282 if (sc->sc_caps & NFE_40BIT_ADDR) { 1283 desc64 = &ring->desc64[j]; 1284 desc64->flags |= htole16(NFE_TX_VALID); 1285 } else { 1286 desc32 = &ring->desc32[j]; 1287 desc32->flags |= htole16(NFE_TX_VALID); 1288 } 1289 } 1290 ring->cur = (ring->cur + nsegs) % sc->sc_tx_ring_count; 1291 1292 /* Exchange DMA map */ 1293 data_map->map = data->map; 1294 data->map = map; 1295 data->m = m0; 1296 back: 1297 if (error) 1298 m_freem(m0); 1299 return error; 1300 } 1301 1302 static void 1303 nfe_start(struct ifnet *ifp) 1304 { 1305 struct nfe_softc *sc = ifp->if_softc; 1306 struct nfe_tx_ring *ring = &sc->txq; 1307 int count = 0, oactive = 0; 1308 struct mbuf *m0; 1309 1310 ASSERT_SERIALIZED(ifp->if_serializer); 1311 1312 if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING) 1313 return; 1314 1315 for (;;) { 1316 int error; 1317 1318 if (sc->sc_tx_ring_count - ring->queued < 1319 sc->sc_tx_spare + NFE_NSEG_RSVD) { 1320 if (oactive) { 1321 ifp->if_flags |= IFF_OACTIVE; 1322 break; 1323 } 1324 1325 nfe_txeof(sc, 0); 1326 oactive = 1; 1327 continue; 1328 } 1329 1330 m0 = ifq_dequeue(&ifp->if_snd, NULL); 1331 if (m0 == NULL) 1332 break; 1333 1334 ETHER_BPF_MTAP(ifp, m0); 1335 1336 error = nfe_encap(sc, ring, m0); 1337 if (error) { 1338 ifp->if_oerrors++; 1339 if (error == EFBIG) { 1340 if (oactive) { 1341 ifp->if_flags |= IFF_OACTIVE; 1342 break; 1343 } 1344 nfe_txeof(sc, 0); 1345 oactive = 1; 1346 } 1347 continue; 1348 } else { 1349 oactive = 0; 1350 } 1351 ++count; 1352 1353 /* 1354 * NOTE: 1355 * `m0' may be freed in nfe_encap(), so 1356 * it should not be touched any more. 1357 */ 1358 } 1359 1360 if (count == 0) /* nothing sent */ 1361 return; 1362 1363 /* Kick Tx */ 1364 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 1365 1366 /* 1367 * Set a timeout in case the chip goes out to lunch. 
1368 */ 1369 ifp->if_timer = 5; 1370 } 1371 1372 static void 1373 nfe_watchdog(struct ifnet *ifp) 1374 { 1375 struct nfe_softc *sc = ifp->if_softc; 1376 1377 ASSERT_SERIALIZED(ifp->if_serializer); 1378 1379 if (ifp->if_flags & IFF_RUNNING) { 1380 if_printf(ifp, "watchdog timeout - lost interrupt recovered\n"); 1381 nfe_txeof(sc, 1); 1382 return; 1383 } 1384 1385 if_printf(ifp, "watchdog timeout\n"); 1386 1387 nfe_init(ifp->if_softc); 1388 1389 ifp->if_oerrors++; 1390 } 1391 1392 static void 1393 nfe_init(void *xsc) 1394 { 1395 struct nfe_softc *sc = xsc; 1396 struct ifnet *ifp = &sc->arpcom.ac_if; 1397 uint32_t tmp; 1398 int error; 1399 1400 ASSERT_SERIALIZED(ifp->if_serializer); 1401 1402 nfe_stop(sc); 1403 1404 if ((sc->sc_caps & NFE_NO_PWRCTL) == 0) 1405 nfe_mac_reset(sc); 1406 1407 /* 1408 * NOTE: 1409 * Switching between jumbo frames and normal frames should 1410 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring(). 1411 */ 1412 if (ifp->if_mtu > ETHERMTU) { 1413 sc->sc_flags |= NFE_F_USE_JUMBO; 1414 sc->rxq.bufsz = NFE_JBYTES; 1415 sc->sc_tx_spare = NFE_NSEG_SPARE_JUMBO; 1416 if (bootverbose) 1417 if_printf(ifp, "use jumbo frames\n"); 1418 } else { 1419 sc->sc_flags &= ~NFE_F_USE_JUMBO; 1420 sc->rxq.bufsz = MCLBYTES; 1421 sc->sc_tx_spare = NFE_NSEG_SPARE; 1422 if (bootverbose) 1423 if_printf(ifp, "use non-jumbo frames\n"); 1424 } 1425 1426 error = nfe_init_tx_ring(sc, &sc->txq); 1427 if (error) { 1428 nfe_stop(sc); 1429 return; 1430 } 1431 1432 error = nfe_init_rx_ring(sc, &sc->rxq); 1433 if (error) { 1434 nfe_stop(sc); 1435 return; 1436 } 1437 1438 NFE_WRITE(sc, NFE_TX_POLL, 0); 1439 NFE_WRITE(sc, NFE_STATUS, 0); 1440 1441 sc->rxtxctl = NFE_RXTX_BIT2 | sc->rxtxctl_desc; 1442 1443 if (ifp->if_capenable & IFCAP_RXCSUM) 1444 sc->rxtxctl |= NFE_RXTX_RXCSUM; 1445 1446 /* 1447 * Although the adapter is capable of stripping VLAN tags from received 1448 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on 1449 * purpose. 
This will be done in software by our network stack. 1450 */ 1451 if (sc->sc_caps & NFE_HW_VLAN) 1452 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT; 1453 1454 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); 1455 DELAY(10); 1456 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 1457 1458 if (sc->sc_caps & NFE_HW_VLAN) 1459 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); 1460 1461 NFE_WRITE(sc, NFE_SETUP_R6, 0); 1462 1463 /* set MAC address */ 1464 nfe_set_macaddr(sc, sc->arpcom.ac_enaddr); 1465 1466 /* tell MAC where rings are in memory */ 1467 if (sc->sc_caps & NFE_40BIT_ADDR) { 1468 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 1469 NFE_ADDR_HI(sc->rxq.physaddr)); 1470 } 1471 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->rxq.physaddr)); 1472 1473 if (sc->sc_caps & NFE_40BIT_ADDR) { 1474 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, 1475 NFE_ADDR_HI(sc->txq.physaddr)); 1476 } 1477 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr)); 1478 1479 NFE_WRITE(sc, NFE_RING_SIZE, 1480 (sc->sc_rx_ring_count - 1) << 16 | 1481 (sc->sc_tx_ring_count - 1)); 1482 1483 NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz); 1484 1485 /* force MAC to wakeup */ 1486 tmp = NFE_READ(sc, NFE_PWR_STATE); 1487 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP); 1488 DELAY(10); 1489 tmp = NFE_READ(sc, NFE_PWR_STATE); 1490 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID); 1491 1492 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC); 1493 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); 1494 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); 1495 1496 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ 1497 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); 1498 1499 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); 1500 1501 sc->rxtxctl &= ~NFE_RXTX_BIT2; 1502 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 1503 DELAY(10); 1504 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl); 1505 1506 /* set Rx filter */ 1507 nfe_setmulti(sc); 1508 1509 nfe_ifmedia_upd(ifp); 1510 1511 /* enable Rx */ 1512 NFE_WRITE(sc, 
NFE_RX_CTL, NFE_RX_START); 1513 1514 /* enable Tx */ 1515 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); 1516 1517 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1518 1519 #ifdef DEVICE_POLLING 1520 if ((ifp->if_flags & IFF_POLLING)) 1521 nfe_disable_intrs(sc); 1522 else 1523 #endif 1524 nfe_enable_intrs(sc); 1525 1526 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc); 1527 1528 ifp->if_flags |= IFF_RUNNING; 1529 ifp->if_flags &= ~IFF_OACTIVE; 1530 1531 /* 1532 * If we had stuff in the tx ring before its all cleaned out now 1533 * so we are not going to get an interrupt, jump-start any pending 1534 * output. 1535 */ 1536 if (!ifq_is_empty(&ifp->if_snd)) 1537 if_devstart(ifp); 1538 } 1539 1540 static void 1541 nfe_stop(struct nfe_softc *sc) 1542 { 1543 struct ifnet *ifp = &sc->arpcom.ac_if; 1544 uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2; 1545 int i; 1546 1547 ASSERT_SERIALIZED(ifp->if_serializer); 1548 1549 callout_stop(&sc->sc_tick_ch); 1550 1551 ifp->if_timer = 0; 1552 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1553 sc->sc_flags &= ~NFE_F_IRQ_TIMER; 1554 1555 #define WAITMAX 50000 1556 1557 /* 1558 * Abort Tx 1559 */ 1560 NFE_WRITE(sc, NFE_TX_CTL, 0); 1561 for (i = 0; i < WAITMAX; ++i) { 1562 DELAY(100); 1563 if ((NFE_READ(sc, NFE_TX_STATUS) & NFE_TX_STATUS_BUSY) == 0) 1564 break; 1565 } 1566 if (i == WAITMAX) 1567 if_printf(ifp, "can't stop TX\n"); 1568 DELAY(100); 1569 1570 /* 1571 * Disable Rx 1572 */ 1573 NFE_WRITE(sc, NFE_RX_CTL, 0); 1574 for (i = 0; i < WAITMAX; ++i) { 1575 DELAY(100); 1576 if ((NFE_READ(sc, NFE_RX_STATUS) & NFE_RX_STATUS_BUSY) == 0) 1577 break; 1578 } 1579 if (i == WAITMAX) 1580 if_printf(ifp, "can't stop RX\n"); 1581 DELAY(100); 1582 1583 #undef WAITMAX 1584 1585 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl); 1586 DELAY(10); 1587 NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl); 1588 1589 /* Disable interrupts */ 1590 NFE_WRITE(sc, NFE_IRQ_MASK, 0); 1591 1592 /* Reset Tx and Rx rings */ 1593 nfe_reset_tx_ring(sc, &sc->txq); 1594 
nfe_reset_rx_ring(sc, &sc->rxq); 1595 } 1596 1597 static int 1598 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1599 { 1600 int i, j, error, descsize; 1601 bus_dmamem_t dmem; 1602 void **desc; 1603 1604 if (sc->sc_caps & NFE_40BIT_ADDR) { 1605 desc = (void **)&ring->desc64; 1606 descsize = sizeof(struct nfe_desc64); 1607 } else { 1608 desc = (void **)&ring->desc32; 1609 descsize = sizeof(struct nfe_desc32); 1610 } 1611 1612 ring->bufsz = MCLBYTES; 1613 ring->cur = ring->next = 0; 1614 1615 error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0, 1616 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1617 sc->sc_rx_ring_count * descsize, 1618 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1619 if (error) { 1620 if_printf(&sc->arpcom.ac_if, 1621 "could not create RX desc ring\n"); 1622 return error; 1623 } 1624 ring->tag = dmem.dmem_tag; 1625 ring->map = dmem.dmem_map; 1626 *desc = dmem.dmem_addr; 1627 ring->physaddr = dmem.dmem_busaddr; 1628 1629 if (sc->sc_caps & NFE_JUMBO_SUP) { 1630 ring->jbuf = 1631 kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT(sc), 1632 M_DEVBUF, M_WAITOK | M_ZERO); 1633 1634 error = nfe_jpool_alloc(sc, ring); 1635 if (error) { 1636 if_printf(&sc->arpcom.ac_if, 1637 "could not allocate jumbo frames\n"); 1638 kfree(ring->jbuf, M_DEVBUF); 1639 ring->jbuf = NULL; 1640 /* Allow jumbo frame allocation to fail */ 1641 } 1642 } 1643 1644 ring->data = kmalloc(sizeof(struct nfe_rx_data) * sc->sc_rx_ring_count, 1645 M_DEVBUF, M_WAITOK | M_ZERO); 1646 1647 error = bus_dma_tag_create(sc->sc_dtag, 1, 0, 1648 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1649 NULL, NULL, 1650 MCLBYTES, 1, MCLBYTES, 1651 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, 1652 &ring->data_tag); 1653 if (error) { 1654 if_printf(&sc->arpcom.ac_if, 1655 "could not create RX mbuf DMA tag\n"); 1656 return error; 1657 } 1658 1659 /* Create a spare RX mbuf DMA map */ 1660 error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK, 1661 &ring->data_tmpmap); 1662 if (error) { 1663 if_printf(&sc->arpcom.ac_if, 
1664 "could not create spare RX mbuf DMA map\n"); 1665 bus_dma_tag_destroy(ring->data_tag); 1666 ring->data_tag = NULL; 1667 return error; 1668 } 1669 1670 for (i = 0; i < sc->sc_rx_ring_count; i++) { 1671 error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK, 1672 &ring->data[i].map); 1673 if (error) { 1674 if_printf(&sc->arpcom.ac_if, 1675 "could not create %dth RX mbuf DMA mapn", i); 1676 goto fail; 1677 } 1678 } 1679 return 0; 1680 fail: 1681 for (j = 0; j < i; ++j) 1682 bus_dmamap_destroy(ring->data_tag, ring->data[i].map); 1683 bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap); 1684 bus_dma_tag_destroy(ring->data_tag); 1685 ring->data_tag = NULL; 1686 return error; 1687 } 1688 1689 static void 1690 nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1691 { 1692 int i; 1693 1694 for (i = 0; i < sc->sc_rx_ring_count; i++) { 1695 struct nfe_rx_data *data = &ring->data[i]; 1696 1697 if (data->m != NULL) { 1698 if ((sc->sc_flags & NFE_F_USE_JUMBO) == 0) 1699 bus_dmamap_unload(ring->data_tag, data->map); 1700 m_freem(data->m); 1701 data->m = NULL; 1702 } 1703 } 1704 1705 ring->cur = ring->next = 0; 1706 } 1707 1708 static int 1709 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1710 { 1711 int i; 1712 1713 for (i = 0; i < sc->sc_rx_ring_count; ++i) { 1714 int error; 1715 1716 /* XXX should use a function pointer */ 1717 if (sc->sc_flags & NFE_F_USE_JUMBO) 1718 error = nfe_newbuf_jumbo(sc, ring, i, 1); 1719 else 1720 error = nfe_newbuf_std(sc, ring, i, 1); 1721 if (error) { 1722 if_printf(&sc->arpcom.ac_if, 1723 "could not allocate RX buffer\n"); 1724 return error; 1725 } 1726 nfe_set_ready_rxdesc(sc, ring, i); 1727 } 1728 return 0; 1729 } 1730 1731 static void 1732 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1733 { 1734 if (ring->data_tag != NULL) { 1735 struct nfe_rx_data *data; 1736 int i; 1737 1738 for (i = 0; i < sc->sc_rx_ring_count; i++) { 1739 data = &ring->data[i]; 1740 1741 if (data->m != NULL) { 
1742 bus_dmamap_unload(ring->data_tag, data->map); 1743 m_freem(data->m); 1744 } 1745 bus_dmamap_destroy(ring->data_tag, data->map); 1746 } 1747 bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap); 1748 bus_dma_tag_destroy(ring->data_tag); 1749 } 1750 1751 nfe_jpool_free(sc, ring); 1752 1753 if (ring->jbuf != NULL) 1754 kfree(ring->jbuf, M_DEVBUF); 1755 if (ring->data != NULL) 1756 kfree(ring->data, M_DEVBUF); 1757 1758 if (ring->tag != NULL) { 1759 void *desc; 1760 1761 if (sc->sc_caps & NFE_40BIT_ADDR) 1762 desc = ring->desc64; 1763 else 1764 desc = ring->desc32; 1765 1766 bus_dmamap_unload(ring->tag, ring->map); 1767 bus_dmamem_free(ring->tag, desc, ring->map); 1768 bus_dma_tag_destroy(ring->tag); 1769 } 1770 } 1771 1772 static struct nfe_jbuf * 1773 nfe_jalloc(struct nfe_softc *sc) 1774 { 1775 struct ifnet *ifp = &sc->arpcom.ac_if; 1776 struct nfe_jbuf *jbuf; 1777 1778 lwkt_serialize_enter(&sc->sc_jbuf_serializer); 1779 1780 jbuf = SLIST_FIRST(&sc->rxq.jfreelist); 1781 if (jbuf != NULL) { 1782 SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext); 1783 jbuf->inuse = 1; 1784 } else { 1785 if_printf(ifp, "no free jumbo buffer\n"); 1786 } 1787 1788 lwkt_serialize_exit(&sc->sc_jbuf_serializer); 1789 1790 return jbuf; 1791 } 1792 1793 static void 1794 nfe_jfree(void *arg) 1795 { 1796 struct nfe_jbuf *jbuf = arg; 1797 struct nfe_softc *sc = jbuf->sc; 1798 struct nfe_rx_ring *ring = jbuf->ring; 1799 1800 if (&ring->jbuf[jbuf->slot] != jbuf) 1801 panic("%s: free wrong jumbo buffer\n", __func__); 1802 else if (jbuf->inuse == 0) 1803 panic("%s: jumbo buffer already freed\n", __func__); 1804 1805 lwkt_serialize_enter(&sc->sc_jbuf_serializer); 1806 atomic_subtract_int(&jbuf->inuse, 1); 1807 if (jbuf->inuse == 0) 1808 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext); 1809 lwkt_serialize_exit(&sc->sc_jbuf_serializer); 1810 } 1811 1812 static void 1813 nfe_jref(void *arg) 1814 { 1815 struct nfe_jbuf *jbuf = arg; 1816 struct nfe_rx_ring *ring = jbuf->ring; 1817 1818 if 
(&ring->jbuf[jbuf->slot] != jbuf) 1819 panic("%s: ref wrong jumbo buffer\n", __func__); 1820 else if (jbuf->inuse == 0) 1821 panic("%s: jumbo buffer already freed\n", __func__); 1822 1823 atomic_add_int(&jbuf->inuse, 1); 1824 } 1825 1826 static int 1827 nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1828 { 1829 struct nfe_jbuf *jbuf; 1830 bus_dmamem_t dmem; 1831 bus_addr_t physaddr; 1832 caddr_t buf; 1833 int i, error; 1834 1835 /* 1836 * Allocate a big chunk of DMA'able memory. 1837 */ 1838 error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0, 1839 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1840 NFE_JPOOL_SIZE(sc), 1841 BUS_DMA_WAITOK, &dmem); 1842 if (error) { 1843 if_printf(&sc->arpcom.ac_if, 1844 "could not create jumbo buffer\n"); 1845 return error; 1846 } 1847 ring->jtag = dmem.dmem_tag; 1848 ring->jmap = dmem.dmem_map; 1849 ring->jpool = dmem.dmem_addr; 1850 physaddr = dmem.dmem_busaddr; 1851 1852 /* ..and split it into 9KB chunks */ 1853 SLIST_INIT(&ring->jfreelist); 1854 1855 buf = ring->jpool; 1856 for (i = 0; i < NFE_JPOOL_COUNT(sc); i++) { 1857 jbuf = &ring->jbuf[i]; 1858 1859 jbuf->sc = sc; 1860 jbuf->ring = ring; 1861 jbuf->inuse = 0; 1862 jbuf->slot = i; 1863 jbuf->buf = buf; 1864 jbuf->physaddr = physaddr; 1865 1866 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext); 1867 1868 buf += NFE_JBYTES; 1869 physaddr += NFE_JBYTES; 1870 } 1871 1872 return 0; 1873 } 1874 1875 static void 1876 nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1877 { 1878 if (ring->jtag != NULL) { 1879 bus_dmamap_unload(ring->jtag, ring->jmap); 1880 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap); 1881 bus_dma_tag_destroy(ring->jtag); 1882 } 1883 } 1884 1885 static int 1886 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1887 { 1888 int i, j, error, descsize; 1889 bus_dmamem_t dmem; 1890 void **desc; 1891 1892 if (sc->sc_caps & NFE_40BIT_ADDR) { 1893 desc = (void **)&ring->desc64; 1894 descsize = sizeof(struct nfe_desc64); 1895 } 
else { 1896 desc = (void **)&ring->desc32; 1897 descsize = sizeof(struct nfe_desc32); 1898 } 1899 1900 ring->queued = 0; 1901 ring->cur = ring->next = 0; 1902 1903 error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0, 1904 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1905 sc->sc_tx_ring_count * descsize, 1906 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1907 if (error) { 1908 if_printf(&sc->arpcom.ac_if, 1909 "could not create TX desc ring\n"); 1910 return error; 1911 } 1912 ring->tag = dmem.dmem_tag; 1913 ring->map = dmem.dmem_map; 1914 *desc = dmem.dmem_addr; 1915 ring->physaddr = dmem.dmem_busaddr; 1916 1917 ring->data = kmalloc(sizeof(struct nfe_tx_data) * sc->sc_tx_ring_count, 1918 M_DEVBUF, M_WAITOK | M_ZERO); 1919 1920 error = bus_dma_tag_create(sc->sc_dtag, 1, 0, 1921 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1922 NULL, NULL, 1923 NFE_JBYTES, NFE_MAX_SCATTER, MCLBYTES, 1924 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 1925 &ring->data_tag); 1926 if (error) { 1927 if_printf(&sc->arpcom.ac_if, 1928 "could not create TX buf DMA tag\n"); 1929 return error; 1930 } 1931 1932 for (i = 0; i < sc->sc_tx_ring_count; i++) { 1933 error = bus_dmamap_create(ring->data_tag, 1934 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 1935 &ring->data[i].map); 1936 if (error) { 1937 if_printf(&sc->arpcom.ac_if, 1938 "could not create %dth TX buf DMA map\n", i); 1939 goto fail; 1940 } 1941 } 1942 1943 return 0; 1944 fail: 1945 for (j = 0; j < i; ++j) 1946 bus_dmamap_destroy(ring->data_tag, ring->data[i].map); 1947 bus_dma_tag_destroy(ring->data_tag); 1948 ring->data_tag = NULL; 1949 return error; 1950 } 1951 1952 static void 1953 nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1954 { 1955 int i; 1956 1957 for (i = 0; i < sc->sc_tx_ring_count; i++) { 1958 struct nfe_tx_data *data = &ring->data[i]; 1959 1960 if (sc->sc_caps & NFE_40BIT_ADDR) 1961 ring->desc64[i].flags = 0; 1962 else 1963 ring->desc32[i].flags = 0; 1964 1965 if (data->m != NULL) { 1966 bus_dmamap_unload(ring->data_tag, 
data->map); 1967 m_freem(data->m); 1968 data->m = NULL; 1969 } 1970 } 1971 1972 ring->queued = 0; 1973 ring->cur = ring->next = 0; 1974 } 1975 1976 static int 1977 nfe_init_tx_ring(struct nfe_softc *sc __unused, 1978 struct nfe_tx_ring *ring __unused) 1979 { 1980 return 0; 1981 } 1982 1983 static void 1984 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1985 { 1986 if (ring->data_tag != NULL) { 1987 struct nfe_tx_data *data; 1988 int i; 1989 1990 for (i = 0; i < sc->sc_tx_ring_count; ++i) { 1991 data = &ring->data[i]; 1992 1993 if (data->m != NULL) { 1994 bus_dmamap_unload(ring->data_tag, data->map); 1995 m_freem(data->m); 1996 } 1997 bus_dmamap_destroy(ring->data_tag, data->map); 1998 } 1999 2000 bus_dma_tag_destroy(ring->data_tag); 2001 } 2002 2003 if (ring->data != NULL) 2004 kfree(ring->data, M_DEVBUF); 2005 2006 if (ring->tag != NULL) { 2007 void *desc; 2008 2009 if (sc->sc_caps & NFE_40BIT_ADDR) 2010 desc = ring->desc64; 2011 else 2012 desc = ring->desc32; 2013 2014 bus_dmamap_unload(ring->tag, ring->map); 2015 bus_dmamem_free(ring->tag, desc, ring->map); 2016 bus_dma_tag_destroy(ring->tag); 2017 } 2018 } 2019 2020 static int 2021 nfe_ifmedia_upd(struct ifnet *ifp) 2022 { 2023 struct nfe_softc *sc = ifp->if_softc; 2024 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2025 2026 ASSERT_SERIALIZED(ifp->if_serializer); 2027 2028 if (mii->mii_instance != 0) { 2029 struct mii_softc *miisc; 2030 2031 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2032 mii_phy_reset(miisc); 2033 } 2034 mii_mediachg(mii); 2035 2036 return 0; 2037 } 2038 2039 static void 2040 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2041 { 2042 struct nfe_softc *sc = ifp->if_softc; 2043 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2044 2045 ASSERT_SERIALIZED(ifp->if_serializer); 2046 2047 mii_pollstat(mii); 2048 ifmr->ifm_status = mii->mii_media_status; 2049 ifmr->ifm_active = mii->mii_media_active; 2050 } 2051 2052 static void 2053 
nfe_setmulti(struct nfe_softc *sc) 2054 { 2055 struct ifnet *ifp = &sc->arpcom.ac_if; 2056 struct ifmultiaddr *ifma; 2057 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN]; 2058 uint32_t filter = NFE_RXFILTER_MAGIC; 2059 int i; 2060 2061 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 2062 bzero(addr, ETHER_ADDR_LEN); 2063 bzero(mask, ETHER_ADDR_LEN); 2064 goto done; 2065 } 2066 2067 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 2068 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN); 2069 2070 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2071 caddr_t maddr; 2072 2073 if (ifma->ifma_addr->sa_family != AF_LINK) 2074 continue; 2075 2076 maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 2077 for (i = 0; i < ETHER_ADDR_LEN; i++) { 2078 addr[i] &= maddr[i]; 2079 mask[i] &= ~maddr[i]; 2080 } 2081 } 2082 2083 for (i = 0; i < ETHER_ADDR_LEN; i++) 2084 mask[i] |= addr[i]; 2085 2086 done: 2087 addr[0] |= 0x01; /* make sure multicast bit is set */ 2088 2089 NFE_WRITE(sc, NFE_MULTIADDR_HI, 2090 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); 2091 NFE_WRITE(sc, NFE_MULTIADDR_LO, 2092 addr[5] << 8 | addr[4]); 2093 NFE_WRITE(sc, NFE_MULTIMASK_HI, 2094 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]); 2095 NFE_WRITE(sc, NFE_MULTIMASK_LO, 2096 mask[5] << 8 | mask[4]); 2097 2098 filter |= (ifp->if_flags & IFF_PROMISC) ? 
NFE_PROMISC : NFE_U2M; 2099 NFE_WRITE(sc, NFE_RXFILTER, filter); 2100 } 2101 2102 static void 2103 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr) 2104 { 2105 uint32_t lo, hi; 2106 2107 lo = NFE_READ(sc, NFE_MACADDR_LO); 2108 hi = NFE_READ(sc, NFE_MACADDR_HI); 2109 if (sc->sc_caps & NFE_FIX_EADDR) { 2110 addr[0] = (lo >> 8) & 0xff; 2111 addr[1] = (lo & 0xff); 2112 2113 addr[2] = (hi >> 24) & 0xff; 2114 addr[3] = (hi >> 16) & 0xff; 2115 addr[4] = (hi >> 8) & 0xff; 2116 addr[5] = (hi & 0xff); 2117 } else { 2118 addr[0] = (hi & 0xff); 2119 addr[1] = (hi >> 8) & 0xff; 2120 addr[2] = (hi >> 16) & 0xff; 2121 addr[3] = (hi >> 24) & 0xff; 2122 2123 addr[4] = (lo & 0xff); 2124 addr[5] = (lo >> 8) & 0xff; 2125 } 2126 } 2127 2128 static void 2129 nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr) 2130 { 2131 NFE_WRITE(sc, NFE_MACADDR_LO, 2132 addr[5] << 8 | addr[4]); 2133 NFE_WRITE(sc, NFE_MACADDR_HI, 2134 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); 2135 } 2136 2137 static void 2138 nfe_tick(void *arg) 2139 { 2140 struct nfe_softc *sc = arg; 2141 struct ifnet *ifp = &sc->arpcom.ac_if; 2142 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2143 2144 lwkt_serialize_enter(ifp->if_serializer); 2145 2146 mii_tick(mii); 2147 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc); 2148 2149 lwkt_serialize_exit(ifp->if_serializer); 2150 } 2151 2152 static int 2153 nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx, 2154 int wait) 2155 { 2156 struct nfe_rx_data *data = &ring->data[idx]; 2157 bus_dma_segment_t seg; 2158 bus_dmamap_t map; 2159 struct mbuf *m; 2160 int nsegs, error; 2161 2162 m = m_getcl(wait ? 
MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 2163 if (m == NULL) 2164 return ENOBUFS; 2165 m->m_len = m->m_pkthdr.len = MCLBYTES; 2166 2167 error = bus_dmamap_load_mbuf_segment(ring->data_tag, ring->data_tmpmap, 2168 m, &seg, 1, &nsegs, BUS_DMA_NOWAIT); 2169 if (error) { 2170 m_freem(m); 2171 if (wait) { 2172 if_printf(&sc->arpcom.ac_if, 2173 "could map RX mbuf %d\n", error); 2174 } 2175 return error; 2176 } 2177 2178 if (data->m != NULL) { 2179 /* Sync and unload originally mapped mbuf */ 2180 bus_dmamap_sync(ring->data_tag, data->map, 2181 BUS_DMASYNC_POSTREAD); 2182 bus_dmamap_unload(ring->data_tag, data->map); 2183 } 2184 2185 /* Swap this DMA map with tmp DMA map */ 2186 map = data->map; 2187 data->map = ring->data_tmpmap; 2188 ring->data_tmpmap = map; 2189 2190 /* Caller is assumed to have collected the old mbuf */ 2191 data->m = m; 2192 2193 nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr); 2194 return 0; 2195 } 2196 2197 static int 2198 nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx, 2199 int wait) 2200 { 2201 struct nfe_rx_data *data = &ring->data[idx]; 2202 struct nfe_jbuf *jbuf; 2203 struct mbuf *m; 2204 2205 MGETHDR(m, wait ? 
MB_WAIT : MB_DONTWAIT, MT_DATA); 2206 if (m == NULL) 2207 return ENOBUFS; 2208 2209 jbuf = nfe_jalloc(sc); 2210 if (jbuf == NULL) { 2211 m_freem(m); 2212 if_printf(&sc->arpcom.ac_if, "jumbo allocation failed " 2213 "-- packet dropped!\n"); 2214 return ENOBUFS; 2215 } 2216 2217 m->m_ext.ext_arg = jbuf; 2218 m->m_ext.ext_buf = jbuf->buf; 2219 m->m_ext.ext_free = nfe_jfree; 2220 m->m_ext.ext_ref = nfe_jref; 2221 m->m_ext.ext_size = NFE_JBYTES; 2222 2223 m->m_data = m->m_ext.ext_buf; 2224 m->m_flags |= M_EXT; 2225 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 2226 2227 /* Caller is assumed to have collected the old mbuf */ 2228 data->m = m; 2229 2230 nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr); 2231 return 0; 2232 } 2233 2234 static void 2235 nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx, 2236 bus_addr_t physaddr) 2237 { 2238 if (sc->sc_caps & NFE_40BIT_ADDR) { 2239 struct nfe_desc64 *desc64 = &ring->desc64[idx]; 2240 2241 desc64->physaddr[0] = htole32(NFE_ADDR_HI(physaddr)); 2242 desc64->physaddr[1] = htole32(NFE_ADDR_LO(physaddr)); 2243 } else { 2244 struct nfe_desc32 *desc32 = &ring->desc32[idx]; 2245 2246 desc32->physaddr = htole32(physaddr); 2247 } 2248 } 2249 2250 static void 2251 nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx) 2252 { 2253 if (sc->sc_caps & NFE_40BIT_ADDR) { 2254 struct nfe_desc64 *desc64 = &ring->desc64[idx]; 2255 2256 desc64->length = htole16(ring->bufsz); 2257 desc64->flags = htole16(NFE_RX_READY); 2258 } else { 2259 struct nfe_desc32 *desc32 = &ring->desc32[idx]; 2260 2261 desc32->length = htole16(ring->bufsz); 2262 desc32->flags = htole16(NFE_RX_READY); 2263 } 2264 } 2265 2266 static int 2267 nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS) 2268 { 2269 struct nfe_softc *sc = arg1; 2270 struct ifnet *ifp = &sc->arpcom.ac_if; 2271 uint32_t flags; 2272 int error, v; 2273 2274 lwkt_serialize_enter(ifp->if_serializer); 2275 2276 flags = sc->sc_flags & ~NFE_F_DYN_IM; 2277 v = 
sc->sc_imtime; 2278 if (sc->sc_flags & NFE_F_DYN_IM) 2279 v = -v; 2280 2281 error = sysctl_handle_int(oidp, &v, 0, req); 2282 if (error || req->newptr == NULL) 2283 goto back; 2284 2285 if (v < 0) { 2286 flags |= NFE_F_DYN_IM; 2287 v = -v; 2288 } 2289 2290 if (v != sc->sc_imtime || (flags ^ sc->sc_flags)) { 2291 int old_imtime = sc->sc_imtime; 2292 uint32_t old_flags = sc->sc_flags; 2293 2294 sc->sc_imtime = v; 2295 sc->sc_flags = flags; 2296 sc->sc_irq_enable = NFE_IRQ_ENABLE(sc); 2297 2298 if ((ifp->if_flags & (IFF_POLLING | IFF_RUNNING)) 2299 == IFF_RUNNING) { 2300 if (old_imtime * sc->sc_imtime == 0 || 2301 (old_flags ^ sc->sc_flags)) { 2302 ifp->if_init(sc); 2303 } else { 2304 NFE_WRITE(sc, NFE_IMTIMER, 2305 NFE_IMTIME(sc->sc_imtime)); 2306 } 2307 } 2308 } 2309 back: 2310 lwkt_serialize_exit(ifp->if_serializer); 2311 return error; 2312 } 2313 2314 static void 2315 nfe_powerup(device_t dev) 2316 { 2317 struct nfe_softc *sc = device_get_softc(dev); 2318 uint32_t pwr_state; 2319 uint16_t did; 2320 2321 /* 2322 * Bring MAC and PHY out of low power state 2323 */ 2324 2325 pwr_state = NFE_READ(sc, NFE_PWR_STATE2) & ~NFE_PWRUP_MASK; 2326 2327 did = pci_get_device(dev); 2328 if ((did == PCI_PRODUCT_NVIDIA_MCP51_LAN1 || 2329 did == PCI_PRODUCT_NVIDIA_MCP51_LAN2) && 2330 pci_get_revid(dev) >= 0xa3) 2331 pwr_state |= NFE_PWRUP_REV_A3; 2332 2333 NFE_WRITE(sc, NFE_PWR_STATE2, pwr_state); 2334 } 2335 2336 static void 2337 nfe_mac_reset(struct nfe_softc *sc) 2338 { 2339 uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2; 2340 uint32_t macaddr_hi, macaddr_lo, tx_poll; 2341 2342 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl); 2343 2344 /* Save several registers for later restoration */ 2345 macaddr_hi = NFE_READ(sc, NFE_MACADDR_HI); 2346 macaddr_lo = NFE_READ(sc, NFE_MACADDR_LO); 2347 tx_poll = NFE_READ(sc, NFE_TX_POLL); 2348 2349 NFE_WRITE(sc, NFE_MAC_RESET, NFE_RESET_ASSERT); 2350 DELAY(100); 2351 2352 NFE_WRITE(sc, NFE_MAC_RESET, 0); 2353 DELAY(100); 2354 2355 /* 
Restore saved registers */ 2356 NFE_WRITE(sc, NFE_MACADDR_HI, macaddr_hi); 2357 NFE_WRITE(sc, NFE_MACADDR_LO, macaddr_lo); 2358 NFE_WRITE(sc, NFE_TX_POLL, tx_poll); 2359 2360 NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl); 2361 } 2362 2363 static void 2364 nfe_enable_intrs(struct nfe_softc *sc) 2365 { 2366 /* 2367 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER. 2368 * It is unclear how wide the timer is. Base programming does 2369 * not seem to effect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE so 2370 * we don't get any interrupt moderation. TX moderation is 2371 * possible by using the timer interrupt instead of TX_DONE. 2372 * 2373 * It is unclear whether there are other bits that can be 2374 * set to make the NFE device actually do interrupt moderation 2375 * on the RX side. 2376 * 2377 * For now set a 128uS interval as a placemark, but don't use 2378 * the timer. 2379 */ 2380 if (sc->sc_imtime == 0) 2381 NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME_DEFAULT); 2382 else 2383 NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME(sc->sc_imtime)); 2384 2385 /* Enable interrupts */ 2386 NFE_WRITE(sc, NFE_IRQ_MASK, sc->sc_irq_enable); 2387 2388 if (sc->sc_irq_enable & NFE_IRQ_TIMER) 2389 sc->sc_flags |= NFE_F_IRQ_TIMER; 2390 else 2391 sc->sc_flags &= ~NFE_F_IRQ_TIMER; 2392 } 2393 2394 static void 2395 nfe_disable_intrs(struct nfe_softc *sc) 2396 { 2397 /* Disable interrupts */ 2398 NFE_WRITE(sc, NFE_IRQ_MASK, 0); 2399 sc->sc_flags &= ~NFE_F_IRQ_TIMER; 2400 } 2401