/*	$OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $	*/
/*	$DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.46 2008/10/28 07:30:49 sephe Exp $	*/

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com> and
 * Matthew Dillon <dillon@apollo.backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include "miibus_if.h"

#include <dev/netif/nfe/if_nfereg.h>
#include <dev/netif/nfe/if_nfevar.h>

#define NFE_CSUM
#define NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

static int	nfe_probe(device_t);
static int	nfe_attach(device_t);
static int	nfe_detach(device_t);
static void	nfe_shutdown(device_t);
static int	nfe_resume(device_t);
static int	nfe_suspend(device_t);

static int	nfe_miibus_readreg(device_t, int, int);
static void	nfe_miibus_writereg(device_t, int, int, int);
static void	nfe_miibus_statchg(device_t);

#ifdef DEVICE_POLLING
static void	nfe_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	nfe_intr(void *);
static int	nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static int	nfe_rxeof(struct nfe_softc *);
static int	nfe_txeof(struct nfe_softc *, int);
static int	nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
			  struct mbuf *);
static void	nfe_start(struct ifnet *);
static void	nfe_watchdog(struct ifnet *);
static void	nfe_init(void *);
static void	nfe_stop(struct nfe_softc *);
static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
static void	nfe_jfree(void *);
static void	nfe_jref(void *);
static int	nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_ifmedia_upd(struct ifnet *);
static void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	nfe_setmulti(struct nfe_softc *);
static void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
static void	nfe_powerup(device_t);
static void	nfe_mac_reset(struct nfe_softc *);
static void	nfe_tick(void *);
static void	nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int, bus_addr_t);
static void	nfe_set_ready_rxdesc(struct nfe_softc *,
				     struct nfe_rx_ring *, int);
static int	nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
			       int);
static int	nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
				 int);
static void	nfe_enable_intrs(struct nfe_softc *);
static void	nfe_disable_intrs(struct nfe_softc *);

static int	nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS);

#define NFE_DEBUG
#ifdef NFE_DEBUG

static int	nfe_debug = 0;
static int	nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT;
static int	nfe_tx_ring_count = NFE_TX_RING_DEF_COUNT;
/*
 * hw timer simulated interrupt moderation @4000Hz.  Negative values
 * disable the timer when the discrete interrupt rate falls below
 * the moderation rate.
 *
 * XXX 8000Hz might be better but if the interrupt is shared it can
 * blow out the cpu.
 */
static int	nfe_imtime = -250;	/* uS */

TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count);
TUNABLE_INT("hw.nfe.tx_ring_count", &nfe_tx_ring_count);
TUNABLE_INT("hw.nfe.imtimer", &nfe_imtime);
TUNABLE_INT("hw.nfe.debug", &nfe_debug);

#define DPRINTF(sc, fmt, ...) do {		\
	if ((sc)->sc_debug) {			\
		if_printf(&(sc)->arpcom.ac_if,	\
			  fmt, __VA_ARGS__);	\
	}					\
} while (0)

#define DPRINTFN(sc, lv, fmt, ...) do {		\
	if ((sc)->sc_debug >= (lv)) {		\
		if_printf(&(sc)->arpcom.ac_if,	\
			  fmt, __VA_ARGS__);	\
	}					\
} while (0)

#else	/* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)

#endif	/* NFE_DEBUG */

static const struct nfe_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	  "NVIDIA nForce Fast Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	  "NVIDIA nForce2 Fast Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	  "NVIDIA nForce3 Gigabit Ethernet" },
	/* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
	   chipset, and possibly also the 400R; it might be both nForce2- and
	   nForce3-based boards can use the same MCPs (= southbridges) */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
	  "NVIDIA nForce3 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
	  "NVIDIA nForce3 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	  "NVIDIA nForce3 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
	  "NVIDIA nForce3 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
	  "NVIDIA CK804 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
	  "NVIDIA CK804 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	  "NVIDIA MCP04 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	  "NVIDIA MCP04 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
	  "NVIDIA MCP51 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
	  "NVIDIA MCP51 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	  "NVIDIA MCP55 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	  "NVIDIA MCP55 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	  "NVIDIA MCP61 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	  "NVIDIA MCP61 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	  "NVIDIA MCP61 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	  "NVIDIA MCP61 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	  "NVIDIA MCP65 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	  "NVIDIA MCP65 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	  "NVIDIA MCP65 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	  "NVIDIA MCP65 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	  "NVIDIA MCP67 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	  "NVIDIA MCP67 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	  "NVIDIA MCP67 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	  "NVIDIA MCP67 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
	  "NVIDIA MCP73 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
	  "NVIDIA MCP73 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
	  "NVIDIA MCP73 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
	  "NVIDIA MCP73 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
	  "NVIDIA MCP77 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
	  "NVIDIA MCP77 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
	  "NVIDIA MCP77 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
	  "NVIDIA MCP77 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
	  "NVIDIA MCP79 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
	  "NVIDIA MCP79 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
	  "NVIDIA MCP79 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ 0, 0, NULL }
};

static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ 0, 0 }
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

DECLARE_DUMMY_MODULE(if_nfe);
MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);

static int
nfe_probe(device_t dev)
{
	const struct nfe_dev *n;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (n = nfe_devices; n->desc != NULL; ++n) {
		if (vid == n->vid && did == n->did) {
			struct nfe_softc *sc = device_get_softc(dev);

			switch (did) {
			case PCI_PRODUCT_NVIDIA_NFORCE_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE2_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN1:
				sc->sc_caps = NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_HW_CSUM |
					      NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
				sc->sc_caps = NFE_FIX_EADDR;
				/* FALL THROUGH */
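			/*
			 * NOTE: the next group uses |= so that MCP51
			 * devices falling through from the cases above
			 * keep NFE_FIX_EADDR in addition to
			 * NFE_40BIT_ADDR.
			 */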
			case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
				sc->sc_caps |= NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_CK804_LAN1:
			case PCI_PRODUCT_NVIDIA_CK804_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_HW_VLAN |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
				sc->sc_caps = NFE_40BIT_ADDR |
					      NFE_HW_CSUM;
				break;
			}

			device_set_desc(dev, n->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t lowaddr;
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	lwkt_serialize_init(&sc->sc_jbuf_serializer);

	/*
	 * Initialize sysctl variables
	 */
	sc->sc_rx_ring_count = nfe_rx_ring_count;
	sc->sc_tx_ring_count = nfe_tx_ring_count;
	sc->sc_debug = nfe_debug;
	if (nfe_imtime < 0) {
		sc->sc_flags |= NFE_F_DYN_IM;
		sc->sc_imtime = -nfe_imtime;
	} else {
		sc->sc_imtime = nfe_imtime;
	}
	sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

	sc->sc_mem_rid = PCIR_BAR(0);

	if (sc->sc_caps & NFE_40BIT_ADDR)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V3;
	else if (sc->sc_caps & NFE_JUMBO_SUP)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V2;

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, irq;

		mem = pci_read_config(dev, sc->sc_mem_rid, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate io memory\n");
		return ENXIO;
	}
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	/* Allocate IRQ */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/* Disable WOL */
	NFE_WRITE(sc, NFE_WOL_CTL, 0);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_powerup(dev);

	nfe_get_macaddr(sc, eaddr);

	/*
	 * Allocate top level DMA tag
	 */
	if (sc->sc_caps & NFE_40BIT_ADDR)
		lowaddr = NFE_BUS_SPACE_MAXADDR;
	else
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	error = bus_dma_tag_create(NULL,	/* parent */
			1, 0,			/* alignment, boundary */
			lowaddr,		/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
			0,			/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
			0,			/* flags */
			&sc->sc_dtag);
	if (error) {
		device_printf(dev, "could not allocate parent dma tag\n");
		goto fail;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
					     SYSCTL_STATIC_CHILDREN(_hw),
					     OID_AUTO,
					     device_get_nameunit(dev),
					     CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}
	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
			OID_AUTO, "imtimer", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, nfe_sysctl_imtime, "I",
			"Interrupt moderation time (usec).  "
			"0 to disable interrupt moderation.");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "rx_ring_count", CTLFLAG_RD, &sc->sc_rx_ring_count,
		       0, "RX ring count");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "tx_ring_count", CTLFLAG_RD, &sc->sc_tx_ring_count,
		       0, "TX ring count");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "debug", CTLFLAG_RW, &sc->sc_debug,
		       0, "control debugging printfs");

	error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
			      nfe_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = nfe_poll;
#endif
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_caps & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

#ifdef NFE_CSUM
	if (sc->sc_caps & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = NFE_CSUM_FEATURES;
	}
#else
	sc->sc_caps &= ~NFE_HW_CSUM;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	callout_init(&sc->sc_tick_ch);

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not setup intr\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return 0;
fail:
	nfe_detach(dev);
	return error;
}

static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		nfe_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sc_sysctl_ctx);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);

	return 0;
}

static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP)
		nfe_init(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	ASSERT_SERIALIZED(sc->arpcom.ac_if.if_serializer);

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY %s\n", "");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return val;
}

static void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

#ifdef NFE_DEBUG
	if (ntries == 1000)
		DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
#endif
}

#ifdef DEVICE_POLLING

static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case POLL_REGISTER:
		nfe_disable_intrs(sc);
		break;

	case POLL_DEREGISTER:
		nfe_enable_intrs(sc);
		break;

	case POLL_AND_CHECK_STATUS:
		/* fall through */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			nfe_rxeof(sc);
			nfe_txeof(sc, 1);
		}
		break;
	}
}

#endif
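
/*
 * Interrupt handler.  When dynamic interrupt moderation (NFE_F_DYN_IM)
 * is enabled, this routine also tracks the interrupt rate and switches
 * between the discrete RX/TX interrupts and the hardware IM timer once
 * the measured rate crosses the threshold derived from sc_imtime.
 */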
static void
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t r;

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	if (r == 0)
		return;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	if (sc->sc_rate_second != time_second) {
		/*
		 * Calculate sc_rate_avg - interrupts per second.
		 */
		sc->sc_rate_second = time_second;
		if (sc->sc_rate_avg < sc->sc_rate_acc)
			sc->sc_rate_avg = sc->sc_rate_acc;
		else
			sc->sc_rate_avg = (sc->sc_rate_avg * 3 +
					   sc->sc_rate_acc) / 4;
		sc->sc_rate_acc = 0;
	} else if (sc->sc_rate_avg < sc->sc_rate_acc) {
		/*
		 * Don't wait for a tick to roll over if we are taking
		 * a lot of interrupts.
		 */
		sc->sc_rate_avg = sc->sc_rate_acc;
	}

	DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed %s\n", "");
	}

	if (ifp->if_flags & IFF_RUNNING) {
		int ret;
		int rate;

		/* check Rx ring */
		ret = nfe_rxeof(sc);

		/* check Tx ring */
		ret |= nfe_txeof(sc, 1);

		/* update the rate accumulator */
		if (ret)
			++sc->sc_rate_acc;

		if (sc->sc_flags & NFE_F_DYN_IM) {
			rate = 1000000 / sc->sc_imtime;
			if ((sc->sc_flags & NFE_F_IRQ_TIMER) == 0 &&
			    sc->sc_rate_avg > rate) {
				/*
				 * Use the hardware timer to reduce the
				 * interrupt rate if the discrete interrupt
				 * rate has exceeded our threshold.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_IMTIMER);
				sc->sc_flags |= NFE_F_IRQ_TIMER;
			} else if ((sc->sc_flags & NFE_F_IRQ_TIMER) &&
				   sc->sc_rate_avg <= rate) {
				/*
				 * Use discrete TX/RX interrupts if the rate
				 * has fallen below our threshold.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_NOIMTIMER);
				sc->sc_flags &= ~NFE_F_IRQ_TIMER;

				/*
				 * Recollect, mainly to avoid the possible race
				 * introduced by changing interrupt masks.
				 */
				nfe_rxeof(sc);
				nfe_txeof(sc, 1);
			}
		}
	}
}

static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask, jumbo_cap;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		if ((sc->sc_caps & NFE_JUMBO_SUP) && sc->rxq.jbuf != NULL)
			jumbo_cap = 1;
		else
			jumbo_cap = 0;

		if ((jumbo_cap && ifr->ifr_mtu > NFE_JUMBO_MTU) ||
		    (!jumbo_cap && ifr->ifr_mtu > ETHERMTU)) {
			return EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM;
		if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) {
			ifp->if_capenable ^= mask;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist = NFE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;

			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

static int
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_rx_ring *ring = &sc->rxq;
	int reap;
	struct mbuf_chain chain[MAXCPU];

	reap = 0;
	ether_input_chain_init(chain);

	for (;;) {
		struct nfe_rx_data *data = &ring->data[ring->cur];
		struct mbuf *m;
		uint16_t flags;
		int len, error;

		if (sc->sc_caps & NFE_40BIT_ADDR) {
			struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		reap = 1;

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		m = data->m;

		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
		else
			error = nfe_newbuf_std(sc, ring, ring->cur, 0);
		if (error) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) &&
		    (flags & NFE_RX_CSUMOK)) {
			if (flags & NFE_RX_IP_CSUMOK_V2) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
							  CSUM_IP_VALID;
			}

			if (flags &
			    (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
							  CSUM_PSEUDO_HDR |
							  CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		ifp->if_ipackets++;
		ether_input_chain(ifp, m, NULL, chain);
skip:
		nfe_set_ready_rxdesc(sc, ring, ring->cur);
		sc->rxq.cur = (sc->rxq.cur + 1) % sc->sc_rx_ring_count;
	}

	if (reap)
		ether_input_dispatch(chain);
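
	/*
	 * The return value only indicates whether any descriptor was
	 * reaped; nfe_intr() uses it to feed the interrupt-rate
	 * accumulator that drives dynamic interrupt moderation.
	 */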
	return reap;
}

static int
nfe_txeof(struct nfe_softc *sc, int start)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_tx_ring *ring = &sc->txq;
	struct nfe_tx_data *data = NULL;

	while (ring->next != ring->cur) {
		uint16_t flags;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			flags = le16toh(ring->desc64[ring->next].flags);
		else
			flags = le16toh(ring->desc32[ring->next].flags);

		if (flags & NFE_TX_VALID)
			break;

		data = &ring->data[ring->next];

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				if_printf(ifp, "tx v1 error 0x%4b\n", flags,
					  NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				if_printf(ifp, "tx v2 error 0x%4b\n", flags,
					  NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		}

		if (data->m == NULL) {	/* should not get there */
			if_printf(ifp,
				  "last fragment bit w/o associated mbuf!\n");
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_unload(ring->data_tag, data->map);
		m_freem(data->m);
		data->m = NULL;
skip:
		ring->queued--;
		KKASSERT(ring->queued >= 0);
		ring->next = (ring->next + 1) % sc->sc_tx_ring_count;
	}

	if (sc->sc_tx_ring_count - ring->queued >=
	    sc->sc_tx_spare + NFE_NSEG_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	if (ring->queued == 0)
		ifp->if_timer = 0;

	if (start && !ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);

	if (data != NULL)
		return 1;
	else
		return 0;
}

static int
nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
{
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	struct nfe_tx_data *data, *data_map;
	bus_dmamap_t map;
	struct nfe_desc64 *desc64 = NULL;
	struct nfe_desc32 *desc32 = NULL;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, j, maxsegs, nsegs;

	data = &ring->data[ring->cur];
	map = data->map;
	data_map = data;	/* Remember who owns the DMA map */

	maxsegs = (sc->sc_tx_ring_count - ring->queued) - NFE_NSEG_RSVD;
	if (maxsegs > NFE_MAX_SCATTER)
		maxsegs = NFE_MAX_SCATTER;
	KASSERT(maxsegs >= sc->sc_tx_spare,
		("not enough segments %d,%d\n", maxsegs, sc->sc_tx_spare));

	error = bus_dmamap_load_mbuf_defrag(ring->data_tag, map, &m0,
					    segs, maxsegs, &nsegs,
					    BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);

	error = 0;

	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = m0->m_pkthdr.ether_vlantag;

	if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= NFE_TX_TCP_CSUM;
	}

	/*
	 * XXX urm. somebody is unaware of how hardware works.  You
	 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
	 * the ring until the entire chain is actually *VALID*.  Otherwise
	 * the hardware may encounter a partially initialized chain that
	 * is marked as being ready to go when it in fact is not ready to
	 * go.
	 */

	for (i = 0; i < nsegs; i++) {
		j = (ring->cur + i) % sc->sc_tx_ring_count;
		data = &ring->data[j];

		if (sc->sc_caps & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->physaddr[0] =
			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
			desc64->physaddr[1] =
			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->vtag = htole32(vtag);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &ring->desc32[j];
			desc32->physaddr = htole32(segs[i].ds_addr);
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
		vtag = 0;

		ring->queued++;
		KKASSERT(ring->queued <= sc->sc_tx_ring_count);
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
	} else {
		if (sc->sc_caps & NFE_JUMBO_SUP)
			flags = NFE_TX_LASTFRAG_V2;
		else
			flags = NFE_TX_LASTFRAG_V1;
		desc32->flags |= htole16(flags);
	}

	/*
	 * Set NFE_TX_VALID backwards so the hardware doesn't see the
	 * whole mess until the first descriptor in the map is flagged.
	 */
	for (i = nsegs - 1; i >= 0; --i) {
		j = (ring->cur + i) % sc->sc_tx_ring_count;
		if (sc->sc_caps & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->flags |= htole16(NFE_TX_VALID);
		} else {
			desc32 = &ring->desc32[j];
			desc32->flags |= htole16(NFE_TX_VALID);
		}
	}
	ring->cur = (ring->cur + nsegs) % sc->sc_tx_ring_count;

	/* Exchange DMA map */
	data_map->map = data->map;
	data->map = map;
	data->m = m0;
back:
	if (error)
		m_freem(m0);
	return error;
}

static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_tx_ring *ring = &sc->txq;
	int count = 0, oactive = 0;
	struct mbuf *m0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;
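
	/*
	 * The 'oactive' flag below makes sure completed descriptors are
	 * reclaimed via nfe_txeof() once before concluding that the ring
	 * really is full and setting IFF_OACTIVE.
	 */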
	for (;;) {
		int error;

		if (sc->sc_tx_ring_count - ring->queued <
		    sc->sc_tx_spare + NFE_NSEG_RSVD) {
			if (oactive) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}

			nfe_txeof(sc, 0);
			oactive = 1;
			continue;
		}

		m0 = ifq_dequeue(&ifp->if_snd, NULL);
		if (m0 == NULL)
			break;

		ETHER_BPF_MTAP(ifp, m0);

		error = nfe_encap(sc, ring, m0);
		if (error) {
			ifp->if_oerrors++;
			if (error == EFBIG) {
				if (oactive) {
					ifp->if_flags |= IFF_OACTIVE;
					break;
				}
				nfe_txeof(sc, 0);
				oactive = 1;
			}
			continue;
		} else {
			oactive = 0;
		}
		++count;

		/*
		 * NOTE:
		 * `m0' may be freed in nfe_encap(), so
		 * it should not be touched any more.
		 */
	}

	if (count == 0)	/* nothing sent */
		return;

	/* Kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (ifp->if_flags & IFF_RUNNING) {
		if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
		nfe_txeof(sc, 1);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");

	nfe_init(ifp->if_softc);

	ifp->if_oerrors++;
}

static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t tmp;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	nfe_stop(sc);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_mac_reset(sc);

	/*
	 * NOTE:
	 * Switching between jumbo frames and normal frames should
	 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring().
	 */
	if (ifp->if_mtu > ETHERMTU) {
		sc->sc_flags |= NFE_F_USE_JUMBO;
		sc->rxq.bufsz = NFE_JBYTES;
		sc->sc_tx_spare = NFE_NSEG_SPARE_JUMBO;
		if (bootverbose)
			if_printf(ifp, "use jumbo frames\n");
	} else {
		sc->sc_flags &= ~NFE_F_USE_JUMBO;
		sc->rxq.bufsz = MCLBYTES;
		sc->sc_tx_spare = NFE_NSEG_SPARE;
		if (bootverbose)
			if_printf(ifp, "use non-jumbo frames\n");
	}

	error = nfe_init_tx_ring(sc, &sc->txq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	NFE_WRITE(sc, NFE_TX_POLL, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2 | sc->rxtxctl_desc;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;

	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose.  This will be done in software by our network stack.
	 */
	if (sc->sc_caps & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (sc->sc_caps & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
			  NFE_ADDR_HI(sc->rxq.physaddr));
	}
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->rxq.physaddr));

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		NFE_WRITE(sc, NFE_TX_RING_ADDR_HI,
			  NFE_ADDR_HI(sc->txq.physaddr));
	}
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));

	NFE_WRITE(sc, NFE_RING_SIZE,
		  (sc->sc_rx_ring_count - 1) << 16 |
		  (sc->sc_tx_ring_count - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef DEVICE_POLLING
	if ((ifp->if_flags & IFF_POLLING))
		nfe_disable_intrs(sc);
	else
#endif
	nfe_enable_intrs(sc);

	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * If we had stuff in the tx ring before, it's all cleaned out now,
	 * so we are not going to get an interrupt; jump-start any pending
	 * output.
	 */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static void
nfe_stop(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_flags &= ~NFE_F_IRQ_TIMER;

#define WAITMAX	50000

	/*
	 * Abort Tx
	 */
	NFE_WRITE(sc, NFE_TX_CTL, 0);
	for (i = 0; i < WAITMAX; ++i) {
		DELAY(100);
		if ((NFE_READ(sc, NFE_TX_STATUS) & NFE_TX_STATUS_BUSY) == 0)
			break;
	}
	if (i == WAITMAX)
		if_printf(ifp, "can't stop TX\n");
	DELAY(100);

	/*
	 * Disable Rx
	 */
	NFE_WRITE(sc, NFE_RX_CTL, 0);
	for (i = 0; i < WAITMAX; ++i) {
		DELAY(100);
		if ((NFE_READ(sc, NFE_RX_STATUS) & NFE_RX_STATUS_BUSY) == 0)
			break;
	}
	if (i == WAITMAX)
		if_printf(ifp, "can't stop RX\n");
	DELAY(100);

#undef WAITMAX

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* Reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i, j, error, descsize;
	bus_dmamem_t dmem;
	void **desc;

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->bufsz = MCLBYTES;
	ring->cur = ring->next = 0;

	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    sc->sc_rx_ring_count * descsize,
				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX desc ring\n");
		return error;
	}
	ring->tag = dmem.dmem_tag;
	ring->map = dmem.dmem_map;
	*desc = dmem.dmem_addr;
	ring->physaddr = dmem.dmem_busaddr;

	if (sc->sc_caps & NFE_JUMBO_SUP) {
		ring->jbuf =
		    kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT(sc),
			    M_DEVBUF, M_WAITOK | M_ZERO);

		error = nfe_jpool_alloc(sc, ring);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate jumbo frames\n");
			kfree(ring->jbuf, M_DEVBUF);
			ring->jbuf = NULL;
			/* Allow jumbo frame allocation to fail */
		}
	}

	ring->data = kmalloc(sizeof(struct nfe_rx_data) * sc->sc_rx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
				   &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX mbuf DMA tag\n");
		return error;
	}

	/* Create a spare RX mbuf DMA map */
	error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK,
				  &ring->data_tmpmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create spare RX mbuf DMA map\n");
		bus_dma_tag_destroy(ring->data_tag);
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth RX mbuf DMA map\n", i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			if ((sc->sc_flags & NFE_F_USE_JUMBO) == 0)
				bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	ring->cur = ring->next = 0;
}

static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}
		nfe_set_ready_rxdesc(sc, ring, i);
	}
	return 0;
}

static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < sc->sc_rx_ring_count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->jbuf != NULL)
		kfree(ring->jbuf, M_DEVBUF);
	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}
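
/*
 * Jumbo receive buffers come from the single pool set up by
 * nfe_jpool_alloc() and are attached to mbufs as external storage.
 * nfe_jref()/nfe_jfree() therefore keep a per-buffer reference count
 * and only return a buffer to the free list once the last reference
 * is dropped.  The free list has its own serializer, presumably
 * because the mbuf free routine may run outside the interface
 * serializer.
 */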
static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf *jbuf;

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL) {
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
		jbuf->inuse = 1;
	} else {
		if_printf(ifp, "no free jumbo buffer\n");
	}

	lwkt_serialize_exit(&sc->sc_jbuf_serializer);

	return jbuf;
}

static void
nfe_jfree(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_softc *sc = jbuf->sc;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: free wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);
	atomic_subtract_int(&jbuf->inuse, 1);
	if (jbuf->inuse == 0)
		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
	lwkt_serialize_exit(&sc->sc_jbuf_serializer);
}

static void
nfe_jref(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: ref wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	atomic_add_int(&jbuf->inuse, 1);
}

static int
nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_jbuf *jbuf;
	bus_dmamem_t dmem;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    NFE_JPOOL_SIZE(sc),
				    BUS_DMA_WAITOK, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create jumbo buffer\n");
		return error;
	}
	ring->jtag = dmem.dmem_tag;
	ring->jmap = dmem.dmem_map;
	ring->jpool = dmem.dmem_addr;
	physaddr = dmem.dmem_busaddr;

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	for (i = 0; i < NFE_JPOOL_COUNT(sc); i++) {
		jbuf = &ring->jbuf[i];

		jbuf->sc = sc;
		jbuf->ring = ring;
		jbuf->inuse = 0;
		jbuf->slot = i;
		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;
}

static void
nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->jtag != NULL) {
		bus_dmamap_unload(ring->jtag, ring->jmap);
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
	}
}

static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, j, error, descsize;
	bus_dmamem_t dmem;
	void **desc;

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    sc->sc_tx_ring_count * descsize,
				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX desc ring\n");
		return error;
	}
	ring->tag = dmem.dmem_tag;
	ring->map = dmem.dmem_map;
	*desc = dmem.dmem_addr;
	ring->physaddr = dmem.dmem_busaddr;

	ring->data = kmalloc(sizeof(struct nfe_tx_data) * sc->sc_tx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JBYTES, NFE_MAX_SCATTER, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
				   BUS_DMA_ONEBPAGE,
				   &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX buf DMA tag\n");
		return error;
	}

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag,
					  BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth TX buf DMA map\n", i);
			goto fail;
		}
	}

	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		struct nfe_tx_data *data = &ring->data[i];

		if (sc->sc_caps & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		if (data->m != NULL) {
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

static int
nfe_init_tx_ring(struct nfe_softc *sc __unused,
		 struct nfe_tx_ring *ring __unused)
{
	return 0;
}

static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < sc->sc_tx_ring_count; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}

		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
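
	/*
	 * Compute the bit positions on which all subscribed multicast
	 * addresses agree ('mask') and their common value ('addr');
	 * the hardware then appears to accept any destination address
	 * that matches 'addr' on the bits selected by 'mask'.
	 */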
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
		  addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
		  mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
		  mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}
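
/*
 * On devices flagged NFE_FIX_EADDR the MACADDR registers hold the
 * station address in reversed byte order, hence the two different
 * decodings below.
 */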
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
		  mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
		  mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t lo, hi;

	lo = NFE_READ(sc, NFE_MACADDR_LO);
	hi = NFE_READ(sc, NFE_MACADDR_HI);
	if (sc->sc_caps & NFE_FIX_EADDR) {
		addr[0] = (lo >> 8) & 0xff;
		addr[1] = (lo & 0xff);

		addr[2] = (hi >> 24) & 0xff;
		addr[3] = (hi >> 16) & 0xff;
		addr[4] = (hi >> 8) & 0xff;
		addr[5] = (hi & 0xff);
	} else {
		addr[0] = (hi & 0xff);
		addr[1] = (hi >> 8) & 0xff;
		addr[2] = (hi >> 16) & 0xff;
		addr[3] = (hi >> 24) & 0xff;

		addr[4] = (lo & 0xff);
		addr[5] = (lo >> 8) & 0xff;
	}
}

static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int nsegs, error;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	error = bus_dmamap_load_mbuf_segment(ring->data_tag, ring->data_tmpmap,
					     m, &seg, 1, &nsegs,
					     BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (wait) {
			if_printf(&sc->arpcom.ac_if,
				  "could not map RX mbuf %d\n", error);
		}
		return error;
	}

	if (data->m != NULL) {
		/* Sync and unload originally mapped mbuf */
		bus_dmamap_sync(ring->data_tag, data->map,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->data_tag, data->map);
	}

	/* Swap this DMA map with tmp DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);
	return 0;
}
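/*
 * The jumbo RX path does not map mbuf clusters at all: buffers come from
 * the driver's pre-allocated jumbo pool via nfe_jalloc() and already carry
 * their bus address in jbuf->physaddr, so no per-buffer DMA map load is
 * needed here.  The buffer is attached to a plain mbuf header as external
 * storage, with nfe_jref()/nfe_jfree() maintaining the pool's reference
 * counts and returning the buffer when the last reference is dropped.
 */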
static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			  "-- packet dropped!\n");
		return ENOBUFS;
	}

	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);
	return 0;
}

static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		     bus_addr_t physaddr)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->physaddr[0] = htole32(NFE_ADDR_HI(physaddr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(physaddr));
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
}

static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
}

static int
nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS)
{
	struct nfe_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t flags;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	flags = sc->sc_flags & ~NFE_F_DYN_IM;
	v = sc->sc_imtime;
	if (sc->sc_flags & NFE_F_DYN_IM)
		v = -v;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < 0) {
		flags |= NFE_F_DYN_IM;
		v = -v;
	}

	if (v != sc->sc_imtime || (flags ^ sc->sc_flags)) {
		if (NFE_IMTIME(v) == 0)
			v = 0;
		sc->sc_imtime = v;
		sc->sc_flags = flags;
		sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

		if ((ifp->if_flags & (IFF_POLLING | IFF_RUNNING)) ==
		    IFF_RUNNING) {
			nfe_enable_intrs(sc);
		}
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static void
nfe_powerup(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t pwr_state;
	uint16_t did;

	/*
	 * Bring MAC and PHY out of low power state
	 */
	pwr_state = NFE_READ(sc, NFE_PWR_STATE2) & ~NFE_PWRUP_MASK;

	did = pci_get_device(dev);
	if ((did == PCI_PRODUCT_NVIDIA_MCP51_LAN1 ||
	     did == PCI_PRODUCT_NVIDIA_MCP51_LAN2) &&
	    pci_get_revid(dev) >= 0xa3)
		pwr_state |= NFE_PWRUP_REV_A3;

	NFE_WRITE(sc, NFE_PWR_STATE2, pwr_state);
}
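/*
 * Reset the MAC while preserving the station address and TX poll settings:
 * those registers are read back before the reset is asserted and rewritten
 * once it is released, then the descriptor-format bits are restored in
 * NFE_RXTX_CTL.
 */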
static void
nfe_mac_reset(struct nfe_softc *sc)
{
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	uint32_t macaddr_hi, macaddr_lo, tx_poll;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);

	/* Save several registers for later restoration */
	macaddr_hi = NFE_READ(sc, NFE_MACADDR_HI);
	macaddr_lo = NFE_READ(sc, NFE_MACADDR_LO);
	tx_poll = NFE_READ(sc, NFE_TX_POLL);

	NFE_WRITE(sc, NFE_MAC_RESET, NFE_RESET_ASSERT);
	DELAY(100);

	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);

	/* Restore saved registers */
	NFE_WRITE(sc, NFE_MACADDR_HI, macaddr_hi);
	NFE_WRITE(sc, NFE_MACADDR_LO, macaddr_lo);
	NFE_WRITE(sc, NFE_TX_POLL, tx_poll);

	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
}

static void
nfe_enable_intrs(struct nfe_softc *sc)
{
	/*
	 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
	 * It is unclear how wide the timer is.  Base programming does
	 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE, so
	 * we don't get any interrupt moderation.  TX moderation is
	 * possible by using the timer interrupt instead of TX_DONE.
	 *
	 * It is unclear whether there are other bits that can be
	 * set to make the NFE device actually do interrupt moderation
	 * on the RX side.
	 *
	 * For now set a 128uS interval as a placeholder, but don't use
	 * the timer.
	 */
	if (sc->sc_imtime == 0)
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME_DEFAULT);
	else
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME(sc->sc_imtime));

	/* Enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, sc->sc_irq_enable);

	if (sc->sc_irq_enable & NFE_IRQ_TIMER)
		sc->sc_flags |= NFE_F_IRQ_TIMER;
	else
		sc->sc_flags &= ~NFE_F_IRQ_TIMER;
}

static void
nfe_disable_intrs(struct nfe_softc *sc)
{
	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	sc->sc_flags &= ~NFE_F_IRQ_TIMER;
}