/*	$OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $	*/

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com> and
 * Matthew Dillon <dillon@apollo.backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include "miibus_if.h"

#include <dev/netif/nfe/if_nfereg.h>
#include <dev/netif/nfe/if_nfevar.h>

#define NFE_CSUM
#define NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

static int	nfe_probe(device_t);
static int	nfe_attach(device_t);
static int	nfe_detach(device_t);
static void	nfe_shutdown(device_t);
static int	nfe_resume(device_t);
static int	nfe_suspend(device_t);

static int	nfe_miibus_readreg(device_t, int, int);
static void	nfe_miibus_writereg(device_t, int, int, int);
static void	nfe_miibus_statchg(device_t);

#ifdef DEVICE_POLLING
static void	nfe_poll(struct ifnet *, enum poll_cmd, int);
static void	nfe_disable_intrs(struct nfe_softc *);
#endif
static void	nfe_intr(void *);
static int	nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static int	nfe_rxeof(struct nfe_softc *);
static int	nfe_txeof(struct nfe_softc *, int);
static int	nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
			  struct mbuf *);
static void	nfe_start(struct ifnet *);
static void	nfe_watchdog(struct ifnet *);
static void	nfe_init(void *);
static void	nfe_stop(struct nfe_softc *);
static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
static void	nfe_jfree(void *);
static void	nfe_jref(void *);
static int	nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_ifmedia_upd(struct ifnet *);
static void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	nfe_setmulti(struct nfe_softc *);
static void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
static void	nfe_powerup(device_t);
static void	nfe_mac_reset(struct nfe_softc *);
static void	nfe_tick(void *);
static void	nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int, bus_addr_t);
static void	nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int);
static int	nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
			       int);
static int	nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
				 int);
static void	nfe_enable_intrs(struct nfe_softc *);

static int	nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS);

#define NFE_DEBUG
#ifdef NFE_DEBUG

static int	nfe_debug = 0;
static int	nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT;
static int	nfe_tx_ring_count = NFE_TX_RING_DEF_COUNT;
/*
 * hw timer simulated interrupt moderation @4000Hz.  Negative values
 * disable the timer when the discrete interrupt rate falls below
 * the moderation rate.
 *
 * XXX 8000Hz might be better but if the interrupt is shared it can
 * blow out the cpu.
 */
static int	nfe_imtime = -250;	/* uS */

TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count);
TUNABLE_INT("hw.nfe.tx_ring_count", &nfe_tx_ring_count);
TUNABLE_INT("hw.nfe.imtimer", &nfe_imtime);
TUNABLE_INT("hw.nfe.debug", &nfe_debug);
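
/*
 * Example usage (values purely illustrative): the tunables above can
 * be set from /boot/loader.conf before the driver attaches, e.g.
 *
 *	hw.nfe.imtimer="-125"		# dynamic moderation at 8000Hz
 *	hw.nfe.rx_ring_count="256"
 *
 * A negative imtimer value selects the dynamic mode described above;
 * a positive value moderates unconditionally.  The same knob is also
 * exported per device at runtime through nfe_sysctl_imtime(), e.g.
 * `sysctl hw.nfe0.imtimer=125'.
 */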
#define DPRINTF(sc, fmt, ...) do {			\
	if ((sc)->sc_debug) {				\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#define DPRINTFN(sc, lv, fmt, ...) do {			\
	if ((sc)->sc_debug >= (lv)) {			\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#else	/* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)

#endif	/* NFE_DEBUG */

static const struct nfe_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	  "NVIDIA nForce Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	  "NVIDIA nForce2 Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	/* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
	   chipset, and possibly also the 400R; it might be that both nForce2-
	   and nForce3-based boards can use the same MCPs (= southbridges) */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ 0, 0, NULL }
};

static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ 0, 0 }
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t	nfe_devclass;

DECLARE_DUMMY_MODULE(if_nfe);
MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, NULL, NULL);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, NULL, NULL);

/*
 * NOTE: NFE_WORDALIGN support is guesswork right now.
 */
static int
nfe_probe(device_t dev)
{
	const struct nfe_dev *n;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (n = nfe_devices; n->desc != NULL; ++n) {
		if (vid == n->vid && did == n->did) {
			struct nfe_softc *sc = device_get_softc(dev);

			switch (did) {
			case PCI_PRODUCT_NVIDIA_NFORCE_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE2_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN1:
				sc->sc_caps = NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_HW_CSUM |
					      NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
				sc->sc_caps = NFE_FIX_EADDR;
				/* FALL THROUGH */
			case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
				sc->sc_caps |= NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_CK804_LAN1:
			case PCI_PRODUCT_NVIDIA_CK804_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_HW_VLAN |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
				sc->sc_caps = NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_WORDALIGN;
				break;
			}

			device_set_desc(dev, n->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}
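
/*
 * NOTE: nfe_probe() records the per-chip capability flags in the
 * softc; nfe_attach() below depends on sc->sc_caps having been
 * initialized there.
 */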
static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t lowaddr;
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	lwkt_serialize_init(&sc->sc_jbuf_serializer);

	/*
	 * Initialize sysctl variables
	 */
	sc->sc_rx_ring_count = nfe_rx_ring_count;
	sc->sc_tx_ring_count = nfe_tx_ring_count;
	sc->sc_debug = nfe_debug;
	if (nfe_imtime < 0) {
		sc->sc_flags |= NFE_F_DYN_IM;
		sc->sc_imtime = -nfe_imtime;
	} else {
		sc->sc_imtime = nfe_imtime;
	}
	sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

	sc->sc_mem_rid = PCIR_BAR(0);

	if (sc->sc_caps & NFE_40BIT_ADDR)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V3;
	else if (sc->sc_caps & NFE_JUMBO_SUP)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V2;

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, irq;

		mem = pci_read_config(dev, sc->sc_mem_rid, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate io memory\n");
		return ENXIO;
	}
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	/* Allocate IRQ */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/* Disable WOL */
	NFE_WRITE(sc, NFE_WOL_CTL, 0);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_powerup(dev);

	nfe_get_macaddr(sc, eaddr);

	/*
	 * Allocate top level DMA tag
	 */
	if (sc->sc_caps & NFE_40BIT_ADDR)
		lowaddr = NFE_BUS_SPACE_MAXADDR;
	else
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	error = bus_dma_tag_create(NULL,	/* parent */
			1, 0,			/* alignment, boundary */
			lowaddr,		/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
			0,			/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			0,			/* flags */
			&sc->sc_dtag);
	if (error) {
		device_printf(dev, "could not allocate parent dma tag\n");
		goto fail;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
					     SYSCTL_STATIC_CHILDREN(_hw),
					     OID_AUTO,
					     device_get_nameunit(dev),
					     CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}
	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
			OID_AUTO, "imtimer", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, nfe_sysctl_imtime, "I",
			"Interrupt moderation time (usec).  "
			"0 to disable interrupt moderation.");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "rx_ring_count", CTLFLAG_RD, &sc->sc_rx_ring_count,
		       0, "RX ring count");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "tx_ring_count", CTLFLAG_RD, &sc->sc_tx_ring_count,
		       0, "TX ring count");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "debug", CTLFLAG_RW, &sc->sc_debug,
		       0, "control debugging printfs");

	error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
			      nfe_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = nfe_poll;
#endif
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_caps & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

#ifdef NFE_CSUM
	if (sc->sc_caps & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = NFE_CSUM_FEATURES;
	}
#else
	sc->sc_caps &= ~NFE_HW_CSUM;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	callout_init(&sc->sc_tick_ch);

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not setup intr\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = rman_get_cpuid(sc->sc_irq_res);
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return 0;
fail:
	nfe_detach(dev);
	return error;
}

static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		nfe_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sc_sysctl_ctx);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);

	return 0;
}

static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}
static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP)
		nfe_init(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	ASSERT_SERIALIZED(sc->arpcom.ac_if.if_serializer);

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY %s\n", "");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return val;
}

static void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

#ifdef NFE_DEBUG
	if (ntries == 1000)
		DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
#endif
}

#ifdef DEVICE_POLLING

static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case POLL_REGISTER:
		nfe_disable_intrs(sc);
		break;

	case POLL_DEREGISTER:
		nfe_enable_intrs(sc);
		break;

	case POLL_AND_CHECK_STATUS:
		/* fall through */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			nfe_rxeof(sc);
			nfe_txeof(sc, 1);
		}
		break;
	}
}

#endif

static void
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t r;

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	if (r == 0)
		return;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);
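
	/*
	 * Track the interrupt rate: sc_rate_acc counts productive
	 * interrupts during the current second; once per second it
	 * is folded into sc_rate_avg with a 3/4 decaying average,
	 * so the average rises immediately but decays slowly.
	 */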
	if (sc->sc_rate_second != time_second) {
		/*
		 * Calculate sc_rate_avg - interrupts per second.
		 */
		sc->sc_rate_second = time_second;
		if (sc->sc_rate_avg < sc->sc_rate_acc)
			sc->sc_rate_avg = sc->sc_rate_acc;
		else
			sc->sc_rate_avg = (sc->sc_rate_avg * 3 +
					   sc->sc_rate_acc) / 4;
		sc->sc_rate_acc = 0;
	} else if (sc->sc_rate_avg < sc->sc_rate_acc) {
		/*
		 * Don't wait for a tick to roll over if we are taking
		 * a lot of interrupts.
		 */
		sc->sc_rate_avg = sc->sc_rate_acc;
	}

	DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed %s\n", "");
	}

	if (ifp->if_flags & IFF_RUNNING) {
		int ret;
		int rate;

		/* check Rx ring */
		ret = nfe_rxeof(sc);

		/* check Tx ring */
		ret |= nfe_txeof(sc, 1);

		/* update the rate accumulator */
		if (ret)
			++sc->sc_rate_acc;

		if (sc->sc_flags & NFE_F_DYN_IM) {
			rate = 1000000 / sc->sc_imtime;
			if ((sc->sc_flags & NFE_F_IRQ_TIMER) == 0 &&
			    sc->sc_rate_avg > rate) {
				/*
				 * Use the hardware timer to reduce the
				 * interrupt rate if the discrete interrupt
				 * rate has exceeded our threshold.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_IMTIMER);
				sc->sc_flags |= NFE_F_IRQ_TIMER;
			} else if ((sc->sc_flags & NFE_F_IRQ_TIMER) &&
				   sc->sc_rate_avg <= rate) {
				/*
				 * Use discrete TX/RX interrupts if the rate
				 * has fallen below our threshold.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_NOIMTIMER);
				sc->sc_flags &= ~NFE_F_IRQ_TIMER;

				/*
				 * Recollect, mainly to avoid the possible
				 * race introduced by changing interrupt
				 * masks.
				 */
				nfe_rxeof(sc);
				nfe_txeof(sc, 1);
			}
		}
	}
}

static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask, jumbo_cap;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		if ((sc->sc_caps & NFE_JUMBO_SUP) && sc->rxq.jbuf != NULL)
			jumbo_cap = 1;
		else
			jumbo_cap = 0;

		if ((jumbo_cap && ifr->ifr_mtu > NFE_JUMBO_MTU) ||
		    (!jumbo_cap && ifr->ifr_mtu > ETHERMTU)) {
			return EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM;
		if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) {
			ifp->if_capenable ^= mask;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist = NFE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;

			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}
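
/*
 * RX reaping.  Received packets are accumulated on per-cpu mbuf
 * chains and handed to the network stack in one batch through
 * ether_input_dispatch(), which reduces per-packet overhead.
 */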
static int
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_rx_ring *ring = &sc->rxq;
	int reap;
	struct mbuf_chain chain[MAXCPU];

	reap = 0;
	ether_input_chain_init(chain);

	for (;;) {
		struct nfe_rx_data *data = &ring->data[ring->cur];
		struct mbuf *m;
		uint16_t flags;
		int len, error;

		if (sc->sc_caps & NFE_40BIT_ADDR) {
			struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		reap = 1;

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		m = data->m;

		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
		else
			error = nfe_newbuf_std(sc, ring, ring->cur, 0);
		if (error) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) &&
		    (flags & NFE_RX_CSUMOK)) {
			if (flags & NFE_RX_IP_CSUMOK_V2) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
							  CSUM_IP_VALID;
			}

			if (flags &
			    (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
							  CSUM_PSEUDO_HDR |
							  CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		ifp->if_ipackets++;
		ether_input_chain(ifp, m, NULL, chain);
skip:
		nfe_set_ready_rxdesc(sc, ring, ring->cur);
		sc->rxq.cur = (sc->rxq.cur + 1) % sc->sc_rx_ring_count;
	}

	if (reap)
		ether_input_dispatch(chain);

	return reap;
}
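
/*
 * Reclaim TX descriptors between ring->next and ring->cur whose
 * NFE_TX_VALID bit has been cleared by the hardware.  Returns
 * non-zero if any descriptor was reclaimed; with `start' set,
 * pending output is restarted once enough slots free up.
 */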
static int
nfe_txeof(struct nfe_softc *sc, int start)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_tx_ring *ring = &sc->txq;
	struct nfe_tx_data *data = NULL;

	while (ring->next != ring->cur) {
		uint16_t flags;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			flags = le16toh(ring->desc64[ring->next].flags);
		else
			flags = le16toh(ring->desc32[ring->next].flags);

		if (flags & NFE_TX_VALID)
			break;

		data = &ring->data[ring->next];

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				if_printf(ifp, "tx v1 error 0x%4b\n", flags,
					  NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				if_printf(ifp, "tx v2 error 0x%4b\n", flags,
					  NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		}

		if (data->m == NULL) {	/* should not get here */
			if_printf(ifp,
				  "last fragment bit w/o associated mbuf!\n");
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_unload(ring->data_tag, data->map);
		m_freem(data->m);
		data->m = NULL;
skip:
		ring->queued--;
		KKASSERT(ring->queued >= 0);
		ring->next = (ring->next + 1) % sc->sc_tx_ring_count;
	}

	if (sc->sc_tx_ring_count - ring->queued >=
	    sc->sc_tx_spare + NFE_NSEG_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	if (ring->queued == 0)
		ifp->if_timer = 0;

	if (start && !ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);

	if (data != NULL)
		return 1;
	else
		return 0;
}
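
/*
 * DMA-map an outgoing mbuf chain and queue it on the TX ring.  The
 * chain may be defragmented by bus_dmamap_load_mbuf_defrag(); on
 * error the mbuf is freed here and must not be touched by the
 * caller afterwards.
 */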
static int
nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
{
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	struct nfe_tx_data *data, *data_map;
	bus_dmamap_t map;
	struct nfe_desc64 *desc64 = NULL;
	struct nfe_desc32 *desc32 = NULL;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, j, maxsegs, nsegs;

	data = &ring->data[ring->cur];
	map = data->map;
	data_map = data;	/* Remember who owns the DMA map */

	maxsegs = (sc->sc_tx_ring_count - ring->queued) - NFE_NSEG_RSVD;
	if (maxsegs > NFE_MAX_SCATTER)
		maxsegs = NFE_MAX_SCATTER;
	KASSERT(maxsegs >= sc->sc_tx_spare,
		("not enough segments %d,%d\n", maxsegs, sc->sc_tx_spare));

	error = bus_dmamap_load_mbuf_defrag(ring->data_tag, map, &m0,
					    segs, maxsegs, &nsegs,
					    BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);

	error = 0;

	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = m0->m_pkthdr.ether_vlantag;

	if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= NFE_TX_TCP_CSUM;
	}

	/*
	 * XXX urm. somebody is unaware of how hardware works.  You
	 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
	 * the ring until the entire chain is actually *VALID*.  Otherwise
	 * the hardware may encounter a partially initialized chain that
	 * is marked as being ready to go when it in fact is not ready to
	 * go.
	 */

	for (i = 0; i < nsegs; i++) {
		j = (ring->cur + i) % sc->sc_tx_ring_count;
		data = &ring->data[j];

		if (sc->sc_caps & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->physaddr[0] =
			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
			desc64->physaddr[1] =
			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->vtag = htole32(vtag);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &ring->desc32[j];
			desc32->physaddr = htole32(segs[i].ds_addr);
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
		vtag = 0;

		ring->queued++;
		KKASSERT(ring->queued <= sc->sc_tx_ring_count);
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
	} else {
		if (sc->sc_caps & NFE_JUMBO_SUP)
			flags = NFE_TX_LASTFRAG_V2;
		else
			flags = NFE_TX_LASTFRAG_V1;
		desc32->flags |= htole16(flags);
	}

	/*
	 * Set NFE_TX_VALID backwards so the hardware doesn't see the
	 * whole mess until the first descriptor in the map is flagged.
	 */
	for (i = nsegs - 1; i >= 0; --i) {
		j = (ring->cur + i) % sc->sc_tx_ring_count;
		if (sc->sc_caps & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->flags |= htole16(NFE_TX_VALID);
		} else {
			desc32 = &ring->desc32[j];
			desc32->flags |= htole16(NFE_TX_VALID);
		}
	}
	ring->cur = (ring->cur + nsegs) % sc->sc_tx_ring_count;

	/* Exchange DMA map */
	data_map->map = data->map;
	data->map = map;
	data->m = m0;
back:
	if (error)
		m_freem(m0);
	return error;
}
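
/*
 * Dequeue and encapsulate packets until the TX ring fills up.
 * Before asserting IFF_OACTIVE the ring is reclaimed once with
 * nfe_txeof(); the interface stalls only if that still does not
 * free enough descriptors.
 */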
static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_tx_ring *ring = &sc->txq;
	int count = 0, oactive = 0;
	struct mbuf *m0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	for (;;) {
		int error;

		if (sc->sc_tx_ring_count - ring->queued <
		    sc->sc_tx_spare + NFE_NSEG_RSVD) {
			if (oactive) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}

			nfe_txeof(sc, 0);
			oactive = 1;
			continue;
		}

		m0 = ifq_dequeue(&ifp->if_snd, NULL);
		if (m0 == NULL)
			break;

		ETHER_BPF_MTAP(ifp, m0);

		error = nfe_encap(sc, ring, m0);
		if (error) {
			ifp->if_oerrors++;
			if (error == EFBIG) {
				if (oactive) {
					ifp->if_flags |= IFF_OACTIVE;
					break;
				}
				nfe_txeof(sc, 0);
				oactive = 1;
			}
			continue;
		} else {
			oactive = 0;
		}
		++count;

		/*
		 * NOTE:
		 * `m0' may be freed in nfe_encap(), so
		 * it should not be touched any more.
		 */
	}

	if (count == 0)	/* nothing sent */
		return;

	/* Kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (ifp->if_flags & IFF_RUNNING) {
		if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
		nfe_txeof(sc, 1);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");

	nfe_init(ifp->if_softc);

	ifp->if_oerrors++;
}
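
/*
 * (Re)initialize the hardware: stop the chip, pick jumbo vs.
 * standard receive buffers based on the current MTU, reinitialize
 * both rings, program the MAC and finally start RX/TX and the
 * periodic MII tick.
 */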
static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t tmp;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	nfe_stop(sc);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_mac_reset(sc);

	/*
	 * NOTE:
	 * Switching between jumbo frames and normal frames should
	 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring().
	 */
	if (ifp->if_mtu > ETHERMTU) {
		sc->sc_flags |= NFE_F_USE_JUMBO;
		sc->rxq.bufsz = NFE_JBYTES;
		sc->sc_tx_spare = NFE_NSEG_SPARE_JUMBO;
		if (bootverbose)
			if_printf(ifp, "use jumbo frames\n");
	} else {
		sc->sc_flags &= ~NFE_F_USE_JUMBO;
		sc->rxq.bufsz = MCLBYTES;
		sc->sc_tx_spare = NFE_NSEG_SPARE;
		if (bootverbose)
			if_printf(ifp, "use non-jumbo frames\n");
	}

	error = nfe_init_tx_ring(sc, &sc->txq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	NFE_WRITE(sc, NFE_TX_POLL, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2 | sc->rxtxctl_desc;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;

	/*
	 * Although the adapter is capable of stripping VLAN tags from
	 * received frames (NFE_RXTX_VTAG_STRIP), we do not enable this
	 * functionality on purpose.  This will be done in software by
	 * our network stack.
	 */
	if (sc->sc_caps & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (sc->sc_caps & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
			  NFE_ADDR_HI(sc->rxq.physaddr));
	}
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->rxq.physaddr));

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		NFE_WRITE(sc, NFE_TX_RING_ADDR_HI,
			  NFE_ADDR_HI(sc->txq.physaddr));
	}
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));

	NFE_WRITE(sc, NFE_RING_SIZE,
		  (sc->sc_rx_ring_count - 1) << 16 |
		  (sc->sc_tx_ring_count - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef DEVICE_POLLING
	if ((ifp->if_flags & IFF_POLLING))
		nfe_disable_intrs(sc);
	else
#endif
	nfe_enable_intrs(sc);

	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * If we had stuff in the TX ring before, it's all cleaned out
	 * now, so we are not going to get an interrupt; jump-start any
	 * pending output.
	 */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
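
/*
 * Stop the interface: abort TX and RX and poll the status
 * registers (up to ~5 seconds) for the BUSY bits to clear before
 * resetting the chip and quiescing both rings.
 */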
static void
nfe_stop(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_flags &= ~NFE_F_IRQ_TIMER;

#define WAITMAX	50000

	/*
	 * Abort Tx
	 */
	NFE_WRITE(sc, NFE_TX_CTL, 0);
	for (i = 0; i < WAITMAX; ++i) {
		DELAY(100);
		if ((NFE_READ(sc, NFE_TX_STATUS) & NFE_TX_STATUS_BUSY) == 0)
			break;
	}
	if (i == WAITMAX)
		if_printf(ifp, "can't stop TX\n");
	DELAY(100);

	/*
	 * Disable Rx
	 */
	NFE_WRITE(sc, NFE_RX_CTL, 0);
	for (i = 0; i < WAITMAX; ++i) {
		DELAY(100);
		if ((NFE_READ(sc, NFE_RX_STATUS) & NFE_RX_STATUS_BUSY) == 0)
			break;
	}
	if (i == WAITMAX)
		if_printf(ifp, "can't stop RX\n");
	DELAY(100);

#undef WAITMAX

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* Reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i, j, error, descsize;
	bus_dmamem_t dmem;
	void **desc;

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc = (void *)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void *)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->bufsz = MCLBYTES;
	ring->cur = ring->next = 0;

	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    sc->sc_rx_ring_count * descsize,
				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX desc ring\n");
		return error;
	}
	ring->tag = dmem.dmem_tag;
	ring->map = dmem.dmem_map;
	*desc = dmem.dmem_addr;
	ring->physaddr = dmem.dmem_busaddr;

	if (sc->sc_caps & NFE_JUMBO_SUP) {
		ring->jbuf =
		    kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT(sc),
			    M_DEVBUF, M_WAITOK | M_ZERO);

		error = nfe_jpool_alloc(sc, ring);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate jumbo frames\n");
			kfree(ring->jbuf, M_DEVBUF);
			ring->jbuf = NULL;
			/* Allow jumbo frame allocation to fail */
		}
	}

	ring->data = kmalloc(sizeof(struct nfe_rx_data) * sc->sc_rx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
				   &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX mbuf DMA tag\n");
		return error;
	}

	/* Create a spare RX mbuf DMA map */
	error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK,
				  &ring->data_tmpmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create spare RX mbuf DMA map\n");
		bus_dma_tag_destroy(ring->data_tag);
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth RX mbuf DMA map\n",
				  i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}
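
/*
 * NOTE: jumbo receive buffers live in the shared jpool and are
 * mapped once at pool-allocation time, so nfe_reset_rx_ring() only
 * needs a per-buffer bus_dmamap_unload() for standard mbufs.
 */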
static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			if ((sc->sc_flags & NFE_F_USE_JUMBO) == 0)
				bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	ring->cur = ring->next = 0;
}

static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}
		nfe_set_ready_rxdesc(sc, ring, i);
	}
	return 0;
}

static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < sc->sc_rx_ring_count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->jbuf != NULL)
		kfree(ring->jbuf, M_DEVBUF);
	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf *jbuf;

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL) {
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
		jbuf->inuse = 1;
	} else {
		if_printf(ifp, "no free jumbo buffer\n");
	}

	lwkt_serialize_exit(&sc->sc_jbuf_serializer);

	return jbuf;
}
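
/*
 * nfe_jfree()/nfe_jref() are the free/ref callbacks for mbuf
 * external storage backed by the jumbo pool.  `inuse' is the
 * reference count; a buffer returns to jfreelist only when its
 * last reference is dropped.
 */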
static void
nfe_jfree(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_softc *sc = jbuf->sc;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: free wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);
	atomic_subtract_int(&jbuf->inuse, 1);
	if (jbuf->inuse == 0)
		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
	lwkt_serialize_exit(&sc->sc_jbuf_serializer);
}

static void
nfe_jref(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: ref wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	atomic_add_int(&jbuf->inuse, 1);
}
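
/*
 * The jumbo pool is one physically contiguous DMA allocation that
 * is sliced into NFE_JBYTES-sized buffers, so individual buffers
 * need no per-buffer DMA mapping on the RX path.
 */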
static int
nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_jbuf *jbuf;
	bus_dmamem_t dmem;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    NFE_JPOOL_SIZE(sc),
				    BUS_DMA_WAITOK, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create jumbo buffer\n");
		return error;
	}
	ring->jtag = dmem.dmem_tag;
	ring->jmap = dmem.dmem_map;
	ring->jpool = dmem.dmem_addr;
	physaddr = dmem.dmem_busaddr;

	/* ... and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	for (i = 0; i < NFE_JPOOL_COUNT(sc); i++) {
		jbuf = &ring->jbuf[i];

		jbuf->sc = sc;
		jbuf->ring = ring;
		jbuf->inuse = 0;
		jbuf->slot = i;
		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;
}

static void
nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->jtag != NULL) {
		bus_dmamap_unload(ring->jtag, ring->jmap);
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
	}
}

static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, j, error, descsize;
	bus_dmamem_t dmem;
	void **desc;

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc = (void *)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void *)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    sc->sc_tx_ring_count * descsize,
				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX desc ring\n");
		return error;
	}
	ring->tag = dmem.dmem_tag;
	ring->map = dmem.dmem_map;
	*desc = dmem.dmem_addr;
	ring->physaddr = dmem.dmem_busaddr;

	ring->data = kmalloc(sizeof(struct nfe_tx_data) * sc->sc_tx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			NULL, NULL,
			NFE_JBYTES, NFE_MAX_SCATTER, MCLBYTES,
			BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
			&ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX buf DMA tag\n");
		return error;
	}

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag,
					  BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth TX buf DMA map\n",
				  i);
			goto fail;
		}
	}

	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		struct nfe_tx_data *data = &ring->data[i];

		if (sc->sc_caps & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		if (data->m != NULL) {
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

static int
nfe_init_tx_ring(struct nfe_softc *sc __unused,
		 struct nfe_tx_ring *ring __unused)
{
	return 0;
}

static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < sc->sc_tx_ring_count; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}

		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
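
/*
 * Program the hardware RX filter.  The chip matches destination
 * addresses against an (addr, mask) pair, so the loop below keeps
 * the bits common to all subscribed multicast addresses and marks
 * the differing bits as don't-care; this accepts all subscribed
 * groups, possibly along with some extra ones.
 */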
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
		  addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
		  mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
		  mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t lo, hi;

	lo = NFE_READ(sc, NFE_MACADDR_LO);
	hi = NFE_READ(sc, NFE_MACADDR_HI);
	if (sc->sc_caps & NFE_FIX_EADDR) {
		addr[0] = (lo >> 8) & 0xff;
		addr[1] = (lo & 0xff);

		addr[2] = (hi >> 24) & 0xff;
		addr[3] = (hi >> 16) & 0xff;
		addr[4] = (hi >>  8) & 0xff;
		addr[5] = (hi & 0xff);
	} else {
		addr[0] = (hi & 0xff);
		addr[1] = (hi >>  8) & 0xff;
		addr[2] = (hi >> 16) & 0xff;
		addr[3] = (hi >> 24) & 0xff;

		addr[4] = (lo & 0xff);
		addr[5] = (lo >>  8) & 0xff;
	}
}

static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
		  addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int nsegs, error;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/*
	 * Aligning the payload improves access times.
	 */
	if (sc->sc_caps & NFE_WORDALIGN)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(ring->data_tag, ring->data_tmpmap,
					     m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (wait) {
			if_printf(&sc->arpcom.ac_if,
				  "could not map RX mbuf: %d\n", error);
		}
		return error;
	}

	if (data->m != NULL) {
		/* Sync and unload the originally mapped mbuf */
		bus_dmamap_sync(ring->data_tag, data->map,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->data_tag, data->map);
	}

	/*
	 * Swap this slot's DMA map with the spare map: the new mbuf is
	 * already loaded into the spare, so the old map becomes the spare
	 * for the next replenish.
	 */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);
	return 0;
}

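/*
 * Jumbo frames do not fit in a standard cluster, so the RX mbuf is set
 * up as external storage (M_EXT) pointing at one of the 9KB slices
 * carved out of the pool in nfe_jpool_alloc().  nfe_jref()/nfe_jfree()
 * keep the slice's reference count in step with the mbuf's lifetime
 * and return the slice to the free list when the last reference drops.
 */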
static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if,
			  "jumbo allocation failed -- packet dropped!\n");
		return ENOBUFS;
	}

	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/*
	 * Aligning the payload improves access times.
	 */
	if (sc->sc_caps & NFE_WORDALIGN)
		m_adj(m, ETHER_ALIGN);

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);
	return 0;
}

static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		     bus_addr_t physaddr)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->physaddr[0] = htole32(NFE_ADDR_HI(physaddr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(physaddr));
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
}

static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
}

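/*
 * Sysctl handler for the interrupt moderation time.  The sign selects
 * the mode: a positive value requests a fixed moderation interval, a
 * negative value enables dynamic moderation (NFE_F_DYN_IM) with the
 * absolute value as the base interval, and 0 disables moderation.
 * Illustrative usage (hypothetical node name; the value is assumed to
 * be in microseconds, cf. the 128uS note in nfe_enable_intrs()):
 *
 *	sysctl dev.nfe.0.imtime=128	# fixed 128us moderation
 *	sysctl dev.nfe.0.imtime=-128	# dynamic moderation, 128us base
 *	sysctl dev.nfe.0.imtime=0	# no moderation
 */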
static int
nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS)
{
	struct nfe_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t flags;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	flags = sc->sc_flags & ~NFE_F_DYN_IM;
	v = sc->sc_imtime;
	if (sc->sc_flags & NFE_F_DYN_IM)
		v = -v;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < 0) {
		flags |= NFE_F_DYN_IM;
		v = -v;
	}

	if (v != sc->sc_imtime || (flags ^ sc->sc_flags)) {
		if (NFE_IMTIME(v) == 0)
			v = 0;
		sc->sc_imtime = v;
		sc->sc_flags = flags;
		sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

		if ((ifp->if_flags & (IFF_POLLING | IFF_RUNNING))
		    == IFF_RUNNING) {
			nfe_enable_intrs(sc);
		}
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static void
nfe_powerup(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t pwr_state;
	uint16_t did;

	/*
	 * Bring MAC and PHY out of low power state
	 */
	pwr_state = NFE_READ(sc, NFE_PWR_STATE2) & ~NFE_PWRUP_MASK;

	did = pci_get_device(dev);
	if ((did == PCI_PRODUCT_NVIDIA_MCP51_LAN1 ||
	     did == PCI_PRODUCT_NVIDIA_MCP51_LAN2) &&
	    pci_get_revid(dev) >= 0xa3)
		pwr_state |= NFE_PWRUP_REV_A3;

	NFE_WRITE(sc, NFE_PWR_STATE2, pwr_state);
}

static void
nfe_mac_reset(struct nfe_softc *sc)
{
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	uint32_t macaddr_hi, macaddr_lo, tx_poll;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);

	/*
	 * Save several registers for later restoration; the MAC reset
	 * would otherwise clobber them.
	 */
	macaddr_hi = NFE_READ(sc, NFE_MACADDR_HI);
	macaddr_lo = NFE_READ(sc, NFE_MACADDR_LO);
	tx_poll = NFE_READ(sc, NFE_TX_POLL);

	NFE_WRITE(sc, NFE_MAC_RESET, NFE_RESET_ASSERT);
	DELAY(100);

	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);

	/* Restore the saved registers */
	NFE_WRITE(sc, NFE_MACADDR_HI, macaddr_hi);
	NFE_WRITE(sc, NFE_MACADDR_LO, macaddr_lo);
	NFE_WRITE(sc, NFE_TX_POLL, tx_poll);

	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
}

static void
nfe_enable_intrs(struct nfe_softc *sc)
{
	/*
	 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
	 * It is unclear how wide the timer is.  Base programming does
	 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE, so
	 * we don't get any interrupt moderation.  TX moderation is
	 * possible by using the timer interrupt instead of TX_DONE.
	 *
	 * It is unclear whether there are other bits that can be
	 * set to make the NFE device actually do interrupt moderation
	 * on the RX side.
	 *
	 * For now set a 128uS interval as a placeholder, but don't use
	 * the timer.
	 */
	if (sc->sc_imtime == 0)
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME_DEFAULT);
	else
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME(sc->sc_imtime));

	/* Enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, sc->sc_irq_enable);

	if (sc->sc_irq_enable & NFE_IRQ_TIMER)
		sc->sc_flags |= NFE_F_IRQ_TIMER;
	else
		sc->sc_flags &= ~NFE_F_IRQ_TIMER;
}

#ifdef DEVICE_POLLING
static void
nfe_disable_intrs(struct nfe_softc *sc)
{
	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	sc->sc_flags &= ~NFE_F_IRQ_TIMER;
}
#endif