1 /* $OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $ */ 2 3 /* 4 * Copyright (c) 2006 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Sepherosa Ziehau <sepherosa@gmail.com> and 8 * Matthew Dillon <dillon@apollo.backplane.com> 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in 18 * the documentation and/or other materials provided with the 19 * distribution. 20 * 3. Neither the name of The DragonFly Project nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific, prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 */ 37 38 /* 39 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr> 40 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org> 41 * 42 * Permission to use, copy, modify, and distribute this software for any 43 * purpose with or without fee is hereby granted, provided that the above 44 * copyright notice and this permission notice appear in all copies. 45 * 46 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 47 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 48 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 49 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 50 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 51 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 52 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 53 */ 54 55 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ 56 57 #include "opt_ifpoll.h" 58 59 #include <sys/param.h> 60 #include <sys/endian.h> 61 #include <sys/kernel.h> 62 #include <sys/bus.h> 63 #include <sys/interrupt.h> 64 #include <sys/proc.h> 65 #include <sys/rman.h> 66 #include <sys/serialize.h> 67 #include <sys/socket.h> 68 #include <sys/sockio.h> 69 #include <sys/sysctl.h> 70 71 #include <net/ethernet.h> 72 #include <net/if.h> 73 #include <net/bpf.h> 74 #include <net/if_arp.h> 75 #include <net/if_dl.h> 76 #include <net/if_media.h> 77 #include <net/if_poll.h> 78 #include <net/ifq_var.h> 79 #include <net/if_types.h> 80 #include <net/if_var.h> 81 #include <net/vlan/if_vlan_var.h> 82 #include <net/vlan/if_vlan_ether.h> 83 84 #include <bus/pci/pcireg.h> 85 #include <bus/pci/pcivar.h> 86 #include "pcidevs.h" 87 88 #include <dev/netif/mii_layer/mii.h> 89 #include <dev/netif/mii_layer/miivar.h> 90 91 #include "miibus_if.h" 92 93 #include <dev/netif/nfe/if_nfereg.h> 94 #include <dev/netif/nfe/if_nfevar.h> 95 96 #define NFE_CSUM 97 #define NFE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 98 99 static int nfe_probe(device_t); 100 static int nfe_attach(device_t); 101 static int nfe_detach(device_t); 102 static void nfe_shutdown(device_t); 103 static int nfe_resume(device_t); 104 static int nfe_suspend(device_t); 105 106 static int nfe_miibus_readreg(device_t, int, int); 107 static void nfe_miibus_writereg(device_t, int, int, int); 108 static void nfe_miibus_statchg(device_t); 109 110 #ifdef IFPOLL_ENABLE 111 static void nfe_npoll(struct ifnet *, struct ifpoll_info *); 112 static void nfe_npoll_compat(struct ifnet *, void *, int); 113 static void nfe_disable_intrs(struct nfe_softc *); 114 #endif 115 static void nfe_intr(void *); 116 static int nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 117 static int nfe_rxeof(struct nfe_softc *); 118 static int nfe_txeof(struct nfe_softc *, int); 119 static int nfe_encap(struct nfe_softc *, struct nfe_tx_ring *, 120 struct mbuf *); 121 static void nfe_start(struct ifnet *, struct ifaltq_subque *); 122 static void nfe_watchdog(struct ifnet *); 123 static void nfe_init(void *); 124 static void nfe_stop(struct nfe_softc *); 125 static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *); 126 static void nfe_jfree(void *); 127 static void nfe_jref(void *); 128 static int nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *); 129 static void nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *); 130 static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 131 static void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 132 static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 133 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 134 static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 135 static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 136 static int nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 137 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 138 static int nfe_ifmedia_upd(struct ifnet *); 139 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *); 140 static void nfe_setmulti(struct nfe_softc *); 141 static void nfe_get_macaddr(struct nfe_softc *, uint8_t *); 142 static void nfe_set_macaddr(struct nfe_softc *, const uint8_t *); 143 static void nfe_powerup(device_t); 144 static void nfe_mac_reset(struct nfe_softc *); 145 static void nfe_tick(void *); 146 static void nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *, 147 int, bus_addr_t); 148 static void nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *, 149 int); 150 static int nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int, 151 int); 152 static int nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int, 153 int); 154 static void nfe_enable_intrs(struct nfe_softc *); 155 156 static int nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS); 157 158 #define NFE_DEBUG 159 #ifdef NFE_DEBUG 160 161 static int nfe_debug = 0; 162 static int nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT; 163 static int nfe_tx_ring_count = NFE_TX_RING_DEF_COUNT; 164 /* 165 * hw timer simulated interrupt moderation @4000Hz. Negative values 166 * disable the timer when the discrete interrupt rate falls below 167 * the moderation rate. 168 * 169 * XXX 8000Hz might be better but if the interrupt is shared it can 170 * blow out the cpu. 171 */ 172 static int nfe_imtime = -250; /* uS */ 173 174 TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count); 175 TUNABLE_INT("hw.nfe.tx_ring_count", &nfe_tx_ring_count); 176 TUNABLE_INT("hw.nfe.imtimer", &nfe_imtime); 177 TUNABLE_INT("hw.nfe.debug", &nfe_debug); 178 179 #define DPRINTF(sc, fmt, ...) do { \ 180 if ((sc)->sc_debug) { \ 181 if_printf(&(sc)->arpcom.ac_if, \ 182 fmt, __VA_ARGS__); \ 183 } \ 184 } while (0) 185 186 #define DPRINTFN(sc, lv, fmt, ...) do { \ 187 if ((sc)->sc_debug >= (lv)) { \ 188 if_printf(&(sc)->arpcom.ac_if, \ 189 fmt, __VA_ARGS__); \ 190 } \ 191 } while (0) 192 193 #else /* !NFE_DEBUG */ 194 195 #define DPRINTF(sc, fmt, ...) 196 #define DPRINTFN(sc, lv, fmt, ...) 197 198 #endif /* NFE_DEBUG */ 199 200 static const struct nfe_dev { 201 uint16_t vid; 202 uint16_t did; 203 const char *desc; 204 } nfe_devices[] = { 205 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN, 206 "NVIDIA nForce Fast Ethernet" }, 207 208 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN, 209 "NVIDIA nForce2 Fast Ethernet" }, 210 211 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1, 212 "NVIDIA nForce3 Gigabit Ethernet" }, 213 214 /* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb 215 chipset, and possibly also the 400R; it might be both nForce2- and 216 nForce3-based boards can use the same MCPs (= southbridges) */ 217 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2, 218 "NVIDIA nForce3 Gigabit Ethernet" }, 219 220 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3, 221 "NVIDIA nForce3 Gigabit Ethernet" }, 222 223 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4, 224 "NVIDIA nForce3 Gigabit Ethernet" }, 225 226 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5, 227 "NVIDIA nForce3 Gigabit Ethernet" }, 228 229 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1, 230 "NVIDIA CK804 Gigabit Ethernet" }, 231 232 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2, 233 "NVIDIA CK804 Gigabit Ethernet" }, 234 235 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1, 236 "NVIDIA MCP04 Gigabit Ethernet" }, 237 238 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2, 239 "NVIDIA MCP04 Gigabit Ethernet" }, 240 241 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1, 242 "NVIDIA MCP51 Gigabit Ethernet" }, 243 244 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2, 245 "NVIDIA MCP51 Gigabit Ethernet" }, 246 247 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1, 248 "NVIDIA MCP55 Gigabit Ethernet" }, 249 250 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2, 251 "NVIDIA MCP55 Gigabit Ethernet" }, 252 253 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1, 254 "NVIDIA MCP61 Gigabit Ethernet" }, 255 256 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2, 257 "NVIDIA MCP61 Gigabit Ethernet" }, 258 259 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3, 260 "NVIDIA MCP61 Gigabit Ethernet" }, 261 262 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4, 263 "NVIDIA MCP61 Gigabit Ethernet" }, 264 265 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1, 266 "NVIDIA MCP65 Gigabit Ethernet" }, 267 268 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2, 269 "NVIDIA MCP65 Gigabit Ethernet" }, 270 271 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3, 272 "NVIDIA MCP65 Gigabit Ethernet" }, 273 274 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4, 275 "NVIDIA MCP65 Gigabit Ethernet" }, 276 277 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1, 278 "NVIDIA MCP67 Gigabit Ethernet" }, 279 280 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2, 281 "NVIDIA MCP67 Gigabit Ethernet" }, 282 283 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3, 284 "NVIDIA MCP67 Gigabit Ethernet" }, 285 286 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4, 287 "NVIDIA MCP67 Gigabit Ethernet" }, 288 289 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1, 290 "NVIDIA MCP73 Gigabit Ethernet" }, 291 292 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2, 293 "NVIDIA MCP73 Gigabit Ethernet" }, 294 295 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3, 296 "NVIDIA MCP73 Gigabit Ethernet" }, 297 298 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4, 299 "NVIDIA MCP73 Gigabit Ethernet" }, 300 301 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1, 302 "NVIDIA MCP77 Gigabit Ethernet" }, 303 304 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2, 305 "NVIDIA MCP77 Gigabit Ethernet" }, 306 307 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3, 308 "NVIDIA MCP77 Gigabit Ethernet" }, 309 310 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4, 311 "NVIDIA MCP77 Gigabit Ethernet" }, 312 313 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1, 314 "NVIDIA MCP79 Gigabit Ethernet" }, 315 316 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2, 317 "NVIDIA MCP79 Gigabit Ethernet" }, 318 319 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3, 320 "NVIDIA MCP79 Gigabit Ethernet" }, 321 322 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4, 323 "NVIDIA MCP79 Gigabit Ethernet" }, 324 325 { 0, 0, NULL } 326 }; 327 328 static device_method_t nfe_methods[] = { 329 /* Device interface */ 330 DEVMETHOD(device_probe, nfe_probe), 331 DEVMETHOD(device_attach, nfe_attach), 332 DEVMETHOD(device_detach, nfe_detach), 333 DEVMETHOD(device_suspend, nfe_suspend), 334 DEVMETHOD(device_resume, nfe_resume), 335 DEVMETHOD(device_shutdown, nfe_shutdown), 336 337 /* Bus interface */ 338 DEVMETHOD(bus_print_child, bus_generic_print_child), 339 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 340 341 /* MII interface */ 342 DEVMETHOD(miibus_readreg, nfe_miibus_readreg), 343 DEVMETHOD(miibus_writereg, nfe_miibus_writereg), 344 DEVMETHOD(miibus_statchg, nfe_miibus_statchg), 345 346 DEVMETHOD_END 347 }; 348 349 static driver_t nfe_driver = { 350 "nfe", 351 nfe_methods, 352 sizeof(struct nfe_softc) 353 }; 354 355 static devclass_t nfe_devclass; 356 357 DECLARE_DUMMY_MODULE(if_nfe); 358 MODULE_DEPEND(if_nfe, miibus, 1, 1, 1); 359 DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, NULL, NULL); 360 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, NULL, NULL); 361 362 /* 363 * NOTE: NFE_WORDALIGN support is guesswork right now. 364 */ 365 static int 366 nfe_probe(device_t dev) 367 { 368 const struct nfe_dev *n; 369 uint16_t vid, did; 370 371 vid = pci_get_vendor(dev); 372 did = pci_get_device(dev); 373 for (n = nfe_devices; n->desc != NULL; ++n) { 374 if (vid == n->vid && did == n->did) { 375 struct nfe_softc *sc = device_get_softc(dev); 376 377 switch (did) { 378 case PCI_PRODUCT_NVIDIA_NFORCE_LAN: 379 case PCI_PRODUCT_NVIDIA_NFORCE2_LAN: 380 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN1: 381 sc->sc_caps = NFE_NO_PWRCTL | 382 NFE_FIX_EADDR; 383 break; 384 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: 385 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: 386 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: 387 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: 388 sc->sc_caps = NFE_JUMBO_SUP | 389 NFE_HW_CSUM | 390 NFE_NO_PWRCTL | 391 NFE_FIX_EADDR; 392 break; 393 case PCI_PRODUCT_NVIDIA_MCP51_LAN1: 394 case PCI_PRODUCT_NVIDIA_MCP51_LAN2: 395 sc->sc_caps = NFE_FIX_EADDR; 396 /* FALL THROUGH */ 397 case PCI_PRODUCT_NVIDIA_MCP61_LAN1: 398 case PCI_PRODUCT_NVIDIA_MCP61_LAN2: 399 case PCI_PRODUCT_NVIDIA_MCP61_LAN3: 400 case PCI_PRODUCT_NVIDIA_MCP61_LAN4: 401 case PCI_PRODUCT_NVIDIA_MCP67_LAN1: 402 case PCI_PRODUCT_NVIDIA_MCP67_LAN2: 403 case PCI_PRODUCT_NVIDIA_MCP67_LAN3: 404 case PCI_PRODUCT_NVIDIA_MCP67_LAN4: 405 case PCI_PRODUCT_NVIDIA_MCP73_LAN1: 406 case PCI_PRODUCT_NVIDIA_MCP73_LAN2: 407 case PCI_PRODUCT_NVIDIA_MCP73_LAN3: 408 case PCI_PRODUCT_NVIDIA_MCP73_LAN4: 409 sc->sc_caps |= NFE_40BIT_ADDR; 410 break; 411 case PCI_PRODUCT_NVIDIA_CK804_LAN1: 412 case PCI_PRODUCT_NVIDIA_CK804_LAN2: 413 case PCI_PRODUCT_NVIDIA_MCP04_LAN1: 414 case PCI_PRODUCT_NVIDIA_MCP04_LAN2: 415 sc->sc_caps = NFE_JUMBO_SUP | 416 NFE_40BIT_ADDR | 417 NFE_HW_CSUM | 418 NFE_NO_PWRCTL | 419 NFE_FIX_EADDR; 420 break; 421 case PCI_PRODUCT_NVIDIA_MCP65_LAN1: 422 case PCI_PRODUCT_NVIDIA_MCP65_LAN2: 423 case PCI_PRODUCT_NVIDIA_MCP65_LAN3: 424 case PCI_PRODUCT_NVIDIA_MCP65_LAN4: 425 sc->sc_caps = NFE_JUMBO_SUP | 426 NFE_40BIT_ADDR; 427 break; 428 case PCI_PRODUCT_NVIDIA_MCP55_LAN1: 429 case PCI_PRODUCT_NVIDIA_MCP55_LAN2: 430 sc->sc_caps = NFE_JUMBO_SUP | 431 NFE_40BIT_ADDR | 432 NFE_HW_CSUM | 433 NFE_HW_VLAN | 434 NFE_FIX_EADDR; 435 break; 436 case PCI_PRODUCT_NVIDIA_MCP77_LAN1: 437 case PCI_PRODUCT_NVIDIA_MCP77_LAN2: 438 case PCI_PRODUCT_NVIDIA_MCP77_LAN3: 439 case PCI_PRODUCT_NVIDIA_MCP77_LAN4: 440 case PCI_PRODUCT_NVIDIA_MCP79_LAN1: 441 case PCI_PRODUCT_NVIDIA_MCP79_LAN2: 442 case PCI_PRODUCT_NVIDIA_MCP79_LAN3: 443 case PCI_PRODUCT_NVIDIA_MCP79_LAN4: 444 sc->sc_caps = NFE_40BIT_ADDR | 445 NFE_HW_CSUM | 446 NFE_WORDALIGN; 447 break; 448 } 449 450 device_set_desc(dev, n->desc); 451 device_set_async_attach(dev, TRUE); 452 return 0; 453 } 454 } 455 return ENXIO; 456 } 457 458 static int 459 nfe_attach(device_t dev) 460 { 461 struct nfe_softc *sc = device_get_softc(dev); 462 struct ifnet *ifp = &sc->arpcom.ac_if; 463 struct sysctl_ctx_list *ctx; 464 struct sysctl_oid *tree; 465 uint8_t eaddr[ETHER_ADDR_LEN]; 466 bus_addr_t lowaddr; 467 int error; 468 469 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 470 lwkt_serialize_init(&sc->sc_jbuf_serializer); 471 472 /* 473 * Initialize sysctl variables 474 */ 475 sc->sc_rx_ring_count = nfe_rx_ring_count; 476 sc->sc_tx_ring_count = nfe_tx_ring_count; 477 sc->sc_debug = nfe_debug; 478 if (nfe_imtime < 0) { 479 sc->sc_flags |= NFE_F_DYN_IM; 480 sc->sc_imtime = -nfe_imtime; 481 } else { 482 sc->sc_imtime = nfe_imtime; 483 } 484 sc->sc_irq_enable = NFE_IRQ_ENABLE(sc); 485 486 sc->sc_mem_rid = PCIR_BAR(0); 487 488 if (sc->sc_caps & NFE_40BIT_ADDR) 489 sc->rxtxctl_desc = NFE_RXTX_DESC_V3; 490 else if (sc->sc_caps & NFE_JUMBO_SUP) 491 sc->rxtxctl_desc = NFE_RXTX_DESC_V2; 492 493 #ifndef BURN_BRIDGES 494 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 495 uint32_t mem, irq; 496 497 mem = pci_read_config(dev, sc->sc_mem_rid, 4); 498 irq = pci_read_config(dev, PCIR_INTLINE, 4); 499 500 device_printf(dev, "chip is in D%d power mode " 501 "-- setting to D0\n", pci_get_powerstate(dev)); 502 503 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 504 505 pci_write_config(dev, sc->sc_mem_rid, mem, 4); 506 pci_write_config(dev, PCIR_INTLINE, irq, 4); 507 } 508 #endif /* !BURN_BRIDGE */ 509 510 /* Enable bus mastering */ 511 pci_enable_busmaster(dev); 512 513 /* Allocate IO memory */ 514 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 515 &sc->sc_mem_rid, RF_ACTIVE); 516 if (sc->sc_mem_res == NULL) { 517 device_printf(dev, "could not allocate io memory\n"); 518 return ENXIO; 519 } 520 sc->sc_memh = rman_get_bushandle(sc->sc_mem_res); 521 sc->sc_memt = rman_get_bustag(sc->sc_mem_res); 522 523 /* Allocate IRQ */ 524 sc->sc_irq_rid = 0; 525 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 526 &sc->sc_irq_rid, 527 RF_SHAREABLE | RF_ACTIVE); 528 if (sc->sc_irq_res == NULL) { 529 device_printf(dev, "could not allocate irq\n"); 530 error = ENXIO; 531 goto fail; 532 } 533 534 /* Disable WOL */ 535 NFE_WRITE(sc, NFE_WOL_CTL, 0); 536 537 if ((sc->sc_caps & NFE_NO_PWRCTL) == 0) 538 nfe_powerup(dev); 539 540 nfe_get_macaddr(sc, eaddr); 541 542 /* 543 * Allocate top level DMA tag 544 */ 545 if (sc->sc_caps & NFE_40BIT_ADDR) 546 lowaddr = NFE_BUS_SPACE_MAXADDR; 547 else 548 lowaddr = BUS_SPACE_MAXADDR_32BIT; 549 error = bus_dma_tag_create(NULL, /* parent */ 550 1, 0, /* alignment, boundary */ 551 lowaddr, /* lowaddr */ 552 BUS_SPACE_MAXADDR, /* highaddr */ 553 NULL, NULL, /* filter, filterarg */ 554 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 555 0, /* nsegments */ 556 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 557 0, /* flags */ 558 &sc->sc_dtag); 559 if (error) { 560 device_printf(dev, "could not allocate parent dma tag\n"); 561 goto fail; 562 } 563 564 /* 565 * Allocate Tx and Rx rings. 566 */ 567 error = nfe_alloc_tx_ring(sc, &sc->txq); 568 if (error) { 569 device_printf(dev, "could not allocate Tx ring\n"); 570 goto fail; 571 } 572 573 error = nfe_alloc_rx_ring(sc, &sc->rxq); 574 if (error) { 575 device_printf(dev, "could not allocate Rx ring\n"); 576 goto fail; 577 } 578 579 /* 580 * Create sysctl tree 581 */ 582 ctx = device_get_sysctl_ctx(dev); 583 tree = device_get_sysctl_tree(dev); 584 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 585 OID_AUTO, "imtimer", CTLTYPE_INT | CTLFLAG_RW, 586 sc, 0, nfe_sysctl_imtime, "I", 587 "Interrupt moderation time (usec). " 588 "0 to disable interrupt moderation."); 589 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 590 "rx_ring_count", CTLFLAG_RD, &sc->sc_rx_ring_count, 591 0, "RX ring count"); 592 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 593 "tx_ring_count", CTLFLAG_RD, &sc->sc_tx_ring_count, 594 0, "TX ring count"); 595 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 596 "debug", CTLFLAG_RW, &sc->sc_debug, 597 0, "control debugging printfs"); 598 599 error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd, 600 nfe_ifmedia_sts); 601 if (error) { 602 device_printf(dev, "MII without any phy\n"); 603 goto fail; 604 } 605 606 ifp->if_softc = sc; 607 ifp->if_mtu = ETHERMTU; 608 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 609 ifp->if_ioctl = nfe_ioctl; 610 ifp->if_start = nfe_start; 611 #ifdef IFPOLL_ENABLE 612 ifp->if_npoll = nfe_npoll; 613 #endif 614 ifp->if_watchdog = nfe_watchdog; 615 ifp->if_init = nfe_init; 616 ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count); 617 ifq_set_ready(&ifp->if_snd); 618 619 ifp->if_capabilities = IFCAP_VLAN_MTU; 620 621 if (sc->sc_caps & NFE_HW_VLAN) 622 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 623 624 #ifdef NFE_CSUM 625 if (sc->sc_caps & NFE_HW_CSUM) { 626 ifp->if_capabilities |= IFCAP_HWCSUM; 627 ifp->if_hwassist = NFE_CSUM_FEATURES; 628 } 629 #else 630 sc->sc_caps &= ~NFE_HW_CSUM; 631 #endif 632 ifp->if_capenable = ifp->if_capabilities; 633 634 callout_init(&sc->sc_tick_ch); 635 636 ether_ifattach(ifp, eaddr, NULL); 637 638 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->sc_irq_res)); 639 640 #ifdef IFPOLL_ENABLE 641 ifpoll_compat_setup(&sc->sc_npoll, ctx, (struct sysctl_oid *)tree, 642 device_get_unit(dev), ifp->if_serializer); 643 #endif 644 645 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc, 646 &sc->sc_ih, ifp->if_serializer); 647 if (error) { 648 device_printf(dev, "could not setup intr\n"); 649 ether_ifdetach(ifp); 650 goto fail; 651 } 652 653 return 0; 654 fail: 655 nfe_detach(dev); 656 return error; 657 } 658 659 static int 660 nfe_detach(device_t dev) 661 { 662 struct nfe_softc *sc = device_get_softc(dev); 663 664 if (device_is_attached(dev)) { 665 struct ifnet *ifp = &sc->arpcom.ac_if; 666 667 lwkt_serialize_enter(ifp->if_serializer); 668 nfe_stop(sc); 669 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih); 670 lwkt_serialize_exit(ifp->if_serializer); 671 672 ether_ifdetach(ifp); 673 } 674 675 if (sc->sc_miibus != NULL) 676 device_delete_child(dev, sc->sc_miibus); 677 bus_generic_detach(dev); 678 679 if (sc->sc_irq_res != NULL) { 680 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, 681 sc->sc_irq_res); 682 } 683 684 if (sc->sc_mem_res != NULL) { 685 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, 686 sc->sc_mem_res); 687 } 688 689 nfe_free_tx_ring(sc, &sc->txq); 690 nfe_free_rx_ring(sc, &sc->rxq); 691 if (sc->sc_dtag != NULL) 692 bus_dma_tag_destroy(sc->sc_dtag); 693 694 return 0; 695 } 696 697 static void 698 nfe_shutdown(device_t dev) 699 { 700 struct nfe_softc *sc = device_get_softc(dev); 701 struct ifnet *ifp = &sc->arpcom.ac_if; 702 703 lwkt_serialize_enter(ifp->if_serializer); 704 nfe_stop(sc); 705 lwkt_serialize_exit(ifp->if_serializer); 706 } 707 708 static int 709 nfe_suspend(device_t dev) 710 { 711 struct nfe_softc *sc = device_get_softc(dev); 712 struct ifnet *ifp = &sc->arpcom.ac_if; 713 714 lwkt_serialize_enter(ifp->if_serializer); 715 nfe_stop(sc); 716 lwkt_serialize_exit(ifp->if_serializer); 717 718 return 0; 719 } 720 721 static int 722 nfe_resume(device_t dev) 723 { 724 struct nfe_softc *sc = device_get_softc(dev); 725 struct ifnet *ifp = &sc->arpcom.ac_if; 726 727 lwkt_serialize_enter(ifp->if_serializer); 728 if (ifp->if_flags & IFF_UP) 729 nfe_init(sc); 730 lwkt_serialize_exit(ifp->if_serializer); 731 732 return 0; 733 } 734 735 static void 736 nfe_miibus_statchg(device_t dev) 737 { 738 struct nfe_softc *sc = device_get_softc(dev); 739 struct mii_data *mii = device_get_softc(sc->sc_miibus); 740 uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET; 741 742 ASSERT_SERIALIZED(sc->arpcom.ac_if.if_serializer); 743 744 phy = NFE_READ(sc, NFE_PHY_IFACE); 745 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); 746 747 seed = NFE_READ(sc, NFE_RNDSEED); 748 seed &= ~NFE_SEED_MASK; 749 750 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) { 751 phy |= NFE_PHY_HDX; /* half-duplex */ 752 misc |= NFE_MISC1_HDX; 753 } 754 755 switch (IFM_SUBTYPE(mii->mii_media_active)) { 756 case IFM_1000_T: /* full-duplex only */ 757 link |= NFE_MEDIA_1000T; 758 seed |= NFE_SEED_1000T; 759 phy |= NFE_PHY_1000T; 760 break; 761 case IFM_100_TX: 762 link |= NFE_MEDIA_100TX; 763 seed |= NFE_SEED_100TX; 764 phy |= NFE_PHY_100TX; 765 break; 766 case IFM_10_T: 767 link |= NFE_MEDIA_10T; 768 seed |= NFE_SEED_10T; 769 break; 770 } 771 772 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */ 773 774 NFE_WRITE(sc, NFE_PHY_IFACE, phy); 775 NFE_WRITE(sc, NFE_MISC1, misc); 776 NFE_WRITE(sc, NFE_LINKSPEED, link); 777 } 778 779 static int 780 nfe_miibus_readreg(device_t dev, int phy, int reg) 781 { 782 struct nfe_softc *sc = device_get_softc(dev); 783 uint32_t val; 784 int ntries; 785 786 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 787 788 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 789 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 790 DELAY(100); 791 } 792 793 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg); 794 795 for (ntries = 0; ntries < 1000; ntries++) { 796 DELAY(100); 797 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 798 break; 799 } 800 if (ntries == 1000) { 801 DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", ""); 802 return 0; 803 } 804 805 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { 806 DPRINTFN(sc, 2, "could not read PHY %s\n", ""); 807 return 0; 808 } 809 810 val = NFE_READ(sc, NFE_PHY_DATA); 811 if (val != 0xffffffff && val != 0) 812 sc->mii_phyaddr = phy; 813 814 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val); 815 816 return val; 817 } 818 819 static void 820 nfe_miibus_writereg(device_t dev, int phy, int reg, int val) 821 { 822 struct nfe_softc *sc = device_get_softc(dev); 823 uint32_t ctl; 824 int ntries; 825 826 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 827 828 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 829 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 830 DELAY(100); 831 } 832 833 NFE_WRITE(sc, NFE_PHY_DATA, val); 834 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg; 835 NFE_WRITE(sc, NFE_PHY_CTL, ctl); 836 837 for (ntries = 0; ntries < 1000; ntries++) { 838 DELAY(100); 839 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 840 break; 841 } 842 843 #ifdef NFE_DEBUG 844 if (ntries == 1000) 845 DPRINTFN(sc, 2, "could not write to PHY %s\n", ""); 846 #endif 847 } 848 849 #ifdef IFPOLL_ENABLE 850 851 static void 852 nfe_npoll_compat(struct ifnet *ifp, void *arg __unused, int count __unused) 853 { 854 struct nfe_softc *sc = ifp->if_softc; 855 856 ASSERT_SERIALIZED(ifp->if_serializer); 857 858 nfe_rxeof(sc); 859 nfe_txeof(sc, 1); 860 } 861 862 static void 863 nfe_disable_intrs(struct nfe_softc *sc) 864 { 865 /* Disable interrupts */ 866 NFE_WRITE(sc, NFE_IRQ_MASK, 0); 867 sc->sc_flags &= ~NFE_F_IRQ_TIMER; 868 sc->sc_npoll.ifpc_stcount = 0; 869 } 870 871 static void 872 nfe_npoll(struct ifnet *ifp, struct ifpoll_info *info) 873 { 874 struct nfe_softc *sc = ifp->if_softc; 875 876 ASSERT_SERIALIZED(ifp->if_serializer); 877 878 if (info != NULL) { 879 int cpuid = sc->sc_npoll.ifpc_cpuid; 880 881 info->ifpi_rx[cpuid].poll_func = nfe_npoll_compat; 882 info->ifpi_rx[cpuid].arg = NULL; 883 info->ifpi_rx[cpuid].serializer = ifp->if_serializer; 884 885 if (ifp->if_flags & IFF_RUNNING) 886 nfe_disable_intrs(sc); 887 ifq_set_cpuid(&ifp->if_snd, cpuid); 888 } else { 889 if (ifp->if_flags & IFF_RUNNING) 890 nfe_enable_intrs(sc); 891 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->sc_irq_res)); 892 } 893 } 894 895 #endif /* IFPOLL_ENABLE */ 896 897 static void 898 nfe_intr(void *arg) 899 { 900 struct nfe_softc *sc = arg; 901 struct ifnet *ifp = &sc->arpcom.ac_if; 902 uint32_t r; 903 904 r = NFE_READ(sc, NFE_IRQ_STATUS); 905 if (r == 0) 906 return; /* not for us */ 907 NFE_WRITE(sc, NFE_IRQ_STATUS, r); 908 909 if (sc->sc_rate_second != time_uptime) { 910 /* 911 * Calculate sc_rate_avg - interrupts per second. 912 */ 913 sc->sc_rate_second = time_uptime; 914 if (sc->sc_rate_avg < sc->sc_rate_acc) 915 sc->sc_rate_avg = sc->sc_rate_acc; 916 else 917 sc->sc_rate_avg = (sc->sc_rate_avg * 3 + 918 sc->sc_rate_acc) / 4; 919 sc->sc_rate_acc = 0; 920 } else if (sc->sc_rate_avg < sc->sc_rate_acc) { 921 /* 922 * Don't wait for a tick to roll over if we are taking 923 * a lot of interrupts. 924 */ 925 sc->sc_rate_avg = sc->sc_rate_acc; 926 } 927 928 DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r); 929 930 if (r & NFE_IRQ_LINK) { 931 NFE_READ(sc, NFE_PHY_STATUS); 932 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 933 DPRINTF(sc, "link state changed %s\n", ""); 934 } 935 936 if (ifp->if_flags & IFF_RUNNING) { 937 int ret; 938 int rate; 939 940 /* check Rx ring */ 941 ret = nfe_rxeof(sc); 942 943 /* check Tx ring */ 944 ret |= nfe_txeof(sc, 1); 945 946 /* update the rate accumulator */ 947 if (ret) 948 ++sc->sc_rate_acc; 949 950 if (sc->sc_flags & NFE_F_DYN_IM) { 951 rate = 1000000 / sc->sc_imtime; 952 if ((sc->sc_flags & NFE_F_IRQ_TIMER) == 0 && 953 sc->sc_rate_avg > rate) { 954 /* 955 * Use the hardware timer to reduce the 956 * interrupt rate if the discrete interrupt 957 * rate has exceeded our threshold. 958 */ 959 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_IMTIMER); 960 sc->sc_flags |= NFE_F_IRQ_TIMER; 961 } else if ((sc->sc_flags & NFE_F_IRQ_TIMER) && 962 sc->sc_rate_avg <= rate) { 963 /* 964 * Use discrete TX/RX interrupts if the rate 965 * has fallen below our threshold. 966 */ 967 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_NOIMTIMER); 968 sc->sc_flags &= ~NFE_F_IRQ_TIMER; 969 970 /* 971 * Recollect, mainly to avoid the possible race 972 * introduced by changing interrupt masks. 973 */ 974 nfe_rxeof(sc); 975 nfe_txeof(sc, 1); 976 } 977 } 978 } 979 } 980 981 static int 982 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr) 983 { 984 struct nfe_softc *sc = ifp->if_softc; 985 struct ifreq *ifr = (struct ifreq *)data; 986 struct mii_data *mii; 987 int error = 0, mask, jumbo_cap; 988 989 ASSERT_SERIALIZED(ifp->if_serializer); 990 991 switch (cmd) { 992 case SIOCSIFMTU: 993 if ((sc->sc_caps & NFE_JUMBO_SUP) && sc->rxq.jbuf != NULL) 994 jumbo_cap = 1; 995 else 996 jumbo_cap = 0; 997 998 if ((jumbo_cap && ifr->ifr_mtu > NFE_JUMBO_MTU) || 999 (!jumbo_cap && ifr->ifr_mtu > ETHERMTU)) { 1000 return EINVAL; 1001 } else if (ifp->if_mtu != ifr->ifr_mtu) { 1002 ifp->if_mtu = ifr->ifr_mtu; 1003 if (ifp->if_flags & IFF_RUNNING) 1004 nfe_init(sc); 1005 } 1006 break; 1007 case SIOCSIFFLAGS: 1008 if (ifp->if_flags & IFF_UP) { 1009 /* 1010 * If only the PROMISC or ALLMULTI flag changes, then 1011 * don't do a full re-init of the chip, just update 1012 * the Rx filter. 1013 */ 1014 if ((ifp->if_flags & IFF_RUNNING) && 1015 ((ifp->if_flags ^ sc->sc_if_flags) & 1016 (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 1017 nfe_setmulti(sc); 1018 } else { 1019 if (!(ifp->if_flags & IFF_RUNNING)) 1020 nfe_init(sc); 1021 } 1022 } else { 1023 if (ifp->if_flags & IFF_RUNNING) 1024 nfe_stop(sc); 1025 } 1026 sc->sc_if_flags = ifp->if_flags; 1027 break; 1028 case SIOCADDMULTI: 1029 case SIOCDELMULTI: 1030 if (ifp->if_flags & IFF_RUNNING) 1031 nfe_setmulti(sc); 1032 break; 1033 case SIOCSIFMEDIA: 1034 case SIOCGIFMEDIA: 1035 mii = device_get_softc(sc->sc_miibus); 1036 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1037 break; 1038 case SIOCSIFCAP: 1039 mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM; 1040 if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) { 1041 ifp->if_capenable ^= mask; 1042 if (IFCAP_TXCSUM & ifp->if_capenable) 1043 ifp->if_hwassist = NFE_CSUM_FEATURES; 1044 else 1045 ifp->if_hwassist = 0; 1046 1047 if (ifp->if_flags & IFF_RUNNING) 1048 nfe_init(sc); 1049 } 1050 break; 1051 default: 1052 error = ether_ioctl(ifp, cmd, data); 1053 break; 1054 } 1055 return error; 1056 } 1057 1058 static int 1059 nfe_rxeof(struct nfe_softc *sc) 1060 { 1061 struct ifnet *ifp = &sc->arpcom.ac_if; 1062 struct nfe_rx_ring *ring = &sc->rxq; 1063 int reap; 1064 1065 reap = 0; 1066 for (;;) { 1067 struct nfe_rx_data *data = &ring->data[ring->cur]; 1068 struct mbuf *m; 1069 uint16_t flags; 1070 int len, error; 1071 1072 if (sc->sc_caps & NFE_40BIT_ADDR) { 1073 struct nfe_desc64 *desc64 = &ring->desc64[ring->cur]; 1074 1075 flags = le16toh(desc64->flags); 1076 len = le16toh(desc64->length) & 0x3fff; 1077 } else { 1078 struct nfe_desc32 *desc32 = &ring->desc32[ring->cur]; 1079 1080 flags = le16toh(desc32->flags); 1081 len = le16toh(desc32->length) & 0x3fff; 1082 } 1083 1084 if (flags & NFE_RX_READY) 1085 break; 1086 1087 reap = 1; 1088 1089 if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 1090 if (!(flags & NFE_RX_VALID_V1)) 1091 goto skip; 1092 1093 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 1094 flags &= ~NFE_RX_ERROR; 1095 len--; /* fix buffer length */ 1096 } 1097 } else { 1098 if (!(flags & NFE_RX_VALID_V2)) 1099 goto skip; 1100 1101 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 1102 flags &= ~NFE_RX_ERROR; 1103 len--; /* fix buffer length */ 1104 } 1105 } 1106 1107 if (flags & NFE_RX_ERROR) { 1108 IFNET_STAT_INC(ifp, ierrors, 1); 1109 goto skip; 1110 } 1111 1112 m = data->m; 1113 1114 if (sc->sc_flags & NFE_F_USE_JUMBO) 1115 error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0); 1116 else 1117 error = nfe_newbuf_std(sc, ring, ring->cur, 0); 1118 if (error) { 1119 IFNET_STAT_INC(ifp, ierrors, 1); 1120 goto skip; 1121 } 1122 1123 /* finalize mbuf */ 1124 m->m_pkthdr.len = m->m_len = len; 1125 m->m_pkthdr.rcvif = ifp; 1126 1127 if ((ifp->if_capenable & IFCAP_RXCSUM) && 1128 (flags & NFE_RX_CSUMOK)) { 1129 if (flags & NFE_RX_IP_CSUMOK_V2) { 1130 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | 1131 CSUM_IP_VALID; 1132 } 1133 1134 if (flags & 1135 (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) { 1136 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 1137 CSUM_PSEUDO_HDR | 1138 CSUM_FRAG_NOT_CHECKED; 1139 m->m_pkthdr.csum_data = 0xffff; 1140 } 1141 } 1142 1143 IFNET_STAT_INC(ifp, ipackets, 1); 1144 ifp->if_input(ifp, m, NULL, -1); 1145 skip: 1146 nfe_set_ready_rxdesc(sc, ring, ring->cur); 1147 sc->rxq.cur = (sc->rxq.cur + 1) % sc->sc_rx_ring_count; 1148 } 1149 return reap; 1150 } 1151 1152 static int 1153 nfe_txeof(struct nfe_softc *sc, int start) 1154 { 1155 struct ifnet *ifp = &sc->arpcom.ac_if; 1156 struct nfe_tx_ring *ring = &sc->txq; 1157 struct nfe_tx_data *data = NULL; 1158 1159 while (ring->next != ring->cur) { 1160 uint16_t flags; 1161 1162 if (sc->sc_caps & NFE_40BIT_ADDR) 1163 flags = le16toh(ring->desc64[ring->next].flags); 1164 else 1165 flags = le16toh(ring->desc32[ring->next].flags); 1166 1167 if (flags & NFE_TX_VALID) 1168 break; 1169 1170 data = &ring->data[ring->next]; 1171 1172 if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 1173 if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL) 1174 goto skip; 1175 1176 if ((flags & NFE_TX_ERROR_V1) != 0) { 1177 if_printf(ifp, "tx v1 error 0x%4b\n", flags, 1178 NFE_V1_TXERR); 1179 IFNET_STAT_INC(ifp, oerrors, 1); 1180 } else { 1181 IFNET_STAT_INC(ifp, opackets, 1); 1182 } 1183 } else { 1184 if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL) 1185 goto skip; 1186 1187 if ((flags & NFE_TX_ERROR_V2) != 0) { 1188 if_printf(ifp, "tx v2 error 0x%4b\n", flags, 1189 NFE_V2_TXERR); 1190 IFNET_STAT_INC(ifp, oerrors, 1); 1191 } else { 1192 IFNET_STAT_INC(ifp, opackets, 1); 1193 } 1194 } 1195 1196 if (data->m == NULL) { /* should not get there */ 1197 if_printf(ifp, 1198 "last fragment bit w/o associated mbuf!\n"); 1199 goto skip; 1200 } 1201 1202 /* last fragment of the mbuf chain transmitted */ 1203 bus_dmamap_unload(ring->data_tag, data->map); 1204 m_freem(data->m); 1205 data->m = NULL; 1206 skip: 1207 ring->queued--; 1208 KKASSERT(ring->queued >= 0); 1209 ring->next = (ring->next + 1) % sc->sc_tx_ring_count; 1210 } 1211 1212 if (sc->sc_tx_ring_count - ring->queued >= 1213 sc->sc_tx_spare + NFE_NSEG_RSVD) 1214 ifq_clr_oactive(&ifp->if_snd); 1215 1216 if (ring->queued == 0) 1217 ifp->if_timer = 0; 1218 1219 if (start && !ifq_is_empty(&ifp->if_snd)) 1220 if_devstart(ifp); 1221 1222 if (data != NULL) 1223 return 1; 1224 else 1225 return 0; 1226 } 1227 1228 static int 1229 nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0) 1230 { 1231 bus_dma_segment_t segs[NFE_MAX_SCATTER]; 1232 struct nfe_tx_data *data, *data_map; 1233 bus_dmamap_t map; 1234 struct nfe_desc64 *desc64 = NULL; 1235 struct nfe_desc32 *desc32 = NULL; 1236 uint16_t flags = 0; 1237 uint32_t vtag = 0; 1238 int error, i, j, maxsegs, nsegs; 1239 1240 data = &ring->data[ring->cur]; 1241 map = data->map; 1242 data_map = data; /* Remember who owns the DMA map */ 1243 1244 maxsegs = (sc->sc_tx_ring_count - ring->queued) - NFE_NSEG_RSVD; 1245 if (maxsegs > NFE_MAX_SCATTER) 1246 maxsegs = NFE_MAX_SCATTER; 1247 KASSERT(maxsegs >= sc->sc_tx_spare, 1248 ("not enough segments %d,%d", maxsegs, sc->sc_tx_spare)); 1249 1250 error = bus_dmamap_load_mbuf_defrag(ring->data_tag, map, &m0, 1251 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 1252 if (error) 1253 goto back; 1254 bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE); 1255 1256 error = 0; 1257 1258 /* setup h/w VLAN tagging */ 1259 if (m0->m_flags & M_VLANTAG) 1260 vtag = m0->m_pkthdr.ether_vlantag; 1261 1262 if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) { 1263 if (m0->m_pkthdr.csum_flags & CSUM_IP) 1264 flags |= NFE_TX_IP_CSUM; 1265 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 1266 flags |= NFE_TX_TCP_CSUM; 1267 } 1268 1269 /* 1270 * XXX urm. somebody is unaware of how hardware works. You 1271 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in 1272 * the ring until the entire chain is actually *VALID*. Otherwise 1273 * the hardware may encounter a partially initialized chain that 1274 * is marked as being ready to go when it in fact is not ready to 1275 * go. 1276 */ 1277 1278 for (i = 0; i < nsegs; i++) { 1279 j = (ring->cur + i) % sc->sc_tx_ring_count; 1280 data = &ring->data[j]; 1281 1282 if (sc->sc_caps & NFE_40BIT_ADDR) { 1283 desc64 = &ring->desc64[j]; 1284 desc64->physaddr[0] = 1285 htole32(NFE_ADDR_HI(segs[i].ds_addr)); 1286 desc64->physaddr[1] = 1287 htole32(NFE_ADDR_LO(segs[i].ds_addr)); 1288 desc64->length = htole16(segs[i].ds_len - 1); 1289 desc64->vtag = htole32(vtag); 1290 desc64->flags = htole16(flags); 1291 } else { 1292 desc32 = &ring->desc32[j]; 1293 desc32->physaddr = htole32(segs[i].ds_addr); 1294 desc32->length = htole16(segs[i].ds_len - 1); 1295 desc32->flags = htole16(flags); 1296 } 1297 1298 /* csum flags and vtag belong to the first fragment only */ 1299 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM); 1300 vtag = 0; 1301 1302 ring->queued++; 1303 KKASSERT(ring->queued <= sc->sc_tx_ring_count); 1304 } 1305 1306 /* the whole mbuf chain has been DMA mapped, fix last descriptor */ 1307 if (sc->sc_caps & NFE_40BIT_ADDR) { 1308 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2); 1309 } else { 1310 if (sc->sc_caps & NFE_JUMBO_SUP) 1311 flags = NFE_TX_LASTFRAG_V2; 1312 else 1313 flags = NFE_TX_LASTFRAG_V1; 1314 desc32->flags |= htole16(flags); 1315 } 1316 1317 /* 1318 * Set NFE_TX_VALID backwards so the hardware doesn't see the 1319 * whole mess until the first descriptor in the map is flagged. 1320 */ 1321 for (i = nsegs - 1; i >= 0; --i) { 1322 j = (ring->cur + i) % sc->sc_tx_ring_count; 1323 if (sc->sc_caps & NFE_40BIT_ADDR) { 1324 desc64 = &ring->desc64[j]; 1325 desc64->flags |= htole16(NFE_TX_VALID); 1326 } else { 1327 desc32 = &ring->desc32[j]; 1328 desc32->flags |= htole16(NFE_TX_VALID); 1329 } 1330 } 1331 ring->cur = (ring->cur + nsegs) % sc->sc_tx_ring_count; 1332 1333 /* Exchange DMA map */ 1334 data_map->map = data->map; 1335 data->map = map; 1336 data->m = m0; 1337 back: 1338 if (error) 1339 m_freem(m0); 1340 return error; 1341 } 1342 1343 static void 1344 nfe_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1345 { 1346 struct nfe_softc *sc = ifp->if_softc; 1347 struct nfe_tx_ring *ring = &sc->txq; 1348 int count = 0, oactive = 0; 1349 struct mbuf *m0; 1350 1351 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 1352 ASSERT_SERIALIZED(ifp->if_serializer); 1353 1354 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd)) 1355 return; 1356 1357 for (;;) { 1358 int error; 1359 1360 if (sc->sc_tx_ring_count - ring->queued < 1361 sc->sc_tx_spare + NFE_NSEG_RSVD) { 1362 if (oactive) { 1363 ifq_set_oactive(&ifp->if_snd); 1364 break; 1365 } 1366 1367 nfe_txeof(sc, 0); 1368 oactive = 1; 1369 continue; 1370 } 1371 1372 m0 = ifq_dequeue(&ifp->if_snd); 1373 if (m0 == NULL) 1374 break; 1375 1376 ETHER_BPF_MTAP(ifp, m0); 1377 1378 error = nfe_encap(sc, ring, m0); 1379 if (error) { 1380 IFNET_STAT_INC(ifp, oerrors, 1); 1381 if (error == EFBIG) { 1382 if (oactive) { 1383 ifq_set_oactive(&ifp->if_snd); 1384 break; 1385 } 1386 nfe_txeof(sc, 0); 1387 oactive = 1; 1388 } 1389 continue; 1390 } else { 1391 oactive = 0; 1392 } 1393 ++count; 1394 1395 /* 1396 * NOTE: 1397 * `m0' may be freed in nfe_encap(), so 1398 * it should not be touched any more. 1399 */ 1400 } 1401 1402 if (count == 0) /* nothing sent */ 1403 return; 1404 1405 /* Kick Tx */ 1406 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 1407 1408 /* 1409 * Set a timeout in case the chip goes out to lunch. 1410 */ 1411 ifp->if_timer = 5; 1412 } 1413 1414 static void 1415 nfe_watchdog(struct ifnet *ifp) 1416 { 1417 struct nfe_softc *sc = ifp->if_softc; 1418 1419 ASSERT_SERIALIZED(ifp->if_serializer); 1420 1421 if (ifp->if_flags & IFF_RUNNING) { 1422 if_printf(ifp, "watchdog timeout - lost interrupt recovered\n"); 1423 nfe_txeof(sc, 1); 1424 return; 1425 } 1426 1427 if_printf(ifp, "watchdog timeout\n"); 1428 1429 nfe_init(ifp->if_softc); 1430 1431 IFNET_STAT_INC(ifp, oerrors, 1); 1432 } 1433 1434 static void 1435 nfe_init(void *xsc) 1436 { 1437 struct nfe_softc *sc = xsc; 1438 struct ifnet *ifp = &sc->arpcom.ac_if; 1439 uint32_t tmp; 1440 int error; 1441 1442 ASSERT_SERIALIZED(ifp->if_serializer); 1443 1444 nfe_stop(sc); 1445 1446 if ((sc->sc_caps & NFE_NO_PWRCTL) == 0) 1447 nfe_mac_reset(sc); 1448 1449 /* 1450 * NOTE: 1451 * Switching between jumbo frames and normal frames should 1452 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring(). 1453 */ 1454 if (ifp->if_mtu > ETHERMTU) { 1455 sc->sc_flags |= NFE_F_USE_JUMBO; 1456 sc->rxq.bufsz = NFE_JBYTES; 1457 sc->sc_tx_spare = NFE_NSEG_SPARE_JUMBO; 1458 if (bootverbose) 1459 if_printf(ifp, "use jumbo frames\n"); 1460 } else { 1461 sc->sc_flags &= ~NFE_F_USE_JUMBO; 1462 sc->rxq.bufsz = MCLBYTES; 1463 sc->sc_tx_spare = NFE_NSEG_SPARE; 1464 if (bootverbose) 1465 if_printf(ifp, "use non-jumbo frames\n"); 1466 } 1467 1468 error = nfe_init_tx_ring(sc, &sc->txq); 1469 if (error) { 1470 nfe_stop(sc); 1471 return; 1472 } 1473 1474 error = nfe_init_rx_ring(sc, &sc->rxq); 1475 if (error) { 1476 nfe_stop(sc); 1477 return; 1478 } 1479 1480 NFE_WRITE(sc, NFE_TX_POLL, 0); 1481 NFE_WRITE(sc, NFE_STATUS, 0); 1482 1483 sc->rxtxctl = NFE_RXTX_BIT2 | sc->rxtxctl_desc; 1484 1485 if (ifp->if_capenable & IFCAP_RXCSUM) 1486 sc->rxtxctl |= NFE_RXTX_RXCSUM; 1487 1488 /* 1489 * Although the adapter is capable of stripping VLAN tags from received 1490 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on 1491 * purpose. This will be done in software by our network stack. 1492 */ 1493 if (sc->sc_caps & NFE_HW_VLAN) 1494 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT; 1495 1496 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); 1497 DELAY(10); 1498 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 1499 1500 if (sc->sc_caps & NFE_HW_VLAN) 1501 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); 1502 1503 NFE_WRITE(sc, NFE_SETUP_R6, 0); 1504 1505 /* set MAC address */ 1506 nfe_set_macaddr(sc, sc->arpcom.ac_enaddr); 1507 1508 /* tell MAC where rings are in memory */ 1509 if (sc->sc_caps & NFE_40BIT_ADDR) { 1510 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 1511 NFE_ADDR_HI(sc->rxq.physaddr)); 1512 } 1513 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->rxq.physaddr)); 1514 1515 if (sc->sc_caps & NFE_40BIT_ADDR) { 1516 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, 1517 NFE_ADDR_HI(sc->txq.physaddr)); 1518 } 1519 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr)); 1520 1521 NFE_WRITE(sc, NFE_RING_SIZE, 1522 (sc->sc_rx_ring_count - 1) << 16 | 1523 (sc->sc_tx_ring_count - 1)); 1524 1525 NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz); 1526 1527 /* force MAC to wakeup */ 1528 tmp = NFE_READ(sc, NFE_PWR_STATE); 1529 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP); 1530 DELAY(10); 1531 tmp = NFE_READ(sc, NFE_PWR_STATE); 1532 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID); 1533 1534 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC); 1535 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); 1536 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); 1537 1538 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ 1539 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); 1540 1541 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); 1542 1543 sc->rxtxctl &= ~NFE_RXTX_BIT2; 1544 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 1545 DELAY(10); 1546 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl); 1547 1548 /* set Rx filter */ 1549 nfe_setmulti(sc); 1550 1551 nfe_ifmedia_upd(ifp); 1552 1553 /* enable Rx */ 1554 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); 1555 1556 /* enable Tx */ 1557 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); 1558 1559 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1560 1561 #ifdef IFPOLL_ENABLE 1562 if (ifp->if_flags & IFF_NPOLLING) 1563 nfe_disable_intrs(sc); 1564 else 1565 #endif 1566 nfe_enable_intrs(sc); 1567 1568 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc); 1569 1570 ifp->if_flags |= IFF_RUNNING; 1571 ifq_clr_oactive(&ifp->if_snd); 1572 1573 /* 1574 * If we had stuff in the tx ring before its all cleaned out now 1575 * so we are not going to get an interrupt, jump-start any pending 1576 * output. 1577 */ 1578 if (!ifq_is_empty(&ifp->if_snd)) 1579 if_devstart(ifp); 1580 } 1581 1582 static void 1583 nfe_stop(struct nfe_softc *sc) 1584 { 1585 struct ifnet *ifp = &sc->arpcom.ac_if; 1586 uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2; 1587 int i; 1588 1589 ASSERT_SERIALIZED(ifp->if_serializer); 1590 1591 callout_stop(&sc->sc_tick_ch); 1592 1593 ifp->if_timer = 0; 1594 ifp->if_flags &= ~IFF_RUNNING; 1595 ifq_clr_oactive(&ifp->if_snd); 1596 sc->sc_flags &= ~NFE_F_IRQ_TIMER; 1597 1598 #define WAITMAX 50000 1599 1600 /* 1601 * Abort Tx 1602 */ 1603 NFE_WRITE(sc, NFE_TX_CTL, 0); 1604 for (i = 0; i < WAITMAX; ++i) { 1605 DELAY(100); 1606 if ((NFE_READ(sc, NFE_TX_STATUS) & NFE_TX_STATUS_BUSY) == 0) 1607 break; 1608 } 1609 if (i == WAITMAX) 1610 if_printf(ifp, "can't stop TX\n"); 1611 DELAY(100); 1612 1613 /* 1614 * Disable Rx 1615 */ 1616 NFE_WRITE(sc, NFE_RX_CTL, 0); 1617 for (i = 0; i < WAITMAX; ++i) { 1618 DELAY(100); 1619 if ((NFE_READ(sc, NFE_RX_STATUS) & NFE_RX_STATUS_BUSY) == 0) 1620 break; 1621 } 1622 if (i == WAITMAX) 1623 if_printf(ifp, "can't stop RX\n"); 1624 DELAY(100); 1625 1626 #undef WAITMAX 1627 1628 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl); 1629 DELAY(10); 1630 NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl); 1631 1632 /* Disable interrupts */ 1633 NFE_WRITE(sc, NFE_IRQ_MASK, 0); 1634 1635 /* Reset Tx and Rx rings */ 1636 nfe_reset_tx_ring(sc, &sc->txq); 1637 nfe_reset_rx_ring(sc, &sc->rxq); 1638 } 1639 1640 static int 1641 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1642 { 1643 int i, j, error, descsize; 1644 bus_dmamem_t dmem; 1645 void **desc; 1646 1647 if (sc->sc_caps & NFE_40BIT_ADDR) { 1648 desc = (void *)&ring->desc64; 1649 descsize = sizeof(struct nfe_desc64); 1650 } else { 1651 desc = (void *)&ring->desc32; 1652 descsize = sizeof(struct nfe_desc32); 1653 } 1654 1655 ring->bufsz = MCLBYTES; 1656 ring->cur = ring->next = 0; 1657 1658 error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0, 1659 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1660 sc->sc_rx_ring_count * descsize, 1661 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1662 if (error) { 1663 if_printf(&sc->arpcom.ac_if, 1664 "could not create RX desc ring\n"); 1665 return error; 1666 } 1667 ring->tag = dmem.dmem_tag; 1668 ring->map = dmem.dmem_map; 1669 *desc = dmem.dmem_addr; 1670 ring->physaddr = dmem.dmem_busaddr; 1671 1672 if (sc->sc_caps & NFE_JUMBO_SUP) { 1673 ring->jbuf = 1674 kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT(sc), 1675 M_DEVBUF, M_WAITOK | M_ZERO); 1676 1677 error = nfe_jpool_alloc(sc, ring); 1678 if (error) { 1679 if_printf(&sc->arpcom.ac_if, 1680 "could not allocate jumbo frames\n"); 1681 kfree(ring->jbuf, M_DEVBUF); 1682 ring->jbuf = NULL; 1683 /* Allow jumbo frame allocation to fail */ 1684 } 1685 } 1686 1687 ring->data = kmalloc(sizeof(struct nfe_rx_data) * sc->sc_rx_ring_count, 1688 M_DEVBUF, M_WAITOK | M_ZERO); 1689 1690 error = bus_dma_tag_create(sc->sc_dtag, 1, 0, 1691 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1692 NULL, NULL, 1693 MCLBYTES, 1, MCLBYTES, 1694 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, 1695 &ring->data_tag); 1696 if (error) { 1697 if_printf(&sc->arpcom.ac_if, 1698 "could not create RX mbuf DMA tag\n"); 1699 return error; 1700 } 1701 1702 /* Create a spare RX mbuf DMA map */ 1703 error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK, 1704 &ring->data_tmpmap); 1705 if (error) { 1706 if_printf(&sc->arpcom.ac_if, 1707 "could not create spare RX mbuf DMA map\n"); 1708 bus_dma_tag_destroy(ring->data_tag); 1709 ring->data_tag = NULL; 1710 return error; 1711 } 1712 1713 for (i = 0; i < sc->sc_rx_ring_count; i++) { 1714 error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK, 1715 &ring->data[i].map); 1716 if (error) { 1717 if_printf(&sc->arpcom.ac_if, 1718 "could not create %dth RX mbuf DMA mapn", i); 1719 goto fail; 1720 } 1721 } 1722 return 0; 1723 fail: 1724 for (j = 0; j < i; ++j) 1725 bus_dmamap_destroy(ring->data_tag, ring->data[i].map); 1726 bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap); 1727 bus_dma_tag_destroy(ring->data_tag); 1728 ring->data_tag = NULL; 1729 return error; 1730 } 1731 1732 static void 1733 nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1734 { 1735 int i; 1736 1737 for (i = 0; i < sc->sc_rx_ring_count; i++) { 1738 struct nfe_rx_data *data = &ring->data[i]; 1739 1740 if (data->m != NULL) { 1741 if ((sc->sc_flags & NFE_F_USE_JUMBO) == 0) 1742 bus_dmamap_unload(ring->data_tag, data->map); 1743 m_freem(data->m); 1744 data->m = NULL; 1745 } 1746 } 1747 1748 ring->cur = ring->next = 0; 1749 } 1750 1751 static int 1752 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1753 { 1754 int i; 1755 1756 for (i = 0; i < sc->sc_rx_ring_count; ++i) { 1757 int error; 1758 1759 /* XXX should use a function pointer */ 1760 if (sc->sc_flags & NFE_F_USE_JUMBO) 1761 error = nfe_newbuf_jumbo(sc, ring, i, 1); 1762 else 1763 error = nfe_newbuf_std(sc, ring, i, 1); 1764 if (error) { 1765 if_printf(&sc->arpcom.ac_if, 1766 "could not allocate RX buffer\n"); 1767 return error; 1768 } 1769 nfe_set_ready_rxdesc(sc, ring, i); 1770 } 1771 return 0; 1772 } 1773 1774 static void 1775 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1776 { 1777 if (ring->data_tag != NULL) { 1778 struct nfe_rx_data *data; 1779 int i; 1780 1781 for (i = 0; i < sc->sc_rx_ring_count; i++) { 1782 data = &ring->data[i]; 1783 1784 if (data->m != NULL) { 1785 bus_dmamap_unload(ring->data_tag, data->map); 1786 m_freem(data->m); 1787 } 1788 bus_dmamap_destroy(ring->data_tag, data->map); 1789 } 1790 bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap); 1791 bus_dma_tag_destroy(ring->data_tag); 1792 } 1793 1794 nfe_jpool_free(sc, ring); 1795 1796 if (ring->jbuf != NULL) 1797 kfree(ring->jbuf, M_DEVBUF); 1798 if (ring->data != NULL) 1799 kfree(ring->data, M_DEVBUF); 1800 1801 if (ring->tag != NULL) { 1802 void *desc; 1803 1804 if (sc->sc_caps & NFE_40BIT_ADDR) 1805 desc = ring->desc64; 1806 else 1807 desc = ring->desc32; 1808 1809 bus_dmamap_unload(ring->tag, ring->map); 1810 bus_dmamem_free(ring->tag, desc, ring->map); 1811 bus_dma_tag_destroy(ring->tag); 1812 } 1813 } 1814 1815 static struct nfe_jbuf * 1816 nfe_jalloc(struct nfe_softc *sc) 1817 { 1818 struct ifnet *ifp = &sc->arpcom.ac_if; 1819 struct nfe_jbuf *jbuf; 1820 1821 lwkt_serialize_enter(&sc->sc_jbuf_serializer); 1822 1823 jbuf = SLIST_FIRST(&sc->rxq.jfreelist); 1824 if (jbuf != NULL) { 1825 SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext); 1826 jbuf->inuse = 1; 1827 } else { 1828 if_printf(ifp, "no free jumbo buffer\n"); 1829 } 1830 1831 lwkt_serialize_exit(&sc->sc_jbuf_serializer); 1832 1833 return jbuf; 1834 } 1835 1836 static void 1837 nfe_jfree(void *arg) 1838 { 1839 struct nfe_jbuf *jbuf = arg; 1840 struct nfe_softc *sc = jbuf->sc; 1841 struct nfe_rx_ring *ring = jbuf->ring; 1842 1843 if (&ring->jbuf[jbuf->slot] != jbuf) 1844 panic("%s: free wrong jumbo buffer", __func__); 1845 else if (jbuf->inuse == 0) 1846 panic("%s: jumbo buffer already freed", __func__); 1847 1848 lwkt_serialize_enter(&sc->sc_jbuf_serializer); 1849 atomic_subtract_int(&jbuf->inuse, 1); 1850 if (jbuf->inuse == 0) 1851 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext); 1852 lwkt_serialize_exit(&sc->sc_jbuf_serializer); 1853 } 1854 1855 static void 1856 nfe_jref(void *arg) 1857 { 1858 struct nfe_jbuf *jbuf = arg; 1859 struct nfe_rx_ring *ring = jbuf->ring; 1860 1861 if (&ring->jbuf[jbuf->slot] != jbuf) 1862 panic("%s: ref wrong jumbo buffer", __func__); 1863 else if (jbuf->inuse == 0) 1864 panic("%s: jumbo buffer already freed", __func__); 1865 1866 atomic_add_int(&jbuf->inuse, 1); 1867 } 1868 1869 static int 1870 nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1871 { 1872 struct nfe_jbuf *jbuf; 1873 bus_dmamem_t dmem; 1874 bus_addr_t physaddr; 1875 caddr_t buf; 1876 int i, error; 1877 1878 /* 1879 * Allocate a big chunk of DMA'able memory. 1880 */ 1881 error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0, 1882 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1883 NFE_JPOOL_SIZE(sc), 1884 BUS_DMA_WAITOK, &dmem); 1885 if (error) { 1886 if_printf(&sc->arpcom.ac_if, 1887 "could not create jumbo buffer\n"); 1888 return error; 1889 } 1890 ring->jtag = dmem.dmem_tag; 1891 ring->jmap = dmem.dmem_map; 1892 ring->jpool = dmem.dmem_addr; 1893 physaddr = dmem.dmem_busaddr; 1894 1895 /* ..and split it into 9KB chunks */ 1896 SLIST_INIT(&ring->jfreelist); 1897 1898 buf = ring->jpool; 1899 for (i = 0; i < NFE_JPOOL_COUNT(sc); i++) { 1900 jbuf = &ring->jbuf[i]; 1901 1902 jbuf->sc = sc; 1903 jbuf->ring = ring; 1904 jbuf->inuse = 0; 1905 jbuf->slot = i; 1906 jbuf->buf = buf; 1907 jbuf->physaddr = physaddr; 1908 1909 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext); 1910 1911 buf += NFE_JBYTES; 1912 physaddr += NFE_JBYTES; 1913 } 1914 1915 return 0; 1916 } 1917 1918 static void 1919 nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1920 { 1921 if (ring->jtag != NULL) { 1922 bus_dmamap_unload(ring->jtag, ring->jmap); 1923 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap); 1924 bus_dma_tag_destroy(ring->jtag); 1925 } 1926 } 1927 1928 static int 1929 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1930 { 1931 int i, j, error, descsize; 1932 bus_dmamem_t dmem; 1933 void **desc; 1934 1935 if (sc->sc_caps & NFE_40BIT_ADDR) { 1936 desc = (void *)&ring->desc64; 1937 descsize = sizeof(struct nfe_desc64); 1938 } else { 1939 desc = (void *)&ring->desc32; 1940 descsize = sizeof(struct nfe_desc32); 1941 } 1942 1943 ring->queued = 0; 1944 ring->cur = ring->next = 0; 1945 1946 error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0, 1947 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1948 sc->sc_tx_ring_count * descsize, 1949 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1950 if (error) { 1951 if_printf(&sc->arpcom.ac_if, 1952 "could not create TX desc ring\n"); 1953 return error; 1954 } 1955 ring->tag = dmem.dmem_tag; 1956 ring->map = dmem.dmem_map; 1957 *desc = dmem.dmem_addr; 1958 ring->physaddr = dmem.dmem_busaddr; 1959 1960 ring->data = kmalloc(sizeof(struct nfe_tx_data) * sc->sc_tx_ring_count, 1961 M_DEVBUF, M_WAITOK | M_ZERO); 1962 1963 error = bus_dma_tag_create(sc->sc_dtag, 1, 0, 1964 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1965 NULL, NULL, 1966 NFE_JBYTES, NFE_MAX_SCATTER, MCLBYTES, 1967 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 1968 &ring->data_tag); 1969 if (error) { 1970 if_printf(&sc->arpcom.ac_if, 1971 "could not create TX buf DMA tag\n"); 1972 return error; 1973 } 1974 1975 for (i = 0; i < sc->sc_tx_ring_count; i++) { 1976 error = bus_dmamap_create(ring->data_tag, 1977 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 1978 &ring->data[i].map); 1979 if (error) { 1980 if_printf(&sc->arpcom.ac_if, 1981 "could not create %dth TX buf DMA map\n", i); 1982 goto fail; 1983 } 1984 } 1985 1986 return 0; 1987 fail: 1988 for (j = 0; j < i; ++j) 1989 bus_dmamap_destroy(ring->data_tag, ring->data[i].map); 1990 bus_dma_tag_destroy(ring->data_tag); 1991 ring->data_tag = NULL; 1992 return error; 1993 } 1994 1995 static void 1996 nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1997 { 1998 int i; 1999 2000 for (i = 0; i < sc->sc_tx_ring_count; i++) { 2001 struct nfe_tx_data *data = &ring->data[i]; 2002 2003 if (sc->sc_caps & NFE_40BIT_ADDR) 2004 ring->desc64[i].flags = 0; 2005 else 2006 ring->desc32[i].flags = 0; 2007 2008 if (data->m != NULL) { 2009 bus_dmamap_unload(ring->data_tag, data->map); 2010 m_freem(data->m); 2011 data->m = NULL; 2012 } 2013 } 2014 2015 ring->queued = 0; 2016 ring->cur = ring->next = 0; 2017 } 2018 2019 static int 2020 nfe_init_tx_ring(struct nfe_softc *sc __unused, 2021 struct nfe_tx_ring *ring __unused) 2022 { 2023 return 0; 2024 } 2025 2026 static void 2027 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 2028 { 2029 if (ring->data_tag != NULL) { 2030 struct nfe_tx_data *data; 2031 int i; 2032 2033 for (i = 0; i < sc->sc_tx_ring_count; ++i) { 2034 data = &ring->data[i]; 2035 2036 if (data->m != NULL) { 2037 bus_dmamap_unload(ring->data_tag, data->map); 2038 m_freem(data->m); 2039 } 2040 bus_dmamap_destroy(ring->data_tag, data->map); 2041 } 2042 2043 bus_dma_tag_destroy(ring->data_tag); 2044 } 2045 2046 if (ring->data != NULL) 2047 kfree(ring->data, M_DEVBUF); 2048 2049 if (ring->tag != NULL) { 2050 void *desc; 2051 2052 if (sc->sc_caps & NFE_40BIT_ADDR) 2053 desc = ring->desc64; 2054 else 2055 desc = ring->desc32; 2056 2057 bus_dmamap_unload(ring->tag, ring->map); 2058 bus_dmamem_free(ring->tag, desc, ring->map); 2059 bus_dma_tag_destroy(ring->tag); 2060 } 2061 } 2062 2063 static int 2064 nfe_ifmedia_upd(struct ifnet *ifp) 2065 { 2066 struct nfe_softc *sc = ifp->if_softc; 2067 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2068 2069 ASSERT_SERIALIZED(ifp->if_serializer); 2070 2071 if (mii->mii_instance != 0) { 2072 struct mii_softc *miisc; 2073 2074 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2075 mii_phy_reset(miisc); 2076 } 2077 mii_mediachg(mii); 2078 2079 return 0; 2080 } 2081 2082 static void 2083 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2084 { 2085 struct nfe_softc *sc = ifp->if_softc; 2086 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2087 2088 ASSERT_SERIALIZED(ifp->if_serializer); 2089 2090 mii_pollstat(mii); 2091 ifmr->ifm_status = mii->mii_media_status; 2092 ifmr->ifm_active = mii->mii_media_active; 2093 } 2094 2095 static void 2096 nfe_setmulti(struct nfe_softc *sc) 2097 { 2098 struct ifnet *ifp = &sc->arpcom.ac_if; 2099 struct ifmultiaddr *ifma; 2100 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN]; 2101 uint32_t filter = NFE_RXFILTER_MAGIC; 2102 int i; 2103 2104 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 2105 bzero(addr, ETHER_ADDR_LEN); 2106 bzero(mask, ETHER_ADDR_LEN); 2107 goto done; 2108 } 2109 2110 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 2111 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN); 2112 2113 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2114 caddr_t maddr; 2115 2116 if (ifma->ifma_addr->sa_family != AF_LINK) 2117 continue; 2118 2119 maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 2120 for (i = 0; i < ETHER_ADDR_LEN; i++) { 2121 addr[i] &= maddr[i]; 2122 mask[i] &= ~maddr[i]; 2123 } 2124 } 2125 2126 for (i = 0; i < ETHER_ADDR_LEN; i++) 2127 mask[i] |= addr[i]; 2128 2129 done: 2130 addr[0] |= 0x01; /* make sure multicast bit is set */ 2131 2132 NFE_WRITE(sc, NFE_MULTIADDR_HI, 2133 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); 2134 NFE_WRITE(sc, NFE_MULTIADDR_LO, 2135 addr[5] << 8 | addr[4]); 2136 NFE_WRITE(sc, NFE_MULTIMASK_HI, 2137 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]); 2138 NFE_WRITE(sc, NFE_MULTIMASK_LO, 2139 mask[5] << 8 | mask[4]); 2140 2141 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M; 2142 NFE_WRITE(sc, NFE_RXFILTER, filter); 2143 } 2144 2145 static void 2146 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr) 2147 { 2148 uint32_t lo, hi; 2149 2150 lo = NFE_READ(sc, NFE_MACADDR_LO); 2151 hi = NFE_READ(sc, NFE_MACADDR_HI); 2152 if (sc->sc_caps & NFE_FIX_EADDR) { 2153 addr[0] = (lo >> 8) & 0xff; 2154 addr[1] = (lo & 0xff); 2155 2156 addr[2] = (hi >> 24) & 0xff; 2157 addr[3] = (hi >> 16) & 0xff; 2158 addr[4] = (hi >> 8) & 0xff; 2159 addr[5] = (hi & 0xff); 2160 } else { 2161 addr[0] = (hi & 0xff); 2162 addr[1] = (hi >> 8) & 0xff; 2163 addr[2] = (hi >> 16) & 0xff; 2164 addr[3] = (hi >> 24) & 0xff; 2165 2166 addr[4] = (lo & 0xff); 2167 addr[5] = (lo >> 8) & 0xff; 2168 } 2169 } 2170 2171 static void 2172 nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr) 2173 { 2174 NFE_WRITE(sc, NFE_MACADDR_LO, 2175 addr[5] << 8 | addr[4]); 2176 NFE_WRITE(sc, NFE_MACADDR_HI, 2177 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); 2178 } 2179 2180 static void 2181 nfe_tick(void *arg) 2182 { 2183 struct nfe_softc *sc = arg; 2184 struct ifnet *ifp = &sc->arpcom.ac_if; 2185 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2186 2187 lwkt_serialize_enter(ifp->if_serializer); 2188 2189 mii_tick(mii); 2190 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc); 2191 2192 lwkt_serialize_exit(ifp->if_serializer); 2193 } 2194 2195 static int 2196 nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx, 2197 int wait) 2198 { 2199 struct nfe_rx_data *data = &ring->data[idx]; 2200 bus_dma_segment_t seg; 2201 bus_dmamap_t map; 2202 struct mbuf *m; 2203 int nsegs, error; 2204 2205 m = m_getcl(wait ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 2206 if (m == NULL) 2207 return ENOBUFS; 2208 m->m_len = m->m_pkthdr.len = MCLBYTES; 2209 2210 /* 2211 * Aligning the payload improves access times. 2212 */ 2213 if (sc->sc_caps & NFE_WORDALIGN) 2214 m_adj(m, ETHER_ALIGN); 2215 2216 error = bus_dmamap_load_mbuf_segment(ring->data_tag, ring->data_tmpmap, 2217 m, &seg, 1, &nsegs, BUS_DMA_NOWAIT); 2218 if (error) { 2219 m_freem(m); 2220 if (wait) { 2221 if_printf(&sc->arpcom.ac_if, 2222 "could map RX mbuf %d\n", error); 2223 } 2224 return error; 2225 } 2226 2227 if (data->m != NULL) { 2228 /* Sync and unload originally mapped mbuf */ 2229 bus_dmamap_sync(ring->data_tag, data->map, 2230 BUS_DMASYNC_POSTREAD); 2231 bus_dmamap_unload(ring->data_tag, data->map); 2232 } 2233 2234 /* Swap this DMA map with tmp DMA map */ 2235 map = data->map; 2236 data->map = ring->data_tmpmap; 2237 ring->data_tmpmap = map; 2238 2239 /* Caller is assumed to have collected the old mbuf */ 2240 data->m = m; 2241 2242 nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr); 2243 return 0; 2244 } 2245 2246 static int 2247 nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx, 2248 int wait) 2249 { 2250 struct nfe_rx_data *data = &ring->data[idx]; 2251 struct nfe_jbuf *jbuf; 2252 struct mbuf *m; 2253 2254 MGETHDR(m, wait ? M_WAITOK : M_NOWAIT, MT_DATA); 2255 if (m == NULL) 2256 return ENOBUFS; 2257 2258 jbuf = nfe_jalloc(sc); 2259 if (jbuf == NULL) { 2260 m_freem(m); 2261 if_printf(&sc->arpcom.ac_if, "jumbo allocation failed " 2262 "-- packet dropped!\n"); 2263 return ENOBUFS; 2264 } 2265 2266 m->m_ext.ext_arg = jbuf; 2267 m->m_ext.ext_buf = jbuf->buf; 2268 m->m_ext.ext_free = nfe_jfree; 2269 m->m_ext.ext_ref = nfe_jref; 2270 m->m_ext.ext_size = NFE_JBYTES; 2271 2272 m->m_data = m->m_ext.ext_buf; 2273 m->m_flags |= M_EXT; 2274 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 2275 2276 /* 2277 * Aligning the payload improves access times. 2278 */ 2279 if (sc->sc_caps & NFE_WORDALIGN) 2280 m_adj(m, ETHER_ALIGN); 2281 2282 /* Caller is assumed to have collected the old mbuf */ 2283 data->m = m; 2284 2285 nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr); 2286 return 0; 2287 } 2288 2289 static void 2290 nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx, 2291 bus_addr_t physaddr) 2292 { 2293 if (sc->sc_caps & NFE_40BIT_ADDR) { 2294 struct nfe_desc64 *desc64 = &ring->desc64[idx]; 2295 2296 desc64->physaddr[0] = htole32(NFE_ADDR_HI(physaddr)); 2297 desc64->physaddr[1] = htole32(NFE_ADDR_LO(physaddr)); 2298 } else { 2299 struct nfe_desc32 *desc32 = &ring->desc32[idx]; 2300 2301 desc32->physaddr = htole32(physaddr); 2302 } 2303 } 2304 2305 static void 2306 nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx) 2307 { 2308 if (sc->sc_caps & NFE_40BIT_ADDR) { 2309 struct nfe_desc64 *desc64 = &ring->desc64[idx]; 2310 2311 desc64->length = htole16(ring->bufsz); 2312 desc64->flags = htole16(NFE_RX_READY); 2313 } else { 2314 struct nfe_desc32 *desc32 = &ring->desc32[idx]; 2315 2316 desc32->length = htole16(ring->bufsz); 2317 desc32->flags = htole16(NFE_RX_READY); 2318 } 2319 } 2320 2321 static int 2322 nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS) 2323 { 2324 struct nfe_softc *sc = arg1; 2325 struct ifnet *ifp = &sc->arpcom.ac_if; 2326 uint32_t flags; 2327 int error, v; 2328 2329 lwkt_serialize_enter(ifp->if_serializer); 2330 2331 flags = sc->sc_flags & ~NFE_F_DYN_IM; 2332 v = sc->sc_imtime; 2333 if (sc->sc_flags & NFE_F_DYN_IM) 2334 v = -v; 2335 2336 error = sysctl_handle_int(oidp, &v, 0, req); 2337 if (error || req->newptr == NULL) 2338 goto back; 2339 2340 if (v < 0) { 2341 flags |= NFE_F_DYN_IM; 2342 v = -v; 2343 } 2344 2345 if (v != sc->sc_imtime || (flags ^ sc->sc_flags)) { 2346 if (NFE_IMTIME(v) == 0) 2347 v = 0; 2348 sc->sc_imtime = v; 2349 sc->sc_flags = flags; 2350 sc->sc_irq_enable = NFE_IRQ_ENABLE(sc); 2351 2352 if ((ifp->if_flags & (IFF_NPOLLING | IFF_RUNNING)) 2353 == IFF_RUNNING) { 2354 nfe_enable_intrs(sc); 2355 } 2356 } 2357 back: 2358 lwkt_serialize_exit(ifp->if_serializer); 2359 return error; 2360 } 2361 2362 static void 2363 nfe_powerup(device_t dev) 2364 { 2365 struct nfe_softc *sc = device_get_softc(dev); 2366 uint32_t pwr_state; 2367 uint16_t did; 2368 2369 /* 2370 * Bring MAC and PHY out of low power state 2371 */ 2372 2373 pwr_state = NFE_READ(sc, NFE_PWR_STATE2) & ~NFE_PWRUP_MASK; 2374 2375 did = pci_get_device(dev); 2376 if ((did == PCI_PRODUCT_NVIDIA_MCP51_LAN1 || 2377 did == PCI_PRODUCT_NVIDIA_MCP51_LAN2) && 2378 pci_get_revid(dev) >= 0xa3) 2379 pwr_state |= NFE_PWRUP_REV_A3; 2380 2381 NFE_WRITE(sc, NFE_PWR_STATE2, pwr_state); 2382 } 2383 2384 static void 2385 nfe_mac_reset(struct nfe_softc *sc) 2386 { 2387 uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2; 2388 uint32_t macaddr_hi, macaddr_lo, tx_poll; 2389 2390 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl); 2391 2392 /* Save several registers for later restoration */ 2393 macaddr_hi = NFE_READ(sc, NFE_MACADDR_HI); 2394 macaddr_lo = NFE_READ(sc, NFE_MACADDR_LO); 2395 tx_poll = NFE_READ(sc, NFE_TX_POLL); 2396 2397 NFE_WRITE(sc, NFE_MAC_RESET, NFE_RESET_ASSERT); 2398 DELAY(100); 2399 2400 NFE_WRITE(sc, NFE_MAC_RESET, 0); 2401 DELAY(100); 2402 2403 /* Restore saved registers */ 2404 NFE_WRITE(sc, NFE_MACADDR_HI, macaddr_hi); 2405 NFE_WRITE(sc, NFE_MACADDR_LO, macaddr_lo); 2406 NFE_WRITE(sc, NFE_TX_POLL, tx_poll); 2407 2408 NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl); 2409 } 2410 2411 static void 2412 nfe_enable_intrs(struct nfe_softc *sc) 2413 { 2414 /* 2415 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER. 2416 * It is unclear how wide the timer is. Base programming does 2417 * not seem to effect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE so 2418 * we don't get any interrupt moderation. TX moderation is 2419 * possible by using the timer interrupt instead of TX_DONE. 2420 * 2421 * It is unclear whether there are other bits that can be 2422 * set to make the NFE device actually do interrupt moderation 2423 * on the RX side. 2424 * 2425 * For now set a 128uS interval as a placemark, but don't use 2426 * the timer. 2427 */ 2428 if (sc->sc_imtime == 0) 2429 NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME_DEFAULT); 2430 else 2431 NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME(sc->sc_imtime)); 2432 2433 /* Enable interrupts */ 2434 NFE_WRITE(sc, NFE_IRQ_MASK, sc->sc_irq_enable); 2435 2436 if (sc->sc_irq_enable & NFE_IRQ_TIMER) 2437 sc->sc_flags |= NFE_F_IRQ_TIMER; 2438 else 2439 sc->sc_flags &= ~NFE_F_IRQ_TIMER; 2440 } 2441