1 /* $OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $ */ 2 3 /* 4 * Copyright (c) 2006 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Sepherosa Ziehau <sepherosa@gmail.com> and 8 * Matthew Dillon <dillon@apollo.backplane.com> 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in 18 * the documentation and/or other materials provided with the 19 * distribution. 20 * 3. Neither the name of The DragonFly Project nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific, prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 */ 37 38 /* 39 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr> 40 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org> 41 * 42 * Permission to use, copy, modify, and distribute this software for any 43 * purpose with or without fee is hereby granted, provided that the above 44 * copyright notice and this permission notice appear in all copies. 45 * 46 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 47 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 48 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 49 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 50 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 51 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 52 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 53 */ 54 55 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ 56 57 #include "opt_ifpoll.h" 58 59 #include <sys/param.h> 60 #include <sys/endian.h> 61 #include <sys/kernel.h> 62 #include <sys/bus.h> 63 #include <sys/interrupt.h> 64 #include <sys/proc.h> 65 #include <sys/rman.h> 66 #include <sys/serialize.h> 67 #include <sys/socket.h> 68 #include <sys/sockio.h> 69 #include <sys/sysctl.h> 70 71 #include <net/ethernet.h> 72 #include <net/if.h> 73 #include <net/bpf.h> 74 #include <net/if_arp.h> 75 #include <net/if_dl.h> 76 #include <net/if_media.h> 77 #include <net/if_poll.h> 78 #include <net/ifq_var.h> 79 #include <net/if_types.h> 80 #include <net/if_var.h> 81 #include <net/vlan/if_vlan_var.h> 82 #include <net/vlan/if_vlan_ether.h> 83 84 #include <bus/pci/pcireg.h> 85 #include <bus/pci/pcivar.h> 86 #include "pcidevs.h" 87 88 #include <dev/netif/mii_layer/mii.h> 89 #include <dev/netif/mii_layer/miivar.h> 90 91 #include "miibus_if.h" 92 93 #include <dev/netif/nfe/if_nfereg.h> 94 #include <dev/netif/nfe/if_nfevar.h> 95 96 #define NFE_CSUM 97 #define NFE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 98 99 static int nfe_probe(device_t); 100 static int nfe_attach(device_t); 101 static int nfe_detach(device_t); 102 static void nfe_shutdown(device_t); 103 static int nfe_resume(device_t); 104 static int nfe_suspend(device_t); 105 106 static int nfe_miibus_readreg(device_t, int, int); 107 static void nfe_miibus_writereg(device_t, int, int, int); 108 static void nfe_miibus_statchg(device_t); 109 110 #ifdef IFPOLL_ENABLE 111 static void nfe_npoll(struct ifnet *, struct ifpoll_info *); 112 static void nfe_npoll_compat(struct ifnet *, void *, int); 113 static void nfe_disable_intrs(struct nfe_softc *); 114 #endif 115 static void nfe_intr(void *); 116 static int nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 117 static int nfe_rxeof(struct nfe_softc *); 118 static int nfe_txeof(struct nfe_softc *, int); 119 static int nfe_encap(struct nfe_softc *, struct nfe_tx_ring *, 120 struct mbuf *); 121 static void nfe_start(struct ifnet *, struct ifaltq_subque *); 122 static void nfe_watchdog(struct ifnet *); 123 static void nfe_init(void *); 124 static void nfe_stop(struct nfe_softc *); 125 static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *); 126 static void nfe_jfree(void *); 127 static void nfe_jref(void *); 128 static int nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *); 129 static void nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *); 130 static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 131 static void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 132 static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 133 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 134 static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 135 static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 136 static int nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 137 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 138 static int nfe_ifmedia_upd(struct ifnet *); 139 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *); 140 static void nfe_setmulti(struct nfe_softc *); 141 static void nfe_get_macaddr(struct nfe_softc *, uint8_t *); 142 static void nfe_set_macaddr(struct nfe_softc *, const uint8_t *); 143 static void nfe_powerup(device_t); 144 static void nfe_mac_reset(struct nfe_softc *); 145 static void nfe_tick(void *); 146 static void nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *, 147 int, bus_addr_t); 148 static void nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *, 149 int); 150 static int nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int, 151 int); 152 static int nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int, 153 int); 154 static void nfe_enable_intrs(struct nfe_softc *); 155 156 static int nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS); 157 158 #define NFE_DEBUG 159 #ifdef NFE_DEBUG 160 161 static int nfe_debug = 0; 162 static int nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT; 163 static int nfe_tx_ring_count = NFE_TX_RING_DEF_COUNT; 164 /* 165 * hw timer simulated interrupt moderation @4000Hz. Negative values 166 * disable the timer when the discrete interrupt rate falls below 167 * the moderation rate. 168 * 169 * XXX 8000Hz might be better but if the interrupt is shared it can 170 * blow out the cpu. 171 */ 172 static int nfe_imtime = -250; /* uS */ 173 174 TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count); 175 TUNABLE_INT("hw.nfe.tx_ring_count", &nfe_tx_ring_count); 176 TUNABLE_INT("hw.nfe.imtimer", &nfe_imtime); 177 TUNABLE_INT("hw.nfe.debug", &nfe_debug); 178 179 #define DPRINTF(sc, fmt, ...) do { \ 180 if ((sc)->sc_debug) { \ 181 if_printf(&(sc)->arpcom.ac_if, \ 182 fmt, __VA_ARGS__); \ 183 } \ 184 } while (0) 185 186 #define DPRINTFN(sc, lv, fmt, ...) do { \ 187 if ((sc)->sc_debug >= (lv)) { \ 188 if_printf(&(sc)->arpcom.ac_if, \ 189 fmt, __VA_ARGS__); \ 190 } \ 191 } while (0) 192 193 #else /* !NFE_DEBUG */ 194 195 #define DPRINTF(sc, fmt, ...) 196 #define DPRINTFN(sc, lv, fmt, ...) 197 198 #endif /* NFE_DEBUG */ 199 200 static const struct nfe_dev { 201 uint16_t vid; 202 uint16_t did; 203 const char *desc; 204 } nfe_devices[] = { 205 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN, 206 "NVIDIA nForce Fast Ethernet" }, 207 208 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN, 209 "NVIDIA nForce2 Fast Ethernet" }, 210 211 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1, 212 "NVIDIA nForce3 Gigabit Ethernet" }, 213 214 /* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb 215 chipset, and possibly also the 400R; it might be both nForce2- and 216 nForce3-based boards can use the same MCPs (= southbridges) */ 217 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2, 218 "NVIDIA nForce3 Gigabit Ethernet" }, 219 220 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3, 221 "NVIDIA nForce3 Gigabit Ethernet" }, 222 223 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4, 224 "NVIDIA nForce3 Gigabit Ethernet" }, 225 226 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5, 227 "NVIDIA nForce3 Gigabit Ethernet" }, 228 229 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1, 230 "NVIDIA CK804 Gigabit Ethernet" }, 231 232 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2, 233 "NVIDIA CK804 Gigabit Ethernet" }, 234 235 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1, 236 "NVIDIA MCP04 Gigabit Ethernet" }, 237 238 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2, 239 "NVIDIA MCP04 Gigabit Ethernet" }, 240 241 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1, 242 "NVIDIA MCP51 Gigabit Ethernet" }, 243 244 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2, 245 "NVIDIA MCP51 Gigabit Ethernet" }, 246 247 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1, 248 "NVIDIA MCP55 Gigabit Ethernet" }, 249 250 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2, 251 "NVIDIA MCP55 Gigabit Ethernet" }, 252 253 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1, 254 "NVIDIA MCP61 Gigabit Ethernet" }, 255 256 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2, 257 "NVIDIA MCP61 Gigabit Ethernet" }, 258 259 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3, 260 "NVIDIA MCP61 Gigabit Ethernet" }, 261 262 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4, 263 "NVIDIA MCP61 Gigabit Ethernet" }, 264 265 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1, 266 "NVIDIA MCP65 Gigabit Ethernet" }, 267 268 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2, 269 "NVIDIA MCP65 Gigabit Ethernet" }, 270 271 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3, 272 "NVIDIA MCP65 Gigabit Ethernet" }, 273 274 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4, 275 "NVIDIA MCP65 Gigabit Ethernet" }, 276 277 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1, 278 "NVIDIA MCP67 Gigabit Ethernet" }, 279 280 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2, 281 "NVIDIA MCP67 Gigabit Ethernet" }, 282 283 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3, 284 "NVIDIA MCP67 Gigabit Ethernet" }, 285 286 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4, 287 "NVIDIA MCP67 Gigabit Ethernet" }, 288 289 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1, 290 "NVIDIA MCP73 Gigabit Ethernet" }, 291 292 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2, 293 "NVIDIA MCP73 Gigabit Ethernet" }, 294 295 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3, 296 "NVIDIA MCP73 Gigabit Ethernet" }, 297 298 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4, 299 "NVIDIA MCP73 Gigabit Ethernet" }, 300 301 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1, 302 "NVIDIA MCP77 Gigabit Ethernet" }, 303 304 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2, 305 "NVIDIA MCP77 Gigabit Ethernet" }, 306 307 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3, 308 "NVIDIA MCP77 Gigabit Ethernet" }, 309 310 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4, 311 "NVIDIA MCP77 Gigabit Ethernet" }, 312 313 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1, 314 "NVIDIA MCP79 Gigabit Ethernet" }, 315 316 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2, 317 "NVIDIA MCP79 Gigabit Ethernet" }, 318 319 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3, 320 "NVIDIA MCP79 Gigabit Ethernet" }, 321 322 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4, 323 "NVIDIA MCP79 Gigabit Ethernet" }, 324 325 { 0, 0, NULL } 326 }; 327 328 static device_method_t nfe_methods[] = { 329 /* Device interface */ 330 DEVMETHOD(device_probe, nfe_probe), 331 DEVMETHOD(device_attach, nfe_attach), 332 DEVMETHOD(device_detach, nfe_detach), 333 DEVMETHOD(device_suspend, nfe_suspend), 334 DEVMETHOD(device_resume, nfe_resume), 335 DEVMETHOD(device_shutdown, nfe_shutdown), 336 337 /* Bus interface */ 338 DEVMETHOD(bus_print_child, bus_generic_print_child), 339 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 340 341 /* MII interface */ 342 DEVMETHOD(miibus_readreg, nfe_miibus_readreg), 343 DEVMETHOD(miibus_writereg, nfe_miibus_writereg), 344 DEVMETHOD(miibus_statchg, nfe_miibus_statchg), 345 346 DEVMETHOD_END 347 }; 348 349 static driver_t nfe_driver = { 350 "nfe", 351 nfe_methods, 352 sizeof(struct nfe_softc) 353 }; 354 355 static devclass_t nfe_devclass; 356 357 DECLARE_DUMMY_MODULE(if_nfe); 358 MODULE_DEPEND(if_nfe, miibus, 1, 1, 1); 359 DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, NULL, NULL); 360 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, NULL, NULL); 361 362 /* 363 * NOTE: NFE_WORDALIGN support is guesswork right now. 364 */ 365 static int 366 nfe_probe(device_t dev) 367 { 368 const struct nfe_dev *n; 369 uint16_t vid, did; 370 371 vid = pci_get_vendor(dev); 372 did = pci_get_device(dev); 373 for (n = nfe_devices; n->desc != NULL; ++n) { 374 if (vid == n->vid && did == n->did) { 375 struct nfe_softc *sc = device_get_softc(dev); 376 377 switch (did) { 378 case PCI_PRODUCT_NVIDIA_NFORCE_LAN: 379 case PCI_PRODUCT_NVIDIA_NFORCE2_LAN: 380 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN1: 381 sc->sc_caps = NFE_NO_PWRCTL | 382 NFE_FIX_EADDR; 383 break; 384 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: 385 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: 386 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: 387 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: 388 sc->sc_caps = NFE_JUMBO_SUP | 389 NFE_HW_CSUM | 390 NFE_NO_PWRCTL | 391 NFE_FIX_EADDR; 392 break; 393 case PCI_PRODUCT_NVIDIA_MCP51_LAN1: 394 case PCI_PRODUCT_NVIDIA_MCP51_LAN2: 395 sc->sc_caps = NFE_FIX_EADDR; 396 /* FALL THROUGH */ 397 case PCI_PRODUCT_NVIDIA_MCP61_LAN1: 398 case PCI_PRODUCT_NVIDIA_MCP61_LAN2: 399 case PCI_PRODUCT_NVIDIA_MCP61_LAN3: 400 case PCI_PRODUCT_NVIDIA_MCP61_LAN4: 401 case PCI_PRODUCT_NVIDIA_MCP67_LAN1: 402 case PCI_PRODUCT_NVIDIA_MCP67_LAN2: 403 case PCI_PRODUCT_NVIDIA_MCP67_LAN3: 404 case PCI_PRODUCT_NVIDIA_MCP67_LAN4: 405 case PCI_PRODUCT_NVIDIA_MCP73_LAN1: 406 case PCI_PRODUCT_NVIDIA_MCP73_LAN2: 407 case PCI_PRODUCT_NVIDIA_MCP73_LAN3: 408 case PCI_PRODUCT_NVIDIA_MCP73_LAN4: 409 sc->sc_caps |= NFE_40BIT_ADDR; 410 break; 411 case PCI_PRODUCT_NVIDIA_CK804_LAN1: 412 case PCI_PRODUCT_NVIDIA_CK804_LAN2: 413 case PCI_PRODUCT_NVIDIA_MCP04_LAN1: 414 case PCI_PRODUCT_NVIDIA_MCP04_LAN2: 415 sc->sc_caps = NFE_JUMBO_SUP | 416 NFE_40BIT_ADDR | 417 NFE_HW_CSUM | 418 NFE_NO_PWRCTL | 419 NFE_FIX_EADDR; 420 break; 421 case PCI_PRODUCT_NVIDIA_MCP65_LAN1: 422 case PCI_PRODUCT_NVIDIA_MCP65_LAN2: 423 case PCI_PRODUCT_NVIDIA_MCP65_LAN3: 424 case PCI_PRODUCT_NVIDIA_MCP65_LAN4: 425 sc->sc_caps = NFE_JUMBO_SUP | 426 NFE_40BIT_ADDR; 427 break; 428 case PCI_PRODUCT_NVIDIA_MCP55_LAN1: 429 case PCI_PRODUCT_NVIDIA_MCP55_LAN2: 430 sc->sc_caps = NFE_JUMBO_SUP | 431 NFE_40BIT_ADDR | 432 NFE_HW_CSUM | 433 NFE_HW_VLAN | 434 NFE_FIX_EADDR; 435 break; 436 case PCI_PRODUCT_NVIDIA_MCP77_LAN1: 437 case PCI_PRODUCT_NVIDIA_MCP77_LAN2: 438 case PCI_PRODUCT_NVIDIA_MCP77_LAN3: 439 case PCI_PRODUCT_NVIDIA_MCP77_LAN4: 440 case PCI_PRODUCT_NVIDIA_MCP79_LAN1: 441 case PCI_PRODUCT_NVIDIA_MCP79_LAN2: 442 case PCI_PRODUCT_NVIDIA_MCP79_LAN3: 443 case PCI_PRODUCT_NVIDIA_MCP79_LAN4: 444 sc->sc_caps = NFE_40BIT_ADDR | 445 NFE_HW_CSUM | 446 NFE_WORDALIGN; 447 break; 448 } 449 450 device_set_desc(dev, n->desc); 451 device_set_async_attach(dev, TRUE); 452 return 0; 453 } 454 } 455 return ENXIO; 456 } 457 458 static int 459 nfe_attach(device_t dev) 460 { 461 struct nfe_softc *sc = device_get_softc(dev); 462 struct ifnet *ifp = &sc->arpcom.ac_if; 463 struct sysctl_ctx_list *ctx; 464 struct sysctl_oid *tree; 465 uint8_t eaddr[ETHER_ADDR_LEN]; 466 bus_addr_t lowaddr; 467 int error; 468 469 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 470 lwkt_serialize_init(&sc->sc_jbuf_serializer); 471 472 /* 473 * Initialize sysctl variables 474 */ 475 sc->sc_rx_ring_count = nfe_rx_ring_count; 476 sc->sc_tx_ring_count = nfe_tx_ring_count; 477 sc->sc_debug = nfe_debug; 478 if (nfe_imtime < 0) { 479 sc->sc_flags |= NFE_F_DYN_IM; 480 sc->sc_imtime = -nfe_imtime; 481 } else { 482 sc->sc_imtime = nfe_imtime; 483 } 484 sc->sc_irq_enable = NFE_IRQ_ENABLE(sc); 485 486 sc->sc_mem_rid = PCIR_BAR(0); 487 488 if (sc->sc_caps & NFE_40BIT_ADDR) 489 sc->rxtxctl_desc = NFE_RXTX_DESC_V3; 490 else if (sc->sc_caps & NFE_JUMBO_SUP) 491 sc->rxtxctl_desc = NFE_RXTX_DESC_V2; 492 493 #ifndef BURN_BRIDGES 494 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 495 uint32_t mem, irq; 496 497 mem = pci_read_config(dev, sc->sc_mem_rid, 4); 498 irq = pci_read_config(dev, PCIR_INTLINE, 4); 499 500 device_printf(dev, "chip is in D%d power mode " 501 "-- setting to D0\n", pci_get_powerstate(dev)); 502 503 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 504 505 pci_write_config(dev, sc->sc_mem_rid, mem, 4); 506 pci_write_config(dev, PCIR_INTLINE, irq, 4); 507 } 508 #endif /* !BURN_BRIDGE */ 509 510 /* Enable bus mastering */ 511 pci_enable_busmaster(dev); 512 513 /* Allocate IO memory */ 514 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 515 &sc->sc_mem_rid, RF_ACTIVE); 516 if (sc->sc_mem_res == NULL) { 517 device_printf(dev, "could not allocate io memory\n"); 518 return ENXIO; 519 } 520 sc->sc_memh = rman_get_bushandle(sc->sc_mem_res); 521 sc->sc_memt = rman_get_bustag(sc->sc_mem_res); 522 523 /* Allocate IRQ */ 524 sc->sc_irq_rid = 0; 525 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 526 &sc->sc_irq_rid, 527 RF_SHAREABLE | RF_ACTIVE); 528 if (sc->sc_irq_res == NULL) { 529 device_printf(dev, "could not allocate irq\n"); 530 error = ENXIO; 531 goto fail; 532 } 533 534 /* Disable WOL */ 535 NFE_WRITE(sc, NFE_WOL_CTL, 0); 536 537 if ((sc->sc_caps & NFE_NO_PWRCTL) == 0) 538 nfe_powerup(dev); 539 540 nfe_get_macaddr(sc, eaddr); 541 542 /* 543 * Allocate top level DMA tag 544 */ 545 if (sc->sc_caps & NFE_40BIT_ADDR) 546 lowaddr = NFE_BUS_SPACE_MAXADDR; 547 else 548 lowaddr = BUS_SPACE_MAXADDR_32BIT; 549 error = bus_dma_tag_create(NULL, /* parent */ 550 1, 0, /* alignment, boundary */ 551 lowaddr, /* lowaddr */ 552 BUS_SPACE_MAXADDR, /* highaddr */ 553 NULL, NULL, /* filter, filterarg */ 554 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 555 0, /* nsegments */ 556 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 557 0, /* flags */ 558 &sc->sc_dtag); 559 if (error) { 560 device_printf(dev, "could not allocate parent dma tag\n"); 561 goto fail; 562 } 563 564 /* 565 * Allocate Tx and Rx rings. 566 */ 567 error = nfe_alloc_tx_ring(sc, &sc->txq); 568 if (error) { 569 device_printf(dev, "could not allocate Tx ring\n"); 570 goto fail; 571 } 572 573 error = nfe_alloc_rx_ring(sc, &sc->rxq); 574 if (error) { 575 device_printf(dev, "could not allocate Rx ring\n"); 576 goto fail; 577 } 578 579 /* 580 * Create sysctl tree 581 */ 582 ctx = device_get_sysctl_ctx(dev); 583 tree = device_get_sysctl_tree(dev); 584 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 585 OID_AUTO, "imtimer", CTLTYPE_INT | CTLFLAG_RW, 586 sc, 0, nfe_sysctl_imtime, "I", 587 "Interrupt moderation time (usec). " 588 "0 to disable interrupt moderation."); 589 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 590 "rx_ring_count", CTLFLAG_RD, &sc->sc_rx_ring_count, 591 0, "RX ring count"); 592 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 593 "tx_ring_count", CTLFLAG_RD, &sc->sc_tx_ring_count, 594 0, "TX ring count"); 595 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 596 "debug", CTLFLAG_RW, &sc->sc_debug, 597 0, "control debugging printfs"); 598 599 error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd, 600 nfe_ifmedia_sts); 601 if (error) { 602 device_printf(dev, "MII without any phy\n"); 603 goto fail; 604 } 605 606 ifp->if_softc = sc; 607 ifp->if_mtu = ETHERMTU; 608 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 609 ifp->if_ioctl = nfe_ioctl; 610 ifp->if_start = nfe_start; 611 #ifdef IFPOLL_ENABLE 612 ifp->if_npoll = nfe_npoll; 613 #endif 614 ifp->if_watchdog = nfe_watchdog; 615 ifp->if_init = nfe_init; 616 ifp->if_nmbclusters = sc->sc_rx_ring_count; 617 ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count); 618 ifq_set_ready(&ifp->if_snd); 619 620 ifp->if_capabilities = IFCAP_VLAN_MTU; 621 622 if (sc->sc_caps & NFE_HW_VLAN) 623 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 624 625 #ifdef NFE_CSUM 626 if (sc->sc_caps & NFE_HW_CSUM) { 627 ifp->if_capabilities |= IFCAP_HWCSUM; 628 ifp->if_hwassist = NFE_CSUM_FEATURES; 629 } 630 #else 631 sc->sc_caps &= ~NFE_HW_CSUM; 632 #endif 633 ifp->if_capenable = ifp->if_capabilities; 634 635 callout_init(&sc->sc_tick_ch); 636 637 ether_ifattach(ifp, eaddr, NULL); 638 639 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->sc_irq_res)); 640 641 #ifdef IFPOLL_ENABLE 642 ifpoll_compat_setup(&sc->sc_npoll, ctx, (struct sysctl_oid *)tree, 643 device_get_unit(dev), ifp->if_serializer); 644 #endif 645 646 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc, 647 &sc->sc_ih, ifp->if_serializer); 648 if (error) { 649 device_printf(dev, "could not setup intr\n"); 650 ether_ifdetach(ifp); 651 goto fail; 652 } 653 654 return 0; 655 fail: 656 nfe_detach(dev); 657 return error; 658 } 659 660 static int 661 nfe_detach(device_t dev) 662 { 663 struct nfe_softc *sc = device_get_softc(dev); 664 665 if (device_is_attached(dev)) { 666 struct ifnet *ifp = &sc->arpcom.ac_if; 667 668 lwkt_serialize_enter(ifp->if_serializer); 669 nfe_stop(sc); 670 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih); 671 lwkt_serialize_exit(ifp->if_serializer); 672 673 ether_ifdetach(ifp); 674 } 675 676 if (sc->sc_miibus != NULL) 677 device_delete_child(dev, sc->sc_miibus); 678 bus_generic_detach(dev); 679 680 if (sc->sc_irq_res != NULL) { 681 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, 682 sc->sc_irq_res); 683 } 684 685 if (sc->sc_mem_res != NULL) { 686 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, 687 sc->sc_mem_res); 688 } 689 690 nfe_free_tx_ring(sc, &sc->txq); 691 nfe_free_rx_ring(sc, &sc->rxq); 692 if (sc->sc_dtag != NULL) 693 bus_dma_tag_destroy(sc->sc_dtag); 694 695 return 0; 696 } 697 698 static void 699 nfe_shutdown(device_t dev) 700 { 701 struct nfe_softc *sc = device_get_softc(dev); 702 struct ifnet *ifp = &sc->arpcom.ac_if; 703 704 lwkt_serialize_enter(ifp->if_serializer); 705 nfe_stop(sc); 706 lwkt_serialize_exit(ifp->if_serializer); 707 } 708 709 static int 710 nfe_suspend(device_t dev) 711 { 712 struct nfe_softc *sc = device_get_softc(dev); 713 struct ifnet *ifp = &sc->arpcom.ac_if; 714 715 lwkt_serialize_enter(ifp->if_serializer); 716 nfe_stop(sc); 717 lwkt_serialize_exit(ifp->if_serializer); 718 719 return 0; 720 } 721 722 static int 723 nfe_resume(device_t dev) 724 { 725 struct nfe_softc *sc = device_get_softc(dev); 726 struct ifnet *ifp = &sc->arpcom.ac_if; 727 728 lwkt_serialize_enter(ifp->if_serializer); 729 if (ifp->if_flags & IFF_UP) 730 nfe_init(sc); 731 lwkt_serialize_exit(ifp->if_serializer); 732 733 return 0; 734 } 735 736 static void 737 nfe_miibus_statchg(device_t dev) 738 { 739 struct nfe_softc *sc = device_get_softc(dev); 740 struct mii_data *mii = device_get_softc(sc->sc_miibus); 741 uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET; 742 743 ASSERT_SERIALIZED(sc->arpcom.ac_if.if_serializer); 744 745 phy = NFE_READ(sc, NFE_PHY_IFACE); 746 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); 747 748 seed = NFE_READ(sc, NFE_RNDSEED); 749 seed &= ~NFE_SEED_MASK; 750 751 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) { 752 phy |= NFE_PHY_HDX; /* half-duplex */ 753 misc |= NFE_MISC1_HDX; 754 } 755 756 switch (IFM_SUBTYPE(mii->mii_media_active)) { 757 case IFM_1000_T: /* full-duplex only */ 758 link |= NFE_MEDIA_1000T; 759 seed |= NFE_SEED_1000T; 760 phy |= NFE_PHY_1000T; 761 break; 762 case IFM_100_TX: 763 link |= NFE_MEDIA_100TX; 764 seed |= NFE_SEED_100TX; 765 phy |= NFE_PHY_100TX; 766 break; 767 case IFM_10_T: 768 link |= NFE_MEDIA_10T; 769 seed |= NFE_SEED_10T; 770 break; 771 } 772 773 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */ 774 775 NFE_WRITE(sc, NFE_PHY_IFACE, phy); 776 NFE_WRITE(sc, NFE_MISC1, misc); 777 NFE_WRITE(sc, NFE_LINKSPEED, link); 778 } 779 780 static int 781 nfe_miibus_readreg(device_t dev, int phy, int reg) 782 { 783 struct nfe_softc *sc = device_get_softc(dev); 784 uint32_t val; 785 int ntries; 786 787 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 788 789 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 790 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 791 DELAY(100); 792 } 793 794 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg); 795 796 for (ntries = 0; ntries < 1000; ntries++) { 797 DELAY(100); 798 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 799 break; 800 } 801 if (ntries == 1000) { 802 DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", ""); 803 return 0; 804 } 805 806 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { 807 DPRINTFN(sc, 2, "could not read PHY %s\n", ""); 808 return 0; 809 } 810 811 val = NFE_READ(sc, NFE_PHY_DATA); 812 if (val != 0xffffffff && val != 0) 813 sc->mii_phyaddr = phy; 814 815 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val); 816 817 return val; 818 } 819 820 static void 821 nfe_miibus_writereg(device_t dev, int phy, int reg, int val) 822 { 823 struct nfe_softc *sc = device_get_softc(dev); 824 uint32_t ctl; 825 int ntries; 826 827 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 828 829 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 830 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 831 DELAY(100); 832 } 833 834 NFE_WRITE(sc, NFE_PHY_DATA, val); 835 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg; 836 NFE_WRITE(sc, NFE_PHY_CTL, ctl); 837 838 for (ntries = 0; ntries < 1000; ntries++) { 839 DELAY(100); 840 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 841 break; 842 } 843 844 #ifdef NFE_DEBUG 845 if (ntries == 1000) 846 DPRINTFN(sc, 2, "could not write to PHY %s\n", ""); 847 #endif 848 } 849 850 #ifdef IFPOLL_ENABLE 851 852 static void 853 nfe_npoll_compat(struct ifnet *ifp, void *arg __unused, int count __unused) 854 { 855 struct nfe_softc *sc = ifp->if_softc; 856 857 ASSERT_SERIALIZED(ifp->if_serializer); 858 859 nfe_rxeof(sc); 860 nfe_txeof(sc, 1); 861 } 862 863 static void 864 nfe_disable_intrs(struct nfe_softc *sc) 865 { 866 /* Disable interrupts */ 867 NFE_WRITE(sc, NFE_IRQ_MASK, 0); 868 sc->sc_flags &= ~NFE_F_IRQ_TIMER; 869 sc->sc_npoll.ifpc_stcount = 0; 870 } 871 872 static void 873 nfe_npoll(struct ifnet *ifp, struct ifpoll_info *info) 874 { 875 struct nfe_softc *sc = ifp->if_softc; 876 877 ASSERT_SERIALIZED(ifp->if_serializer); 878 879 if (info != NULL) { 880 int cpuid = sc->sc_npoll.ifpc_cpuid; 881 882 info->ifpi_rx[cpuid].poll_func = nfe_npoll_compat; 883 info->ifpi_rx[cpuid].arg = NULL; 884 info->ifpi_rx[cpuid].serializer = ifp->if_serializer; 885 886 if (ifp->if_flags & IFF_RUNNING) 887 nfe_disable_intrs(sc); 888 ifq_set_cpuid(&ifp->if_snd, cpuid); 889 } else { 890 if (ifp->if_flags & IFF_RUNNING) 891 nfe_enable_intrs(sc); 892 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->sc_irq_res)); 893 } 894 } 895 896 #endif /* IFPOLL_ENABLE */ 897 898 static void 899 nfe_intr(void *arg) 900 { 901 struct nfe_softc *sc = arg; 902 struct ifnet *ifp = &sc->arpcom.ac_if; 903 uint32_t r; 904 905 r = NFE_READ(sc, NFE_IRQ_STATUS); 906 if (r == 0) 907 return; /* not for us */ 908 NFE_WRITE(sc, NFE_IRQ_STATUS, r); 909 910 if (sc->sc_rate_second != time_uptime) { 911 /* 912 * Calculate sc_rate_avg - interrupts per second. 913 */ 914 sc->sc_rate_second = time_uptime; 915 if (sc->sc_rate_avg < sc->sc_rate_acc) 916 sc->sc_rate_avg = sc->sc_rate_acc; 917 else 918 sc->sc_rate_avg = (sc->sc_rate_avg * 3 + 919 sc->sc_rate_acc) / 4; 920 sc->sc_rate_acc = 0; 921 } else if (sc->sc_rate_avg < sc->sc_rate_acc) { 922 /* 923 * Don't wait for a tick to roll over if we are taking 924 * a lot of interrupts. 925 */ 926 sc->sc_rate_avg = sc->sc_rate_acc; 927 } 928 929 DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r); 930 931 if (r & NFE_IRQ_LINK) { 932 NFE_READ(sc, NFE_PHY_STATUS); 933 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 934 DPRINTF(sc, "link state changed %s\n", ""); 935 } 936 937 if (ifp->if_flags & IFF_RUNNING) { 938 int ret; 939 int rate; 940 941 /* check Rx ring */ 942 ret = nfe_rxeof(sc); 943 944 /* check Tx ring */ 945 ret |= nfe_txeof(sc, 1); 946 947 /* update the rate accumulator */ 948 if (ret) 949 ++sc->sc_rate_acc; 950 951 if (sc->sc_flags & NFE_F_DYN_IM) { 952 rate = 1000000 / sc->sc_imtime; 953 if ((sc->sc_flags & NFE_F_IRQ_TIMER) == 0 && 954 sc->sc_rate_avg > rate) { 955 /* 956 * Use the hardware timer to reduce the 957 * interrupt rate if the discrete interrupt 958 * rate has exceeded our threshold. 959 */ 960 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_IMTIMER); 961 sc->sc_flags |= NFE_F_IRQ_TIMER; 962 } else if ((sc->sc_flags & NFE_F_IRQ_TIMER) && 963 sc->sc_rate_avg <= rate) { 964 /* 965 * Use discrete TX/RX interrupts if the rate 966 * has fallen below our threshold. 967 */ 968 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_NOIMTIMER); 969 sc->sc_flags &= ~NFE_F_IRQ_TIMER; 970 971 /* 972 * Recollect, mainly to avoid the possible race 973 * introduced by changing interrupt masks. 974 */ 975 nfe_rxeof(sc); 976 nfe_txeof(sc, 1); 977 } 978 } 979 } 980 } 981 982 static int 983 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr) 984 { 985 struct nfe_softc *sc = ifp->if_softc; 986 struct ifreq *ifr = (struct ifreq *)data; 987 struct mii_data *mii; 988 int error = 0, mask, jumbo_cap; 989 990 ASSERT_SERIALIZED(ifp->if_serializer); 991 992 switch (cmd) { 993 case SIOCSIFMTU: 994 if ((sc->sc_caps & NFE_JUMBO_SUP) && sc->rxq.jbuf != NULL) 995 jumbo_cap = 1; 996 else 997 jumbo_cap = 0; 998 999 if ((jumbo_cap && ifr->ifr_mtu > NFE_JUMBO_MTU) || 1000 (!jumbo_cap && ifr->ifr_mtu > ETHERMTU)) { 1001 return EINVAL; 1002 } else if (ifp->if_mtu != ifr->ifr_mtu) { 1003 ifp->if_mtu = ifr->ifr_mtu; 1004 if (ifp->if_flags & IFF_RUNNING) 1005 nfe_init(sc); 1006 } 1007 break; 1008 case SIOCSIFFLAGS: 1009 if (ifp->if_flags & IFF_UP) { 1010 /* 1011 * If only the PROMISC or ALLMULTI flag changes, then 1012 * don't do a full re-init of the chip, just update 1013 * the Rx filter. 1014 */ 1015 if ((ifp->if_flags & IFF_RUNNING) && 1016 ((ifp->if_flags ^ sc->sc_if_flags) & 1017 (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 1018 nfe_setmulti(sc); 1019 } else { 1020 if (!(ifp->if_flags & IFF_RUNNING)) 1021 nfe_init(sc); 1022 } 1023 } else { 1024 if (ifp->if_flags & IFF_RUNNING) 1025 nfe_stop(sc); 1026 } 1027 sc->sc_if_flags = ifp->if_flags; 1028 break; 1029 case SIOCADDMULTI: 1030 case SIOCDELMULTI: 1031 if (ifp->if_flags & IFF_RUNNING) 1032 nfe_setmulti(sc); 1033 break; 1034 case SIOCSIFMEDIA: 1035 case SIOCGIFMEDIA: 1036 mii = device_get_softc(sc->sc_miibus); 1037 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1038 break; 1039 case SIOCSIFCAP: 1040 mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM; 1041 if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) { 1042 ifp->if_capenable ^= mask; 1043 if (IFCAP_TXCSUM & ifp->if_capenable) 1044 ifp->if_hwassist = NFE_CSUM_FEATURES; 1045 else 1046 ifp->if_hwassist = 0; 1047 1048 if (ifp->if_flags & IFF_RUNNING) 1049 nfe_init(sc); 1050 } 1051 break; 1052 default: 1053 error = ether_ioctl(ifp, cmd, data); 1054 break; 1055 } 1056 return error; 1057 } 1058 1059 static int 1060 nfe_rxeof(struct nfe_softc *sc) 1061 { 1062 struct ifnet *ifp = &sc->arpcom.ac_if; 1063 struct nfe_rx_ring *ring = &sc->rxq; 1064 int reap; 1065 1066 reap = 0; 1067 for (;;) { 1068 struct nfe_rx_data *data = &ring->data[ring->cur]; 1069 struct mbuf *m; 1070 uint16_t flags; 1071 int len, error; 1072 1073 if (sc->sc_caps & NFE_40BIT_ADDR) { 1074 struct nfe_desc64 *desc64 = &ring->desc64[ring->cur]; 1075 1076 flags = le16toh(desc64->flags); 1077 len = le16toh(desc64->length) & 0x3fff; 1078 } else { 1079 struct nfe_desc32 *desc32 = &ring->desc32[ring->cur]; 1080 1081 flags = le16toh(desc32->flags); 1082 len = le16toh(desc32->length) & 0x3fff; 1083 } 1084 1085 if (flags & NFE_RX_READY) 1086 break; 1087 1088 reap = 1; 1089 1090 if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 1091 if (!(flags & NFE_RX_VALID_V1)) 1092 goto skip; 1093 1094 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 1095 flags &= ~NFE_RX_ERROR; 1096 len--; /* fix buffer length */ 1097 } 1098 } else { 1099 if (!(flags & NFE_RX_VALID_V2)) 1100 goto skip; 1101 1102 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 1103 flags &= ~NFE_RX_ERROR; 1104 len--; /* fix buffer length */ 1105 } 1106 } 1107 1108 if (flags & NFE_RX_ERROR) { 1109 IFNET_STAT_INC(ifp, ierrors, 1); 1110 goto skip; 1111 } 1112 1113 m = data->m; 1114 1115 if (sc->sc_flags & NFE_F_USE_JUMBO) 1116 error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0); 1117 else 1118 error = nfe_newbuf_std(sc, ring, ring->cur, 0); 1119 if (error) { 1120 IFNET_STAT_INC(ifp, ierrors, 1); 1121 goto skip; 1122 } 1123 1124 /* finalize mbuf */ 1125 m->m_pkthdr.len = m->m_len = len; 1126 m->m_pkthdr.rcvif = ifp; 1127 1128 if ((ifp->if_capenable & IFCAP_RXCSUM) && 1129 (flags & NFE_RX_CSUMOK)) { 1130 if (flags & NFE_RX_IP_CSUMOK_V2) { 1131 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | 1132 CSUM_IP_VALID; 1133 } 1134 1135 if (flags & 1136 (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) { 1137 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 1138 CSUM_PSEUDO_HDR | 1139 CSUM_FRAG_NOT_CHECKED; 1140 m->m_pkthdr.csum_data = 0xffff; 1141 } 1142 } 1143 1144 IFNET_STAT_INC(ifp, ipackets, 1); 1145 ifp->if_input(ifp, m, NULL, -1); 1146 skip: 1147 nfe_set_ready_rxdesc(sc, ring, ring->cur); 1148 sc->rxq.cur = (sc->rxq.cur + 1) % sc->sc_rx_ring_count; 1149 } 1150 return reap; 1151 } 1152 1153 static int 1154 nfe_txeof(struct nfe_softc *sc, int start) 1155 { 1156 struct ifnet *ifp = &sc->arpcom.ac_if; 1157 struct nfe_tx_ring *ring = &sc->txq; 1158 struct nfe_tx_data *data = NULL; 1159 1160 while (ring->next != ring->cur) { 1161 uint16_t flags; 1162 1163 if (sc->sc_caps & NFE_40BIT_ADDR) 1164 flags = le16toh(ring->desc64[ring->next].flags); 1165 else 1166 flags = le16toh(ring->desc32[ring->next].flags); 1167 1168 if (flags & NFE_TX_VALID) 1169 break; 1170 1171 data = &ring->data[ring->next]; 1172 1173 if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 1174 if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL) 1175 goto skip; 1176 1177 if ((flags & NFE_TX_ERROR_V1) != 0) { 1178 if_printf(ifp, "tx v1 error 0x%4b\n", flags, 1179 NFE_V1_TXERR); 1180 IFNET_STAT_INC(ifp, oerrors, 1); 1181 } else { 1182 IFNET_STAT_INC(ifp, opackets, 1); 1183 } 1184 } else { 1185 if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL) 1186 goto skip; 1187 1188 if ((flags & NFE_TX_ERROR_V2) != 0) { 1189 if_printf(ifp, "tx v2 error 0x%4b\n", flags, 1190 NFE_V2_TXERR); 1191 IFNET_STAT_INC(ifp, oerrors, 1); 1192 } else { 1193 IFNET_STAT_INC(ifp, opackets, 1); 1194 } 1195 } 1196 1197 if (data->m == NULL) { /* should not get there */ 1198 if_printf(ifp, 1199 "last fragment bit w/o associated mbuf!\n"); 1200 goto skip; 1201 } 1202 1203 /* last fragment of the mbuf chain transmitted */ 1204 bus_dmamap_unload(ring->data_tag, data->map); 1205 m_freem(data->m); 1206 data->m = NULL; 1207 skip: 1208 ring->queued--; 1209 KKASSERT(ring->queued >= 0); 1210 ring->next = (ring->next + 1) % sc->sc_tx_ring_count; 1211 } 1212 1213 if (sc->sc_tx_ring_count - ring->queued >= 1214 sc->sc_tx_spare + NFE_NSEG_RSVD) 1215 ifq_clr_oactive(&ifp->if_snd); 1216 1217 if (ring->queued == 0) 1218 ifp->if_timer = 0; 1219 1220 if (start && !ifq_is_empty(&ifp->if_snd)) 1221 if_devstart(ifp); 1222 1223 if (data != NULL) 1224 return 1; 1225 else 1226 return 0; 1227 } 1228 1229 static int 1230 nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0) 1231 { 1232 bus_dma_segment_t segs[NFE_MAX_SCATTER]; 1233 struct nfe_tx_data *data, *data_map; 1234 bus_dmamap_t map; 1235 struct nfe_desc64 *desc64 = NULL; 1236 struct nfe_desc32 *desc32 = NULL; 1237 uint16_t flags = 0; 1238 uint32_t vtag = 0; 1239 int error, i, j, maxsegs, nsegs; 1240 1241 data = &ring->data[ring->cur]; 1242 map = data->map; 1243 data_map = data; /* Remember who owns the DMA map */ 1244 1245 maxsegs = (sc->sc_tx_ring_count - ring->queued) - NFE_NSEG_RSVD; 1246 if (maxsegs > NFE_MAX_SCATTER) 1247 maxsegs = NFE_MAX_SCATTER; 1248 KASSERT(maxsegs >= sc->sc_tx_spare, 1249 ("not enough segments %d,%d", maxsegs, sc->sc_tx_spare)); 1250 1251 error = bus_dmamap_load_mbuf_defrag(ring->data_tag, map, &m0, 1252 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 1253 if (error) 1254 goto back; 1255 bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE); 1256 1257 error = 0; 1258 1259 /* setup h/w VLAN tagging */ 1260 if (m0->m_flags & M_VLANTAG) 1261 vtag = m0->m_pkthdr.ether_vlantag; 1262 1263 if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) { 1264 if (m0->m_pkthdr.csum_flags & CSUM_IP) 1265 flags |= NFE_TX_IP_CSUM; 1266 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 1267 flags |= NFE_TX_TCP_CSUM; 1268 } 1269 1270 /* 1271 * XXX urm. somebody is unaware of how hardware works. You 1272 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in 1273 * the ring until the entire chain is actually *VALID*. Otherwise 1274 * the hardware may encounter a partially initialized chain that 1275 * is marked as being ready to go when it in fact is not ready to 1276 * go. 1277 */ 1278 1279 for (i = 0; i < nsegs; i++) { 1280 j = (ring->cur + i) % sc->sc_tx_ring_count; 1281 data = &ring->data[j]; 1282 1283 if (sc->sc_caps & NFE_40BIT_ADDR) { 1284 desc64 = &ring->desc64[j]; 1285 desc64->physaddr[0] = 1286 htole32(NFE_ADDR_HI(segs[i].ds_addr)); 1287 desc64->physaddr[1] = 1288 htole32(NFE_ADDR_LO(segs[i].ds_addr)); 1289 desc64->length = htole16(segs[i].ds_len - 1); 1290 desc64->vtag = htole32(vtag); 1291 desc64->flags = htole16(flags); 1292 } else { 1293 desc32 = &ring->desc32[j]; 1294 desc32->physaddr = htole32(segs[i].ds_addr); 1295 desc32->length = htole16(segs[i].ds_len - 1); 1296 desc32->flags = htole16(flags); 1297 } 1298 1299 /* csum flags and vtag belong to the first fragment only */ 1300 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM); 1301 vtag = 0; 1302 1303 ring->queued++; 1304 KKASSERT(ring->queued <= sc->sc_tx_ring_count); 1305 } 1306 1307 /* the whole mbuf chain has been DMA mapped, fix last descriptor */ 1308 if (sc->sc_caps & NFE_40BIT_ADDR) { 1309 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2); 1310 } else { 1311 if (sc->sc_caps & NFE_JUMBO_SUP) 1312 flags = NFE_TX_LASTFRAG_V2; 1313 else 1314 flags = NFE_TX_LASTFRAG_V1; 1315 desc32->flags |= htole16(flags); 1316 } 1317 1318 /* 1319 * Set NFE_TX_VALID backwards so the hardware doesn't see the 1320 * whole mess until the first descriptor in the map is flagged. 1321 */ 1322 for (i = nsegs - 1; i >= 0; --i) { 1323 j = (ring->cur + i) % sc->sc_tx_ring_count; 1324 if (sc->sc_caps & NFE_40BIT_ADDR) { 1325 desc64 = &ring->desc64[j]; 1326 desc64->flags |= htole16(NFE_TX_VALID); 1327 } else { 1328 desc32 = &ring->desc32[j]; 1329 desc32->flags |= htole16(NFE_TX_VALID); 1330 } 1331 } 1332 ring->cur = (ring->cur + nsegs) % sc->sc_tx_ring_count; 1333 1334 /* Exchange DMA map */ 1335 data_map->map = data->map; 1336 data->map = map; 1337 data->m = m0; 1338 back: 1339 if (error) 1340 m_freem(m0); 1341 return error; 1342 } 1343 1344 static void 1345 nfe_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1346 { 1347 struct nfe_softc *sc = ifp->if_softc; 1348 struct nfe_tx_ring *ring = &sc->txq; 1349 int count = 0, oactive = 0; 1350 struct mbuf *m0; 1351 1352 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 1353 ASSERT_SERIALIZED(ifp->if_serializer); 1354 1355 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd)) 1356 return; 1357 1358 for (;;) { 1359 int error; 1360 1361 if (sc->sc_tx_ring_count - ring->queued < 1362 sc->sc_tx_spare + NFE_NSEG_RSVD) { 1363 if (oactive) { 1364 ifq_set_oactive(&ifp->if_snd); 1365 break; 1366 } 1367 1368 nfe_txeof(sc, 0); 1369 oactive = 1; 1370 continue; 1371 } 1372 1373 m0 = ifq_dequeue(&ifp->if_snd); 1374 if (m0 == NULL) 1375 break; 1376 1377 ETHER_BPF_MTAP(ifp, m0); 1378 1379 error = nfe_encap(sc, ring, m0); 1380 if (error) { 1381 IFNET_STAT_INC(ifp, oerrors, 1); 1382 if (error == EFBIG) { 1383 if (oactive) { 1384 ifq_set_oactive(&ifp->if_snd); 1385 break; 1386 } 1387 nfe_txeof(sc, 0); 1388 oactive = 1; 1389 } 1390 continue; 1391 } else { 1392 oactive = 0; 1393 } 1394 ++count; 1395 1396 /* 1397 * NOTE: 1398 * `m0' may be freed in nfe_encap(), so 1399 * it should not be touched any more. 1400 */ 1401 } 1402 1403 if (count == 0) /* nothing sent */ 1404 return; 1405 1406 /* Kick Tx */ 1407 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 1408 1409 /* 1410 * Set a timeout in case the chip goes out to lunch. 1411 */ 1412 ifp->if_timer = 5; 1413 } 1414 1415 static void 1416 nfe_watchdog(struct ifnet *ifp) 1417 { 1418 struct nfe_softc *sc = ifp->if_softc; 1419 1420 ASSERT_SERIALIZED(ifp->if_serializer); 1421 1422 if (ifp->if_flags & IFF_RUNNING) { 1423 if_printf(ifp, "watchdog timeout - lost interrupt recovered\n"); 1424 nfe_txeof(sc, 1); 1425 return; 1426 } 1427 1428 if_printf(ifp, "watchdog timeout\n"); 1429 1430 nfe_init(ifp->if_softc); 1431 1432 IFNET_STAT_INC(ifp, oerrors, 1); 1433 } 1434 1435 static void 1436 nfe_init(void *xsc) 1437 { 1438 struct nfe_softc *sc = xsc; 1439 struct ifnet *ifp = &sc->arpcom.ac_if; 1440 uint32_t tmp; 1441 int error; 1442 1443 ASSERT_SERIALIZED(ifp->if_serializer); 1444 1445 nfe_stop(sc); 1446 1447 if ((sc->sc_caps & NFE_NO_PWRCTL) == 0) 1448 nfe_mac_reset(sc); 1449 1450 /* 1451 * NOTE: 1452 * Switching between jumbo frames and normal frames should 1453 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring(). 1454 */ 1455 if (ifp->if_mtu > ETHERMTU) { 1456 sc->sc_flags |= NFE_F_USE_JUMBO; 1457 sc->rxq.bufsz = NFE_JBYTES; 1458 sc->sc_tx_spare = NFE_NSEG_SPARE_JUMBO; 1459 if (bootverbose) 1460 if_printf(ifp, "use jumbo frames\n"); 1461 } else { 1462 sc->sc_flags &= ~NFE_F_USE_JUMBO; 1463 sc->rxq.bufsz = MCLBYTES; 1464 sc->sc_tx_spare = NFE_NSEG_SPARE; 1465 if (bootverbose) 1466 if_printf(ifp, "use non-jumbo frames\n"); 1467 } 1468 1469 error = nfe_init_tx_ring(sc, &sc->txq); 1470 if (error) { 1471 nfe_stop(sc); 1472 return; 1473 } 1474 1475 error = nfe_init_rx_ring(sc, &sc->rxq); 1476 if (error) { 1477 nfe_stop(sc); 1478 return; 1479 } 1480 1481 NFE_WRITE(sc, NFE_TX_POLL, 0); 1482 NFE_WRITE(sc, NFE_STATUS, 0); 1483 1484 sc->rxtxctl = NFE_RXTX_BIT2 | sc->rxtxctl_desc; 1485 1486 if (ifp->if_capenable & IFCAP_RXCSUM) 1487 sc->rxtxctl |= NFE_RXTX_RXCSUM; 1488 1489 /* 1490 * Although the adapter is capable of stripping VLAN tags from received 1491 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on 1492 * purpose. This will be done in software by our network stack. 1493 */ 1494 if (sc->sc_caps & NFE_HW_VLAN) 1495 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT; 1496 1497 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); 1498 DELAY(10); 1499 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 1500 1501 if (sc->sc_caps & NFE_HW_VLAN) 1502 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); 1503 1504 NFE_WRITE(sc, NFE_SETUP_R6, 0); 1505 1506 /* set MAC address */ 1507 nfe_set_macaddr(sc, sc->arpcom.ac_enaddr); 1508 1509 /* tell MAC where rings are in memory */ 1510 if (sc->sc_caps & NFE_40BIT_ADDR) { 1511 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 1512 NFE_ADDR_HI(sc->rxq.physaddr)); 1513 } 1514 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->rxq.physaddr)); 1515 1516 if (sc->sc_caps & NFE_40BIT_ADDR) { 1517 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, 1518 NFE_ADDR_HI(sc->txq.physaddr)); 1519 } 1520 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr)); 1521 1522 NFE_WRITE(sc, NFE_RING_SIZE, 1523 (sc->sc_rx_ring_count - 1) << 16 | 1524 (sc->sc_tx_ring_count - 1)); 1525 1526 NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz); 1527 1528 /* force MAC to wakeup */ 1529 tmp = NFE_READ(sc, NFE_PWR_STATE); 1530 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP); 1531 DELAY(10); 1532 tmp = NFE_READ(sc, NFE_PWR_STATE); 1533 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID); 1534 1535 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC); 1536 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); 1537 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); 1538 1539 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ 1540 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); 1541 1542 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); 1543 1544 sc->rxtxctl &= ~NFE_RXTX_BIT2; 1545 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 1546 DELAY(10); 1547 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl); 1548 1549 /* set Rx filter */ 1550 nfe_setmulti(sc); 1551 1552 nfe_ifmedia_upd(ifp); 1553 1554 /* enable Rx */ 1555 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); 1556 1557 /* enable Tx */ 1558 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); 1559 1560 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1561 1562 #ifdef IFPOLL_ENABLE 1563 if (ifp->if_flags & IFF_NPOLLING) 1564 nfe_disable_intrs(sc); 1565 else 1566 #endif 1567 nfe_enable_intrs(sc); 1568 1569 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc); 1570 1571 ifp->if_flags |= IFF_RUNNING; 1572 ifq_clr_oactive(&ifp->if_snd); 1573 1574 /* 1575 * If we had stuff in the tx ring before its all cleaned out now 1576 * so we are not going to get an interrupt, jump-start any pending 1577 * output. 1578 */ 1579 if (!ifq_is_empty(&ifp->if_snd)) 1580 if_devstart(ifp); 1581 } 1582 1583 static void 1584 nfe_stop(struct nfe_softc *sc) 1585 { 1586 struct ifnet *ifp = &sc->arpcom.ac_if; 1587 uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2; 1588 int i; 1589 1590 ASSERT_SERIALIZED(ifp->if_serializer); 1591 1592 callout_stop(&sc->sc_tick_ch); 1593 1594 ifp->if_timer = 0; 1595 ifp->if_flags &= ~IFF_RUNNING; 1596 ifq_clr_oactive(&ifp->if_snd); 1597 sc->sc_flags &= ~NFE_F_IRQ_TIMER; 1598 1599 #define WAITMAX 50000 1600 1601 /* 1602 * Abort Tx 1603 */ 1604 NFE_WRITE(sc, NFE_TX_CTL, 0); 1605 for (i = 0; i < WAITMAX; ++i) { 1606 DELAY(100); 1607 if ((NFE_READ(sc, NFE_TX_STATUS) & NFE_TX_STATUS_BUSY) == 0) 1608 break; 1609 } 1610 if (i == WAITMAX) 1611 if_printf(ifp, "can't stop TX\n"); 1612 DELAY(100); 1613 1614 /* 1615 * Disable Rx 1616 */ 1617 NFE_WRITE(sc, NFE_RX_CTL, 0); 1618 for (i = 0; i < WAITMAX; ++i) { 1619 DELAY(100); 1620 if ((NFE_READ(sc, NFE_RX_STATUS) & NFE_RX_STATUS_BUSY) == 0) 1621 break; 1622 } 1623 if (i == WAITMAX) 1624 if_printf(ifp, "can't stop RX\n"); 1625 DELAY(100); 1626 1627 #undef WAITMAX 1628 1629 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl); 1630 DELAY(10); 1631 NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl); 1632 1633 /* Disable interrupts */ 1634 NFE_WRITE(sc, NFE_IRQ_MASK, 0); 1635 1636 /* Reset Tx and Rx rings */ 1637 nfe_reset_tx_ring(sc, &sc->txq); 1638 nfe_reset_rx_ring(sc, &sc->rxq); 1639 } 1640 1641 static int 1642 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1643 { 1644 int i, j, error, descsize; 1645 bus_dmamem_t dmem; 1646 void **desc; 1647 1648 if (sc->sc_caps & NFE_40BIT_ADDR) { 1649 desc = (void *)&ring->desc64; 1650 descsize = sizeof(struct nfe_desc64); 1651 } else { 1652 desc = (void *)&ring->desc32; 1653 descsize = sizeof(struct nfe_desc32); 1654 } 1655 1656 ring->bufsz = MCLBYTES; 1657 ring->cur = ring->next = 0; 1658 1659 error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0, 1660 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1661 sc->sc_rx_ring_count * descsize, 1662 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1663 if (error) { 1664 if_printf(&sc->arpcom.ac_if, 1665 "could not create RX desc ring\n"); 1666 return error; 1667 } 1668 ring->tag = dmem.dmem_tag; 1669 ring->map = dmem.dmem_map; 1670 *desc = dmem.dmem_addr; 1671 ring->physaddr = dmem.dmem_busaddr; 1672 1673 if (sc->sc_caps & NFE_JUMBO_SUP) { 1674 ring->jbuf = 1675 kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT(sc), 1676 M_DEVBUF, M_WAITOK | M_ZERO); 1677 1678 error = nfe_jpool_alloc(sc, ring); 1679 if (error) { 1680 if_printf(&sc->arpcom.ac_if, 1681 "could not allocate jumbo frames\n"); 1682 kfree(ring->jbuf, M_DEVBUF); 1683 ring->jbuf = NULL; 1684 /* Allow jumbo frame allocation to fail */ 1685 } 1686 } 1687 1688 ring->data = kmalloc(sizeof(struct nfe_rx_data) * sc->sc_rx_ring_count, 1689 M_DEVBUF, M_WAITOK | M_ZERO); 1690 1691 error = bus_dma_tag_create(sc->sc_dtag, 1, 0, 1692 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1693 NULL, NULL, 1694 MCLBYTES, 1, MCLBYTES, 1695 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, 1696 &ring->data_tag); 1697 if (error) { 1698 if_printf(&sc->arpcom.ac_if, 1699 "could not create RX mbuf DMA tag\n"); 1700 return error; 1701 } 1702 1703 /* Create a spare RX mbuf DMA map */ 1704 error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK, 1705 &ring->data_tmpmap); 1706 if (error) { 1707 if_printf(&sc->arpcom.ac_if, 1708 "could not create spare RX mbuf DMA map\n"); 1709 bus_dma_tag_destroy(ring->data_tag); 1710 ring->data_tag = NULL; 1711 return error; 1712 } 1713 1714 for (i = 0; i < sc->sc_rx_ring_count; i++) { 1715 error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK, 1716 &ring->data[i].map); 1717 if (error) { 1718 if_printf(&sc->arpcom.ac_if, 1719 "could not create %dth RX mbuf DMA mapn", i); 1720 goto fail; 1721 } 1722 } 1723 return 0; 1724 fail: 1725 for (j = 0; j < i; ++j) 1726 bus_dmamap_destroy(ring->data_tag, ring->data[i].map); 1727 bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap); 1728 bus_dma_tag_destroy(ring->data_tag); 1729 ring->data_tag = NULL; 1730 return error; 1731 } 1732 1733 static void 1734 nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1735 { 1736 int i; 1737 1738 for (i = 0; i < sc->sc_rx_ring_count; i++) { 1739 struct nfe_rx_data *data = &ring->data[i]; 1740 1741 if (data->m != NULL) { 1742 if ((sc->sc_flags & NFE_F_USE_JUMBO) == 0) 1743 bus_dmamap_unload(ring->data_tag, data->map); 1744 m_freem(data->m); 1745 data->m = NULL; 1746 } 1747 } 1748 1749 ring->cur = ring->next = 0; 1750 } 1751 1752 static int 1753 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1754 { 1755 int i; 1756 1757 for (i = 0; i < sc->sc_rx_ring_count; ++i) { 1758 int error; 1759 1760 /* XXX should use a function pointer */ 1761 if (sc->sc_flags & NFE_F_USE_JUMBO) 1762 error = nfe_newbuf_jumbo(sc, ring, i, 1); 1763 else 1764 error = nfe_newbuf_std(sc, ring, i, 1); 1765 if (error) { 1766 if_printf(&sc->arpcom.ac_if, 1767 "could not allocate RX buffer\n"); 1768 return error; 1769 } 1770 nfe_set_ready_rxdesc(sc, ring, i); 1771 } 1772 return 0; 1773 } 1774 1775 static void 1776 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1777 { 1778 if (ring->data_tag != NULL) { 1779 struct nfe_rx_data *data; 1780 int i; 1781 1782 for (i = 0; i < sc->sc_rx_ring_count; i++) { 1783 data = &ring->data[i]; 1784 1785 if (data->m != NULL) { 1786 bus_dmamap_unload(ring->data_tag, data->map); 1787 m_freem(data->m); 1788 } 1789 bus_dmamap_destroy(ring->data_tag, data->map); 1790 } 1791 bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap); 1792 bus_dma_tag_destroy(ring->data_tag); 1793 } 1794 1795 nfe_jpool_free(sc, ring); 1796 1797 if (ring->jbuf != NULL) 1798 kfree(ring->jbuf, M_DEVBUF); 1799 if (ring->data != NULL) 1800 kfree(ring->data, M_DEVBUF); 1801 1802 if (ring->tag != NULL) { 1803 void *desc; 1804 1805 if (sc->sc_caps & NFE_40BIT_ADDR) 1806 desc = ring->desc64; 1807 else 1808 desc = ring->desc32; 1809 1810 bus_dmamap_unload(ring->tag, ring->map); 1811 bus_dmamem_free(ring->tag, desc, ring->map); 1812 bus_dma_tag_destroy(ring->tag); 1813 } 1814 } 1815 1816 static struct nfe_jbuf * 1817 nfe_jalloc(struct nfe_softc *sc) 1818 { 1819 struct ifnet *ifp = &sc->arpcom.ac_if; 1820 struct nfe_jbuf *jbuf; 1821 1822 lwkt_serialize_enter(&sc->sc_jbuf_serializer); 1823 1824 jbuf = SLIST_FIRST(&sc->rxq.jfreelist); 1825 if (jbuf != NULL) { 1826 SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext); 1827 jbuf->inuse = 1; 1828 } else { 1829 if_printf(ifp, "no free jumbo buffer\n"); 1830 } 1831 1832 lwkt_serialize_exit(&sc->sc_jbuf_serializer); 1833 1834 return jbuf; 1835 } 1836 1837 static void 1838 nfe_jfree(void *arg) 1839 { 1840 struct nfe_jbuf *jbuf = arg; 1841 struct nfe_softc *sc = jbuf->sc; 1842 struct nfe_rx_ring *ring = jbuf->ring; 1843 1844 if (&ring->jbuf[jbuf->slot] != jbuf) 1845 panic("%s: free wrong jumbo buffer", __func__); 1846 else if (jbuf->inuse == 0) 1847 panic("%s: jumbo buffer already freed", __func__); 1848 1849 lwkt_serialize_enter(&sc->sc_jbuf_serializer); 1850 atomic_subtract_int(&jbuf->inuse, 1); 1851 if (jbuf->inuse == 0) 1852 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext); 1853 lwkt_serialize_exit(&sc->sc_jbuf_serializer); 1854 } 1855 1856 static void 1857 nfe_jref(void *arg) 1858 { 1859 struct nfe_jbuf *jbuf = arg; 1860 struct nfe_rx_ring *ring = jbuf->ring; 1861 1862 if (&ring->jbuf[jbuf->slot] != jbuf) 1863 panic("%s: ref wrong jumbo buffer", __func__); 1864 else if (jbuf->inuse == 0) 1865 panic("%s: jumbo buffer already freed", __func__); 1866 1867 atomic_add_int(&jbuf->inuse, 1); 1868 } 1869 1870 static int 1871 nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1872 { 1873 struct nfe_jbuf *jbuf; 1874 bus_dmamem_t dmem; 1875 bus_addr_t physaddr; 1876 caddr_t buf; 1877 int i, error; 1878 1879 /* 1880 * Allocate a big chunk of DMA'able memory. 1881 */ 1882 error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0, 1883 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1884 NFE_JPOOL_SIZE(sc), 1885 BUS_DMA_WAITOK, &dmem); 1886 if (error) { 1887 if_printf(&sc->arpcom.ac_if, 1888 "could not create jumbo buffer\n"); 1889 return error; 1890 } 1891 ring->jtag = dmem.dmem_tag; 1892 ring->jmap = dmem.dmem_map; 1893 ring->jpool = dmem.dmem_addr; 1894 physaddr = dmem.dmem_busaddr; 1895 1896 /* ..and split it into 9KB chunks */ 1897 SLIST_INIT(&ring->jfreelist); 1898 1899 buf = ring->jpool; 1900 for (i = 0; i < NFE_JPOOL_COUNT(sc); i++) { 1901 jbuf = &ring->jbuf[i]; 1902 1903 jbuf->sc = sc; 1904 jbuf->ring = ring; 1905 jbuf->inuse = 0; 1906 jbuf->slot = i; 1907 jbuf->buf = buf; 1908 jbuf->physaddr = physaddr; 1909 1910 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext); 1911 1912 buf += NFE_JBYTES; 1913 physaddr += NFE_JBYTES; 1914 } 1915 1916 return 0; 1917 } 1918 1919 static void 1920 nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1921 { 1922 if (ring->jtag != NULL) { 1923 bus_dmamap_unload(ring->jtag, ring->jmap); 1924 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap); 1925 bus_dma_tag_destroy(ring->jtag); 1926 } 1927 } 1928 1929 static int 1930 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1931 { 1932 int i, j, error, descsize; 1933 bus_dmamem_t dmem; 1934 void **desc; 1935 1936 if (sc->sc_caps & NFE_40BIT_ADDR) { 1937 desc = (void *)&ring->desc64; 1938 descsize = sizeof(struct nfe_desc64); 1939 } else { 1940 desc = (void *)&ring->desc32; 1941 descsize = sizeof(struct nfe_desc32); 1942 } 1943 1944 ring->queued = 0; 1945 ring->cur = ring->next = 0; 1946 1947 error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0, 1948 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1949 sc->sc_tx_ring_count * descsize, 1950 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1951 if (error) { 1952 if_printf(&sc->arpcom.ac_if, 1953 "could not create TX desc ring\n"); 1954 return error; 1955 } 1956 ring->tag = dmem.dmem_tag; 1957 ring->map = dmem.dmem_map; 1958 *desc = dmem.dmem_addr; 1959 ring->physaddr = dmem.dmem_busaddr; 1960 1961 ring->data = kmalloc(sizeof(struct nfe_tx_data) * sc->sc_tx_ring_count, 1962 M_DEVBUF, M_WAITOK | M_ZERO); 1963 1964 error = bus_dma_tag_create(sc->sc_dtag, 1, 0, 1965 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1966 NULL, NULL, 1967 NFE_JBYTES, NFE_MAX_SCATTER, MCLBYTES, 1968 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 1969 &ring->data_tag); 1970 if (error) { 1971 if_printf(&sc->arpcom.ac_if, 1972 "could not create TX buf DMA tag\n"); 1973 return error; 1974 } 1975 1976 for (i = 0; i < sc->sc_tx_ring_count; i++) { 1977 error = bus_dmamap_create(ring->data_tag, 1978 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 1979 &ring->data[i].map); 1980 if (error) { 1981 if_printf(&sc->arpcom.ac_if, 1982 "could not create %dth TX buf DMA map\n", i); 1983 goto fail; 1984 } 1985 } 1986 1987 return 0; 1988 fail: 1989 for (j = 0; j < i; ++j) 1990 bus_dmamap_destroy(ring->data_tag, ring->data[i].map); 1991 bus_dma_tag_destroy(ring->data_tag); 1992 ring->data_tag = NULL; 1993 return error; 1994 } 1995 1996 static void 1997 nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1998 { 1999 int i; 2000 2001 for (i = 0; i < sc->sc_tx_ring_count; i++) { 2002 struct nfe_tx_data *data = &ring->data[i]; 2003 2004 if (sc->sc_caps & NFE_40BIT_ADDR) 2005 ring->desc64[i].flags = 0; 2006 else 2007 ring->desc32[i].flags = 0; 2008 2009 if (data->m != NULL) { 2010 bus_dmamap_unload(ring->data_tag, data->map); 2011 m_freem(data->m); 2012 data->m = NULL; 2013 } 2014 } 2015 2016 ring->queued = 0; 2017 ring->cur = ring->next = 0; 2018 } 2019 2020 static int 2021 nfe_init_tx_ring(struct nfe_softc *sc __unused, 2022 struct nfe_tx_ring *ring __unused) 2023 { 2024 return 0; 2025 } 2026 2027 static void 2028 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 2029 { 2030 if (ring->data_tag != NULL) { 2031 struct nfe_tx_data *data; 2032 int i; 2033 2034 for (i = 0; i < sc->sc_tx_ring_count; ++i) { 2035 data = &ring->data[i]; 2036 2037 if (data->m != NULL) { 2038 bus_dmamap_unload(ring->data_tag, data->map); 2039 m_freem(data->m); 2040 } 2041 bus_dmamap_destroy(ring->data_tag, data->map); 2042 } 2043 2044 bus_dma_tag_destroy(ring->data_tag); 2045 } 2046 2047 if (ring->data != NULL) 2048 kfree(ring->data, M_DEVBUF); 2049 2050 if (ring->tag != NULL) { 2051 void *desc; 2052 2053 if (sc->sc_caps & NFE_40BIT_ADDR) 2054 desc = ring->desc64; 2055 else 2056 desc = ring->desc32; 2057 2058 bus_dmamap_unload(ring->tag, ring->map); 2059 bus_dmamem_free(ring->tag, desc, ring->map); 2060 bus_dma_tag_destroy(ring->tag); 2061 } 2062 } 2063 2064 static int 2065 nfe_ifmedia_upd(struct ifnet *ifp) 2066 { 2067 struct nfe_softc *sc = ifp->if_softc; 2068 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2069 2070 ASSERT_SERIALIZED(ifp->if_serializer); 2071 2072 if (mii->mii_instance != 0) { 2073 struct mii_softc *miisc; 2074 2075 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2076 mii_phy_reset(miisc); 2077 } 2078 mii_mediachg(mii); 2079 2080 return 0; 2081 } 2082 2083 static void 2084 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2085 { 2086 struct nfe_softc *sc = ifp->if_softc; 2087 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2088 2089 ASSERT_SERIALIZED(ifp->if_serializer); 2090 2091 mii_pollstat(mii); 2092 ifmr->ifm_status = mii->mii_media_status; 2093 ifmr->ifm_active = mii->mii_media_active; 2094 } 2095 2096 static void 2097 nfe_setmulti(struct nfe_softc *sc) 2098 { 2099 struct ifnet *ifp = &sc->arpcom.ac_if; 2100 struct ifmultiaddr *ifma; 2101 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN]; 2102 uint32_t filter = NFE_RXFILTER_MAGIC; 2103 int i; 2104 2105 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 2106 bzero(addr, ETHER_ADDR_LEN); 2107 bzero(mask, ETHER_ADDR_LEN); 2108 goto done; 2109 } 2110 2111 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 2112 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN); 2113 2114 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2115 caddr_t maddr; 2116 2117 if (ifma->ifma_addr->sa_family != AF_LINK) 2118 continue; 2119 2120 maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 2121 for (i = 0; i < ETHER_ADDR_LEN; i++) { 2122 addr[i] &= maddr[i]; 2123 mask[i] &= ~maddr[i]; 2124 } 2125 } 2126 2127 for (i = 0; i < ETHER_ADDR_LEN; i++) 2128 mask[i] |= addr[i]; 2129 2130 done: 2131 addr[0] |= 0x01; /* make sure multicast bit is set */ 2132 2133 NFE_WRITE(sc, NFE_MULTIADDR_HI, 2134 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); 2135 NFE_WRITE(sc, NFE_MULTIADDR_LO, 2136 addr[5] << 8 | addr[4]); 2137 NFE_WRITE(sc, NFE_MULTIMASK_HI, 2138 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]); 2139 NFE_WRITE(sc, NFE_MULTIMASK_LO, 2140 mask[5] << 8 | mask[4]); 2141 2142 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M; 2143 NFE_WRITE(sc, NFE_RXFILTER, filter); 2144 } 2145 2146 static void 2147 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr) 2148 { 2149 uint32_t lo, hi; 2150 2151 lo = NFE_READ(sc, NFE_MACADDR_LO); 2152 hi = NFE_READ(sc, NFE_MACADDR_HI); 2153 if (sc->sc_caps & NFE_FIX_EADDR) { 2154 addr[0] = (lo >> 8) & 0xff; 2155 addr[1] = (lo & 0xff); 2156 2157 addr[2] = (hi >> 24) & 0xff; 2158 addr[3] = (hi >> 16) & 0xff; 2159 addr[4] = (hi >> 8) & 0xff; 2160 addr[5] = (hi & 0xff); 2161 } else { 2162 addr[0] = (hi & 0xff); 2163 addr[1] = (hi >> 8) & 0xff; 2164 addr[2] = (hi >> 16) & 0xff; 2165 addr[3] = (hi >> 24) & 0xff; 2166 2167 addr[4] = (lo & 0xff); 2168 addr[5] = (lo >> 8) & 0xff; 2169 } 2170 } 2171 2172 static void 2173 nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr) 2174 { 2175 NFE_WRITE(sc, NFE_MACADDR_LO, 2176 addr[5] << 8 | addr[4]); 2177 NFE_WRITE(sc, NFE_MACADDR_HI, 2178 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); 2179 } 2180 2181 static void 2182 nfe_tick(void *arg) 2183 { 2184 struct nfe_softc *sc = arg; 2185 struct ifnet *ifp = &sc->arpcom.ac_if; 2186 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2187 2188 lwkt_serialize_enter(ifp->if_serializer); 2189 2190 mii_tick(mii); 2191 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc); 2192 2193 lwkt_serialize_exit(ifp->if_serializer); 2194 } 2195 2196 static int 2197 nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx, 2198 int wait) 2199 { 2200 struct nfe_rx_data *data = &ring->data[idx]; 2201 bus_dma_segment_t seg; 2202 bus_dmamap_t map; 2203 struct mbuf *m; 2204 int nsegs, error; 2205 2206 m = m_getcl(wait ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 2207 if (m == NULL) 2208 return ENOBUFS; 2209 m->m_len = m->m_pkthdr.len = MCLBYTES; 2210 2211 /* 2212 * Aligning the payload improves access times. 2213 */ 2214 if (sc->sc_caps & NFE_WORDALIGN) 2215 m_adj(m, ETHER_ALIGN); 2216 2217 error = bus_dmamap_load_mbuf_segment(ring->data_tag, ring->data_tmpmap, 2218 m, &seg, 1, &nsegs, BUS_DMA_NOWAIT); 2219 if (error) { 2220 m_freem(m); 2221 if (wait) { 2222 if_printf(&sc->arpcom.ac_if, 2223 "could map RX mbuf %d\n", error); 2224 } 2225 return error; 2226 } 2227 2228 if (data->m != NULL) { 2229 /* Sync and unload originally mapped mbuf */ 2230 bus_dmamap_sync(ring->data_tag, data->map, 2231 BUS_DMASYNC_POSTREAD); 2232 bus_dmamap_unload(ring->data_tag, data->map); 2233 } 2234 2235 /* Swap this DMA map with tmp DMA map */ 2236 map = data->map; 2237 data->map = ring->data_tmpmap; 2238 ring->data_tmpmap = map; 2239 2240 /* Caller is assumed to have collected the old mbuf */ 2241 data->m = m; 2242 2243 nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr); 2244 return 0; 2245 } 2246 2247 static int 2248 nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx, 2249 int wait) 2250 { 2251 struct nfe_rx_data *data = &ring->data[idx]; 2252 struct nfe_jbuf *jbuf; 2253 struct mbuf *m; 2254 2255 MGETHDR(m, wait ? M_WAITOK : M_NOWAIT, MT_DATA); 2256 if (m == NULL) 2257 return ENOBUFS; 2258 2259 jbuf = nfe_jalloc(sc); 2260 if (jbuf == NULL) { 2261 m_freem(m); 2262 if_printf(&sc->arpcom.ac_if, "jumbo allocation failed " 2263 "-- packet dropped!\n"); 2264 return ENOBUFS; 2265 } 2266 2267 m->m_ext.ext_arg = jbuf; 2268 m->m_ext.ext_buf = jbuf->buf; 2269 m->m_ext.ext_free = nfe_jfree; 2270 m->m_ext.ext_ref = nfe_jref; 2271 m->m_ext.ext_size = NFE_JBYTES; 2272 2273 m->m_data = m->m_ext.ext_buf; 2274 m->m_flags |= M_EXT; 2275 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 2276 2277 /* 2278 * Aligning the payload improves access times. 2279 */ 2280 if (sc->sc_caps & NFE_WORDALIGN) 2281 m_adj(m, ETHER_ALIGN); 2282 2283 /* Caller is assumed to have collected the old mbuf */ 2284 data->m = m; 2285 2286 nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr); 2287 return 0; 2288 } 2289 2290 static void 2291 nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx, 2292 bus_addr_t physaddr) 2293 { 2294 if (sc->sc_caps & NFE_40BIT_ADDR) { 2295 struct nfe_desc64 *desc64 = &ring->desc64[idx]; 2296 2297 desc64->physaddr[0] = htole32(NFE_ADDR_HI(physaddr)); 2298 desc64->physaddr[1] = htole32(NFE_ADDR_LO(physaddr)); 2299 } else { 2300 struct nfe_desc32 *desc32 = &ring->desc32[idx]; 2301 2302 desc32->physaddr = htole32(physaddr); 2303 } 2304 } 2305 2306 static void 2307 nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx) 2308 { 2309 if (sc->sc_caps & NFE_40BIT_ADDR) { 2310 struct nfe_desc64 *desc64 = &ring->desc64[idx]; 2311 2312 desc64->length = htole16(ring->bufsz); 2313 desc64->flags = htole16(NFE_RX_READY); 2314 } else { 2315 struct nfe_desc32 *desc32 = &ring->desc32[idx]; 2316 2317 desc32->length = htole16(ring->bufsz); 2318 desc32->flags = htole16(NFE_RX_READY); 2319 } 2320 } 2321 2322 static int 2323 nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS) 2324 { 2325 struct nfe_softc *sc = arg1; 2326 struct ifnet *ifp = &sc->arpcom.ac_if; 2327 uint32_t flags; 2328 int error, v; 2329 2330 lwkt_serialize_enter(ifp->if_serializer); 2331 2332 flags = sc->sc_flags & ~NFE_F_DYN_IM; 2333 v = sc->sc_imtime; 2334 if (sc->sc_flags & NFE_F_DYN_IM) 2335 v = -v; 2336 2337 error = sysctl_handle_int(oidp, &v, 0, req); 2338 if (error || req->newptr == NULL) 2339 goto back; 2340 2341 if (v < 0) { 2342 flags |= NFE_F_DYN_IM; 2343 v = -v; 2344 } 2345 2346 if (v != sc->sc_imtime || (flags ^ sc->sc_flags)) { 2347 if (NFE_IMTIME(v) == 0) 2348 v = 0; 2349 sc->sc_imtime = v; 2350 sc->sc_flags = flags; 2351 sc->sc_irq_enable = NFE_IRQ_ENABLE(sc); 2352 2353 if ((ifp->if_flags & (IFF_NPOLLING | IFF_RUNNING)) 2354 == IFF_RUNNING) { 2355 nfe_enable_intrs(sc); 2356 } 2357 } 2358 back: 2359 lwkt_serialize_exit(ifp->if_serializer); 2360 return error; 2361 } 2362 2363 static void 2364 nfe_powerup(device_t dev) 2365 { 2366 struct nfe_softc *sc = device_get_softc(dev); 2367 uint32_t pwr_state; 2368 uint16_t did; 2369 2370 /* 2371 * Bring MAC and PHY out of low power state 2372 */ 2373 2374 pwr_state = NFE_READ(sc, NFE_PWR_STATE2) & ~NFE_PWRUP_MASK; 2375 2376 did = pci_get_device(dev); 2377 if ((did == PCI_PRODUCT_NVIDIA_MCP51_LAN1 || 2378 did == PCI_PRODUCT_NVIDIA_MCP51_LAN2) && 2379 pci_get_revid(dev) >= 0xa3) 2380 pwr_state |= NFE_PWRUP_REV_A3; 2381 2382 NFE_WRITE(sc, NFE_PWR_STATE2, pwr_state); 2383 } 2384 2385 static void 2386 nfe_mac_reset(struct nfe_softc *sc) 2387 { 2388 uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2; 2389 uint32_t macaddr_hi, macaddr_lo, tx_poll; 2390 2391 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl); 2392 2393 /* Save several registers for later restoration */ 2394 macaddr_hi = NFE_READ(sc, NFE_MACADDR_HI); 2395 macaddr_lo = NFE_READ(sc, NFE_MACADDR_LO); 2396 tx_poll = NFE_READ(sc, NFE_TX_POLL); 2397 2398 NFE_WRITE(sc, NFE_MAC_RESET, NFE_RESET_ASSERT); 2399 DELAY(100); 2400 2401 NFE_WRITE(sc, NFE_MAC_RESET, 0); 2402 DELAY(100); 2403 2404 /* Restore saved registers */ 2405 NFE_WRITE(sc, NFE_MACADDR_HI, macaddr_hi); 2406 NFE_WRITE(sc, NFE_MACADDR_LO, macaddr_lo); 2407 NFE_WRITE(sc, NFE_TX_POLL, tx_poll); 2408 2409 NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl); 2410 } 2411 2412 static void 2413 nfe_enable_intrs(struct nfe_softc *sc) 2414 { 2415 /* 2416 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER. 2417 * It is unclear how wide the timer is. Base programming does 2418 * not seem to effect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE so 2419 * we don't get any interrupt moderation. TX moderation is 2420 * possible by using the timer interrupt instead of TX_DONE. 2421 * 2422 * It is unclear whether there are other bits that can be 2423 * set to make the NFE device actually do interrupt moderation 2424 * on the RX side. 2425 * 2426 * For now set a 128uS interval as a placemark, but don't use 2427 * the timer. 2428 */ 2429 if (sc->sc_imtime == 0) 2430 NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME_DEFAULT); 2431 else 2432 NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME(sc->sc_imtime)); 2433 2434 /* Enable interrupts */ 2435 NFE_WRITE(sc, NFE_IRQ_MASK, sc->sc_irq_enable); 2436 2437 if (sc->sc_irq_enable & NFE_IRQ_TIMER) 2438 sc->sc_flags |= NFE_F_IRQ_TIMER; 2439 else 2440 sc->sc_flags &= ~NFE_F_IRQ_TIMER; 2441 } 2442