/*	$OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $	*/

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com> and
 * Matthew Dillon <dillon@apollo.backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include "pcidevs.h"

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include "miibus_if.h"

#include <dev/netif/nfe/if_nfereg.h>
#include <dev/netif/nfe/if_nfevar.h>

#define NFE_CSUM
#define NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

static int	nfe_probe(device_t);
static int	nfe_attach(device_t);
static int	nfe_detach(device_t);
static void	nfe_shutdown(device_t);
static int	nfe_resume(device_t);
static int	nfe_suspend(device_t);

static int	nfe_miibus_readreg(device_t, int, int);
static void	nfe_miibus_writereg(device_t, int, int, int);
static void	nfe_miibus_statchg(device_t);

#ifdef IFPOLL_ENABLE
static void	nfe_npoll(struct ifnet *, struct ifpoll_info *);
static void	nfe_npoll_compat(struct ifnet *, void *, int);
static void	nfe_disable_intrs(struct nfe_softc *);
#endif
static void	nfe_intr(void *);
static int	nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static int	nfe_rxeof(struct nfe_softc *);
static int	nfe_txeof(struct nfe_softc *, int);
static int	nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
			  struct mbuf *);
static void	nfe_start(struct ifnet *, struct ifaltq_subque *);
static void	nfe_watchdog(struct ifnet *);
static void	nfe_init(void *);
static void	nfe_stop(struct nfe_softc *);
static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
static void	nfe_jfree(void *);
static void	nfe_jref(void *);
static int	nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_ifmedia_upd(struct ifnet *);
static void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	nfe_setmulti(struct nfe_softc *);
static void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
static void	nfe_powerup(device_t);
static void	nfe_mac_reset(struct nfe_softc *);
static void	nfe_tick(void *);
static void	nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int, bus_addr_t);
static void	nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int);
static int	nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
			       int);
static int	nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
				 int);
static void	nfe_enable_intrs(struct nfe_softc *);

static int	nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS);

#define NFE_DEBUG
#ifdef NFE_DEBUG

static int	nfe_debug = 0;
static int	nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT;
static int	nfe_tx_ring_count = NFE_TX_RING_DEF_COUNT;

/*
 * hw timer simulated interrupt moderation @4000Hz.  Negative values
 * disable the timer when the discrete interrupt rate falls below
 * the moderation rate.
 *
 * XXX 8000Hz might be better but if the interrupt is shared it can
 * blow out the cpu.
 */
static int	nfe_imtime = -250;	/* uS */

TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count);
TUNABLE_INT("hw.nfe.tx_ring_count", &nfe_tx_ring_count);
TUNABLE_INT("hw.nfe.imtimer", &nfe_imtime);
TUNABLE_INT("hw.nfe.debug", &nfe_debug);

#define DPRINTF(sc, fmt, ...) do {			\
	if ((sc)->sc_debug) {				\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#define DPRINTFN(sc, lv, fmt, ...) do {			\
	if ((sc)->sc_debug >= (lv)) {			\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#else	/* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)

#endif	/* NFE_DEBUG */

static const struct nfe_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	  "NVIDIA nForce Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	  "NVIDIA nForce2 Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	/*
	 * XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
	 * chipset, and possibly also the 400R; it might be that both nForce2-
	 * and nForce3-based boards can use the same MCPs (= southbridges).
	 */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ 0, 0, NULL }
};

static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	DEVMETHOD_END
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

DECLARE_DUMMY_MODULE(if_nfe);
MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, NULL, NULL);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, NULL, NULL);

/*
 * NOTE: NFE_WORDALIGN support is guesswork right now.
 */
static int
nfe_probe(device_t dev)
{
	const struct nfe_dev *n;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (n = nfe_devices; n->desc != NULL; ++n) {
		if (vid == n->vid && did == n->did) {
			struct nfe_softc *sc = device_get_softc(dev);

			switch (did) {
			case PCI_PRODUCT_NVIDIA_NFORCE_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE2_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN1:
				sc->sc_caps = NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_HW_CSUM |
					      NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
				sc->sc_caps = NFE_FIX_EADDR;
				/* FALL THROUGH */
			case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
				sc->sc_caps |= NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_CK804_LAN1:
			case PCI_PRODUCT_NVIDIA_CK804_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_HW_VLAN |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
				sc->sc_caps = NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_WORDALIGN;
				break;
			}

			device_set_desc(dev, n->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t lowaddr;
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	lwkt_serialize_init(&sc->sc_jbuf_serializer);

	/*
	 * Initialize sysctl variables
	 */
	sc->sc_rx_ring_count = nfe_rx_ring_count;
	sc->sc_tx_ring_count = nfe_tx_ring_count;
	sc->sc_debug = nfe_debug;
	if (nfe_imtime < 0) {
		sc->sc_flags |= NFE_F_DYN_IM;
		sc->sc_imtime = -nfe_imtime;
	} else {
		sc->sc_imtime = nfe_imtime;
	}
	sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

	sc->sc_mem_rid = PCIR_BAR(0);

	if (sc->sc_caps & NFE_40BIT_ADDR)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V3;
	else if (sc->sc_caps & NFE_JUMBO_SUP)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V2;

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, irq;

		mem = pci_read_config(dev, sc->sc_mem_rid, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate io memory\n");
		return ENXIO;
	}
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	/* Allocate IRQ */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/* Disable WOL */
	NFE_WRITE(sc, NFE_WOL_CTL, 0);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_powerup(dev);

	nfe_get_macaddr(sc, eaddr);

	/*
	 * Allocate top level DMA tag
	 */
	if (sc->sc_caps & NFE_40BIT_ADDR)
		lowaddr = NFE_BUS_SPACE_MAXADDR;
	else
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	error = bus_dma_tag_create(NULL,	/* parent */
			1, 0,			/* alignment, boundary */
			lowaddr,		/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
			0,			/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			0,			/* flags */
			&sc->sc_dtag);
	if (error) {
		device_printf(dev, "could not allocate parent dma tag\n");
		goto fail;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
			OID_AUTO, "imtimer", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, nfe_sysctl_imtime, "I",
			"Interrupt moderation time (usec).  "
			"0 to disable interrupt moderation.");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "rx_ring_count", CTLFLAG_RD, &sc->sc_rx_ring_count,
		       0, "RX ring count");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "tx_ring_count", CTLFLAG_RD, &sc->sc_tx_ring_count,
		       0, "TX ring count");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "debug", CTLFLAG_RW, &sc->sc_debug,
		       0, "control debugging printfs");

	error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
			      nfe_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = nfe_npoll;
#endif
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_nmbclusters = sc->sc_rx_ring_count;
	ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_caps & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

#ifdef NFE_CSUM
	if (sc->sc_caps & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = NFE_CSUM_FEATURES;
	}
#else
	sc->sc_caps &= ~NFE_HW_CSUM;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	callout_init(&sc->sc_tick_ch);

	ether_ifattach(ifp, eaddr, NULL);

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->sc_irq_res));

#ifdef IFPOLL_ENABLE
	ifpoll_compat_setup(&sc->sc_npoll, ctx, (struct sysctl_oid *)tree,
	    device_get_unit(dev), ifp->if_serializer);
#endif

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not setup intr\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	nfe_detach(dev);
	return error;
}

static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		nfe_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);

	return 0;
}

static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP)
		nfe_init(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	ASSERT_SERIALIZED(sc->arpcom.ac_if.if_serializer);

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY %s\n", "");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return val;
}

static void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

#ifdef NFE_DEBUG
	if (ntries == 1000)
		DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
#endif
}

#ifdef IFPOLL_ENABLE

static void
nfe_npoll_compat(struct ifnet *ifp, void *arg __unused, int count __unused)
{
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	nfe_rxeof(sc);
	nfe_txeof(sc, 1);
}

static void
nfe_disable_intrs(struct nfe_softc *sc)
{
	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	sc->sc_flags &= ~NFE_F_IRQ_TIMER;
	sc->sc_npoll.ifpc_stcount = 0;
}

static void
nfe_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (info != NULL) {
		int cpuid = sc->sc_npoll.ifpc_cpuid;

		info->ifpi_rx[cpuid].poll_func = nfe_npoll_compat;
		info->ifpi_rx[cpuid].arg = NULL;
		info->ifpi_rx[cpuid].serializer = ifp->if_serializer;

		if (ifp->if_flags & IFF_RUNNING)
			nfe_disable_intrs(sc);
		ifq_set_cpuid(&ifp->if_snd, cpuid);
	} else {
		if (ifp->if_flags & IFF_RUNNING)
			nfe_enable_intrs(sc);
		ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->sc_irq_res));
	}
}

#endif	/* IFPOLL_ENABLE */

static void
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t r;

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	if (r == 0)
		return;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	if (sc->sc_rate_second != time_uptime) {
		/*
		 * Calculate sc_rate_avg - interrupts per second.
		 */
		sc->sc_rate_second = time_uptime;
		if (sc->sc_rate_avg < sc->sc_rate_acc)
			sc->sc_rate_avg = sc->sc_rate_acc;
		else
			sc->sc_rate_avg = (sc->sc_rate_avg * 3 +
					   sc->sc_rate_acc) / 4;
		sc->sc_rate_acc = 0;
	} else if (sc->sc_rate_avg < sc->sc_rate_acc) {
		/*
		 * Don't wait for a tick to roll over if we are taking
		 * a lot of interrupts.
		 */
		sc->sc_rate_avg = sc->sc_rate_acc;
	}

	DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed %s\n", "");
	}

	if (ifp->if_flags & IFF_RUNNING) {
		int ret;
		int rate;

		/* check Rx ring */
		ret = nfe_rxeof(sc);

		/* check Tx ring */
		ret |= nfe_txeof(sc, 1);

		/* update the rate accumulator */
		if (ret)
			++sc->sc_rate_acc;

		if (sc->sc_flags & NFE_F_DYN_IM) {
			rate = 1000000 / sc->sc_imtime;
			if ((sc->sc_flags & NFE_F_IRQ_TIMER) == 0 &&
			    sc->sc_rate_avg > rate) {
				/*
				 * Use the hardware timer to reduce the
				 * interrupt rate if the discrete interrupt
				 * rate has exceeded our threshold.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_IMTIMER);
				sc->sc_flags |= NFE_F_IRQ_TIMER;
			} else if ((sc->sc_flags & NFE_F_IRQ_TIMER) &&
				   sc->sc_rate_avg <= rate) {
				/*
				 * Use discrete TX/RX interrupts if the rate
				 * has fallen below our threshold.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_NOIMTIMER);
				sc->sc_flags &= ~NFE_F_IRQ_TIMER;

				/*
				 * Recollect, mainly to avoid the possible race
				 * introduced by changing interrupt masks.
				 */
				nfe_rxeof(sc);
				nfe_txeof(sc, 1);
			}
		}
	}
}

static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask, jumbo_cap;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		if ((sc->sc_caps & NFE_JUMBO_SUP) && sc->rxq.jbuf != NULL)
			jumbo_cap = 1;
		else
			jumbo_cap = 0;

		if ((jumbo_cap && ifr->ifr_mtu > NFE_JUMBO_MTU) ||
		    (!jumbo_cap && ifr->ifr_mtu > ETHERMTU)) {
			return EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM;
		if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) {
			ifp->if_capenable ^= mask;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist = NFE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;

			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

static int
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_rx_ring *ring = &sc->rxq;
	int reap;

	reap = 0;
	for (;;) {
		struct nfe_rx_data *data = &ring->data[ring->cur];
		struct mbuf *m;
		uint16_t flags;
		int len, error;

		if (sc->sc_caps & NFE_40BIT_ADDR) {
			struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		reap = 1;

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			goto skip;
		}

		m = data->m;

		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
		else
			error = nfe_newbuf_std(sc, ring, ring->cur, 0);
		if (error) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			goto skip;
		}

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) &&
		    (flags & NFE_RX_CSUMOK)) {
			if (flags & NFE_RX_IP_CSUMOK_V2) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
							  CSUM_IP_VALID;
			}

			if (flags &
			    (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
							  CSUM_PSEUDO_HDR |
							  CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		IFNET_STAT_INC(ifp, ipackets, 1);
		ifp->if_input(ifp, m, NULL, -1);
skip:
		nfe_set_ready_rxdesc(sc, ring, ring->cur);
		sc->rxq.cur = (sc->rxq.cur + 1) % sc->sc_rx_ring_count;
	}
	return reap;
}

static int
nfe_txeof(struct nfe_softc *sc, int start)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_tx_ring *ring = &sc->txq;
	struct nfe_tx_data *data = NULL;

	while (ring->next != ring->cur) {
		uint16_t flags;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			flags = le16toh(ring->desc64[ring->next].flags);
		else
			flags = le16toh(ring->desc32[ring->next].flags);

		if (flags & NFE_TX_VALID)
			break;

		data = &ring->data[ring->next];

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				if_printf(ifp, "tx v1 error 0x%pb%i\n",
					  NFE_V1_TXERR, flags);
				IFNET_STAT_INC(ifp, oerrors, 1);
			} else {
				IFNET_STAT_INC(ifp, opackets, 1);
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				if_printf(ifp, "tx v2 error 0x%pb%i\n",
					  NFE_V2_TXERR, flags);
				IFNET_STAT_INC(ifp, oerrors, 1);
			} else {
				IFNET_STAT_INC(ifp, opackets, 1);
			}
		}

		if (data->m == NULL) {	/* should not get there */
			if_printf(ifp,
				  "last fragment bit w/o associated mbuf!\n");
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_unload(ring->data_tag, data->map);
		m_freem(data->m);
		data->m = NULL;
skip:
		ring->queued--;
		KKASSERT(ring->queued >= 0);
		ring->next = (ring->next + 1) % sc->sc_tx_ring_count;
	}

	if (sc->sc_tx_ring_count - ring->queued >=
	    sc->sc_tx_spare + NFE_NSEG_RSVD)
		ifq_clr_oactive(&ifp->if_snd);

	if (ring->queued == 0)
		ifp->if_timer = 0;

	if (start && !ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);

	if (data != NULL)
		return 1;
	else
		return 0;
}

static int
nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
{
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	struct nfe_tx_data *data, *data_map;
	bus_dmamap_t map;
	struct nfe_desc64 *desc64 = NULL;
	struct nfe_desc32 *desc32 = NULL;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, j, maxsegs, nsegs;

	data = &ring->data[ring->cur];
	map = data->map;
	data_map = data;	/* Remember who owns the DMA map */

	maxsegs = (sc->sc_tx_ring_count - ring->queued) - NFE_NSEG_RSVD;
	if (maxsegs > NFE_MAX_SCATTER)
		maxsegs = NFE_MAX_SCATTER;
	KASSERT(maxsegs >= sc->sc_tx_spare,
		("not enough segments %d,%d", maxsegs, sc->sc_tx_spare));

	error = bus_dmamap_load_mbuf_defrag(ring->data_tag, map, &m0,
			segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);

	error = 0;

	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = m0->m_pkthdr.ether_vlantag;

	if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= NFE_TX_TCP_CSUM;
	}

	/*
	 * XXX urm. somebody is unaware of how hardware works.  You
	 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
	 * the ring until the entire chain is actually *VALID*.  Otherwise
	 * the hardware may encounter a partially initialized chain that
	 * is marked as being ready to go when it in fact is not ready to
	 * go.
	 */

	for (i = 0; i < nsegs; i++) {
		j = (ring->cur + i) % sc->sc_tx_ring_count;
		data = &ring->data[j];

		if (sc->sc_caps & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->physaddr[0] =
			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
			desc64->physaddr[1] =
			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->vtag = htole32(vtag);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &ring->desc32[j];
			desc32->physaddr = htole32(segs[i].ds_addr);
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
		vtag = 0;

		ring->queued++;
		KKASSERT(ring->queued <= sc->sc_tx_ring_count);
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
	} else {
		if (sc->sc_caps & NFE_JUMBO_SUP)
			flags = NFE_TX_LASTFRAG_V2;
		else
			flags = NFE_TX_LASTFRAG_V1;
		desc32->flags |= htole16(flags);
	}

	/*
	 * Set NFE_TX_VALID backwards so the hardware doesn't see the
	 * whole mess until the first descriptor in the map is flagged.
	 */
	for (i = nsegs - 1; i >= 0; --i) {
		j = (ring->cur + i) % sc->sc_tx_ring_count;
		if (sc->sc_caps & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->flags |= htole16(NFE_TX_VALID);
		} else {
			desc32 = &ring->desc32[j];
			desc32->flags |= htole16(NFE_TX_VALID);
		}
	}
	ring->cur = (ring->cur + nsegs) % sc->sc_tx_ring_count;

	/* Exchange DMA map */
	data_map->map = data->map;
	data->map = map;
	data->m = m0;
back:
	if (error)
		m_freem(m0);
	return error;
}

static void
nfe_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_tx_ring *ring = &sc->txq;
	int count = 0, oactive = 0;
	struct mbuf *m0;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		int error;

		if (sc->sc_tx_ring_count - ring->queued <
		    sc->sc_tx_spare + NFE_NSEG_RSVD) {
			if (oactive) {
				ifq_set_oactive(&ifp->if_snd);
				break;
			}

			nfe_txeof(sc, 0);
			oactive = 1;
			continue;
		}

		m0 = ifq_dequeue(&ifp->if_snd);
		if (m0 == NULL)
			break;

		ETHER_BPF_MTAP(ifp, m0);

		error = nfe_encap(sc, ring, m0);
		if (error) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			if (error == EFBIG) {
				if (oactive) {
					ifq_set_oactive(&ifp->if_snd);
					break;
				}
				nfe_txeof(sc, 0);
				oactive = 1;
			}
			continue;
		} else {
			oactive = 0;
		}
		++count;

		/*
		 * NOTE:
		 * `m0' may be freed in nfe_encap(), so
		 * it should not be touched any more.
		 */
	}

	if (count == 0)	/* nothing sent */
		return;

	/* Kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (ifp->if_flags & IFF_RUNNING) {
		if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
		nfe_txeof(sc, 1);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");

	nfe_init(ifp->if_softc);

	IFNET_STAT_INC(ifp, oerrors, 1);
}

static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t tmp;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	nfe_stop(sc);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_mac_reset(sc);

	/*
	 * NOTE:
	 * Switching between jumbo frames and normal frames should
	 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring().
	 */
	if (ifp->if_mtu > ETHERMTU) {
		sc->sc_flags |= NFE_F_USE_JUMBO;
		sc->rxq.bufsz = NFE_JBYTES;
		sc->sc_tx_spare = NFE_NSEG_SPARE_JUMBO;
		if (bootverbose)
			if_printf(ifp, "use jumbo frames\n");
	} else {
		sc->sc_flags &= ~NFE_F_USE_JUMBO;
		sc->rxq.bufsz = MCLBYTES;
		sc->sc_tx_spare = NFE_NSEG_SPARE;
		if (bootverbose)
			if_printf(ifp, "use non-jumbo frames\n");
	}

	error = nfe_init_tx_ring(sc, &sc->txq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	NFE_WRITE(sc, NFE_TX_POLL, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2 | sc->rxtxctl_desc;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;

	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose.  This will be done in software by our network stack.
	 */
	if (sc->sc_caps & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (sc->sc_caps & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
			  NFE_ADDR_HI(sc->rxq.physaddr));
	}
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->rxq.physaddr));

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		NFE_WRITE(sc, NFE_TX_RING_ADDR_HI,
			  NFE_ADDR_HI(sc->txq.physaddr));
	}
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));

	NFE_WRITE(sc, NFE_RING_SIZE,
		  (sc->sc_rx_ring_count - 1) << 16 |
		  (sc->sc_tx_ring_count - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		nfe_disable_intrs(sc);
	else
#endif
	nfe_enable_intrs(sc);

	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/*
	 * If we had stuff in the tx ring before, it's all cleaned out now
	 * so we are not going to get an interrupt, jump-start any pending
	 * output.
	 */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static void
nfe_stop(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	sc->sc_flags &= ~NFE_F_IRQ_TIMER;

#define WAITMAX	50000

	/*
	 * Abort Tx
	 */
	NFE_WRITE(sc, NFE_TX_CTL, 0);
	for (i = 0; i < WAITMAX; ++i) {
		DELAY(100);
		if ((NFE_READ(sc, NFE_TX_STATUS) & NFE_TX_STATUS_BUSY) == 0)
			break;
	}
	if (i == WAITMAX)
		if_printf(ifp, "can't stop TX\n");
	DELAY(100);

	/*
	 * Disable Rx
	 */
	NFE_WRITE(sc, NFE_RX_CTL, 0);
	for (i = 0; i < WAITMAX; ++i) {
		DELAY(100);
		if ((NFE_READ(sc, NFE_RX_STATUS) & NFE_RX_STATUS_BUSY) == 0)
			break;
	}
	if (i == WAITMAX)
		if_printf(ifp, "can't stop RX\n");
	DELAY(100);

#undef WAITMAX

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* Reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i, j, error, descsize;
	bus_dmamem_t dmem;
	void **desc;

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc = (void *)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void *)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->bufsz = MCLBYTES;
	ring->cur = ring->next = 0;

	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    sc->sc_rx_ring_count * descsize,
				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX desc ring\n");
		return error;
	}
	ring->tag = dmem.dmem_tag;
	ring->map = dmem.dmem_map;
	*desc = dmem.dmem_addr;
	ring->physaddr = dmem.dmem_busaddr;

	if (sc->sc_caps & NFE_JUMBO_SUP) {
		ring->jbuf =
			kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT(sc),
				M_DEVBUF, M_WAITOK | M_ZERO);

		error = nfe_jpool_alloc(sc, ring);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate jumbo frames\n");
			kfree(ring->jbuf, M_DEVBUF);
			ring->jbuf = NULL;
			/* Allow jumbo frame allocation to fail */
		}
	}

	ring->data = kmalloc(sizeof(struct nfe_rx_data) * sc->sc_rx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
				   &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX mbuf DMA tag\n");
		return error;
	}

	/* Create a spare RX mbuf DMA map */
	error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK,
				  &ring->data_tmpmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create spare RX mbuf DMA map\n");
		bus_dma_tag_destroy(ring->data_tag);
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth RX mbuf DMA map\n", i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			if ((sc->sc_flags & NFE_F_USE_JUMBO) == 0)
				bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	ring->cur = ring->next = 0;
}

static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}
		nfe_set_ready_rxdesc(sc, ring, i);
	}
	return 0;
}

static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < sc->sc_rx_ring_count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->jbuf != NULL)
		kfree(ring->jbuf, M_DEVBUF);
	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf *jbuf;

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL) {
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
		jbuf->inuse = 1;
	} else {
		if_printf(ifp, "no free jumbo buffer\n");
	}

	lwkt_serialize_exit(&sc->sc_jbuf_serializer);

	return jbuf;
}

static void
nfe_jfree(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_softc *sc = jbuf->sc;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: free wrong jumbo buffer", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed", __func__);

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);
	atomic_subtract_int(&jbuf->inuse, 1);
	if (jbuf->inuse == 0)
		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
	lwkt_serialize_exit(&sc->sc_jbuf_serializer);
}

static void
nfe_jref(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: ref wrong jumbo buffer", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed", __func__);

	atomic_add_int(&jbuf->inuse, 1);
}

static int
nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_jbuf *jbuf;
	bus_dmamem_t dmem;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    NFE_JPOOL_SIZE(sc),
				    BUS_DMA_WAITOK, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create jumbo buffer\n");
		return error;
	}
	ring->jtag = dmem.dmem_tag;
	ring->jmap = dmem.dmem_map;
	ring->jpool = dmem.dmem_addr;
	physaddr = dmem.dmem_busaddr;

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	for (i = 0; i < NFE_JPOOL_COUNT(sc); i++) {
		jbuf = &ring->jbuf[i];

		jbuf->sc = sc;
		jbuf->ring = ring;
		jbuf->inuse = 0;
		jbuf->slot = i;
		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;
}

static void
nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->jtag != NULL) {
		bus_dmamap_unload(ring->jtag, ring->jmap);
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
	}
}

static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, j, error, descsize;
	bus_dmamem_t dmem;
	void **desc;

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc = (void *)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void *)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    sc->sc_tx_ring_count * descsize,
				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX desc ring\n");
		return error;
	}
	ring->tag = dmem.dmem_tag;
	ring->map = dmem.dmem_map;
	*desc = dmem.dmem_addr;
	ring->physaddr = dmem.dmem_busaddr;

	ring->data = kmalloc(sizeof(struct nfe_tx_data) * sc->sc_tx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			NULL, NULL,
			NFE_JBYTES, NFE_MAX_SCATTER, MCLBYTES,
			BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
			&ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX buf DMA tag\n");
		return error;
	}

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag,
					  BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth TX buf DMA map\n", i);
			goto fail;
		}
	}

	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		struct nfe_tx_data *data = &ring->data[i];

		if (sc->sc_caps & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		if (data->m != NULL) {
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

static int
nfe_init_tx_ring(struct nfe_softc *sc __unused,
		 struct nfe_tx_ring *ring __unused)
{
	return 0;
}

static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < sc->sc_tx_ring_count; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}

		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
		  mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
		  mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t lo, hi;

	lo = NFE_READ(sc, NFE_MACADDR_LO);
	hi = NFE_READ(sc, NFE_MACADDR_HI);
	if (sc->sc_caps & NFE_FIX_EADDR) {
		addr[0] = (lo >> 8) & 0xff;
		addr[1] = (lo & 0xff);

		addr[2] = (hi >> 24) & 0xff;
		addr[3] = (hi >> 16) & 0xff;
		addr[4] = (hi >> 8) & 0xff;
		addr[5] = (hi & 0xff);
	} else {
		addr[0] = (hi & 0xff);
		addr[1] = (hi >> 8) & 0xff;
		addr[2] = (hi >> 16) & 0xff;
		addr[3] = (hi >> 24) & 0xff;

		addr[4] = (lo & 0xff);
		addr[5] = (lo >> 8) & 0xff;
	}
}

static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

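/*
 * Attach a fresh standard (MCLBYTES) cluster to RX slot idx.  The new
 * mbuf is loaded into the spare DMA map first and the maps are swapped
 * only on success, so the slot keeps its old mbuf and mapping if the
 * allocation or the load fails.
 */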
static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int nsegs, error;

	m = m_getcl(wait ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/*
	 * Aligning the payload improves access times.
	 */
	if (sc->sc_caps & NFE_WORDALIGN)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(ring->data_tag, ring->data_tmpmap,
					     m, &seg, 1, &nsegs,
					     BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (wait) {
			if_printf(&sc->arpcom.ac_if,
				  "could not map RX mbuf %d\n", error);
		}
		return error;
	}

	if (data->m != NULL) {
		/* Sync and unload originally mapped mbuf */
		bus_dmamap_sync(ring->data_tag, data->map,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->data_tag, data->map);
	}

	/* Swap this DMA map with tmp DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);
	return 0;
}

static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? M_WAITOK : M_NOWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			  "-- packet dropped!\n");
		return ENOBUFS;
	}

	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/*
	 * Aligning the payload improves access times.
	 */
	if (sc->sc_caps & NFE_WORDALIGN)
		m_adj(m, ETHER_ALIGN);

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);
	return 0;
}

static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		     bus_addr_t physaddr)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->physaddr[0] = htole32(NFE_ADDR_HI(physaddr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(physaddr));
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
}

static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
}

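/*
 * Sysctl handler for the interrupt moderation time.  A negative value
 * selects dynamic interrupt moderation (NFE_F_DYN_IM) and is stored as
 * its absolute value.  The updated IRQ mask is applied immediately when
 * the interface is running and not in polling mode.
 */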
static int
nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS)
{
	struct nfe_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t flags;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	flags = sc->sc_flags & ~NFE_F_DYN_IM;
	v = sc->sc_imtime;
	if (sc->sc_flags & NFE_F_DYN_IM)
		v = -v;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < 0) {
		flags |= NFE_F_DYN_IM;
		v = -v;
	}

	if (v != sc->sc_imtime || (flags ^ sc->sc_flags)) {
		if (NFE_IMTIME(v) == 0)
			v = 0;
		sc->sc_imtime = v;
		sc->sc_flags = flags;
		sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

		if ((ifp->if_flags & (IFF_NPOLLING | IFF_RUNNING))
		    == IFF_RUNNING) {
			nfe_enable_intrs(sc);
		}
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static void
nfe_powerup(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t pwr_state;
	uint16_t did;

	/*
	 * Bring MAC and PHY out of low power state
	 */

	pwr_state = NFE_READ(sc, NFE_PWR_STATE2) & ~NFE_PWRUP_MASK;

	did = pci_get_device(dev);
	if ((did == PCI_PRODUCT_NVIDIA_MCP51_LAN1 ||
	     did == PCI_PRODUCT_NVIDIA_MCP51_LAN2) &&
	    pci_get_revid(dev) >= 0xa3)
		pwr_state |= NFE_PWRUP_REV_A3;

	NFE_WRITE(sc, NFE_PWR_STATE2, pwr_state);
}

static void
nfe_mac_reset(struct nfe_softc *sc)
{
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	uint32_t macaddr_hi, macaddr_lo, tx_poll;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);

	/* Save several registers for later restoration */
	macaddr_hi = NFE_READ(sc, NFE_MACADDR_HI);
	macaddr_lo = NFE_READ(sc, NFE_MACADDR_LO);
	tx_poll = NFE_READ(sc, NFE_TX_POLL);

	NFE_WRITE(sc, NFE_MAC_RESET, NFE_RESET_ASSERT);
	DELAY(100);

	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);

	/* Restore saved registers */
	NFE_WRITE(sc, NFE_MACADDR_HI, macaddr_hi);
	NFE_WRITE(sc, NFE_MACADDR_LO, macaddr_lo);
	NFE_WRITE(sc, NFE_TX_POLL, tx_poll);

	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
}

static void
nfe_enable_intrs(struct nfe_softc *sc)
{
	/*
	 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
	 * It is unclear how wide the timer is.  Base programming does
	 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE, so
	 * we don't get any interrupt moderation.  TX moderation is
	 * possible by using the timer interrupt instead of TX_DONE.
	 *
	 * It is unclear whether there are other bits that can be
	 * set to make the NFE device actually do interrupt moderation
	 * on the RX side.
	 *
	 * For now set a 128uS interval as a placeholder, but don't use
	 * the timer.
	 */
	if (sc->sc_imtime == 0)
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME_DEFAULT);
	else
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME(sc->sc_imtime));

	/* Enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, sc->sc_irq_enable);

	if (sc->sc_irq_enable & NFE_IRQ_TIMER)
		sc->sc_flags |= NFE_F_IRQ_TIMER;
	else
		sc->sc_flags &= ~NFE_F_IRQ_TIMER;
}