/*	$OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $	*/
/*	$DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.46 2008/10/28 07:30:49 sephe Exp $	*/

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com> and
 * Matthew Dillon <dillon@apollo.backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include "miibus_if.h"

#include <dev/netif/nfe/if_nfereg.h>
#include <dev/netif/nfe/if_nfevar.h>

#define NFE_CSUM
#define NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

static int	nfe_probe(device_t);
static int	nfe_attach(device_t);
static int	nfe_detach(device_t);
static void	nfe_shutdown(device_t);
static int	nfe_resume(device_t);
static int	nfe_suspend(device_t);

static int	nfe_miibus_readreg(device_t, int, int);
static void	nfe_miibus_writereg(device_t, int, int, int);
static void	nfe_miibus_statchg(device_t);

#ifdef DEVICE_POLLING
static void	nfe_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	nfe_intr(void *);
static int	nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static int	nfe_rxeof(struct nfe_softc *);
static int	nfe_txeof(struct nfe_softc *, int);
static int	nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
			  struct mbuf *);
static void	nfe_start(struct ifnet *);
static void	nfe_watchdog(struct ifnet *);
static void	nfe_init(void *);
static void	nfe_stop(struct nfe_softc *);
static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
static void	nfe_jfree(void *);
static void	nfe_jref(void *);
static int	nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_ifmedia_upd(struct ifnet *);
static void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	nfe_setmulti(struct nfe_softc *);
static void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
static void	nfe_powerup(device_t);
static void	nfe_mac_reset(struct nfe_softc *);
static void	nfe_tick(void *);
static void	nfe_ring_dma_addr(void *, bus_dma_segment_t *, int, int);
static void	nfe_buf_dma_addr(void *, bus_dma_segment_t *, int, bus_size_t,
				 int);
static void	nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int, bus_addr_t);
static void	nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int);
static int	nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
			       int);
static int	nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
				 int);
static void	nfe_enable_intrs(struct nfe_softc *);
static void	nfe_disable_intrs(struct nfe_softc *);

static int	nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS);

#define NFE_DEBUG
#ifdef NFE_DEBUG

static int	nfe_debug = 0;
static int	nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT;
static int	nfe_tx_ring_count = NFE_TX_RING_DEF_COUNT;
/* hw timer simulated interrupt moderation @8000Hz */
static int	nfe_imtime = -125;

TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count);
TUNABLE_INT("hw.nfe.tx_ring_count", &nfe_tx_ring_count);
TUNABLE_INT("hw.nfe.imtimer", &nfe_imtime);
TUNABLE_INT("hw.nfe.debug", &nfe_debug);
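
/*
 * Note on the hw.nfe.imtimer tunable (interpreted in nfe_attach()):
 * a negative value enables dynamic interrupt moderation (NFE_F_DYN_IM)
 * with a hardware timer period of |value| usec, while a value >= 0 is
 * used directly as the moderation time; per nfe_sysctl_imtime(), 0
 * disables interrupt moderation entirely.  The default of -125 usec
 * corresponds to the 8000Hz rate noted above.
 */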

#define DPRINTF(sc, fmt, ...) do {				\
	if ((sc)->sc_debug) {					\
		if_printf(&(sc)->arpcom.ac_if,			\
			  fmt, __VA_ARGS__);			\
	}							\
} while (0)

#define DPRINTFN(sc, lv, fmt, ...) do {				\
	if ((sc)->sc_debug >= (lv)) {				\
		if_printf(&(sc)->arpcom.ac_if,			\
			  fmt, __VA_ARGS__);			\
	}							\
} while (0)

#else	/* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)

#endif	/* NFE_DEBUG */

struct nfe_dma_ctx {
	int			nsegs;
	bus_dma_segment_t	*segs;
};

static const struct nfe_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	  "NVIDIA nForce Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	  "NVIDIA nForce2 Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	/* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
	   chipset, and possibly also the 400R; it might be that both nForce2-
	   and nForce3-based boards can use the same MCPs (= southbridges) */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	  "NVIDIA MCP61 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ 0, 0, NULL }
};

static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ 0, 0 }
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

DECLARE_DUMMY_MODULE(if_nfe);
MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
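
/*
 * nfe_probe() also records the per-chip capability flags in the softc.
 * Note the intentional fallthrough for the MCP51 cases below: MCP51
 * first sets NFE_FIX_EADDR and then falls into the MCP61/MCP67/MCP73
 * cases, which OR in NFE_40BIT_ADDR; the MCP61/67/73 chips themselves
 * start from an empty sc_caps, so they end up with NFE_40BIT_ADDR only.
 */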
static int
nfe_probe(device_t dev)
{
	const struct nfe_dev *n;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (n = nfe_devices; n->desc != NULL; ++n) {
		if (vid == n->vid && did == n->did) {
			struct nfe_softc *sc = device_get_softc(dev);

			switch (did) {
			case PCI_PRODUCT_NVIDIA_NFORCE_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE2_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN1:
				sc->sc_caps = NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_HW_CSUM |
					      NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
				sc->sc_caps = NFE_FIX_EADDR;
				/* FALL THROUGH */
			case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
				sc->sc_caps |= NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_CK804_LAN1:
			case PCI_PRODUCT_NVIDIA_CK804_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_HW_VLAN |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
				sc->sc_caps = NFE_40BIT_ADDR |
					      NFE_HW_CSUM;
				break;
			}

			device_set_desc(dev, n->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	lwkt_serialize_init(&sc->sc_jbuf_serializer);

	/*
	 * Initialize sysctl variables
	 */
	sc->sc_rx_ring_count = nfe_rx_ring_count;
	sc->sc_tx_ring_count = nfe_tx_ring_count;
	sc->sc_debug = nfe_debug;
	if (nfe_imtime < 0) {
		sc->sc_flags |= NFE_F_DYN_IM;
		sc->sc_imtime = -nfe_imtime;
	} else {
		sc->sc_imtime = nfe_imtime;
	}
	sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

	sc->sc_mem_rid = PCIR_BAR(0);

	if (sc->sc_caps & NFE_40BIT_ADDR)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V3;
	else if (sc->sc_caps & NFE_JUMBO_SUP)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V2;

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, irq;

		mem = pci_read_config(dev, sc->sc_mem_rid, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate io memory\n");
		return ENXIO;
	}
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	/* Allocate IRQ */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/* Disable WOL */
	NFE_WRITE(sc, NFE_WOL_CTL, 0);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_powerup(dev);

	nfe_get_macaddr(sc, eaddr);

	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
					     SYSCTL_STATIC_CHILDREN(_hw),
					     OID_AUTO,
					     device_get_nameunit(dev),
					     CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}
	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
			OID_AUTO, "imtimer", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, nfe_sysctl_imtime, "I",
			"Interrupt moderation time (usec).  "
			"0 to disable interrupt moderation.");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "rx_ring_count", CTLFLAG_RD, &sc->sc_rx_ring_count,
		       0, "RX ring count");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "tx_ring_count", CTLFLAG_RD, &sc->sc_tx_ring_count,
		       0, "TX ring count");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "debug", CTLFLAG_RW, &sc->sc_debug,
		       0, "control debugging printfs");

	error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
			      nfe_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = nfe_poll;
#endif
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_caps & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

#ifdef NFE_CSUM
	if (sc->sc_caps & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = NFE_CSUM_FEATURES;
	}
#else
	sc->sc_caps &= ~NFE_HW_CSUM;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	callout_init(&sc->sc_tick_ch);

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not setup intr\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return 0;
fail:
	nfe_detach(dev);
	return error;
}

static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		nfe_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sc_sysctl_ctx);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);

	return 0;
}

static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP)
		nfe_init(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	ASSERT_SERIALIZED(sc->arpcom.ac_if.if_serializer);

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY %s\n", "");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return val;
}

static void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

#ifdef NFE_DEBUG
	if (ntries == 1000)
		DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
#endif
}

#ifdef DEVICE_POLLING

static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case POLL_REGISTER:
		nfe_disable_intrs(sc);
		break;

	case POLL_DEREGISTER:
		nfe_enable_intrs(sc);
		break;

	case POLL_AND_CHECK_STATUS:
		/* fall through */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			nfe_rxeof(sc);
			nfe_txeof(sc, 1);
		}
		break;
	}
}

#endif
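
/*
 * Interrupt handler.  With dynamic interrupt moderation (NFE_F_DYN_IM)
 * the handler flips between two interrupt sources: when an interrupt
 * actually reaps RX or TX descriptors, it switches the IRQ mask to the
 * hardware timer (NFE_IRQ_IMTIMER) to batch further work; once an
 * interrupt finds nothing to do, it switches back to plain TX/RX
 * interrupts (NFE_IRQ_NOIMTIMER) to keep latency low.
 */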
static void
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t r;

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	if (r == 0)
		return;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed %s\n", "");
	}

	if (ifp->if_flags & IFF_RUNNING) {
		int ret;

		/* check Rx ring */
		ret = nfe_rxeof(sc);

		/* check Tx ring */
		ret |= nfe_txeof(sc, 1);

		if (sc->sc_flags & NFE_F_DYN_IM) {
			if (ret && (sc->sc_flags & NFE_F_IRQ_TIMER) == 0) {
				/*
				 * Assume that using hardware timer could
				 * reduce the interrupt rate.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_IMTIMER);
				sc->sc_flags |= NFE_F_IRQ_TIMER;
			} else if (!ret && (sc->sc_flags & NFE_F_IRQ_TIMER)) {
				/*
				 * Nothing needs to be processed, fall back to
				 * use TX/RX interrupts.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_NOIMTIMER);
				sc->sc_flags &= ~NFE_F_IRQ_TIMER;

				/*
				 * Recollect, mainly to avoid the possible
				 * race introduced by changing interrupt
				 * masks.
				 */
				nfe_rxeof(sc);
				nfe_txeof(sc, 1);
			}
		}
	}
}

static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask, jumbo_cap;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		if ((sc->sc_caps & NFE_JUMBO_SUP) && sc->rxq.jbuf != NULL)
			jumbo_cap = 1;
		else
			jumbo_cap = 0;

		if ((jumbo_cap && ifr->ifr_mtu > NFE_JUMBO_MTU) ||
		    (!jumbo_cap && ifr->ifr_mtu > ETHERMTU)) {
			return EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM;
		if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) {
			ifp->if_capenable ^= mask;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist = NFE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;

			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

static int
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_rx_ring *ring = &sc->rxq;
	int reap;
	struct mbuf_chain chain[MAXCPU];

	reap = 0;
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);

	ether_input_chain_init(chain);

	for (;;) {
		struct nfe_rx_data *data = &ring->data[ring->cur];
		struct mbuf *m;
		uint16_t flags;
		int len, error;

		if (sc->sc_caps & NFE_40BIT_ADDR) {
			struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		reap = 1;

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		m = data->m;

		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
		else
			error = nfe_newbuf_std(sc, ring, ring->cur, 0);
		if (error) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) &&
		    (flags & NFE_RX_CSUMOK)) {
			if (flags & NFE_RX_IP_CSUMOK_V2) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
							  CSUM_IP_VALID;
			}

			if (flags &
			    (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
							  CSUM_PSEUDO_HDR |
							  CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		ifp->if_ipackets++;
		ether_input_chain(ifp, m, chain);
skip:
		nfe_set_ready_rxdesc(sc, ring, ring->cur);
		sc->rxq.cur = (sc->rxq.cur + 1) % sc->sc_rx_ring_count;
	}

	if (reap) {
		bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
		ether_input_dispatch(chain);
	}
	return reap;
}

static int
nfe_txeof(struct nfe_softc *sc, int start)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_tx_ring *ring = &sc->txq;
	struct nfe_tx_data *data = NULL;

	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);
	while (ring->next != ring->cur) {
		uint16_t flags;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			flags = le16toh(ring->desc64[ring->next].flags);
		else
			flags = le16toh(ring->desc32[ring->next].flags);

		if (flags & NFE_TX_VALID)
			break;

		data = &ring->data[ring->next];

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				if_printf(ifp, "tx v1 error 0x%4b\n", flags,
					  NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				if_printf(ifp, "tx v2 error 0x%4b\n", flags,
					  NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		}

		if (data->m == NULL) {	/* should not get here */
			if_printf(ifp,
				  "last fragment bit w/o associated mbuf!\n");
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(ring->data_tag, data->map,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_tag, data->map);
		m_freem(data->m);
		data->m = NULL;
skip:
		ring->queued--;
		KKASSERT(ring->queued >= 0);
		ring->next = (ring->next + 1) % sc->sc_tx_ring_count;
	}

	if (sc->sc_tx_ring_count - ring->queued >=
	    sc->sc_tx_spare + NFE_NSEG_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	if (ring->queued == 0)
		ifp->if_timer = 0;

	if (start && !ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);

	if (data != NULL)
		return 1;
	else
		return 0;
}
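
/*
 * Map an mbuf chain into the TX ring at ring->cur.  The descriptors are
 * filled in two passes: first everything except NFE_TX_VALID, then (see
 * the comment further below) the valid bits are set in reverse order so
 * that the hardware never sees a partially initialized chain.
 */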
static int
nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
{
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	struct nfe_tx_data *data, *data_map;
	bus_dmamap_t map;
	struct nfe_desc64 *desc64 = NULL;
	struct nfe_desc32 *desc32 = NULL;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, j, maxsegs;

	data = &ring->data[ring->cur];
	map = data->map;
	data_map = data;	/* Remember who owns the DMA map */

	maxsegs = (sc->sc_tx_ring_count - ring->queued) - NFE_NSEG_RSVD;
	if (maxsegs > NFE_MAX_SCATTER)
		maxsegs = NFE_MAX_SCATTER;
	KASSERT(maxsegs >= sc->sc_tx_spare,
		("not enough segments %d,%d\n", maxsegs, sc->sc_tx_spare));

	ctx.nsegs = maxsegs;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
				     nfe_buf_dma_addr, &ctx, BUS_DMA_NOWAIT);
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(ring->data_tag, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
		goto back;
	}
	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		m_new = m_defrag(m0, MB_DONTWAIT);
		if (m_new == NULL) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		} else {
			m0 = m_new;
		}

		ctx.nsegs = maxsegs;
		ctx.segs = segs;
		error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
					     nfe_buf_dma_addr, &ctx,
					     BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			if (!error) {
				bus_dmamap_unload(ring->data_tag, map);
				error = EFBIG;
			}
			if_printf(&sc->arpcom.ac_if,
				  "could not map defragmented TX mbuf\n");
			goto back;
		}
	}

	error = 0;

	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = m0->m_pkthdr.ether_vlantag;

	if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= NFE_TX_TCP_CSUM;
	}

	/*
	 * XXX urm. somebody is unaware of how hardware works.  You
	 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
	 * the ring until the entire chain is actually *VALID*.  Otherwise
	 * the hardware may encounter a partially initialized chain that
	 * is marked as being ready to go when it in fact is not ready to
	 * go.
	 */

	for (i = 0; i < ctx.nsegs; i++) {
		j = (ring->cur + i) % sc->sc_tx_ring_count;
		data = &ring->data[j];

		if (sc->sc_caps & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->vtag = htole32(vtag);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &ring->desc32[j];
			desc32->physaddr = htole32(segs[i].ds_addr);
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
		vtag = 0;

		ring->queued++;
		KKASSERT(ring->queued <= sc->sc_tx_ring_count);
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
	} else {
		if (sc->sc_caps & NFE_JUMBO_SUP)
			flags = NFE_TX_LASTFRAG_V2;
		else
			flags = NFE_TX_LASTFRAG_V1;
		desc32->flags |= htole16(flags);
	}

	/*
	 * Set NFE_TX_VALID backwards so the hardware doesn't see the
	 * whole mess until the first descriptor in the map is flagged.
	 */
	for (i = ctx.nsegs - 1; i >= 0; --i) {
		j = (ring->cur + i) % sc->sc_tx_ring_count;
		if (sc->sc_caps & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->flags |= htole16(NFE_TX_VALID);
		} else {
			desc32 = &ring->desc32[j];
			desc32->flags |= htole16(NFE_TX_VALID);
		}
	}
	ring->cur = (ring->cur + ctx.nsegs) % sc->sc_tx_ring_count;

	/* Exchange DMA map */
	data_map->map = data->map;
	data->map = map;
	data->m = m0;

	bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);
back:
	if (error)
		m_freem(m0);
	return error;
}
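
/*
 * If the ring is too full to hold another frame, reclaim completed
 * descriptors once via nfe_txeof() before setting IFF_OACTIVE; the
 * `oactive' flag ensures the reclaim is only retried a single time
 * per attempt.
 */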
static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_tx_ring *ring = &sc->txq;
	int count = 0, oactive = 0;
	struct mbuf *m0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	for (;;) {
		int error;

		if (sc->sc_tx_ring_count - ring->queued <
		    sc->sc_tx_spare + NFE_NSEG_RSVD) {
			if (oactive) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}

			nfe_txeof(sc, 0);
			oactive = 1;
			continue;
		}

		m0 = ifq_dequeue(&ifp->if_snd, NULL);
		if (m0 == NULL)
			break;

		ETHER_BPF_MTAP(ifp, m0);

		error = nfe_encap(sc, ring, m0);
		if (error) {
			ifp->if_oerrors++;
			if (error == EFBIG) {
				if (oactive) {
					ifp->if_flags |= IFF_OACTIVE;
					break;
				}
				nfe_txeof(sc, 0);
				oactive = 1;
			}
			continue;
		} else {
			oactive = 0;
		}
		++count;

		/*
		 * NOTE:
		 * `m0' may be freed in nfe_encap(), so
		 * it should not be touched any more.
		 */
	}
	if (count == 0)	/* nothing sent */
		return;

	/* Sync TX descriptor ring */
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	/* Kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (ifp->if_flags & IFF_RUNNING) {
		if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
		nfe_txeof(sc, 1);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");

	nfe_init(ifp->if_softc);

	ifp->if_oerrors++;
}

static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t tmp;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	nfe_stop(sc);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_mac_reset(sc);

	/*
	 * NOTE:
	 * Switching between jumbo frames and normal frames should
	 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring().
	 */
	if (ifp->if_mtu > ETHERMTU) {
		sc->sc_flags |= NFE_F_USE_JUMBO;
		sc->rxq.bufsz = NFE_JBYTES;
		sc->sc_tx_spare = NFE_NSEG_SPARE_JUMBO;
		if (bootverbose)
			if_printf(ifp, "use jumbo frames\n");
	} else {
		sc->sc_flags &= ~NFE_F_USE_JUMBO;
		sc->rxq.bufsz = MCLBYTES;
		sc->sc_tx_spare = NFE_NSEG_SPARE;
		if (bootverbose)
			if_printf(ifp, "use non-jumbo frames\n");
	}

	error = nfe_init_tx_ring(sc, &sc->txq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	NFE_WRITE(sc, NFE_TX_POLL, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2 | sc->rxtxctl_desc;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;

	/*
	 * Although the adapter is capable of stripping VLAN tags from
	 * received frames (NFE_RXTX_VTAG_STRIP), we do not enable this
	 * functionality on purpose.  This will be done in software by
	 * our network stack.
	 */
	if (sc->sc_caps & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (sc->sc_caps & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
		  (sc->sc_rx_ring_count - 1) << 16 |
		  (sc->sc_tx_ring_count - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef DEVICE_POLLING
	if ((ifp->if_flags & IFF_POLLING))
		nfe_disable_intrs(sc);
	else
#endif
	nfe_enable_intrs(sc);

	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * If we had stuff in the TX ring before, it has all been cleaned
	 * out now, so we are not going to get an interrupt; jump-start
	 * any pending output.
	 */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
static void
nfe_stop(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_flags &= ~NFE_F_IRQ_TIMER;

#define WAITMAX	50000

	/*
	 * Abort Tx
	 */
	NFE_WRITE(sc, NFE_TX_CTL, 0);
	for (i = 0; i < WAITMAX; ++i) {
		DELAY(100);
		if ((NFE_READ(sc, NFE_TX_STATUS) & NFE_TX_STATUS_BUSY) == 0)
			break;
	}
	if (i == WAITMAX)
		if_printf(ifp, "can't stop TX\n");
	DELAY(100);

	/*
	 * Disable Rx
	 */
	NFE_WRITE(sc, NFE_RX_CTL, 0);
	for (i = 0; i < WAITMAX; ++i) {
		DELAY(100);
		if ((NFE_READ(sc, NFE_RX_STATUS) & NFE_RX_STATUS_BUSY) == 0)
			break;
	}
	if (i == WAITMAX)
		if_printf(ifp, "can't stop RX\n");
	DELAY(100);

#undef WAITMAX

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* Reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i, j, error, descsize;
	void **desc;

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->bufsz = MCLBYTES;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   sc->sc_rx_ring_count * descsize, 1,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX desc DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate RX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				sc->sc_rx_ring_count * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load RX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	if (sc->sc_caps & NFE_JUMBO_SUP) {
		ring->jbuf =
		    kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT(sc),
			    M_DEVBUF, M_WAITOK | M_ZERO);

		error = nfe_jpool_alloc(sc, ring);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate jumbo frames\n");
			kfree(ring->jbuf, M_DEVBUF);
			ring->jbuf = NULL;
			/* Allow jumbo frame allocation to fail */
		}
	}

	ring->data = kmalloc(sizeof(struct nfe_rx_data) * sc->sc_rx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT,
				   BUS_DMA_ALLOCNOW, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX mbuf DMA tag\n");
		return error;
	}

	/* Create a spare RX mbuf DMA map */
	error = bus_dmamap_create(ring->data_tag, 0, &ring->data_tmpmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create spare RX mbuf DMA map\n");
		bus_dma_tag_destroy(ring->data_tag);
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth RX mbuf DMA map\n", i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			if ((sc->sc_flags & NFE_F_USE_JUMBO) == 0)
				bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}

		nfe_set_ready_rxdesc(sc, ring, i);
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	return 0;
}

static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < sc->sc_rx_ring_count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->jbuf != NULL)
		kfree(ring->jbuf, M_DEVBUF);
	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf *jbuf;

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL) {
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
		jbuf->inuse = 1;
	} else {
		if_printf(ifp, "no free jumbo buffer\n");
	}

	lwkt_serialize_exit(&sc->sc_jbuf_serializer);

	return jbuf;
}
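
/*
 * Jumbo buffers are reference counted through jbuf->inuse: nfe_jref()
 * bumps the count and nfe_jfree() drops it, returning the buffer to
 * jfreelist only when the last reference is gone.  A dedicated
 * sc_jbuf_serializer (rather than the interface serializer) protects
 * the free list in nfe_jalloc()/nfe_jfree().
 */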
static void
nfe_jfree(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_softc *sc = jbuf->sc;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: free wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);
	atomic_subtract_int(&jbuf->inuse, 1);
	if (jbuf->inuse == 0)
		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
	lwkt_serialize_exit(&sc->sc_jbuf_serializer);
}

static void
nfe_jref(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: ref wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	atomic_add_int(&jbuf->inuse, 1);
}

static int
nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JPOOL_SIZE(sc), 1,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &ring->jtag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create jumbo DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->jtag, (void **)&ring->jpool,
				 BUS_DMA_WAITOK, &ring->jmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate jumbo DMA memory\n");
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->jtag, ring->jmap, ring->jpool,
				NFE_JPOOL_SIZE(sc),
				nfe_ring_dma_addr, &physaddr, BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load jumbo DMA map\n");
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
		return error;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	for (i = 0; i < NFE_JPOOL_COUNT(sc); i++) {
		jbuf = &ring->jbuf[i];

		jbuf->sc = sc;
		jbuf->ring = ring;
		jbuf->inuse = 0;
		jbuf->slot = i;
		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;
}

static void
nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->jtag != NULL) {
		bus_dmamap_unload(ring->jtag, ring->jmap);
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
	}
}

static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, j, error, descsize;
	void **desc;

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   sc->sc_tx_ring_count * descsize, 1,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX desc DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate TX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				sc->sc_tx_ring_count * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load TX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	ring->data = kmalloc(sizeof(struct nfe_tx_data) * sc->sc_tx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JBYTES, NFE_MAX_SCATTER,
				   BUS_SPACE_MAXSIZE_32BIT,
				   BUS_DMA_ALLOCNOW, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX buf DMA tag\n");
		return error;
	}

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth TX buf DMA map\n", i);
			goto fail;
		}
	}

	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		struct nfe_tx_data *data = &ring->data[i];

		if (sc->sc_caps & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_tag, data->map,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

static int
nfe_init_tx_ring(struct nfe_softc *sc __unused,
		 struct nfe_tx_ring *ring __unused)
{
	return 0;
}

static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < sc->sc_tx_ring_count; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}

		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
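
/*
 * Program the hardware multicast filter.  The chip matches packets
 * against a single (address, mask) pair, so this computes the bitwise
 * AND of all subscribed multicast addresses as the address and clears,
 * in the mask, every bit on which the addresses disagree.  ALLMULTI
 * and PROMISC simply use an all-zero address and mask.
 */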
static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < sc->sc_tx_ring_count; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}

		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
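
/*
 * Program the hardware multicast filter.  The chip does not use a
 * hash table; it matches an (addr, mask) pair: a packet is accepted
 * if its destination address agrees with 'addr' on every bit set in
 * 'mask'.  The loop below computes, over all subscribed multicast
 * addresses, the bits they all have in common ('addr') and clears
 * the disagreeing bits from 'mask'.
 *
 * A worked example (not from the code): subscribing to
 * 01:00:5e:00:00:01 and 01:00:5e:00:00:02 yields
 *	addr = 01:00:5e:00:00:00
 *	mask = ff:ff:ff:ff:ff:fc
 * which accepts both groups (and, unavoidably, the two other
 * addresses that differ only in the last two bits).
 */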
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
		  mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
		  mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t lo, hi;

	lo = NFE_READ(sc, NFE_MACADDR_LO);
	hi = NFE_READ(sc, NFE_MACADDR_HI);
	if (sc->sc_caps & NFE_FIX_EADDR) {
		addr[0] = (lo >> 8) & 0xff;
		addr[1] = (lo & 0xff);

		addr[2] = (hi >> 24) & 0xff;
		addr[3] = (hi >> 16) & 0xff;
		addr[4] = (hi >> 8) & 0xff;
		addr[5] = (hi & 0xff);
	} else {
		addr[0] = (hi & 0xff);
		addr[1] = (hi >> 8) & 0xff;
		addr[2] = (hi >> 16) & 0xff;
		addr[3] = (hi >> 24) & 0xff;

		addr[4] = (lo & 0xff);
		addr[5] = (lo >> 8) & 0xff;
	}
}

static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static void
nfe_ring_dma_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	if (error)
		return;

	KASSERT(nseg == 1, ("too many segments, should be 1\n"));

	*((uint32_t *)arg) = seg->ds_addr;
}

static void
nfe_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
		 bus_size_t mapsz __unused, int error)
{
	struct nfe_dma_ctx *ctx = arg;
	int i;

	if (error)
		return;

	if (nsegs > ctx->nsegs) {
		ctx->nsegs = 0;
		return;
	}

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}
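
/*
 * Attach a fresh standard (cluster) mbuf to RX slot 'idx'.  The new
 * mbuf is deliberately loaded into the ring's spare map
 * (data_tmpmap) first: if the load fails, the slot's current mbuf
 * and mapping are untouched and the ring stays fully populated.
 * Only on success are the two maps swapped and the descriptor
 * repointed at the new buffer.
 */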
static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int error;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(ring->data_tag, ring->data_tmpmap,
				     m, nfe_buf_dma_addr, &ctx,
				     wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error || ctx.nsegs == 0) {
		if (!error) {
			bus_dmamap_unload(ring->data_tag, ring->data_tmpmap);
			error = EFBIG;
			if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
		}
		m_freem(m);

		if (wait) {
			if_printf(&sc->arpcom.ac_if,
				  "could not map RX mbuf, error %d\n", error);
		}
		return error;
	}

	/* Unload the originally mapped mbuf */
	bus_dmamap_unload(ring->data_tag, data->map);

	/* Swap this slot's DMA map with the spare DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);

	bus_dmamap_sync(ring->data_tag, data->map, BUS_DMASYNC_PREREAD);
	return 0;
}

static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if,
			  "jumbo allocation failed -- packet dropped!\n");
		return ENOBUFS;
	}

	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);

	bus_dmamap_sync(ring->jtag, ring->jmap, BUS_DMASYNC_PREREAD);
	return 0;
}

static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		     bus_addr_t physaddr)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

#if defined(__LP64__)
		/* bus_addr_t is wider than 32 bits only on LP64 */
		desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
		desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
}

static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
}
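
/*
 * Sysctl handler for the interrupt moderation time.  One exported
 * integer encodes both settings: a positive value selects a fixed
 * moderation time, a negative value selects dynamic moderation
 * (NFE_F_DYN_IM) with the magnitude as the base time, and zero
 * disables moderation.  For instance, writing -64 enables dynamic
 * moderation with a base of 64 (units as consumed by NFE_IMTIME()).
 * Toggling between zero and non-zero, or changing the mode, requires
 * a full reinit; a pure time change just reprograms NFE_IMTIMER.
 */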
static int
nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS)
{
	struct nfe_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t flags;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	flags = sc->sc_flags & ~NFE_F_DYN_IM;
	v = sc->sc_imtime;
	if (sc->sc_flags & NFE_F_DYN_IM)
		v = -v;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < 0) {
		flags |= NFE_F_DYN_IM;
		v = -v;
	}

	if (v != sc->sc_imtime || (flags ^ sc->sc_flags)) {
		int old_imtime = sc->sc_imtime;
		uint32_t old_flags = sc->sc_flags;

		sc->sc_imtime = v;
		sc->sc_flags = flags;
		sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

		if ((ifp->if_flags & (IFF_POLLING | IFF_RUNNING)) ==
		    IFF_RUNNING) {
			if (old_imtime * sc->sc_imtime == 0 ||
			    (old_flags ^ sc->sc_flags)) {
				/*
				 * Moderation was switched on/off or the
				 * mode changed; a full reinit is needed.
				 */
				ifp->if_init(sc);
			} else {
				NFE_WRITE(sc, NFE_IMTIMER,
					  NFE_IMTIME(sc->sc_imtime));
			}
		}
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static void
nfe_powerup(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t pwr_state;
	uint16_t did;

	/*
	 * Bring MAC and PHY out of low power state
	 */
	pwr_state = NFE_READ(sc, NFE_PWR_STATE2) & ~NFE_PWRUP_MASK;

	did = pci_get_device(dev);
	if ((did == PCI_PRODUCT_NVIDIA_MCP51_LAN1 ||
	     did == PCI_PRODUCT_NVIDIA_MCP51_LAN2) &&
	    pci_get_revid(dev) >= 0xa3)
		pwr_state |= NFE_PWRUP_REV_A3;

	NFE_WRITE(sc, NFE_PWR_STATE2, pwr_state);
}

static void
nfe_mac_reset(struct nfe_softc *sc)
{
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	uint32_t macaddr_hi, macaddr_lo, tx_poll;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);

	/* Save several registers for later restoration */
	macaddr_hi = NFE_READ(sc, NFE_MACADDR_HI);
	macaddr_lo = NFE_READ(sc, NFE_MACADDR_LO);
	tx_poll = NFE_READ(sc, NFE_TX_POLL);

	NFE_WRITE(sc, NFE_MAC_RESET, NFE_RESET_ASSERT);
	DELAY(100);

	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);

	/* Restore the saved registers */
	NFE_WRITE(sc, NFE_MACADDR_HI, macaddr_hi);
	NFE_WRITE(sc, NFE_MACADDR_LO, macaddr_lo);
	NFE_WRITE(sc, NFE_TX_POLL, tx_poll);

	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
}

static void
nfe_enable_intrs(struct nfe_softc *sc)
{
	/*
	 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
	 * It is unclear how wide the timer is.  Base programming does
	 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE, so
	 * we don't get any interrupt moderation.  TX moderation is
	 * possible by using the timer interrupt instead of TX_DONE.
	 *
	 * It is unclear whether there are other bits that can be
	 * set to make the NFE device actually do interrupt moderation
	 * on the RX side.
	 *
	 * For now set a 128uS interval as a placeholder, but don't use
	 * the timer.
	 */
	if (sc->sc_imtime == 0)
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME_DEFAULT);
	else
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME(sc->sc_imtime));

	/* Enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, sc->sc_irq_enable);

	if (sc->sc_irq_enable & NFE_IRQ_TIMER)
		sc->sc_flags |= NFE_F_IRQ_TIMER;
	else
		sc->sc_flags &= ~NFE_F_IRQ_TIMER;
}

static void
nfe_disable_intrs(struct nfe_softc *sc)
{
	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	sc->sc_flags &= ~NFE_F_IRQ_TIMER;
}
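
/*
 * Note on the NFE_F_IRQ_TIMER bookkeeping above: the flag simply
 * mirrors whether NFE_IRQ_TIMER is part of the programmed IRQ mask,
 * presumably so that the interrupt handler can tell whether TX/RX
 * completion processing is driven by the moderation timer or by the
 * per-packet TX_DONE/RX_DONE interrupts.
 */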