/*	$OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $	*/
/*	$DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.16 2007/08/14 13:30:35 sephe Exp $	*/

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com> and
 * Matthew Dillon <dillon@apollo.backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/vlan/if_vlan_var.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include "miibus_if.h"

#include <dev/netif/nfe/if_nfereg.h>
#include <dev/netif/nfe/if_nfevar.h>

#define NFE_CSUM
#define NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

static int	nfe_probe(device_t);
static int	nfe_attach(device_t);
static int	nfe_detach(device_t);
static void	nfe_shutdown(device_t);
static int	nfe_resume(device_t);
static int	nfe_suspend(device_t);

static int	nfe_miibus_readreg(device_t, int, int);
static void	nfe_miibus_writereg(device_t, int, int, int);
static void	nfe_miibus_statchg(device_t);

#ifdef DEVICE_POLLING
static void	nfe_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	nfe_intr(void *);
static int	nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	nfe_rxeof(struct nfe_softc *);
static void	nfe_txeof(struct nfe_softc *);
static int	nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
			  struct mbuf *);
static void	nfe_start(struct ifnet *);
static void	nfe_watchdog(struct ifnet *);
static void	nfe_init(void *);
static void	nfe_stop(struct nfe_softc *);
static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
static void	nfe_jfree(void *);
static void	nfe_jref(void *);
static int	nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_ifmedia_upd(struct ifnet *);
static void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	nfe_setmulti(struct nfe_softc *);
static void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
static void	nfe_tick(void *);
static void	nfe_ring_dma_addr(void *, bus_dma_segment_t *, int, int);
static void	nfe_buf_dma_addr(void *, bus_dma_segment_t *, int, bus_size_t,
				 int);
static void	nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int, bus_addr_t);
static void	nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int);
static int	nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
			       int);
static int	nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
				 int);

#define NFE_DEBUG
#ifdef NFE_DEBUG

static int	nfe_debug = 0;
static int	nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT;

TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count);

SYSCTL_NODE(_hw, OID_AUTO, nfe, CTLFLAG_RD, 0, "nVidia GigE parameters");
SYSCTL_INT(_hw_nfe, OID_AUTO, rx_ring_count, CTLFLAG_RD, &nfe_rx_ring_count,
	   NFE_RX_RING_DEF_COUNT, "rx ring count");
SYSCTL_INT(_hw_nfe, OID_AUTO, debug, CTLFLAG_RW, &nfe_debug, 0,
	   "control debugging printfs");

#define DPRINTF(sc, fmt, ...) do {		\
	if (nfe_debug) {			\
		if_printf(&(sc)->arpcom.ac_if,	\
			  fmt, __VA_ARGS__);	\
	}					\
} while (0)

#define DPRINTFN(sc, lv, fmt, ...) do {		\
	if (nfe_debug >= (lv)) {		\
		if_printf(&(sc)->arpcom.ac_if,	\
			  fmt, __VA_ARGS__);	\
	}					\
} while (0)

#else	/* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)

#endif	/* NFE_DEBUG */

struct nfe_dma_ctx {
	int			nsegs;
	bus_dma_segment_t	*segs;
};

static const struct nfe_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	  "NVIDIA nForce Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	  "NVIDIA nForce2 Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	/* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
	   chipset, and possibly also the 400R; it might be that both nForce2-
	   and nForce3-based boards can use the same MCPs (= southbridges) */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	  "NVIDIA MCP67 Gigabit Ethernet" }
};

static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ 0, 0 }
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t	nfe_devclass;

DECLARE_DUMMY_MODULE(if_nfe);
MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
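
/*
 * Match the PCI vendor/device ID against the table above.  The per-chip
 * feature flags (jumbo frames, 40-bit DMA addressing, hardware checksum,
 * hardware VLAN tagging) are recorded in the softc here so that
 * nfe_attach() can advertise the matching capabilities.
 */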
static int
nfe_probe(device_t dev)
{
	const struct nfe_dev *n;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (n = nfe_devices; n->desc != NULL; ++n) {
		if (vid == n->vid && did == n->did) {
			struct nfe_softc *sc = device_get_softc(dev);

			switch (did) {
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_HW_CSUM;
				break;
			case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
				sc->sc_flags = NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_CK804_LAN1:
			case PCI_PRODUCT_NVIDIA_CK804_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_40BIT_ADDR |
					       NFE_HW_CSUM;
				break;
			case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_40BIT_ADDR |
					       NFE_HW_CSUM |
					       NFE_HW_VLAN;
				break;
			}

			device_set_desc(dev, n->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}
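
/*
 * Map the register window, allocate the IRQ and the Tx/Rx rings, probe
 * the PHY and attach to the network stack.  Any failure unwinds through
 * nfe_detach().
 */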
static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	lwkt_serialize_init(&sc->sc_jbuf_serializer);

	sc->sc_mem_rid = PCIR_BAR(0);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, irq;

		mem = pci_read_config(dev, sc->sc_mem_rid, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate io memory\n");
		return ENXIO;
	}
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	/* Allocate IRQ */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	nfe_get_macaddr(sc, eaddr);

	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
			      nfe_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = nfe_poll;
#endif
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifq_set_maxlen(&ifp->if_snd, NFE_IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = NFE_CSUM_FEATURES;
	}
#else
	sc->sc_flags &= ~NFE_HW_CSUM;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	callout_init(&sc->sc_tick_ch);

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not setup intr\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	nfe_detach(dev);
	return error;
}
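
/*
 * nfe_detach() doubles as the error unwind path for nfe_attach(), so
 * each teardown step checks that the resource was actually allocated.
 */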
static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		nfe_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);

	return 0;
}

static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP)
		nfe_init(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}
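
/*
 * MII register access: post a request through NFE_PHY_CTL and busy-wait
 * in 100us steps (up to 100ms) for the NFE_PHY_BUSY bit to clear.
 */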
static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY %s\n", "");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return val;
}

static void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

#ifdef NFE_DEBUG
	if (ntries == 1000)
		DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
#endif
}

#ifdef DEVICE_POLLING

static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;

	switch (cmd) {
	case POLL_REGISTER:
		/* Disable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
		break;
	case POLL_DEREGISTER:
		/* enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		break;
	case POLL_AND_CHECK_STATUS:
		/* fall through */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			nfe_rxeof(sc);
			nfe_txeof(sc);
		}
		break;
	}
}

#endif

static void
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t r;

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	if (r == 0)
		return;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed %s\n", "");
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}
}

static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFMTU:
		if (((sc->sc_flags & NFE_JUMBO_SUP) &&
		     ifr->ifr_mtu > NFE_JUMBO_MTU) ||
		    ((sc->sc_flags & NFE_JUMBO_SUP) == 0 &&
		     ifr->ifr_mtu > ETHERMTU)) {
			return EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			nfe_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM;
		if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) {
			ifp->if_capenable ^= mask;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist = NFE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;

			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}
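
/*
 * Reap received frames.  The hardware clears NFE_RX_READY in a
 * descriptor once it has filled the buffer; for each such descriptor we
 * validate the frame, load a fresh buffer into the slot and only then
 * pass the old mbuf up the stack, so an allocation failure just drops
 * the frame instead of leaving a hole in the ring.
 */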
static void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_rx_ring *ring = &sc->rxq;
	int reap;

	reap = 0;
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);

	for (;;) {
		struct nfe_rx_data *data = &ring->data[ring->cur];
		struct mbuf *m;
		uint16_t flags;
		int len, error;

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		reap = 1;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		m = data->m;

		if (sc->sc_flags & NFE_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
		else
			error = nfe_newbuf_std(sc, ring, ring->cur, 0);
		if (error) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) &&
		    (flags & NFE_RX_CSUMOK)) {
			if (flags & NFE_RX_IP_CSUMOK_V2) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
							  CSUM_IP_VALID;
			}

			if (flags &
			    (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
							  CSUM_PSEUDO_HDR |
							  CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		ifp->if_ipackets++;
		ifp->if_input(ifp, m);
skip:
		nfe_set_ready_rxdesc(sc, ring, ring->cur);
		sc->rxq.cur = (sc->rxq.cur + 1) % nfe_rx_ring_count;
	}

	if (reap)
		bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
}
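
/*
 * Reap transmitted frames between 'next' and 'cur'.  A descriptor with
 * NFE_TX_VALID still set has not been processed by the hardware yet.
 * The mbuf is unloaded and freed at the descriptor carrying the
 * last-fragment bit; if at least one slot was reclaimed, OACTIVE is
 * cleared and output is restarted.
 */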
static void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_tx_ring *ring = &sc->txq;
	struct nfe_tx_data *data = NULL;

	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);
	while (ring->next != ring->cur) {
		uint16_t flags;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			flags = le16toh(ring->desc64[ring->next].flags);
		else
			flags = le16toh(ring->desc32[ring->next].flags);

		if (flags & NFE_TX_VALID)
			break;

		data = &ring->data[ring->next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				if_printf(ifp, "tx v1 error 0x%4b\n", flags,
					  NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				if_printf(ifp, "tx v2 error 0x%4b\n", flags,
					  NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		}

		if (data->m == NULL) {	/* should not get here */
			if_printf(ifp,
				  "last fragment bit w/o associated mbuf!\n");
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(ring->data_tag, data->map,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_tag, data->map);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;
skip:
		ring->queued--;
		KKASSERT(ring->queued >= 0);
		ring->next = (ring->next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_start(ifp);
	}
}
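
/*
 * DMA-map an outgoing mbuf chain and fill one Tx descriptor per DMA
 * segment.  A chain with too many segments (EFBIG) is defragmented once
 * and remapped.  Checksum and VLAN information goes into the first
 * fragment only, the last-fragment bit into the last one, and
 * NFE_TX_VALID is set on all of them afterwards, in reverse order (see
 * the comments in the body below).
 */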
static int
nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
{
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	struct nfe_tx_data *data, *data_map;
	bus_dmamap_t map;
	struct nfe_desc64 *desc64 = NULL;
	struct nfe_desc32 *desc32 = NULL;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, j;

	data = &ring->data[ring->cur];
	map = data->map;
	data_map = data;	/* Remember who owns the DMA map */

	ctx.nsegs = NFE_MAX_SCATTER;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
				     nfe_buf_dma_addr, &ctx, BUS_DMA_NOWAIT);
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
		goto back;
	}

	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		m_new = m_defrag(m0, MB_DONTWAIT);
		if (m_new == NULL) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		} else {
			m0 = m_new;
		}

		ctx.nsegs = NFE_MAX_SCATTER;
		ctx.segs = segs;
		error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
					     nfe_buf_dma_addr, &ctx,
					     BUS_DMA_NOWAIT);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not map defragmented TX mbuf\n");
			goto back;
		}
	}

	error = 0;

	if (ring->queued + ctx.nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(ring->data_tag, map);
		error = ENOBUFS;
		goto back;
	}

	/* setup h/w VLAN tagging */
	if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m0->m_pkthdr.rcvif != NULL &&
	    m0->m_pkthdr.rcvif->if_type == IFT_L2VLAN) {
		struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;

		if (ifv != NULL)
			vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
	}

	if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= NFE_TX_TCP_CSUM;
	}

	/*
	 * XXX urm. somebody is unaware of how hardware works.  You
	 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
	 * the ring until the entire chain is actually *VALID*.  Otherwise
	 * the hardware may encounter a partially initialized chain that
	 * is marked as being ready to go when it in fact is not ready to
	 * go.
	 */

	for (i = 0; i < ctx.nsegs; i++) {
		j = (ring->cur + i) % NFE_TX_RING_COUNT;
		data = &ring->data[j];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->vtag = htole32(vtag);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &ring->desc32[j];
			desc32->physaddr = htole32(segs[i].ds_addr);
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
		vtag = 0;

		ring->queued++;
		KKASSERT(ring->queued <= NFE_TX_RING_COUNT);
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags = NFE_TX_LASTFRAG_V2;
		else
			flags = NFE_TX_LASTFRAG_V1;
		desc32->flags |= htole16(flags);
	}

	/*
	 * Set NFE_TX_VALID backwards so the hardware doesn't see the
	 * whole mess until the first descriptor in the map is flagged.
	 */
	for (i = ctx.nsegs - 1; i >= 0; --i) {
		j = (ring->cur + i) % NFE_TX_RING_COUNT;
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->flags |= htole16(NFE_TX_VALID);
		} else {
			desc32 = &ring->desc32[j];
			desc32->flags |= htole16(NFE_TX_VALID);
		}
	}
	ring->cur = (ring->cur + ctx.nsegs) % NFE_TX_RING_COUNT;

	/* Exchange DMA map */
	data_map->map = data->map;
	data->map = map;
	data->m = m0;

	bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);
back:
	if (error)
		m_freem(m0);
	return error;
}
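
/*
 * if_start handler: drain the send queue onto the Tx ring and kick the
 * transmitter once.  If nfe_encap() fails, the packet is dropped there
 * and the interface is marked OACTIVE until nfe_txeof() frees up slots.
 */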
static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_tx_ring *ring = &sc->txq;
	int count = 0;
	struct mbuf *m0;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	if (ifq_is_empty(&ifp->if_snd))
		return;

	for (;;) {
		m0 = ifq_dequeue(&ifp->if_snd, NULL);
		if (m0 == NULL)
			break;

		BPF_MTAP(ifp, m0);

		if (nfe_encap(sc, ring, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		++count;

		/*
		 * NOTE:
		 * `m0' may be freed in nfe_encap(), so
		 * it should not be touched any more.
		 */
	}
	if (count == 0)	/* nothing sent */
		return;

	/* Sync TX descriptor ring */
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	/* Kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING) {
		if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
		nfe_txeof(sc);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");

	nfe_init(ifp->if_softc);

	ifp->if_oerrors++;
}
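
/*
 * Bring the interface up: choose between jumbo and standard Rx buffers
 * based on the MTU, (re)initialize both rings, program the chip
 * revision specific RXTX control bits, point the hardware at the rings
 * and finally enable Rx, Tx and (unless polling) interrupts.
 */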
static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t tmp;
	int error;

	nfe_stop(sc);

	/*
	 * NOTE:
	 * Switching between jumbo frames and normal frames should
	 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring().
	 */
	if (ifp->if_mtu > ETHERMTU) {
		sc->sc_flags |= NFE_USE_JUMBO;
		sc->rxq.bufsz = NFE_JBYTES;
		if (bootverbose)
			if_printf(ifp, "use jumbo frames\n");
	} else {
		sc->sc_flags &= ~NFE_USE_JUMBO;
		sc->rxq.bufsz = MCLBYTES;
		if (bootverbose)
			if_printf(ifp, "use non-jumbo frames\n");
	}

	error = nfe_init_tx_ring(sc, &sc->txq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;

	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose.  This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
		  (nfe_rx_ring_count - 1) << 16 |
		  (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	/*
	 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
	 * It is unclear how wide the timer is.  Base programming does
	 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE, so
	 * we don't get any interrupt moderation.  TX moderation is
	 * possible by using the timer interrupt instead of TX_DONE.
	 *
	 * It is unclear whether there are other bits that can be
	 * set to make the NFE device actually do interrupt moderation
	 * on the RX side.
	 *
	 * For now set a 128uS interval as a placeholder, but don't use
	 * the timer.
	 */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef DEVICE_POLLING
	if ((ifp->if_flags & IFF_POLLING) == 0)
#endif
	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * If we had stuff in the Tx ring before, it's all cleaned out now,
	 * so we are not going to get an interrupt; jump-start any pending
	 * output.
	 */
	ifp->if_start(ifp);
}
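
/*
 * Bring the interface down and release all buffers held by the rings.
 * See the comment in the body for why the two DELAYs around clearing
 * the control registers are there.
 */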
static void
nfe_stop(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/*
	 * Are NFE_TX_CTL and NFE_RX_CTL polled by the chip microcontroller
	 * or do they directly reset/terminate the DMA hardware?  Nobody
	 * knows.
	 *
	 * Add two delays:
	 *
	 * (1) Delay before zeroing out NFE_TX_CTL.  This seems to help a
	 * watchdog timeout that occurs after a stop/init sequence.  I am
	 * theorizing that a TX KICK occurring just prior to a reinit (e.g.
	 * due to dhclient) is queueing an interrupt to the microcontroller
	 * which gets delayed until after we clear the control registers
	 * down below, resulting in mass confusion.  TX KICK is clearly
	 * hardware aided whereas the other bits in the control register
	 * are more likely to be polled by the microcontroller.
	 *
	 * (2) Delay after zeroing out TX and RX CTL registers, under the
	 * assumption that primary DMA is initiated and terminated by
	 * the microcontroller and not hardware (and anyway, one can hardly
	 * expect the DMA engine to just instantly stop!).  We don't want
	 * to rip the rings out from under it before it has had a chance to
	 * actually stop!
	 */
	DELAY(1000);

	/* Abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* Disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	DELAY(1000);

	/* Reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}
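
/*
 * Allocate the Rx descriptor ring (32- or 64-bit descriptors, depending
 * on NFE_40BIT_ADDR), one DMA map per slot plus a spare map used by
 * nfe_newbuf_std(), and the jumbo buffer pool when the chip supports
 * jumbo frames.
 */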
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i, j, error, descsize;
	void **desc;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->jbuf = kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT,
			     M_DEVBUF, M_WAITOK | M_ZERO);
	ring->data = kmalloc(sizeof(struct nfe_rx_data) * nfe_rx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	ring->bufsz = MCLBYTES;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   nfe_rx_ring_count * descsize, 1,
				   nfe_rx_ring_count * descsize,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create desc RX DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate RX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				nfe_rx_ring_count * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load RX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	if (sc->sc_flags & NFE_JUMBO_SUP) {
		error = nfe_jpool_alloc(sc, ring);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate jumbo frames\n");
			return error;
		}
	}

	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   0, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX mbuf DMA tag\n");
		return error;
	}

	/* Create a spare RX mbuf DMA map */
	error = bus_dmamap_create(ring->data_tag, 0, &ring->data_tmpmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create spare RX mbuf DMA map\n");
		bus_dma_tag_destroy(ring->data_tag);
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < nfe_rx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth RX mbuf DMA map\n", i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < nfe_rx_ring_count; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			if ((sc->sc_flags & NFE_USE_JUMBO) == 0)
				bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < nfe_rx_ring_count; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}

		nfe_set_ready_rxdesc(sc, ring, i);
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	return 0;
}

static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < nfe_rx_ring_count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->jbuf != NULL)
		kfree(ring->jbuf, M_DEVBUF);
	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}
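
/*
 * Jumbo buffers are carved out of one large DMA'able pool and handed to
 * mbufs as external storage, so they are reference counted (nfe_jref/
 * nfe_jfree) and only returned to the free list when the last reference
 * is dropped.  The free list has its own serializer because an mbuf may
 * be freed from anywhere in the stack, outside the ifnet serializer.
 */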
static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf *jbuf;

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL) {
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
		jbuf->inuse = 1;
	} else {
		if_printf(ifp, "no free jumbo buffer\n");
	}

	lwkt_serialize_exit(&sc->sc_jbuf_serializer);

	return jbuf;
}

static void
nfe_jfree(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_softc *sc = jbuf->sc;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: free wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);
	atomic_subtract_int(&jbuf->inuse, 1);
	if (jbuf->inuse == 0)
		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
	lwkt_serialize_exit(&sc->sc_jbuf_serializer);
}

static void
nfe_jref(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: ref wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	atomic_add_int(&jbuf->inuse, 1);
}

static int
nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JPOOL_SIZE, 1, NFE_JPOOL_SIZE,
				   0, &ring->jtag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create jumbo DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->jtag, (void **)&ring->jpool,
				 BUS_DMA_WAITOK, &ring->jmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate jumbo DMA memory\n");
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->jtag, ring->jmap, ring->jpool,
				NFE_JPOOL_SIZE, nfe_ring_dma_addr, &physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load jumbo DMA map\n");
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
		return error;
	}

	/* ...and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->sc = sc;
		jbuf->ring = ring;
		jbuf->inuse = 0;
		jbuf->slot = i;
		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;
}

static void
nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->jtag != NULL) {
		bus_dmamap_unload(ring->jtag, ring->jmap);
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
	}
}
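
/*
 * Allocate the Tx descriptor ring and one DMA map per Tx slot; the
 * buffer tag allows up to NFE_MAX_SCATTER segments per packet.
 */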
static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, j, error, descsize;
	void **desc;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_TX_RING_COUNT * descsize, 1,
				   NFE_TX_RING_COUNT * descsize,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX desc DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate TX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				NFE_TX_RING_COUNT * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load TX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JBYTES * NFE_MAX_SCATTER,
				   NFE_MAX_SCATTER, NFE_JBYTES,
				   0, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX buf DMA tag\n");
		return error;
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth TX buf DMA map\n", i);
			goto fail;
		}
	}

	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		struct nfe_tx_data *data = &ring->data[i];

		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_tag, data->map,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

static int
nfe_init_tx_ring(struct nfe_softc *sc __unused,
		 struct nfe_tx_ring *ring __unused)
{
	return 0;
}

static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < NFE_TX_RING_COUNT; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}

		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
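
/*
 * Program the hardware multicast filter.  The filter matches on an
 * (address, mask) pair: 'addr' accumulates the AND of all enabled
 * multicast addresses while 'mask' ends up with a 1 in every bit
 * position on which all of those addresses agree.  Promiscuous and
 * allmulti modes simply clear both.
 */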
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
		  mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
		  mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}
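
/*
 * The factory MAC address is stored in reverse byte order in the
 * NFE_MACADDR_{HI,LO} registers; nfe_get_macaddr() undoes this, while
 * nfe_set_macaddr() programs the address back in normal order.
 */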
static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}

static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static void
nfe_ring_dma_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	if (error)
		return;

	KASSERT(nseg == 1, ("too many segments, should be 1\n"));

	*((uint32_t *)arg) = seg->ds_addr;
}

static void
nfe_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
		 bus_size_t mapsz __unused, int error)
{
	struct nfe_dma_ctx *ctx = arg;
	int i;

	if (error)
		return;

	KASSERT(nsegs <= ctx->nsegs,
		("too many segments(%d), should be <= %d\n",
		 nsegs, ctx->nsegs));

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}

static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int error;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(ring->data_tag, ring->data_tmpmap,
				     m, nfe_buf_dma_addr, &ctx,
				     wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "could not map RX mbuf %d\n",
			  error);
		return error;
	}

	/* Unload originally mapped mbuf */
	bus_dmamap_unload(ring->data_tag, data->map);

	/* Swap this DMA map with tmp DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);

	bus_dmamap_sync(ring->data_tag, data->map, BUS_DMASYNC_PREREAD);
	return 0;
}
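
/*
 * Attach a chunk from the jumbo pool to a fresh mbuf header as external
 * storage.  No per-buffer map swap is needed here; the whole pool was
 * DMA-mapped once in nfe_jpool_alloc().
 */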
static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			  "-- packet dropped!\n");
		return ENOBUFS;
	}

	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);

	bus_dmamap_sync(ring->jtag, ring->jmap, BUS_DMASYNC_PREREAD);
	return 0;
}

static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		     bus_addr_t physaddr)
{
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

#if defined(__LP64__)
		desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
		desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
}

static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
{
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
}