/*	$OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $	*/
/*	$DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.7 2006/12/24 04:58:27 sephe Exp $	*/

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com> and
 * Matthew Dillon <dillon@apollo.backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/vlan/if_vlan_var.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include "miibus_if.h"

#include "if_nfereg.h"
#include "if_nfevar.h"

static int	nfe_probe(device_t);
static int	nfe_attach(device_t);
static int	nfe_detach(device_t);
static void	nfe_shutdown(device_t);
static int	nfe_resume(device_t);
static int	nfe_suspend(device_t);

static int	nfe_miibus_readreg(device_t, int, int);
static void	nfe_miibus_writereg(device_t, int, int, int);
static void	nfe_miibus_statchg(device_t);

#ifdef DEVICE_POLLING
static void	nfe_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	nfe_intr(void *);
static int	nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	nfe_rxeof(struct nfe_softc *);
static void	nfe_txeof(struct nfe_softc *);
static int	nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
			  struct mbuf *);
static void	nfe_start(struct ifnet *);
static void	nfe_watchdog(struct ifnet *);
static void	nfe_init(void *);
static void	nfe_stop(struct nfe_softc *);
static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
static void	nfe_jfree(void *);
static void	nfe_jref(void *);
static int	nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_ifmedia_upd(struct ifnet *);
static void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	nfe_setmulti(struct nfe_softc *);
static void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
static void	nfe_tick(void *);
static void	nfe_ring_dma_addr(void *, bus_dma_segment_t *, int, int);
static void	nfe_buf_dma_addr(void *, bus_dma_segment_t *, int, bus_size_t,
				 int);
static void	nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int, bus_addr_t);
static void	nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int);
static int	nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
			       int);
static int	nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
				 int);

#define NFE_DEBUG
#ifdef NFE_DEBUG

static int	nfe_debug = 0;

SYSCTL_NODE(_hw, OID_AUTO, nfe, CTLFLAG_RD, 0, "nVidia GigE parameters");
SYSCTL_INT(_hw_nfe, OID_AUTO, debug, CTLFLAG_RW, &nfe_debug, 0,
	   "control debugging printfs");

#define DPRINTF(sc, fmt, ...) do {			\
	if (nfe_debug) {				\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#define DPRINTFN(sc, lv, fmt, ...) do {			\
	if (nfe_debug >= (lv)) {			\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#else	/* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)

#endif	/* NFE_DEBUG */

struct nfe_dma_ctx {
	int			nsegs;
	bus_dma_segment_t	*segs;
};

static const struct nfe_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	  "NVIDIA nForce Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	  "NVIDIA nForce2 Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	/* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
	   chipset, and possibly also the 400R; it might be that both nForce2-
	   and nForce3-based boards can use the same MCPs (= southbridges) */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	  "NVIDIA MCP65 Gigabit Ethernet" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	  "NVIDIA MCP67 Gigabit Ethernet" }
};

static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ 0, 0 }
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t	nfe_devclass;

DECLARE_DUMMY_MODULE(if_nfe);
MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);

static int
nfe_probe(device_t dev)
{
	const struct nfe_dev *n;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (n = nfe_devices; n->desc != NULL; ++n) {
		if (vid == n->vid && did == n->did) {
			struct nfe_softc *sc = device_get_softc(dev);

			switch (did) {
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_HW_CSUM;
				break;
			case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
				sc->sc_flags = NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_CK804_LAN1:
			case PCI_PRODUCT_NVIDIA_CK804_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_40BIT_ADDR |
					       NFE_HW_CSUM;
				break;
			case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_40BIT_ADDR |
					       NFE_HW_CSUM |
					       NFE_HW_VLAN;
				break;
			}

			/* Enable jumbo frames for adapters that support it */
			if (sc->sc_flags & NFE_JUMBO_SUP)
				sc->sc_flags |= NFE_USE_JUMBO;

			device_set_desc(dev, n->desc);
			return 0;
		}
	}
	return ENXIO;
}

static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
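	/*
	 * Note: the jumbo buffer free list gets its own serializer;
	 * nfe_jfree() runs as an mbuf ext_free callback and can therefore
	 * be invoked without the interface serializer held.
	 */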
	lwkt_serialize_init(&sc->sc_jbuf_serializer);

	sc->sc_mem_rid = PCIR_BAR(0);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, irq;

		mem = pci_read_config(dev, sc->sc_mem_rid, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate io memory\n");
		return ENXIO;
	}
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	/* Allocate IRQ */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	nfe_get_macaddr(sc, eaddr);

	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
			      nfe_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = nfe_poll;
#endif
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifq_set_maxlen(&ifp->if_snd, NFE_IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if 0
	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;
#endif

	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
#if 0
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
					IFCAP_CSUM_UDPv4;
#else
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
#endif
	}
#endif
	ifp->if_capenable = ifp->if_capabilities;

	callout_init(&sc->sc_tick_ch);

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not setup intr\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	nfe_detach(dev);
	return error;
}

static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		nfe_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);

	return 0;
}

static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP) {
		nfe_init(sc);
		if (ifp->if_flags & IFF_RUNNING)
			ifp->if_start(ifp);
	}
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY %s\n", "");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return val;
}

static void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

#ifdef NFE_DEBUG
	if (ntries == 1000)
		DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
#endif
}

#ifdef DEVICE_POLLING

static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;

	switch (cmd) {
	case POLL_REGISTER:
		/* Disable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
		break;
	case POLL_DEREGISTER:
		/* enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		break;
	case POLL_AND_CHECK_STATUS:
		/* fall through */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			nfe_rxeof(sc);
			nfe_txeof(sc);
		}
		break;
	}
}

#endif

static void
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t r;

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	if (r == 0)
		return;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed %s\n", "");
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}
}

static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFMTU:
		/* XXX NFE_USE_JUMBO should be set here */
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

static void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_rx_ring *ring = &sc->rxq;
	int reap;

	reap = 0;
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);

	for (;;) {
		struct nfe_rx_data *data = &ring->data[ring->cur];
		struct mbuf *m;
		uint16_t flags;
		int len, error;

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		reap = 1;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		m = data->m;

		if (sc->sc_flags & NFE_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
		else
			error = nfe_newbuf_std(sc, ring, ring->cur, 0);
		if (error) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#elif defined(NFE_CSUM)
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

		ifp->if_ipackets++;
		ifp->if_input(ifp, m);
skip:
		nfe_set_ready_rxdesc(sc, ring, ring->cur);
		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}

	if (reap)
		bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
}

static void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_tx_ring *ring = &sc->txq;
	struct nfe_tx_data *data = NULL;

	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);
	while (ring->next != ring->cur) {
		uint16_t flags;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			flags = le16toh(ring->desc64[ring->next].flags);
		else
			flags = le16toh(ring->desc32[ring->next].flags);

		if (flags & NFE_TX_VALID)
			break;

		data = &ring->data[ring->next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				if_printf(ifp, "tx v1 error 0x%4b\n", flags,
					  NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				if_printf(ifp, "tx v2 error 0x%4b\n", flags,
					  NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		}

		if (data->m == NULL) {	/* should not get there */
			if_printf(ifp,
				  "last fragment bit w/o associated mbuf!\n");
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(ring->data_tag, data->map,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_tag, data->map);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;
skip:
		ring->queued--;
		KKASSERT(ring->queued >= 0);
		ring->next = (ring->next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_start(ifp);
	}
}

static int
nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
{
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	struct nfe_tx_data *data, *data_map;
	bus_dmamap_t map;
	struct nfe_desc64 *desc64 = NULL;
	struct nfe_desc32 *desc32 = NULL;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, j;

	data = &ring->data[ring->cur];
	map = data->map;
	data_map = data;	/* Remember who owns the DMA map */

	ctx.nsegs = NFE_MAX_SCATTER;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
				     nfe_buf_dma_addr, &ctx, BUS_DMA_NOWAIT);
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
		goto back;
	}

	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		m_new = m_defrag(m0, MB_DONTWAIT);
		if (m_new == NULL) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		} else {
			m0 = m_new;
		}

		ctx.nsegs = NFE_MAX_SCATTER;
		ctx.segs = segs;
		error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
					     nfe_buf_dma_addr, &ctx,
					     BUS_DMA_NOWAIT);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not map defragged TX mbuf\n");
			goto back;
		}
	}

	error = 0;

	if (ring->queued + ctx.nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(ring->data_tag, map);
		error = ENOBUFS;
		goto back;
	}

	/* setup h/w VLAN tagging */
	if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m0->m_pkthdr.rcvif != NULL &&
	    m0->m_pkthdr.rcvif->if_type == IFT_L2VLAN) {
		struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;

		if (ifv != NULL)
			vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
	}

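	/*
	 * Setup TX checksum offload flags (compile-time optional); they
	 * apply to the first fragment only, see the descriptor loop below.
	 */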
#ifdef NFE_CSUM
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;
#endif

	/*
	 * XXX urm. somebody is unaware of how hardware works.  You
	 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
	 * the ring until the entire chain is actually *VALID*.  Otherwise
	 * the hardware may encounter a partially initialized chain that
	 * is marked as being ready to go when it in fact is not ready to
	 * go.
	 */

	for (i = 0; i < ctx.nsegs; i++) {
		j = (ring->cur + i) % NFE_TX_RING_COUNT;
		data = &ring->data[j];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
#if defined(__LP64__)
			desc64->physaddr[0] =
				htole32(segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
				htole32(segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->vtag = htole32(vtag);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &ring->desc32[j];
			desc32->physaddr = htole32(segs[i].ds_addr);
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
		vtag = 0;

		ring->queued++;
		KKASSERT(ring->queued <= NFE_TX_RING_COUNT);
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags = NFE_TX_LASTFRAG_V2;
		else
			flags = NFE_TX_LASTFRAG_V1;
		desc32->flags |= htole16(flags);
	}

	/*
	 * Set NFE_TX_VALID backwards so the hardware doesn't see the
	 * whole mess until the first descriptor in the map is flagged.
	 */
	for (i = ctx.nsegs - 1; i >= 0; --i) {
		j = (ring->cur + i) % NFE_TX_RING_COUNT;
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->flags |= htole16(NFE_TX_VALID);
		} else {
			desc32 = &ring->desc32[j];
			desc32->flags |= htole16(NFE_TX_VALID);
		}
	}
	ring->cur = (ring->cur + ctx.nsegs) % NFE_TX_RING_COUNT;

	/* Exchange DMA map */
	data_map->map = data->map;
	data->map = map;
	data->m = m0;

	bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);
back:
	if (error)
		m_freem(m0);
	return error;
}

static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_tx_ring *ring = &sc->txq;
	int count = 0;
	struct mbuf *m0;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	if (ifq_is_empty(&ifp->if_snd))
		return;

	for (;;) {
		m0 = ifq_dequeue(&ifp->if_snd, NULL);
		if (m0 == NULL)
			break;

		BPF_MTAP(ifp, m0);

		if (nfe_encap(sc, ring, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		++count;

		/*
		 * NOTE:
		 * `m0' may be freed in nfe_encap(), so
		 * it should not be touched any more.
1162 */ 1163 } 1164 if (count == 0) /* nothing sent */ 1165 return; 1166 1167 /* Sync TX descriptor ring */ 1168 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE); 1169 1170 /* Kick Tx */ 1171 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 1172 1173 /* 1174 * Set a timeout in case the chip goes out to lunch. 1175 */ 1176 ifp->if_timer = 5; 1177 } 1178 1179 static void 1180 nfe_watchdog(struct ifnet *ifp) 1181 { 1182 struct nfe_softc *sc = ifp->if_softc; 1183 1184 if (ifp->if_flags & IFF_RUNNING) { 1185 if_printf(ifp, "watchdog timeout - lost interrupt recovered\n"); 1186 nfe_txeof(sc); 1187 return; 1188 } 1189 1190 if_printf(ifp, "watchdog timeout\n"); 1191 1192 nfe_init(ifp->if_softc); 1193 1194 ifp->if_oerrors++; 1195 1196 if (!ifq_is_empty(&ifp->if_snd)) 1197 ifp->if_start(ifp); 1198 } 1199 1200 static void 1201 nfe_init(void *xsc) 1202 { 1203 struct nfe_softc *sc = xsc; 1204 struct ifnet *ifp = &sc->arpcom.ac_if; 1205 uint32_t tmp; 1206 int error; 1207 1208 nfe_stop(sc); 1209 1210 error = nfe_init_tx_ring(sc, &sc->txq); 1211 if (error) { 1212 nfe_stop(sc); 1213 return; 1214 } 1215 1216 error = nfe_init_rx_ring(sc, &sc->rxq); 1217 if (error) { 1218 nfe_stop(sc); 1219 return; 1220 } 1221 1222 NFE_WRITE(sc, NFE_TX_UNK, 0); 1223 NFE_WRITE(sc, NFE_STATUS, 0); 1224 1225 sc->rxtxctl = NFE_RXTX_BIT2; 1226 if (sc->sc_flags & NFE_40BIT_ADDR) 1227 sc->rxtxctl |= NFE_RXTX_V3MAGIC; 1228 else if (sc->sc_flags & NFE_JUMBO_SUP) 1229 sc->rxtxctl |= NFE_RXTX_V2MAGIC; 1230 #ifdef NFE_CSUM 1231 if (sc->sc_flags & NFE_HW_CSUM) 1232 sc->rxtxctl |= NFE_RXTX_RXCSUM; 1233 #endif 1234 1235 /* 1236 * Although the adapter is capable of stripping VLAN tags from received 1237 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on 1238 * purpose. This will be done in software by our network stack. 
1239 */ 1240 if (sc->sc_flags & NFE_HW_VLAN) 1241 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT; 1242 1243 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); 1244 DELAY(10); 1245 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 1246 1247 if (sc->sc_flags & NFE_HW_VLAN) 1248 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); 1249 1250 NFE_WRITE(sc, NFE_SETUP_R6, 0); 1251 1252 /* set MAC address */ 1253 nfe_set_macaddr(sc, sc->arpcom.ac_enaddr); 1254 1255 /* tell MAC where rings are in memory */ 1256 #ifdef __LP64__ 1257 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32); 1258 #endif 1259 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff); 1260 #ifdef __LP64__ 1261 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32); 1262 #endif 1263 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff); 1264 1265 NFE_WRITE(sc, NFE_RING_SIZE, 1266 (NFE_RX_RING_COUNT - 1) << 16 | 1267 (NFE_TX_RING_COUNT - 1)); 1268 1269 NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz); 1270 1271 /* force MAC to wakeup */ 1272 tmp = NFE_READ(sc, NFE_PWR_STATE); 1273 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP); 1274 DELAY(10); 1275 tmp = NFE_READ(sc, NFE_PWR_STATE); 1276 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID); 1277 1278 #if 1 1279 /* configure interrupts coalescing/mitigation */ 1280 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT); 1281 #else 1282 /* no interrupt mitigation: one interrupt per packet */ 1283 NFE_WRITE(sc, NFE_IMTIMER, 970); 1284 #endif 1285 1286 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC); 1287 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); 1288 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); 1289 1290 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ 1291 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); 1292 1293 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); 1294 NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC); 1295 1296 sc->rxtxctl &= ~NFE_RXTX_BIT2; 1297 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 1298 DELAY(10); 1299 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl); 1300 1301 /* set Rx filter */ 1302 nfe_setmulti(sc); 1303 1304 nfe_ifmedia_upd(ifp); 1305 1306 /* enable Rx */ 1307 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); 1308 1309 /* enable Tx */ 1310 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); 1311 1312 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1313 1314 #ifdef DEVICE_POLLING 1315 if ((ifp->if_flags & IFF_POLLING) == 0) 1316 #endif 1317 /* enable interrupts */ 1318 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); 1319 1320 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc); 1321 1322 ifp->if_flags |= IFF_RUNNING; 1323 ifp->if_flags &= ~IFF_OACTIVE; 1324 } 1325 1326 static void 1327 nfe_stop(struct nfe_softc *sc) 1328 { 1329 struct ifnet *ifp = &sc->arpcom.ac_if; 1330 1331 callout_stop(&sc->sc_tick_ch); 1332 1333 ifp->if_timer = 0; 1334 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1335 1336 /* Abort Tx */ 1337 NFE_WRITE(sc, NFE_TX_CTL, 0); 1338 1339 /* Disable Rx */ 1340 NFE_WRITE(sc, NFE_RX_CTL, 0); 1341 1342 /* Disable interrupts */ 1343 NFE_WRITE(sc, NFE_IRQ_MASK, 0); 1344 1345 /* Reset Tx and Rx rings */ 1346 nfe_reset_tx_ring(sc, &sc->txq); 1347 nfe_reset_rx_ring(sc, &sc->rxq); 1348 } 1349 1350 static int 1351 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1352 { 1353 int i, j, error, descsize; 1354 void **desc; 1355 1356 if (sc->sc_flags & NFE_40BIT_ADDR) { 1357 desc = (void **)&ring->desc64; 1358 descsize = sizeof(struct nfe_desc64); 1359 } else { 1360 desc = (void **)&ring->desc32; 1361 descsize = sizeof(struct nfe_desc32); 1362 } 1363 
	ring->bufsz = MCLBYTES;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_RX_RING_COUNT * descsize, 1,
				   NFE_RX_RING_COUNT * descsize,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create desc RX DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate RX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				NFE_RX_RING_COUNT * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load RX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;

		error = nfe_jpool_alloc(sc, ring);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate jumbo frames\n");
			return error;
		}
	}

	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   0, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX mbuf DMA tag\n");
		return error;
	}

	/* Create a spare RX mbuf DMA map */
	error = bus_dmamap_create(ring->data_tag, 0, &ring->data_tmpmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create spare RX mbuf DMA map\n");
		bus_dma_tag_destroy(ring->data_tag);
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth RX mbuf DMA map\n",
				  i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}

		nfe_set_ready_rxdesc(sc, ring, i);
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	return 0;
}

static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < NFE_RX_RING_COUNT; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf *jbuf;

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL) {
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
		jbuf->inuse = 1;
	} else {
		if_printf(ifp, "no free jumbo buffer\n");
	}

	lwkt_serialize_exit(&sc->sc_jbuf_serializer);

	return jbuf;
}

static void
nfe_jfree(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_softc *sc = jbuf->sc;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: free wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);
	atomic_subtract_int(&jbuf->inuse, 1);
	if (jbuf->inuse == 0)
		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
	lwkt_serialize_exit(&sc->sc_jbuf_serializer);
}

static void
nfe_jref(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: ref wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	atomic_add_int(&jbuf->inuse, 1);
}

static int
nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
1598 */ 1599 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, 1600 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1601 NULL, NULL, 1602 NFE_JPOOL_SIZE, 1, NFE_JPOOL_SIZE, 1603 0, &ring->jtag); 1604 if (error) { 1605 if_printf(&sc->arpcom.ac_if, 1606 "could not create jumbo DMA tag\n"); 1607 return error; 1608 } 1609 1610 error = bus_dmamem_alloc(ring->jtag, (void **)&ring->jpool, 1611 BUS_DMA_WAITOK, &ring->jmap); 1612 if (error) { 1613 if_printf(&sc->arpcom.ac_if, 1614 "could not allocate jumbo DMA memory\n"); 1615 bus_dma_tag_destroy(ring->jtag); 1616 ring->jtag = NULL; 1617 return error; 1618 } 1619 1620 error = bus_dmamap_load(ring->jtag, ring->jmap, ring->jpool, 1621 NFE_JPOOL_SIZE, nfe_ring_dma_addr, &physaddr, 1622 BUS_DMA_WAITOK); 1623 if (error) { 1624 if_printf(&sc->arpcom.ac_if, 1625 "could not load jumbo DMA map\n"); 1626 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap); 1627 bus_dma_tag_destroy(ring->jtag); 1628 ring->jtag = NULL; 1629 return error; 1630 } 1631 1632 /* ..and split it into 9KB chunks */ 1633 SLIST_INIT(&ring->jfreelist); 1634 1635 buf = ring->jpool; 1636 for (i = 0; i < NFE_JPOOL_COUNT; i++) { 1637 jbuf = &ring->jbuf[i]; 1638 1639 jbuf->sc = sc; 1640 jbuf->ring = ring; 1641 jbuf->inuse = 0; 1642 jbuf->slot = i; 1643 jbuf->buf = buf; 1644 jbuf->physaddr = physaddr; 1645 1646 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext); 1647 1648 buf += NFE_JBYTES; 1649 physaddr += NFE_JBYTES; 1650 } 1651 1652 return 0; 1653 } 1654 1655 static void 1656 nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1657 { 1658 if (ring->jtag != NULL) { 1659 bus_dmamap_unload(ring->jtag, ring->jmap); 1660 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap); 1661 bus_dma_tag_destroy(ring->jtag); 1662 } 1663 } 1664 1665 static int 1666 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1667 { 1668 int i, j, error, descsize; 1669 void **desc; 1670 1671 if (sc->sc_flags & NFE_40BIT_ADDR) { 1672 desc = (void **)&ring->desc64; 1673 descsize = sizeof(struct nfe_desc64); 1674 } else { 1675 desc = (void **)&ring->desc32; 1676 descsize = sizeof(struct nfe_desc32); 1677 } 1678 1679 ring->queued = 0; 1680 ring->cur = ring->next = 0; 1681 1682 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, 1683 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1684 NULL, NULL, 1685 NFE_TX_RING_COUNT * descsize, 1, 1686 NFE_TX_RING_COUNT * descsize, 1687 0, &ring->tag); 1688 if (error) { 1689 if_printf(&sc->arpcom.ac_if, 1690 "could not create TX desc DMA map\n"); 1691 return error; 1692 } 1693 1694 error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO, 1695 &ring->map); 1696 if (error) { 1697 if_printf(&sc->arpcom.ac_if, 1698 "could not allocate TX desc DMA memory\n"); 1699 bus_dma_tag_destroy(ring->tag); 1700 ring->tag = NULL; 1701 return error; 1702 } 1703 1704 error = bus_dmamap_load(ring->tag, ring->map, *desc, 1705 NFE_TX_RING_COUNT * descsize, 1706 nfe_ring_dma_addr, &ring->physaddr, 1707 BUS_DMA_WAITOK); 1708 if (error) { 1709 if_printf(&sc->arpcom.ac_if, 1710 "could not load TX desc DMA map\n"); 1711 bus_dmamem_free(ring->tag, *desc, ring->map); 1712 bus_dma_tag_destroy(ring->tag); 1713 ring->tag = NULL; 1714 return error; 1715 } 1716 1717 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, 1718 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1719 NULL, NULL, 1720 NFE_JBYTES * NFE_MAX_SCATTER, 1721 NFE_MAX_SCATTER, NFE_JBYTES, 1722 0, &ring->data_tag); 1723 if (error) { 1724 if_printf(&sc->arpcom.ac_if, 1725 "could not create TX buf DMA tag\n"); 1726 return error; 1727 } 1728 
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth TX buf DMA map\n", i);
			goto fail;
		}
	}

	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		struct nfe_tx_data *data = &ring->data[i];

		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_tag, data->map,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

static int
nfe_init_tx_ring(struct nfe_softc *sc __unused,
		 struct nfe_tx_ring *ring __unused)
{
	return 0;
}

static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < NFE_TX_RING_COUNT; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}

		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
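		/*
		 * AND addr down to the bits set in every multicast address;
		 * mask accumulates the AND of their complements.  Combined
		 * below, mask then flags the bit positions whose value is
		 * identical across all addresses.
		 */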
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
		  mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
		  mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}

static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static void
nfe_ring_dma_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	if (error)
		return;

	KASSERT(nseg == 1, ("too many segments, should be 1\n"));

	*((uint32_t *)arg) = seg->ds_addr;
}

static void
nfe_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
		 bus_size_t mapsz __unused, int error)
{
	struct nfe_dma_ctx *ctx = arg;
	int i;

	if (error)
		return;

	KASSERT(nsegs <= ctx->nsegs,
		("too many segments(%d), should be <= %d\n",
		 nsegs, ctx->nsegs));

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}

static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int error;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(ring->data_tag, ring->data_tmpmap,
				     m, nfe_buf_dma_addr, &ctx,
				     wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "could not map RX mbuf %d\n",
			  error);
		return error;
	}

	/* Unload originally mapped mbuf */
	bus_dmamap_unload(ring->data_tag, data->map);

	/* Swap this DMA map with tmp DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);

	bus_dmamap_sync(ring->data_tag, data->map, BUS_DMASYNC_PREREAD);
	return 0;
}

static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			  "-- packet dropped!\n");
		return ENOBUFS;
	}

	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);

	bus_dmamap_sync(ring->jtag, ring->jmap, BUS_DMASYNC_PREREAD);
	return 0;
}

static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		     bus_addr_t physaddr)
{
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

#if defined(__LP64__)
		desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
		desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
}

static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
{
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
}