1 /* 2 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Sepherosa Ziehau <sepherosa@gmail.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 * 34 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.15 2008/07/27 10:06:56 sephe Exp $ 35 */ 36 37 #include "opt_ethernet.h" 38 39 #include <sys/param.h> 40 #include <sys/bitops.h> 41 #include <sys/endian.h> 42 #include <sys/kernel.h> 43 #include <sys/bus.h> 44 #include <sys/interrupt.h> 45 #include <sys/malloc.h> 46 #include <sys/proc.h> 47 #include <sys/rman.h> 48 #include <sys/serialize.h> 49 #include <sys/socket.h> 50 #include <sys/sockio.h> 51 #include <sys/sysctl.h> 52 53 #include <net/ethernet.h> 54 #include <net/if.h> 55 #include <net/bpf.h> 56 #include <net/if_arp.h> 57 #include <net/if_dl.h> 58 #include <net/if_media.h> 59 #include <net/ifq_var.h> 60 #include <net/vlan/if_vlan_var.h> 61 62 #include <dev/netif/mii_layer/miivar.h> 63 64 #include <bus/pci/pcireg.h> 65 #include <bus/pci/pcivar.h> 66 #include <bus/pci/pcidevs.h> 67 68 #include <dev/netif/et/if_etreg.h> 69 #include <dev/netif/et/if_etvar.h> 70 71 #include "miibus_if.h" 72 73 static int et_probe(device_t); 74 static int et_attach(device_t); 75 static int et_detach(device_t); 76 static int et_shutdown(device_t); 77 78 static int et_miibus_readreg(device_t, int, int); 79 static int et_miibus_writereg(device_t, int, int, int); 80 static void et_miibus_statchg(device_t); 81 82 static void et_init(void *); 83 static int et_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 84 static void et_start(struct ifnet *); 85 static void et_watchdog(struct ifnet *); 86 static int et_ifmedia_upd(struct ifnet *); 87 static void et_ifmedia_sts(struct ifnet *, struct ifmediareq *); 88 89 static int et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS); 90 static int et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS); 91 92 static void et_intr(void *); 93 static void et_enable_intrs(struct et_softc *, uint32_t); 94 static void et_disable_intrs(struct et_softc *); 95 static void et_rxeof(struct et_softc *); 96 static void et_txeof(struct et_softc *, int); 97 98 static int et_dma_alloc(device_t); 99 static void 
et_dma_free(device_t); 100 static int et_dma_mem_create(device_t, bus_size_t, bus_dma_tag_t *, 101 void **, bus_addr_t *, bus_dmamap_t *); 102 static void et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t); 103 static int et_dma_mbuf_create(device_t); 104 static void et_dma_mbuf_destroy(device_t, int, const int[]); 105 static int et_jumbo_mem_alloc(device_t); 106 static void et_jumbo_mem_free(device_t); 107 static void et_dma_ring_addr(void *, bus_dma_segment_t *, int, int); 108 static void et_dma_buf_addr(void *, bus_dma_segment_t *, int, 109 bus_size_t, int); 110 static int et_init_tx_ring(struct et_softc *); 111 static int et_init_rx_ring(struct et_softc *); 112 static void et_free_tx_ring(struct et_softc *); 113 static void et_free_rx_ring(struct et_softc *); 114 static int et_encap(struct et_softc *, struct mbuf **); 115 static struct et_jslot * 116 et_jalloc(struct et_jumbo_data *); 117 static void et_jfree(void *); 118 static void et_jref(void *); 119 static int et_newbuf(struct et_rxbuf_data *, int, int, int); 120 static int et_newbuf_cluster(struct et_rxbuf_data *, int, int); 121 static int et_newbuf_hdr(struct et_rxbuf_data *, int, int); 122 static int et_newbuf_jumbo(struct et_rxbuf_data *, int, int); 123 124 static void et_stop(struct et_softc *); 125 static int et_chip_init(struct et_softc *); 126 static void et_chip_attach(struct et_softc *); 127 static void et_init_mac(struct et_softc *); 128 static void et_init_rxmac(struct et_softc *); 129 static void et_init_txmac(struct et_softc *); 130 static int et_init_rxdma(struct et_softc *); 131 static int et_init_txdma(struct et_softc *); 132 static int et_start_rxdma(struct et_softc *); 133 static int et_start_txdma(struct et_softc *); 134 static int et_stop_rxdma(struct et_softc *); 135 static int et_stop_txdma(struct et_softc *); 136 static int et_enable_txrx(struct et_softc *, int); 137 static void et_reset(struct et_softc *); 138 static int et_bus_config(device_t); 139 static void 
et_get_eaddr(device_t, uint8_t[]); 140 static void et_setmulti(struct et_softc *); 141 static void et_tick(void *); 142 static void et_setmedia(struct et_softc *); 143 static void et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t); 144 145 static const struct et_dev { 146 uint16_t vid; 147 uint16_t did; 148 const char *desc; 149 } et_devices[] = { 150 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310, 151 "Agere ET1310 Gigabit Ethernet" }, 152 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST, 153 "Agere ET1310 Fast Ethernet" }, 154 { 0, 0, NULL } 155 }; 156 157 static device_method_t et_methods[] = { 158 DEVMETHOD(device_probe, et_probe), 159 DEVMETHOD(device_attach, et_attach), 160 DEVMETHOD(device_detach, et_detach), 161 DEVMETHOD(device_shutdown, et_shutdown), 162 #if 0 163 DEVMETHOD(device_suspend, et_suspend), 164 DEVMETHOD(device_resume, et_resume), 165 #endif 166 167 DEVMETHOD(bus_print_child, bus_generic_print_child), 168 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 169 170 DEVMETHOD(miibus_readreg, et_miibus_readreg), 171 DEVMETHOD(miibus_writereg, et_miibus_writereg), 172 DEVMETHOD(miibus_statchg, et_miibus_statchg), 173 174 { 0, 0 } 175 }; 176 177 static driver_t et_driver = { 178 "et", 179 et_methods, 180 sizeof(struct et_softc) 181 }; 182 183 static devclass_t et_devclass; 184 185 DECLARE_DUMMY_MODULE(if_et); 186 MODULE_DEPEND(if_et, miibus, 1, 1, 1); 187 DRIVER_MODULE(if_et, pci, et_driver, et_devclass, 0, 0); 188 DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0); 189 190 static int et_rx_intr_npkts = 129; 191 static int et_rx_intr_delay = 25; /* x4 usec */ 192 static int et_tx_intr_nsegs = 256; 193 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */ 194 195 TUNABLE_INT("hw.et.timer", &et_timer); 196 TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts); 197 TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay); 198 TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs); 199 200 struct et_bsize { 201 int bufsize; 
202 int jumbo; 203 et_newbuf_t newbuf; 204 }; 205 206 static const struct et_bsize et_bufsize_std[ET_RX_NRING] = { 207 { .bufsize = ET_RXDMA_CTRL_RING0_128, .jumbo = 0, 208 .newbuf = et_newbuf_hdr }, 209 { .bufsize = ET_RXDMA_CTRL_RING1_2048, .jumbo = 0, 210 .newbuf = et_newbuf_cluster }, 211 }; 212 213 static const struct et_bsize et_bufsize_jumbo[ET_RX_NRING] = { 214 { .bufsize = ET_RXDMA_CTRL_RING0_128, .jumbo = 0, 215 .newbuf = et_newbuf_hdr }, 216 { .bufsize = ET_RXDMA_CTRL_RING1_16384, .jumbo = 1, 217 .newbuf = et_newbuf_jumbo }, 218 }; 219 220 static int 221 et_probe(device_t dev) 222 { 223 const struct et_dev *d; 224 uint16_t did, vid; 225 226 vid = pci_get_vendor(dev); 227 did = pci_get_device(dev); 228 229 for (d = et_devices; d->desc != NULL; ++d) { 230 if (vid == d->vid && did == d->did) { 231 device_set_desc(dev, d->desc); 232 return 0; 233 } 234 } 235 return ENXIO; 236 } 237 238 static int 239 et_attach(device_t dev) 240 { 241 struct et_softc *sc = device_get_softc(dev); 242 struct ifnet *ifp = &sc->arpcom.ac_if; 243 uint8_t eaddr[ETHER_ADDR_LEN]; 244 int error; 245 246 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 247 callout_init(&sc->sc_tick); 248 249 /* 250 * Initialize tunables 251 */ 252 sc->sc_rx_intr_npkts = et_rx_intr_npkts; 253 sc->sc_rx_intr_delay = et_rx_intr_delay; 254 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs; 255 sc->sc_timer = et_timer; 256 257 #ifndef BURN_BRIDGES 258 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 259 uint32_t irq, mem; 260 261 irq = pci_read_config(dev, PCIR_INTLINE, 4); 262 mem = pci_read_config(dev, ET_PCIR_BAR, 4); 263 264 device_printf(dev, "chip is in D%d power mode " 265 "-- setting to D0\n", pci_get_powerstate(dev)); 266 267 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 268 269 pci_write_config(dev, PCIR_INTLINE, irq, 4); 270 pci_write_config(dev, ET_PCIR_BAR, mem, 4); 271 } 272 #endif /* !BURN_BRIDGE */ 273 274 /* Enable bus mastering */ 275 pci_enable_busmaster(dev); 276 277 /* 278 * 
Allocate IO memory 279 */ 280 sc->sc_mem_rid = ET_PCIR_BAR; 281 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 282 &sc->sc_mem_rid, RF_ACTIVE); 283 if (sc->sc_mem_res == NULL) { 284 device_printf(dev, "can't allocate IO memory\n"); 285 return ENXIO; 286 } 287 sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res); 288 sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res); 289 290 /* 291 * Allocate IRQ 292 */ 293 sc->sc_irq_rid = 0; 294 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 295 &sc->sc_irq_rid, 296 RF_SHAREABLE | RF_ACTIVE); 297 if (sc->sc_irq_res == NULL) { 298 device_printf(dev, "can't allocate irq\n"); 299 error = ENXIO; 300 goto fail; 301 } 302 303 /* 304 * Create sysctl tree 305 */ 306 sysctl_ctx_init(&sc->sc_sysctl_ctx); 307 sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx, 308 SYSCTL_STATIC_CHILDREN(_hw), 309 OID_AUTO, 310 device_get_nameunit(dev), 311 CTLFLAG_RD, 0, ""); 312 if (sc->sc_sysctl_tree == NULL) { 313 device_printf(dev, "can't add sysctl node\n"); 314 error = ENXIO; 315 goto fail; 316 } 317 318 SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx, 319 SYSCTL_CHILDREN(sc->sc_sysctl_tree), 320 OID_AUTO, "rx_intr_npkts", CTLTYPE_INT | CTLFLAG_RW, 321 sc, 0, et_sysctl_rx_intr_npkts, "I", 322 "RX IM, # packets per RX interrupt"); 323 SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx, 324 SYSCTL_CHILDREN(sc->sc_sysctl_tree), 325 OID_AUTO, "rx_intr_delay", CTLTYPE_INT | CTLFLAG_RW, 326 sc, 0, et_sysctl_rx_intr_delay, "I", 327 "RX IM, RX interrupt delay (x10 usec)"); 328 SYSCTL_ADD_INT(&sc->sc_sysctl_ctx, 329 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO, 330 "tx_intr_nsegs", CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0, 331 "TX IM, # segments per TX interrupt"); 332 SYSCTL_ADD_UINT(&sc->sc_sysctl_ctx, 333 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO, 334 "timer", CTLFLAG_RW, &sc->sc_timer, 0, 335 "TX timer"); 336 337 error = et_bus_config(dev); 338 if (error) 339 goto fail; 340 341 et_get_eaddr(dev, eaddr); 342 343 CSR_WRITE_4(sc, ET_PM, 344 ET_PM_SYSCLK_GATE | 
ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE); 345 346 et_reset(sc); 347 348 et_disable_intrs(sc); 349 350 error = et_dma_alloc(dev); 351 if (error) 352 goto fail; 353 354 ifp->if_softc = sc; 355 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 356 ifp->if_init = et_init; 357 ifp->if_ioctl = et_ioctl; 358 ifp->if_start = et_start; 359 ifp->if_watchdog = et_watchdog; 360 ifp->if_mtu = ETHERMTU; 361 ifp->if_capabilities = IFCAP_VLAN_MTU; 362 ifp->if_capenable = ifp->if_capabilities; 363 ifq_set_maxlen(&ifp->if_snd, ET_TX_NDESC); 364 ifq_set_ready(&ifp->if_snd); 365 366 et_chip_attach(sc); 367 368 error = mii_phy_probe(dev, &sc->sc_miibus, 369 et_ifmedia_upd, et_ifmedia_sts); 370 if (error) { 371 device_printf(dev, "can't probe any PHY\n"); 372 goto fail; 373 } 374 375 ether_ifattach(ifp, eaddr, NULL); 376 377 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, et_intr, sc, 378 &sc->sc_irq_handle, ifp->if_serializer); 379 if (error) { 380 ether_ifdetach(ifp); 381 device_printf(dev, "can't setup intr\n"); 382 goto fail; 383 } 384 385 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq_res)); 386 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 387 388 return 0; 389 fail: 390 et_detach(dev); 391 return error; 392 } 393 394 static int 395 et_detach(device_t dev) 396 { 397 struct et_softc *sc = device_get_softc(dev); 398 399 if (device_is_attached(dev)) { 400 struct ifnet *ifp = &sc->arpcom.ac_if; 401 402 lwkt_serialize_enter(ifp->if_serializer); 403 et_stop(sc); 404 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle); 405 lwkt_serialize_exit(ifp->if_serializer); 406 407 ether_ifdetach(ifp); 408 } 409 410 if (sc->sc_sysctl_tree != NULL) 411 sysctl_ctx_free(&sc->sc_sysctl_ctx); 412 413 if (sc->sc_miibus != NULL) 414 device_delete_child(dev, sc->sc_miibus); 415 bus_generic_detach(dev); 416 417 if (sc->sc_irq_res != NULL) { 418 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, 419 sc->sc_irq_res); 420 } 421 422 if (sc->sc_mem_res != NULL) { 423 
bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, 424 sc->sc_mem_res); 425 } 426 427 et_dma_free(dev); 428 429 return 0; 430 } 431 432 static int 433 et_shutdown(device_t dev) 434 { 435 struct et_softc *sc = device_get_softc(dev); 436 struct ifnet *ifp = &sc->arpcom.ac_if; 437 438 lwkt_serialize_enter(ifp->if_serializer); 439 et_stop(sc); 440 lwkt_serialize_exit(ifp->if_serializer); 441 return 0; 442 } 443 444 static int 445 et_miibus_readreg(device_t dev, int phy, int reg) 446 { 447 struct et_softc *sc = device_get_softc(dev); 448 uint32_t val; 449 int i, ret; 450 451 /* Stop any pending operations */ 452 CSR_WRITE_4(sc, ET_MII_CMD, 0); 453 454 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 455 __SHIFTIN(reg, ET_MII_ADDR_REG); 456 CSR_WRITE_4(sc, ET_MII_ADDR, val); 457 458 /* Start reading */ 459 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ); 460 461 #define NRETRY 50 462 463 for (i = 0; i < NRETRY; ++i) { 464 val = CSR_READ_4(sc, ET_MII_IND); 465 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0) 466 break; 467 DELAY(50); 468 } 469 if (i == NRETRY) { 470 if_printf(&sc->arpcom.ac_if, 471 "read phy %d, reg %d timed out\n", phy, reg); 472 ret = 0; 473 goto back; 474 } 475 476 #undef NRETRY 477 478 val = CSR_READ_4(sc, ET_MII_STAT); 479 ret = __SHIFTOUT(val, ET_MII_STAT_VALUE); 480 481 back: 482 /* Make sure that the current operation is stopped */ 483 CSR_WRITE_4(sc, ET_MII_CMD, 0); 484 return ret; 485 } 486 487 static int 488 et_miibus_writereg(device_t dev, int phy, int reg, int val0) 489 { 490 struct et_softc *sc = device_get_softc(dev); 491 uint32_t val; 492 int i; 493 494 /* Stop any pending operations */ 495 CSR_WRITE_4(sc, ET_MII_CMD, 0); 496 497 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 498 __SHIFTIN(reg, ET_MII_ADDR_REG); 499 CSR_WRITE_4(sc, ET_MII_ADDR, val); 500 501 /* Start writing */ 502 CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE)); 503 504 #define NRETRY 100 505 506 for (i = 0; i < NRETRY; ++i) { 507 val = CSR_READ_4(sc, 
ET_MII_IND); 508 if ((val & ET_MII_IND_BUSY) == 0) 509 break; 510 DELAY(50); 511 } 512 if (i == NRETRY) { 513 if_printf(&sc->arpcom.ac_if, 514 "write phy %d, reg %d timed out\n", phy, reg); 515 et_miibus_readreg(dev, phy, reg); 516 } 517 518 #undef NRETRY 519 520 /* Make sure that the current operation is stopped */ 521 CSR_WRITE_4(sc, ET_MII_CMD, 0); 522 return 0; 523 } 524 525 static void 526 et_miibus_statchg(device_t dev) 527 { 528 et_setmedia(device_get_softc(dev)); 529 } 530 531 static int 532 et_ifmedia_upd(struct ifnet *ifp) 533 { 534 struct et_softc *sc = ifp->if_softc; 535 struct mii_data *mii = device_get_softc(sc->sc_miibus); 536 537 if (mii->mii_instance != 0) { 538 struct mii_softc *miisc; 539 540 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 541 mii_phy_reset(miisc); 542 } 543 mii_mediachg(mii); 544 545 return 0; 546 } 547 548 static void 549 et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 550 { 551 struct et_softc *sc = ifp->if_softc; 552 struct mii_data *mii = device_get_softc(sc->sc_miibus); 553 554 mii_pollstat(mii); 555 ifmr->ifm_active = mii->mii_media_active; 556 ifmr->ifm_status = mii->mii_media_status; 557 } 558 559 static void 560 et_stop(struct et_softc *sc) 561 { 562 struct ifnet *ifp = &sc->arpcom.ac_if; 563 564 ASSERT_SERIALIZED(ifp->if_serializer); 565 566 callout_stop(&sc->sc_tick); 567 568 et_stop_rxdma(sc); 569 et_stop_txdma(sc); 570 571 et_disable_intrs(sc); 572 573 et_free_tx_ring(sc); 574 et_free_rx_ring(sc); 575 576 et_reset(sc); 577 578 sc->sc_tx = 0; 579 sc->sc_tx_intr = 0; 580 sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED; 581 582 ifp->if_timer = 0; 583 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 584 } 585 586 static int 587 et_bus_config(device_t dev) 588 { 589 uint32_t val, max_plsz; 590 uint16_t ack_latency, replay_timer; 591 592 /* 593 * Test whether EEPROM is valid 594 * NOTE: Read twice to get the correct value 595 */ 596 pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1); 597 val = pci_read_config(dev, 
ET_PCIR_EEPROM_STATUS, 1); 598 if (val & ET_PCIM_EEPROM_STATUS_ERROR) { 599 device_printf(dev, "EEPROM status error 0x%02x\n", val); 600 return ENXIO; 601 } 602 603 /* TODO: LED */ 604 605 /* 606 * Configure ACK latency and replay timer according to 607 * max playload size 608 */ 609 val = pci_read_config(dev, ET_PCIR_DEVICE_CAPS, 4); 610 max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ; 611 612 switch (max_plsz) { 613 case ET_PCIV_DEVICE_CAPS_PLSZ_128: 614 ack_latency = ET_PCIV_ACK_LATENCY_128; 615 replay_timer = ET_PCIV_REPLAY_TIMER_128; 616 break; 617 618 case ET_PCIV_DEVICE_CAPS_PLSZ_256: 619 ack_latency = ET_PCIV_ACK_LATENCY_256; 620 replay_timer = ET_PCIV_REPLAY_TIMER_256; 621 break; 622 623 default: 624 ack_latency = pci_read_config(dev, ET_PCIR_ACK_LATENCY, 2); 625 replay_timer = pci_read_config(dev, ET_PCIR_REPLAY_TIMER, 2); 626 device_printf(dev, "ack latency %u, replay timer %u\n", 627 ack_latency, replay_timer); 628 break; 629 } 630 if (ack_latency != 0) { 631 pci_write_config(dev, ET_PCIR_ACK_LATENCY, ack_latency, 2); 632 pci_write_config(dev, ET_PCIR_REPLAY_TIMER, replay_timer, 2); 633 } 634 635 /* 636 * Set L0s and L1 latency timer to 2us 637 */ 638 val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2); 639 pci_write_config(dev, ET_PCIR_L0S_L1_LATENCY, val, 1); 640 641 /* 642 * Set max read request size to 2048 bytes 643 */ 644 val = pci_read_config(dev, ET_PCIR_DEVICE_CTRL, 2); 645 val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ; 646 val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K; 647 pci_write_config(dev, ET_PCIR_DEVICE_CTRL, val, 2); 648 649 return 0; 650 } 651 652 static void 653 et_get_eaddr(device_t dev, uint8_t eaddr[]) 654 { 655 uint32_t val; 656 int i; 657 658 val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4); 659 for (i = 0; i < 4; ++i) 660 eaddr[i] = (val >> (8 * i)) & 0xff; 661 662 val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2); 663 for (; i < ETHER_ADDR_LEN; ++i) 664 eaddr[i] = (val >> (8 * (i - 4))) & 0xff; 665 } 666 667 static void 668 et_reset(struct et_softc 
*sc) 669 { 670 CSR_WRITE_4(sc, ET_MAC_CFG1, 671 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 672 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 673 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 674 675 CSR_WRITE_4(sc, ET_SWRST, 676 ET_SWRST_TXDMA | ET_SWRST_RXDMA | 677 ET_SWRST_TXMAC | ET_SWRST_RXMAC | 678 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC); 679 680 CSR_WRITE_4(sc, ET_MAC_CFG1, 681 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 682 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC); 683 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 684 } 685 686 static void 687 et_disable_intrs(struct et_softc *sc) 688 { 689 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 690 } 691 692 static void 693 et_enable_intrs(struct et_softc *sc, uint32_t intrs) 694 { 695 CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs); 696 } 697 698 static int 699 et_dma_alloc(device_t dev) 700 { 701 struct et_softc *sc = device_get_softc(dev); 702 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 703 struct et_txstatus_data *txsd = &sc->sc_tx_status; 704 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 705 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 706 int i, error; 707 708 /* 709 * Create top level DMA tag 710 */ 711 error = bus_dma_tag_create(NULL, 1, 0, 712 BUS_SPACE_MAXADDR_32BIT, 713 BUS_SPACE_MAXADDR, 714 NULL, NULL, 715 MAXBSIZE, 716 BUS_SPACE_UNRESTRICTED, 717 BUS_SPACE_MAXSIZE_32BIT, 718 0, &sc->sc_dtag); 719 if (error) { 720 device_printf(dev, "can't create DMA tag\n"); 721 return error; 722 } 723 724 /* 725 * Create TX ring DMA stuffs 726 */ 727 error = et_dma_mem_create(dev, ET_TX_RING_SIZE, &tx_ring->tr_dtag, 728 (void **)&tx_ring->tr_desc, 729 &tx_ring->tr_paddr, &tx_ring->tr_dmap); 730 if (error) { 731 device_printf(dev, "can't create TX ring DMA stuffs\n"); 732 return error; 733 } 734 735 /* 736 * Create TX status DMA stuffs 737 */ 738 error = et_dma_mem_create(dev, sizeof(uint32_t), &txsd->txsd_dtag, 739 (void **)&txsd->txsd_status, 740 &txsd->txsd_paddr, &txsd->txsd_dmap); 741 if 
(error) { 742 device_printf(dev, "can't create TX status DMA stuffs\n"); 743 return error; 744 } 745 746 /* 747 * Create DMA stuffs for RX rings 748 */ 749 for (i = 0; i < ET_RX_NRING; ++i) { 750 static const uint32_t rx_ring_posreg[ET_RX_NRING] = 751 { ET_RX_RING0_POS, ET_RX_RING1_POS }; 752 753 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 754 755 error = et_dma_mem_create(dev, ET_RX_RING_SIZE, 756 &rx_ring->rr_dtag, 757 (void **)&rx_ring->rr_desc, 758 &rx_ring->rr_paddr, 759 &rx_ring->rr_dmap); 760 if (error) { 761 device_printf(dev, "can't create DMA stuffs for " 762 "the %d RX ring\n", i); 763 return error; 764 } 765 rx_ring->rr_posreg = rx_ring_posreg[i]; 766 } 767 768 /* 769 * Create RX stat ring DMA stuffs 770 */ 771 error = et_dma_mem_create(dev, ET_RXSTAT_RING_SIZE, 772 &rxst_ring->rsr_dtag, 773 (void **)&rxst_ring->rsr_stat, 774 &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap); 775 if (error) { 776 device_printf(dev, "can't create RX stat ring DMA stuffs\n"); 777 return error; 778 } 779 780 /* 781 * Create RX status DMA stuffs 782 */ 783 error = et_dma_mem_create(dev, sizeof(struct et_rxstatus), 784 &rxsd->rxsd_dtag, 785 (void **)&rxsd->rxsd_status, 786 &rxsd->rxsd_paddr, &rxsd->rxsd_dmap); 787 if (error) { 788 device_printf(dev, "can't create RX status DMA stuffs\n"); 789 return error; 790 } 791 792 /* 793 * Create mbuf DMA stuffs 794 */ 795 error = et_dma_mbuf_create(dev); 796 if (error) 797 return error; 798 799 /* 800 * Create jumbo buffer DMA stuffs 801 * NOTE: Allow it to fail 802 */ 803 if (et_jumbo_mem_alloc(dev) == 0) 804 sc->sc_flags |= ET_FLAG_JUMBO; 805 806 return 0; 807 } 808 809 static void 810 et_dma_free(device_t dev) 811 { 812 struct et_softc *sc = device_get_softc(dev); 813 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 814 struct et_txstatus_data *txsd = &sc->sc_tx_status; 815 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 816 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 817 int i, rx_done[ET_RX_NRING]; 818 819 /* 
820 * Destroy TX ring DMA stuffs 821 */ 822 et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc, 823 tx_ring->tr_dmap); 824 825 /* 826 * Destroy TX status DMA stuffs 827 */ 828 et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status, 829 txsd->txsd_dmap); 830 831 /* 832 * Destroy DMA stuffs for RX rings 833 */ 834 for (i = 0; i < ET_RX_NRING; ++i) { 835 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 836 837 et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc, 838 rx_ring->rr_dmap); 839 } 840 841 /* 842 * Destroy RX stat ring DMA stuffs 843 */ 844 et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat, 845 rxst_ring->rsr_dmap); 846 847 /* 848 * Destroy RX status DMA stuffs 849 */ 850 et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status, 851 rxsd->rxsd_dmap); 852 853 /* 854 * Destroy mbuf DMA stuffs 855 */ 856 for (i = 0; i < ET_RX_NRING; ++i) 857 rx_done[i] = ET_RX_NDESC; 858 et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done); 859 860 /* 861 * Destroy jumbo buffer DMA stuffs 862 */ 863 if (sc->sc_flags & ET_FLAG_JUMBO) 864 et_jumbo_mem_free(dev); 865 866 /* 867 * Destroy top level DMA tag 868 */ 869 if (sc->sc_dtag != NULL) 870 bus_dma_tag_destroy(sc->sc_dtag); 871 } 872 873 static int 874 et_dma_mbuf_create(device_t dev) 875 { 876 struct et_softc *sc = device_get_softc(dev); 877 struct et_txbuf_data *tbd = &sc->sc_tx_data; 878 int i, error, rx_done[ET_RX_NRING]; 879 880 /* 881 * Create mbuf DMA tag 882 */ 883 error = bus_dma_tag_create(sc->sc_dtag, 1, 0, 884 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 885 NULL, NULL, 886 ET_JUMBO_FRAMELEN, ET_NSEG_MAX, 887 BUS_SPACE_MAXSIZE_32BIT, 888 BUS_DMA_ALLOCNOW, &sc->sc_mbuf_dtag); 889 if (error) { 890 device_printf(dev, "can't create mbuf DMA tag\n"); 891 return error; 892 } 893 894 /* 895 * Create spare DMA map for RX mbufs 896 */ 897 error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, &sc->sc_mbuf_tmp_dmap); 898 if (error) { 899 device_printf(dev, "can't create spare mbuf DMA map\n"); 900 
bus_dma_tag_destroy(sc->sc_mbuf_dtag); 901 sc->sc_mbuf_dtag = NULL; 902 return error; 903 } 904 905 /* 906 * Create DMA maps for RX mbufs 907 */ 908 bzero(rx_done, sizeof(rx_done)); 909 for (i = 0; i < ET_RX_NRING; ++i) { 910 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 911 int j; 912 913 for (j = 0; j < ET_RX_NDESC; ++j) { 914 error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, 915 &rbd->rbd_buf[j].rb_dmap); 916 if (error) { 917 device_printf(dev, "can't create %d RX mbuf " 918 "for %d RX ring\n", j, i); 919 rx_done[i] = j; 920 et_dma_mbuf_destroy(dev, 0, rx_done); 921 return error; 922 } 923 } 924 rx_done[i] = ET_RX_NDESC; 925 926 rbd->rbd_softc = sc; 927 rbd->rbd_ring = &sc->sc_rx_ring[i]; 928 } 929 930 /* 931 * Create DMA maps for TX mbufs 932 */ 933 for (i = 0; i < ET_TX_NDESC; ++i) { 934 error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, 935 &tbd->tbd_buf[i].tb_dmap); 936 if (error) { 937 device_printf(dev, "can't create %d TX mbuf " 938 "DMA map\n", i); 939 et_dma_mbuf_destroy(dev, i, rx_done); 940 return error; 941 } 942 } 943 944 return 0; 945 } 946 947 static void 948 et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[]) 949 { 950 struct et_softc *sc = device_get_softc(dev); 951 struct et_txbuf_data *tbd = &sc->sc_tx_data; 952 int i; 953 954 if (sc->sc_mbuf_dtag == NULL) 955 return; 956 957 /* 958 * Destroy DMA maps for RX mbufs 959 */ 960 for (i = 0; i < ET_RX_NRING; ++i) { 961 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 962 int j; 963 964 for (j = 0; j < rx_done[i]; ++j) { 965 struct et_rxbuf *rb = &rbd->rbd_buf[j]; 966 967 KASSERT(rb->rb_mbuf == NULL, 968 ("RX mbuf in %d RX ring is not freed yet\n", i)); 969 bus_dmamap_destroy(sc->sc_mbuf_dtag, rb->rb_dmap); 970 } 971 } 972 973 /* 974 * Destroy DMA maps for TX mbufs 975 */ 976 for (i = 0; i < tx_done; ++i) { 977 struct et_txbuf *tb = &tbd->tbd_buf[i]; 978 979 KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n")); 980 bus_dmamap_destroy(sc->sc_mbuf_dtag, tb->tb_dmap); 981 } 982 983 /* 
984 * Destroy spare mbuf DMA map 985 */ 986 bus_dmamap_destroy(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap); 987 988 /* 989 * Destroy mbuf DMA tag 990 */ 991 bus_dma_tag_destroy(sc->sc_mbuf_dtag); 992 sc->sc_mbuf_dtag = NULL; 993 } 994 995 static int 996 et_dma_mem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag, 997 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap) 998 { 999 struct et_softc *sc = device_get_softc(dev); 1000 int error; 1001 1002 error = bus_dma_tag_create(sc->sc_dtag, ET_ALIGN, 0, 1003 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1004 NULL, NULL, 1005 size, 1, BUS_SPACE_MAXSIZE_32BIT, 1006 0, dtag); 1007 if (error) { 1008 device_printf(dev, "can't create DMA tag\n"); 1009 return error; 1010 } 1011 1012 error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO, 1013 dmap); 1014 if (error) { 1015 device_printf(dev, "can't allocate DMA mem\n"); 1016 bus_dma_tag_destroy(*dtag); 1017 *dtag = NULL; 1018 return error; 1019 } 1020 1021 error = bus_dmamap_load(*dtag, *dmap, *addr, size, 1022 et_dma_ring_addr, paddr, BUS_DMA_WAITOK); 1023 if (error) { 1024 device_printf(dev, "can't load DMA mem\n"); 1025 bus_dmamem_free(*dtag, *addr, *dmap); 1026 bus_dma_tag_destroy(*dtag); 1027 *dtag = NULL; 1028 return error; 1029 } 1030 return 0; 1031 } 1032 1033 static void 1034 et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap) 1035 { 1036 if (dtag != NULL) { 1037 bus_dmamap_unload(dtag, dmap); 1038 bus_dmamem_free(dtag, addr, dmap); 1039 bus_dma_tag_destroy(dtag); 1040 } 1041 } 1042 1043 static void 1044 et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error) 1045 { 1046 KASSERT(nseg == 1, ("too many segments\n")); 1047 *((bus_addr_t *)arg) = seg->ds_addr; 1048 } 1049 1050 static void 1051 et_chip_attach(struct et_softc *sc) 1052 { 1053 uint32_t val; 1054 1055 /* 1056 * Perform minimal initialization 1057 */ 1058 1059 /* Disable loopback */ 1060 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1061 1062 /* Reset MAC */ 1063 CSR_WRITE_4(sc, 
ET_MAC_CFG1, 1064 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1065 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1066 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1067 1068 /* 1069 * Setup half duplex mode 1070 */ 1071 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 1072 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 1073 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 1074 ET_MAC_HDX_EXC_DEFER; 1075 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1076 1077 /* Clear MAC control */ 1078 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1079 1080 /* Reset MII */ 1081 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1082 1083 /* Bring MAC out of reset state */ 1084 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1085 1086 /* Enable memory controllers */ 1087 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1088 } 1089 1090 static void 1091 et_intr(void *xsc) 1092 { 1093 struct et_softc *sc = xsc; 1094 struct ifnet *ifp = &sc->arpcom.ac_if; 1095 uint32_t intrs; 1096 1097 ASSERT_SERIALIZED(ifp->if_serializer); 1098 1099 if ((ifp->if_flags & IFF_RUNNING) == 0) 1100 return; 1101 1102 et_disable_intrs(sc); 1103 1104 intrs = CSR_READ_4(sc, ET_INTR_STATUS); 1105 intrs &= ET_INTRS; 1106 if (intrs == 0) /* Not interested */ 1107 goto back; 1108 1109 if (intrs & ET_INTR_RXEOF) 1110 et_rxeof(sc); 1111 if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER)) 1112 et_txeof(sc, 1); 1113 if (intrs & ET_INTR_TIMER) 1114 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1115 back: 1116 et_enable_intrs(sc, ET_INTRS); 1117 } 1118 1119 static void 1120 et_init(void *xsc) 1121 { 1122 struct et_softc *sc = xsc; 1123 struct ifnet *ifp = &sc->arpcom.ac_if; 1124 const struct et_bsize *arr; 1125 int error, i; 1126 1127 ASSERT_SERIALIZED(ifp->if_serializer); 1128 1129 et_stop(sc); 1130 1131 arr = ET_FRAMELEN(ifp->if_mtu) < MCLBYTES ? 
	    et_bufsize_std : et_bufsize_jumbo;
	/* Propagate the selected buffer size/allocator to every RX ring. */
	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
		sc->sc_rx_data[i].rbd_jumbo = arr[i].jumbo;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc, 1);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
back:
	/* Any failure above tears the whole interface back down. */
	if (error)
		et_stop(sc);
}

/*
 * Interface ioctl handler.  Runs with the ifnet serializer held
 * (asserted below); dispatches on the ioctl command and falls back
 * to ether_ioctl() for anything not handled here.
 */
static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, max_framelen;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/*
				 * Only reprogram the RX filter when the
				 * ALLMULTI/PROMISC bits actually changed;
				 * sc_if_flags caches the last-seen flags.
				 */
				if ((ifp->if_flags ^ sc->sc_if_flags) &
				    (IFF_ALLMULTI | IFF_PROMISC))
					et_setmulti(sc);
			} else {
				et_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			et_setmulti(sc);
		break;

	case SIOCSIFMTU:
		/* Frame length limit depends on jumbo support. */
		if (sc->sc_flags & ET_FLAG_JUMBO)
			max_framelen = ET_JUMBO_FRAMELEN;
		else
			max_framelen = MCLBYTES - 1;

		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
			error = EOPNOTSUPP;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		/* Reinitialize so the chip picks up the new frame length. */
		if (ifp->if_flags & IFF_RUNNING)
			et_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

/*
 * if_start handler: drain the send queue, encapsulating each mbuf
 * into TX descriptors.  When descriptors run short, try reclaiming
 * completed ones once (et_txeof) before setting IFF_OACTIVE.
 */
static void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int trans, oactive;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* TX/RX not up (e.g. no link yet) -- throw queued packets away. */
	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	oactive = 0;
	trans = 0;
	for (;;) {
		struct mbuf *m;
		int error;

		/*
		 * Keep ET_NSEG_SPARE descriptors in reserve; if we are
		 * short, reclaim once, and give up the second time.
		 */
		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			if (oactive) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}

			et_txeof(sc, 0);
			oactive = 1;
			continue;
		}

		m = ifq_dequeue(&ifp->if_snd, NULL);
		if (m == NULL)
			break;

		error = et_encap(sc, &m);
		if (error) {
			ifp->if_oerrors++;
			/* et_encap() frees the mbuf on failure. */
			KKASSERT(m == NULL);

			if (error == EFBIG) {
				/*
				 * Excessive fragmented packets
				 */
				if (oactive) {
					ifp->if_flags |= IFF_OACTIVE;
					break;
				}
				et_txeof(sc, 0);
				oactive = 1;
			}
			continue;
		} else {
			oactive = 0;
		}
		trans = 1;

		BPF_MTAP(ifp, m);
	}

	/* Arm the watchdog only if something was actually queued. */
	if (trans)
		ifp->if_timer = 5;
}

/*
 * Watchdog: TX did not complete within if_timer seconds.
 * Reinitialize the chip and restart transmission.
 */
static void
et_watchdog(struct ifnet *ifp)
{
	ASSERT_SERIALIZED(ifp->if_serializer);

	if_printf(ifp, "watchdog timed out\n");

	ifp->if_init(ifp->if_softc);
	if_devstart(ifp);
}

/*
 * Halt the RX DMA engine; returns ETIMEDOUT if the engine does not
 * report HALTED after a short delay.
 */
static int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
		    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
if_printf(&sc->arpcom.ac_if, "can't stop RX DMA engine\n"); 1316 return ETIMEDOUT; 1317 } 1318 return 0; 1319 } 1320 1321 static int 1322 et_stop_txdma(struct et_softc *sc) 1323 { 1324 CSR_WRITE_4(sc, ET_TXDMA_CTRL, 1325 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT); 1326 return 0; 1327 } 1328 1329 static void 1330 et_free_tx_ring(struct et_softc *sc) 1331 { 1332 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1333 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1334 int i; 1335 1336 for (i = 0; i < ET_TX_NDESC; ++i) { 1337 struct et_txbuf *tb = &tbd->tbd_buf[i]; 1338 1339 if (tb->tb_mbuf != NULL) { 1340 bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap); 1341 m_freem(tb->tb_mbuf); 1342 tb->tb_mbuf = NULL; 1343 } 1344 } 1345 1346 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1347 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 1348 BUS_DMASYNC_PREWRITE); 1349 } 1350 1351 static void 1352 et_free_rx_ring(struct et_softc *sc) 1353 { 1354 int n; 1355 1356 for (n = 0; n < ET_RX_NRING; ++n) { 1357 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1358 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n]; 1359 int i; 1360 1361 for (i = 0; i < ET_RX_NDESC; ++i) { 1362 struct et_rxbuf *rb = &rbd->rbd_buf[i]; 1363 1364 if (rb->rb_mbuf != NULL) { 1365 if (!rbd->rbd_jumbo) { 1366 bus_dmamap_unload(sc->sc_mbuf_dtag, 1367 rb->rb_dmap); 1368 } 1369 m_freem(rb->rb_mbuf); 1370 rb->rb_mbuf = NULL; 1371 } 1372 } 1373 1374 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE); 1375 bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap, 1376 BUS_DMASYNC_PREWRITE); 1377 } 1378 } 1379 1380 static void 1381 et_setmulti(struct et_softc *sc) 1382 { 1383 struct ifnet *ifp = &sc->arpcom.ac_if; 1384 uint32_t hash[4] = { 0, 0, 0, 0 }; 1385 uint32_t rxmac_ctrl, pktfilt; 1386 struct ifmultiaddr *ifma; 1387 int i, count; 1388 1389 pktfilt = CSR_READ_4(sc, ET_PKTFILT); 1390 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL); 1391 1392 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST); 1393 if 
(ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1394 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT; 1395 goto back; 1396 } 1397 1398 count = 0; 1399 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1400 uint32_t *hp, h; 1401 1402 if (ifma->ifma_addr->sa_family != AF_LINK) 1403 continue; 1404 1405 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 1406 ifma->ifma_addr), ETHER_ADDR_LEN); 1407 h = (h & 0x3f800000) >> 23; 1408 1409 hp = &hash[0]; 1410 if (h >= 32 && h < 64) { 1411 h -= 32; 1412 hp = &hash[1]; 1413 } else if (h >= 64 && h < 96) { 1414 h -= 64; 1415 hp = &hash[2]; 1416 } else if (h >= 96) { 1417 h -= 96; 1418 hp = &hash[3]; 1419 } 1420 *hp |= (1 << h); 1421 1422 ++count; 1423 } 1424 1425 for (i = 0; i < 4; ++i) 1426 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]); 1427 1428 if (count > 0) 1429 pktfilt |= ET_PKTFILT_MCAST; 1430 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT; 1431 back: 1432 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt); 1433 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl); 1434 } 1435 1436 static int 1437 et_chip_init(struct et_softc *sc) 1438 { 1439 struct ifnet *ifp = &sc->arpcom.ac_if; 1440 uint32_t rxq_end; 1441 int error, frame_len, rxmem_size; 1442 1443 /* 1444 * Split 16Kbytes internal memory between TX and RX 1445 * according to frame length. 
1446 */ 1447 frame_len = ET_FRAMELEN(ifp->if_mtu); 1448 if (frame_len < 2048) { 1449 rxmem_size = ET_MEM_RXSIZE_DEFAULT; 1450 } else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) { 1451 rxmem_size = ET_MEM_SIZE / 2; 1452 } else { 1453 rxmem_size = ET_MEM_SIZE - 1454 roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT); 1455 } 1456 rxq_end = ET_QUEUE_ADDR(rxmem_size); 1457 1458 CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START); 1459 CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end); 1460 CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1); 1461 CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END); 1462 1463 /* No loopback */ 1464 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1465 1466 /* Clear MSI configure */ 1467 CSR_WRITE_4(sc, ET_MSI_CFG, 0); 1468 1469 /* Disable timer */ 1470 CSR_WRITE_4(sc, ET_TIMER, 0); 1471 1472 /* Initialize MAC */ 1473 et_init_mac(sc); 1474 1475 /* Enable memory controllers */ 1476 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1477 1478 /* Initialize RX MAC */ 1479 et_init_rxmac(sc); 1480 1481 /* Initialize TX MAC */ 1482 et_init_txmac(sc); 1483 1484 /* Initialize RX DMA engine */ 1485 error = et_init_rxdma(sc); 1486 if (error) 1487 return error; 1488 1489 /* Initialize TX DMA engine */ 1490 error = et_init_txdma(sc); 1491 if (error) 1492 return error; 1493 1494 return 0; 1495 } 1496 1497 static int 1498 et_init_tx_ring(struct et_softc *sc) 1499 { 1500 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1501 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1502 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1503 1504 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1505 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 1506 BUS_DMASYNC_PREWRITE); 1507 1508 tbd->tbd_start_index = 0; 1509 tbd->tbd_start_wrap = 0; 1510 tbd->tbd_used = 0; 1511 1512 bzero(txsd->txsd_status, sizeof(uint32_t)); 1513 bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap, 1514 BUS_DMASYNC_PREWRITE); 1515 return 0; 1516 } 1517 1518 static int 1519 et_init_rx_ring(struct et_softc *sc) 1520 { 1521 struct 
et_rxstatus_data *rxsd = &sc->sc_rx_status; 1522 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1523 int n; 1524 1525 for (n = 0; n < ET_RX_NRING; ++n) { 1526 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1527 int i, error; 1528 1529 for (i = 0; i < ET_RX_NDESC; ++i) { 1530 error = rbd->rbd_newbuf(rbd, i, 1); 1531 if (error) { 1532 if_printf(&sc->arpcom.ac_if, "%d ring %d buf, " 1533 "newbuf failed: %d\n", n, i, error); 1534 return error; 1535 } 1536 } 1537 } 1538 1539 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus)); 1540 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap, 1541 BUS_DMASYNC_PREWRITE); 1542 1543 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE); 1544 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap, 1545 BUS_DMASYNC_PREWRITE); 1546 1547 return 0; 1548 } 1549 1550 static void 1551 et_dma_buf_addr(void *xctx, bus_dma_segment_t *segs, int nsegs, 1552 bus_size_t mapsz __unused, int error) 1553 { 1554 struct et_dmamap_ctx *ctx = xctx; 1555 int i; 1556 1557 if (error) 1558 return; 1559 1560 if (nsegs > ctx->nsegs) { 1561 ctx->nsegs = 0; 1562 return; 1563 } 1564 1565 ctx->nsegs = nsegs; 1566 for (i = 0; i < nsegs; ++i) 1567 ctx->segs[i] = segs[i]; 1568 } 1569 1570 static int 1571 et_init_rxdma(struct et_softc *sc) 1572 { 1573 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1574 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1575 struct et_rxdesc_ring *rx_ring; 1576 int error; 1577 1578 error = et_stop_rxdma(sc); 1579 if (error) { 1580 if_printf(&sc->arpcom.ac_if, "can't init RX DMA engine\n"); 1581 return error; 1582 } 1583 1584 /* 1585 * Install RX status 1586 */ 1587 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr)); 1588 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr)); 1589 1590 /* 1591 * Install RX stat ring 1592 */ 1593 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr)); 1594 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr)); 1595 CSR_WRITE_4(sc, ET_RXSTAT_CNT, 
ET_RX_NSTAT - 1); 1596 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0); 1597 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1); 1598 1599 /* Match ET_RXSTAT_POS */ 1600 rxst_ring->rsr_index = 0; 1601 rxst_ring->rsr_wrap = 0; 1602 1603 /* 1604 * Install the 2nd RX descriptor ring 1605 */ 1606 rx_ring = &sc->sc_rx_ring[1]; 1607 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1608 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1609 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1); 1610 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP); 1611 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1612 1613 /* Match ET_RX_RING1_POS */ 1614 rx_ring->rr_index = 0; 1615 rx_ring->rr_wrap = 1; 1616 1617 /* 1618 * Install the 1st RX descriptor ring 1619 */ 1620 rx_ring = &sc->sc_rx_ring[0]; 1621 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1622 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1623 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1); 1624 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP); 1625 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1626 1627 /* Match ET_RX_RING0_POS */ 1628 rx_ring->rr_index = 0; 1629 rx_ring->rr_wrap = 1; 1630 1631 /* 1632 * RX intr moderation 1633 */ 1634 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts); 1635 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay); 1636 1637 return 0; 1638 } 1639 1640 static int 1641 et_init_txdma(struct et_softc *sc) 1642 { 1643 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1644 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1645 int error; 1646 1647 error = et_stop_txdma(sc); 1648 if (error) { 1649 if_printf(&sc->arpcom.ac_if, "can't init TX DMA engine\n"); 1650 return error; 1651 } 1652 1653 /* 1654 * Install TX descriptor ring 1655 */ 1656 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr)); 1657 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr)); 
1658 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1); 1659 1660 /* 1661 * Install TX status 1662 */ 1663 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr)); 1664 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr)); 1665 1666 CSR_WRITE_4(sc, ET_TX_READY_POS, 0); 1667 1668 /* Match ET_TX_READY_POS */ 1669 tx_ring->tr_ready_index = 0; 1670 tx_ring->tr_ready_wrap = 0; 1671 1672 return 0; 1673 } 1674 1675 static void 1676 et_init_mac(struct et_softc *sc) 1677 { 1678 struct ifnet *ifp = &sc->arpcom.ac_if; 1679 const uint8_t *eaddr = IF_LLADDR(ifp); 1680 uint32_t val; 1681 1682 /* Reset MAC */ 1683 CSR_WRITE_4(sc, ET_MAC_CFG1, 1684 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1685 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1686 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1687 1688 /* 1689 * Setup inter packet gap 1690 */ 1691 val = __SHIFTIN(56, ET_IPG_NONB2B_1) | 1692 __SHIFTIN(88, ET_IPG_NONB2B_2) | 1693 __SHIFTIN(80, ET_IPG_MINIFG) | 1694 __SHIFTIN(96, ET_IPG_B2B); 1695 CSR_WRITE_4(sc, ET_IPG, val); 1696 1697 /* 1698 * Setup half duplex mode 1699 */ 1700 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 1701 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 1702 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 1703 ET_MAC_HDX_EXC_DEFER; 1704 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1705 1706 /* Clear MAC control */ 1707 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1708 1709 /* Reset MII */ 1710 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1711 1712 /* 1713 * Set MAC address 1714 */ 1715 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24); 1716 CSR_WRITE_4(sc, ET_MAC_ADDR1, val); 1717 val = (eaddr[0] << 16) | (eaddr[1] << 24); 1718 CSR_WRITE_4(sc, ET_MAC_ADDR2, val); 1719 1720 /* Set max frame length */ 1721 CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu)); 1722 1723 /* Bring MAC out of reset state */ 1724 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1725 } 1726 1727 static void 1728 et_init_rxmac(struct et_softc *sc) 1729 { 1730 struct ifnet *ifp = 
	    &sc->arpcom.ac_if;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
		/*
		 * In order to transmit jumbo packets greater than
		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
		 * RX MAC and RX DMA needs to be reduced in size to
		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
		 * order to implement this, we must use "cut through"
		 * mode in the RX MAC, which chops packets down into
		 * segments.  In this case we selected 256 bytes,
		 * since this is the size of the PCI-Express TLP's
		 * that the ET1310 uses.
		 */
		val = __SHIFTIN(ET_RXMAC_SEGSZ(256), ET_RXMAC_MC_SEGSZ_MAX) |
		      ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	/* Pass up frames with errors so they can be counted/inspected. */
	CSR_WRITE_4(sc, ET_RXMAC_MGT,
		    ET_RXMAC_MGT_PASS_ECRC |
		    ET_RXMAC_MGT_PASS_ELEN |
		    ET_RXMAC_MGT_PASS_ETRUNC |
		    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generation)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
		    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

/*
 * Configure the TX MAC: flow control stays disabled; only the TX
 * MAC enable bit is set.
 */
static void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?)
diabled */ 1820 CSR_WRITE_4(sc, ET_TXMAC_CTRL, 1821 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE); 1822 } 1823 1824 static int 1825 et_start_rxdma(struct et_softc *sc) 1826 { 1827 uint32_t val = 0; 1828 1829 val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize, 1830 ET_RXDMA_CTRL_RING0_SIZE) | 1831 ET_RXDMA_CTRL_RING0_ENABLE; 1832 val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize, 1833 ET_RXDMA_CTRL_RING1_SIZE) | 1834 ET_RXDMA_CTRL_RING1_ENABLE; 1835 1836 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val); 1837 1838 DELAY(5); 1839 1840 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) { 1841 if_printf(&sc->arpcom.ac_if, "can't start RX DMA engine\n"); 1842 return ETIMEDOUT; 1843 } 1844 return 0; 1845 } 1846 1847 static int 1848 et_start_txdma(struct et_softc *sc) 1849 { 1850 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT); 1851 return 0; 1852 } 1853 1854 static int 1855 et_enable_txrx(struct et_softc *sc, int media_upd) 1856 { 1857 struct ifnet *ifp = &sc->arpcom.ac_if; 1858 uint32_t val; 1859 int i, error; 1860 1861 val = CSR_READ_4(sc, ET_MAC_CFG1); 1862 val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN; 1863 val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW | 1864 ET_MAC_CFG1_LOOPBACK); 1865 CSR_WRITE_4(sc, ET_MAC_CFG1, val); 1866 1867 if (media_upd) 1868 et_ifmedia_upd(ifp); 1869 else 1870 et_setmedia(sc); 1871 1872 #define NRETRY 100 1873 1874 for (i = 0; i < NRETRY; ++i) { 1875 val = CSR_READ_4(sc, ET_MAC_CFG1); 1876 if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) == 1877 (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) 1878 break; 1879 1880 DELAY(10); 1881 } 1882 if (i == NRETRY) { 1883 if_printf(ifp, "can't enable RX/TX\n"); 1884 return 0; 1885 } 1886 sc->sc_flags |= ET_FLAG_TXRX_ENABLED; 1887 1888 #undef NRETRY 1889 1890 /* 1891 * Start TX/RX DMA engine 1892 */ 1893 error = et_start_rxdma(sc); 1894 if (error) 1895 return error; 1896 1897 error = et_start_txdma(sc); 1898 if (error) 1899 return error; 1900 1901 return 0; 1902 } 1903 1904 static void 1905 
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;
#ifdef ETHER_INPUT_CHAIN
	struct mbuf_chain chain[MAXCPU];
#endif

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
			BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
			BUS_DMASYNC_POSTREAD);

	/* Hardware write position in the stat ring (index + wrap bit). */
	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

#ifdef ETHER_INPUT_CHAIN
	ether_input_chain_init(chain);
#endif

	/* Consume stat entries until we catch up with the hardware. */
	while (rxst_index != rxst_ring->rsr_index ||
	       rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		/* Advance our stat ring position and tell the chip. */
		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
				       ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		/* Sanity check the indices reported by the hardware. */
		if (ring_idx >= ET_RX_NRING) {
			ifp->if_ierrors++;
			if_printf(ifp, "invalid ring index %d\n", ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			ifp->if_ierrors++;
			if_printf(ifp, "invalid buf index %d\n", buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		m = rbd->rbd_buf[buf_idx].rb_mbuf;

		/*
		 * Replace the received buffer first; only pass the old
		 * mbuf up if a replacement could be allocated.
		 */
		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				ifp->if_ierrors++;
			} else {
				m->m_pkthdr.len = m->m_len = buflen;
				m->m_pkthdr.rcvif = ifp;

				/* Chip delivers the FCS; strip it. */
				m_adj(m, -ETHER_CRC_LEN);

				ifp->if_ipackets++;
#ifdef ETHER_INPUT_CHAIN
				ether_input_chain2(ifp, m, chain);
#else
				ifp->if_input(ifp, m);
#endif
			}
		} else {
			ifp->if_ierrors++;
		}
		m = NULL;	/* Catch invalid reference */

		/* Advance the corresponding RX descriptor ring position. */
		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			if_printf(ifp, "WARNING!! ring %d, "
				  "buf_idx %d, rr_idx %d\n",
				  ring_idx, buf_idx, rx_ring->rr_index);
		}

		KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}

#ifdef ETHER_INPUT_CHAIN
	ether_input_dispatch(chain);
#endif
}

/*
 * Encapsulate *m0 into TX descriptors.  On failure the mbuf is
 * freed and *m0 is set to NULL.  EFBIG triggers one m_defrag()
 * retry before giving up.
 */
static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	bus_dma_segment_t segs[ET_NSEG_MAX];
	struct et_dmamap_ctx ctx;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
		("not enough spare TX desc (%d)\n", maxsegs));

	KKASSERT(tx_ring->tr_ready_index <
		 ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	ctx.nsegs = maxsegs;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
				     et_dma_buf_addr, &ctx, BUS_DMA_NOWAIT);
	/* ctx.nsegs == 0 means the mbuf needed more than maxsegs segments. */
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(sc->sc_mbuf_dtag, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "can't load TX mbuf, error %d\n",
			  error);
		goto back;
	}
	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		/* Too fragmented: collapse into fewer clusters and retry. */
		m_new = m_defrag(m, MB_DONTWAIT);
		if (m_new == NULL) {
			if_printf(&sc->arpcom.ac_if, "can't defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		} else {
			*m0 = m = m_new;
		}

		ctx.nsegs = maxsegs;
		ctx.segs = segs;
		error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
					     et_dma_buf_addr, &ctx,
					     BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			if (ctx.nsegs == 0) {
				bus_dmamap_unload(sc->sc_mbuf_dtag, map);
				error = EFBIG;
			}
			if_printf(&sc->arpcom.ac_if,
				  "can't load defraged TX mbuf\n");
			goto back;
		}
	}

	bus_dmamap_sync(sc->sc_mbuf_dtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Request a TX-completion interrupt roughly every
	 * sc_tx_intr_nsegs segments (interrupt moderation).
	 */
	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += ctx.nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	/* Fill one descriptor per DMA segment. */
	last_idx = -1;
	for (i = 0; i < ctx.nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(segs[i].ds_addr);
		td->td_ctrl1 = __SHIFTIN(segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == ctx.nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	/*
	 * The loaded DMA map must travel with the LAST descriptor so
	 * et_txeof() unloads it only after all fragments completed;
	 * swap it with the map slot at first_idx.
	 */
	KKASSERT(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += ctx.nsegs;
	KKASSERT(tbd->tbd_used <= ET_TX_NDESC);

	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
			BUS_DMASYNC_PREWRITE);

	/* Kick the chip: advance the TX ready position. */
	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
				 ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(m);
		*m0 = NULL;
	}
	return error;
}

/*
 * Reclaim completed TX descriptors up to the chip's done position;
 * clears IFF_OACTIVE / the watchdog when appropriate and optionally
 * restarts transmission (start != 0).
 */
static void
et_txeof(struct et_softc *sc, int start)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ?
1 : 0; 2159 2160 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) { 2161 struct et_txbuf *tb; 2162 2163 KKASSERT(tbd->tbd_start_index < ET_TX_NDESC); 2164 tb = &tbd->tbd_buf[tbd->tbd_start_index]; 2165 2166 bzero(&tx_ring->tr_desc[tbd->tbd_start_index], 2167 sizeof(struct et_txdesc)); 2168 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 2169 BUS_DMASYNC_PREWRITE); 2170 2171 if (tb->tb_mbuf != NULL) { 2172 bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap); 2173 m_freem(tb->tb_mbuf); 2174 tb->tb_mbuf = NULL; 2175 ifp->if_opackets++; 2176 } 2177 2178 if (++tbd->tbd_start_index == ET_TX_NDESC) { 2179 tbd->tbd_start_index = 0; 2180 tbd->tbd_start_wrap ^= 1; 2181 } 2182 2183 KKASSERT(tbd->tbd_used > 0); 2184 tbd->tbd_used--; 2185 } 2186 2187 if (tbd->tbd_used == 0) 2188 ifp->if_timer = 0; 2189 if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC) 2190 ifp->if_flags &= ~IFF_OACTIVE; 2191 2192 if (start) 2193 if_devstart(ifp); 2194 } 2195 2196 static void 2197 et_tick(void *xsc) 2198 { 2199 struct et_softc *sc = xsc; 2200 struct ifnet *ifp = &sc->arpcom.ac_if; 2201 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2202 2203 lwkt_serialize_enter(ifp->if_serializer); 2204 2205 mii_tick(mii); 2206 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 && 2207 (mii->mii_media_status & IFM_ACTIVE) && 2208 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2209 if_printf(ifp, "Link up, enable TX/RX\n"); 2210 if (et_enable_txrx(sc, 0) == 0) 2211 if_devstart(ifp); 2212 } 2213 callout_reset(&sc->sc_tick, hz, et_tick, sc); 2214 2215 lwkt_serialize_exit(ifp->if_serializer); 2216 } 2217 2218 static int 2219 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init) 2220 { 2221 return et_newbuf(rbd, buf_idx, init, MCLBYTES); 2222 } 2223 2224 static int 2225 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init) 2226 { 2227 return et_newbuf(rbd, buf_idx, init, MHLEN); 2228 } 2229 2230 static int 2231 et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int 
init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	struct et_dmamap_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t dmap;
	int error, len;

	/* This path services only the standard (non-jumbo) RX ring. */
	KASSERT(!rbd->rbd_jumbo, ("calling %s with jumbo ring\n", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	/*
	 * Allocate a new mbuf (cluster-backed when len0 warrants it).
	 * During attach-time init we may sleep; at runtime we must not.
	 */
	m = m_getl(len0, init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR, &len);
	if (m == NULL) {
		error = ENOBUFS;

		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "m_getl failed, size %d\n", len0);
			return error;
		} else {
			/*
			 * Not fatal at runtime: requeue the old mbuf's
			 * descriptor (rb->rb_paddr is still valid) so the
			 * ring keeps running, and report the error.
			 */
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try load RX mbuf into temporary DMA tag
	 */
	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap, m,
				     et_dma_buf_addr, &ctx,
				     init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error || ctx.nsegs == 0) {
		if (!error) {
			/*
			 * Load succeeded but the callback rejected the
			 * segment count (RX buffers must be one contiguous
			 * segment); undo the load before freeing the mbuf.
			 */
			bus_dmamap_unload(sc->sc_mbuf_dtag,
					  sc->sc_mbuf_tmp_dmap);
			error = EFBIG;
			if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
		}
		m_freem(m);

		if (init) {
			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
			return error;
		} else {
			/* Keep the old mbuf on the ring; see above. */
			goto back;
		}
	}

	/*
	 * The new mbuf is loaded; retire the previous one (if any).
	 * At init time rb->rb_dmap has never been loaded, so only
	 * sync/unload it on the runtime-replenish path.
	 */
	if (!init) {
		bus_dmamap_sync(sc->sc_mbuf_dtag, rb->rb_dmap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_mbuf_dtag, rb->rb_dmap);
	}
	rb->rb_mbuf = m;
	rb->rb_paddr = seg.ds_addr;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	/* Hand the (new or recycled) buffer back to the hardware ring. */
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}

/*
 * Sysctl handler: hw.etN.rx_intr_npkts (RX interrupt coalescing -- number
 * of packets to accumulate before raising an RX interrupt).  Rejects
 * non-positive values; writes the new value to the ET_RX_INTR_NPKTS
 * register immediately when the interface is running.
 */
static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	/* Serialize against the interrupt handler/ioctl path. */
	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_npkts;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;	/* error, or read-only access */
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_npkts != v) {
		/* Only touch hardware while the chip is up. */
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
		sc->sc_rx_intr_npkts = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

/*
 * Sysctl handler: hw.etN.rx_intr_delay (RX interrupt coalescing -- delay
 * before raising an RX interrupt).  Same validation and register-update
 * pattern as et_sysctl_rx_intr_npkts, for ET_RX_INTR_DELAY.
 */
static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_delay;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;	/* error, or read-only access */
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_delay != v) {
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
		sc->sc_rx_intr_delay = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

/*
 * Program the MAC's mode/duplex registers (ET_MAC_CFG2, ET_MAC_CTRL) to
 * match the currently negotiated MII media: GMII mode for 1000baseT,
 * MII mode otherwise; full- vs half-duplex per the media's duplex flag.
 */
static void
et_setmedia(struct et_softc *sc)
{
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t cfg2, ctrl;

	/* Clear mode/duplex bits, then set the baseline config. */
	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
		  ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
		__SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;	/* gigabit half-duplex bit */

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}

/*
 * Allocate the jumbo-frame buffer pool: one contiguous DMA area of
 * ET_JUMBO_MEM_SIZE bytes carved into ET_JSLOTS slots of ET_JLEN bytes,
 * plus a slot descriptor array and a free list.  Returns 0 or an errno
 * from et_dma_mem_create().
 */
static int
et_jumbo_mem_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;
	bus_addr_t paddr;
	uint8_t *buf;
	int error, i;

	error = et_dma_mem_create(dev, ET_JUMBO_MEM_SIZE, &jd->jd_dtag,
				  &jd->jd_buf, &paddr, &jd->jd_dmap);
	if (error) {
		device_printf(dev, "can't create jumbo DMA stuffs\n");
		return error;
	}

	jd->jd_slots = kmalloc(sizeof(*jd->jd_slots) * ET_JSLOTS, M_DEVBUF,
			       M_WAITOK | M_ZERO);
	lwkt_serialize_init(&jd->jd_serializer);
	SLIST_INIT(&jd->jd_free_slots);

	/*
	 * Walk the DMA area, recording each slot's KVA and bus address
	 * and chaining every slot onto the free list.
	 */
	buf = jd->jd_buf;
	for (i = 0; i < ET_JSLOTS; ++i) {
		struct et_jslot *jslot = &jd->jd_slots[i];

		jslot->jslot_data = jd;
		jslot->jslot_buf = buf;
		jslot->jslot_paddr = paddr;
		jslot->jslot_inuse = 0;
		jslot->jslot_index = i;
		SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot, jslot_link);

		buf += ET_JLEN;
		paddr += ET_JLEN;
	}
	return 0;
}

/*
 * Release the jumbo buffer pool allocated by et_jumbo_mem_alloc().
 * Caller must guarantee no jumbo slots are still referenced by mbufs.
 */
static void
et_jumbo_mem_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;

	KKASSERT(sc->sc_flags & ET_FLAG_JUMBO);

	kfree(jd->jd_slots, M_DEVBUF);
	et_dma_mem_destroy(jd->jd_dtag, jd->jd_buf, jd->jd_dmap);
}

/*
 * Pop a jumbo slot off the free list and mark it in use (refcount 1).
 * Returns NULL when the pool is exhausted.
 */
static struct et_jslot *
et_jalloc(struct et_jumbo_data *jd)
{
	struct et_jslot *jslot;

	lwkt_serialize_enter(&jd->jd_serializer);

	jslot = SLIST_FIRST(&jd->jd_free_slots);
	if (jslot) {
		SLIST_REMOVE_HEAD(&jd->jd_free_slots, jslot_link);
		jslot->jslot_inuse = 1;
	}

	lwkt_serialize_exit(&jd->jd_serializer);
	return jslot;
}

/*
 * mbuf external-buffer free callback (m_ext.ext_free) for jumbo slots:
 * drop one reference and return the slot to the free list when the
 * count reaches zero.  Panics on a corrupted or already-free slot.
 */
static void
et_jfree(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	/* Sanity checks: slot pointer must map back to its pool entry. */
	if (&jd->jd_slots[jslot->jslot_index] != jslot) {
		panic("%s wrong jslot!?\n", __func__);
	} else if (jslot->jslot_inuse == 0) {
		panic("%s jslot already freed\n", __func__);
	} else {
		lwkt_serialize_enter(&jd->jd_serializer);

		/*
		 * Drop a reference; last one out puts the slot back on
		 * the free list (both under the pool serializer).
		 */
		atomic_subtract_int(&jslot->jslot_inuse, 1);
		if (jslot->jslot_inuse == 0) {
			SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot,
					  jslot_link);
		}

		lwkt_serialize_exit(&jd->jd_serializer);
	}
}

/*
 * mbuf external-buffer reference callback (m_ext.ext_ref) for jumbo
 * slots: take an additional reference on an in-use slot.
 */
static void
et_jref(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	if (&jd->jd_slots[jslot->jslot_index] != jslot)
		panic("%s wrong jslot!?\n", __func__);
	else if (jslot->jslot_inuse == 0)
		panic("%s jslot already freed\n", __func__);
	else
		atomic_add_int(&jslot->jslot_inuse, 1);
}

/*
 * Attach a fresh jumbo buffer to RX descriptor 'buf_idx' of the jumbo
 * ring.  Mirrors the standard-ring newbuf path, but the data area comes
 * from the pre-mapped jumbo pool (no per-buffer DMA load needed); the
 * slot is hooked to the mbuf as an external buffer with et_jfree/et_jref
 * managing its lifetime.  On runtime failure the old buffer is requeued.
 */
static int
et_newbuf_jumbo(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	struct et_jslot *jslot;
	int error;

	KASSERT(rbd->rbd_jumbo, ("calling %s with non-jumbo ring\n", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	error = ENOBUFS;

	/* Header-only mbuf; the payload lives in the jumbo slot. */
	MGETHDR(m, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL) {
		if (init) {
			if_printf(&sc->arpcom.ac_if, "MGETHDR failed\n");
			return error;
		} else {
			goto back;	/* requeue old buffer, see 'back:' */
		}
	}

	jslot = et_jalloc(&sc->sc_jumbo_data);
	if (jslot == NULL) {
		m_freem(m);

		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "jslot allocation failed\n");
			return error;
		} else {
			goto back;
		}
	}

	/* Wire the jumbo slot into the mbuf as an external buffer. */
	m->m_ext.ext_arg = jslot;
	m->m_ext.ext_buf = jslot->jslot_buf;
	m->m_ext.ext_free = et_jfree;
	m->m_ext.ext_ref = et_jref;
	m->m_ext.ext_size = ET_JUMBO_FRAMELEN;
	m->m_flags |= M_EXT;
	m->m_data = m->m_ext.ext_buf;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	rb->rb_mbuf = m;
	rb->rb_paddr = jslot->jslot_paddr;

	error = 0;
back:
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}

/*
 * Write RX descriptor 'buf_idx' of the given ring: split the buffer's
 * bus address into hi/lo words, stamp the buffer index into the control
 * word, and sync the descriptor ring so the hardware sees the update.
 */
static void
et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr)
{
	struct et_rxdesc_ring *rx_ring = rbd->rbd_ring;
	struct et_rxdesc *desc;

	KKASSERT(buf_idx < ET_RX_NDESC);
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(paddr);
	desc->rd_addr_lo = ET_ADDR_LO(paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);

	bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
			BUS_DMASYNC_PREWRITE);
}