1 /* 2 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Sepherosa Ziehau <sepherosa@gmail.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.17 2008/09/17 08:51:29 sephe Exp $
 */

#include <sys/param.h>
#include <sys/bitops.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/et/if_etreg.h>
#include <dev/netif/et/if_etvar.h>

#include "miibus_if.h"

/* Device interface (newbus) methods */
static int	et_probe(device_t);
static int	et_attach(device_t);
static int	et_detach(device_t);
static int	et_shutdown(device_t);

/* MII bus accessors */
static int	et_miibus_readreg(device_t, int, int);
static int	et_miibus_writereg(device_t, int, int, int);
static void	et_miibus_statchg(device_t);

/* ifnet entry points */
static void	et_init(void *);
static int	et_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	et_start(struct ifnet *);
static void	et_watchdog(struct ifnet *);
static int	et_ifmedia_upd(struct ifnet *);
static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

/* Interrupt moderation sysctl handlers */
static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);

/* Interrupt handling and RX/TX completion */
static void	et_intr(void *);
static void	et_enable_intrs(struct et_softc *, uint32_t);
static void	et_disable_intrs(struct et_softc *);
static void	et_rxeof(struct et_softc *);
static void	et_txeof(struct et_softc *, int);

/* DMA memory, ring and mbuf management */
static int	et_dma_alloc(device_t);
static void	et_dma_free(device_t);
static int	et_dma_mem_create(device_t, bus_size_t, bus_dma_tag_t *,
			void **, bus_addr_t *, bus_dmamap_t *);
static void	et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
static int	et_dma_mbuf_create(device_t);
static void	et_dma_mbuf_destroy(device_t, int, const int[]);
static int	et_jumbo_mem_alloc(device_t);
static void	et_jumbo_mem_free(device_t);
static void	et_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
static void	et_dma_buf_addr(void *, bus_dma_segment_t *, int,
			bus_size_t, int);
static int	et_init_tx_ring(struct et_softc *);
static int	et_init_rx_ring(struct et_softc *);
static void	et_free_tx_ring(struct et_softc *);
static void	et_free_rx_ring(struct et_softc *);
static int	et_encap(struct et_softc *, struct mbuf **);
static struct et_jslot *
		et_jalloc(struct et_jumbo_data *);
static void	et_jfree(void *);
static void	et_jref(void *);
static int	et_newbuf(struct et_rxbuf_data *, int, int, int);
static int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
static int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);
static int	et_newbuf_jumbo(struct et_rxbuf_data *, int, int);

/* Chip bring-up/teardown helpers */
static void	et_stop(struct et_softc *);
static int	et_chip_init(struct et_softc *);
static void	et_chip_attach(struct et_softc *);
static void	et_init_mac(struct et_softc *);
static void	et_init_rxmac(struct et_softc *);
static void	et_init_txmac(struct et_softc *);
static int	et_init_rxdma(struct et_softc *);
static int	et_init_txdma(struct et_softc *);
static int	et_start_rxdma(struct et_softc *);
static int	et_start_txdma(struct et_softc *);
static int	et_stop_rxdma(struct et_softc *);
static int	et_stop_txdma(struct et_softc *);
static int	et_enable_txrx(struct et_softc *, int);
static void	et_reset(struct et_softc *);
static int	et_bus_config(device_t);
static void	et_get_eaddr(device_t, uint8_t[]);
static void	et_setmulti(struct et_softc *);
static void	et_tick(void *);
static void	et_setmedia(struct et_softc *);
static void	et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t);

/* PCI IDs of the chips this driver attaches to; NULL desc terminates. */
static const struct et_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
	  "Agere ET1310 Gigabit Ethernet" },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
	  "Agere ET1310 Fast Ethernet" },
	{ 0, 0, NULL }
};

static device_method_t et_methods[] = {
	DEVMETHOD(device_probe,		et_probe),
	DEVMETHOD(device_attach,	et_attach),
	DEVMETHOD(device_detach,	et_detach),
	DEVMETHOD(device_shutdown,	et_shutdown),
#if 0
	DEVMETHOD(device_suspend,	et_suspend),
	DEVMETHOD(device_resume,	et_resume),
#endif

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
	DEVMETHOD(miibus_statchg,	et_miibus_statchg),

	{ 0, 0 }
};

static driver_t et_driver = {
	"et",
	et_methods,
	sizeof(struct et_softc)
};

static devclass_t et_devclass;

DECLARE_DUMMY_MODULE(if_et);
MODULE_DEPEND(if_et, miibus, 1, 1, 1);
DRIVER_MODULE(if_et, pci, et_driver, et_devclass, 0, 0);
DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);

/*
 * Tunable defaults, copied into the softc at attach time and then
 * adjustable per-device through the sysctl tree created in et_attach().
 */
static int	et_rx_intr_npkts = 129;
static int	et_rx_intr_delay = 25;		/* x4 usec */
static int	et_tx_intr_nsegs = 256;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

TUNABLE_INT("hw.et.timer", &et_timer);
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);

/*
 * Per-RX-ring buffer policy: buffer size register value, whether the
 * ring uses jumbo buffers, and the refill function for that ring.
 */
struct et_bsize {
	int		bufsize;
	int		jumbo;
	et_newbuf_t	newbuf;
};
/* RX buffer policies for standard MTU: 128-byte hdr ring + 2K cluster ring. */
static const struct et_bsize	et_bufsize_std[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128,	.jumbo = 0,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_2048,	.jumbo = 0,
	  .newbuf = et_newbuf_cluster },
};

/* RX buffer policies for jumbo MTU: ring 1 switches to 16K jumbo buffers. */
static const struct et_bsize	et_bufsize_jumbo[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128,	.jumbo = 0,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_16384,	.jumbo = 1,
	  .newbuf = et_newbuf_jumbo },
};

/*
 * Match the PCI vendor/device ID against et_devices[]; set the device
 * description and claim the device on a hit.
 */
static int
et_probe(device_t dev)
{
	const struct et_dev *d;
	uint16_t did, vid;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = et_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}

/*
 * Attach: allocate BAR/IRQ resources, create the sysctl tree, configure
 * the PCI(e) bus parameters, reset the chip, set up DMA memory, probe
 * the PHY, attach the ifnet and hook up the interrupt.  On any failure
 * et_detach() is called to unwind whatever was set up.
 */
static int
et_attach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	callout_init(&sc->sc_tick);

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		/* Save config registers that are lost across the D-state change */
		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, ET_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, ET_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->sc_mem_rid = ET_PCIR_BAR;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res);
	sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
					     SYSCTL_STATIC_CHILDREN(_hw),
					     OID_AUTO,
					     device_get_nameunit(dev),
					     CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
			OID_AUTO, "rx_intr_npkts", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, et_sysctl_rx_intr_npkts, "I",
			"RX IM, # packets per RX interrupt");
	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
			OID_AUTO, "rx_intr_delay", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, et_sysctl_rx_intr_delay, "I",
			"RX IM, RX interrupt delay (x10 usec)");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "tx_intr_nsegs", CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
		       "TX IM, # segments per TX interrupt");
	SYSCTL_ADD_UINT(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
			"timer", CTLFLAG_RW, &sc->sc_timer, 0,
			"TX timer");

	error = et_bus_config(dev);
	if (error)
		goto fail;

	et_get_eaddr(dev, eaddr);

	/* Gate the clocks before resetting the chip */
	CSR_WRITE_4(sc, ET_PM,
		    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(dev);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_watchdog = et_watchdog;
	ifp->if_mtu = ETHERMTU;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifq_set_maxlen(&ifp->if_snd, ET_TX_NDESC);
	ifq_set_ready(&ifp->if_snd);

	et_chip_attach(sc);

	error = mii_phy_probe(dev, &sc->sc_miibus,
			      et_ifmedia_upd, et_ifmedia_sts);
	if (error) {
		device_printf(dev, "can't probe any PHY\n");
		goto fail;
	}

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, et_intr, sc,
			       &sc->sc_irq_handle, ifp->if_serializer);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "can't setup intr\n");
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return 0;
fail:
	et_detach(dev);
	return error;
}

/*
 * Detach: stop the chip and tear down everything et_attach() set up.
 * Safe to call on a partially attached device (used as the attach
 * failure path), hence the NULL checks on each resource.
 */
static int
et_detach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		et_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sc_sysctl_ctx);

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	et_dma_free(dev);

	return 0;
}

/*
 * Shutdown: quiesce the hardware under the interface serializer.
 */
static int
et_shutdown(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	et_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
	return 0;
}

/*
 * Read a PHY register through the MAC's MII management interface.
 * Polls ET_MII_IND until the chip reports the read finished; returns
 * 0 on timeout (the MII framework has no error return).
 */
static int
et_miibus_readreg(device_t dev, int phy, int reg)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	      __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(&sc->arpcom.ac_if,
			  "read phy %d, reg %d timed out\n", phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}

/*
 * Write a PHY register through the MII management interface.  On
 * timeout a dummy read is issued to clear the pending operation.
 * Always returns 0 (MII write interface has no error return).
 */
static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	      __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(&sc->arpcom.ac_if,
			  "write phy %d, reg %d timed out\n", phy, reg);
		/* Dummy read clears the stuck operation */
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return 0;
}

/*
 * miibus link state change callback: reprogram the MAC for the new media.
 */
static void
et_miibus_statchg(device_t dev)
{
	et_setmedia(device_get_softc(dev));
}

/*
 * ifmedia change handler: reset non-primary PHY instances and kick off
 * media (re)negotiation.
 */
static int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

/*
 * ifmedia status handler: report the current media and link status.
 */
static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Stop the interface: halt DMA engines, mask interrupts, release ring
 * buffers and reset the chip.  Caller must hold the serializer.
 */
static void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->sc_tick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;
	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Configure PCI(e) specific settings: verify the EEPROM status, set ACK
 * latency and replay timer according to the max payload size, set the
 * L0s/L1 latency timers and the max read request size.
 */
static int
et_bus_config(device_t dev)
{
	uint32_t val, max_plsz;
	uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	val = pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		device_printf(dev, "EEPROM status error 0x%02x\n", val);
		return ENXIO;
	}

	/* TODO: LED */

	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CAPS, 4);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		/* Unexpected payload size: keep the chip's current values */
		ack_latency = pci_read_config(dev, ET_PCIR_ACK_LATENCY, 2);
		replay_timer = pci_read_config(dev, ET_PCIR_REPLAY_TIMER, 2);
		device_printf(dev, "ack latency %u, replay timer %u\n",
			      ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_write_config(dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
		pci_write_config(dev, ET_PCIR_REPLAY_TIMER, replay_timer, 2);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_write_config(dev, ET_PCIR_L0S_L1_LATENCY, val, 1);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CTRL, 2);
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_write_config(dev, ET_PCIR_DEVICE_CTRL, val, 2);

	return 0;
}

/*
 * Extract the factory MAC address from PCI config space: 4 bytes from
 * ET_PCIR_MAC_ADDR0 followed by 2 bytes from ET_PCIR_MAC_ADDR1,
 * little-endian byte order.
 */
static void
et_get_eaddr(device_t dev, uint8_t eaddr[])
{
	uint32_t val;
	int i;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
	for (i = 0; i < 4; ++i)
		eaddr[i] = (val >> (8 * i)) & 0xff;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
	for (; i < ETHER_ADDR_LEN; ++i)
		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
}

/*
 * Full chip reset: reset the MAC functional blocks, soft-reset the DMA
 * engines and MAC/MMC units, then bring the MAC out of reset.
 */
static void
et_reset(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
		    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
		    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
		    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

/* Mask all interrupt sources (ET_INTR_MASK bits set == masked). */
static void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

/* Unmask exactly the interrupt sources in 'intrs'. */
static void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}

/*
 * Allocate all DMA memory: the parent tag, TX ring + TX status block,
 * both RX rings, the RX stat ring + RX status block, mbuf DMA maps and
 * (best-effort) the jumbo buffer pool.  On failure the caller is
 * expected to run et_dma_free() via et_detach().
 */
static int
et_dma_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create top level DMA tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MAXBSIZE,
				   BUS_SPACE_UNRESTRICTED,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &sc->sc_dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return error;
	}

	/*
	 * Create TX ring DMA stuffs
	 */
	error = et_dma_mem_create(dev, ET_TX_RING_SIZE, &tx_ring->tr_dtag,
				  (void **)&tx_ring->tr_desc,
				  &tx_ring->tr_paddr, &tx_ring->tr_dmap);
	if (error) {
		device_printf(dev, "can't create TX ring DMA stuffs\n");
		return error;
	}

	/*
	 * Create TX status DMA stuffs
	 */
	error = et_dma_mem_create(dev, sizeof(uint32_t), &txsd->txsd_dtag,
				  (void **)&txsd->txsd_status,
				  &txsd->txsd_paddr, &txsd->txsd_dmap);
	if (error) {
		device_printf(dev, "can't create TX status DMA stuffs\n");
		return error;
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		{ ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		error = et_dma_mem_create(dev, ET_RX_RING_SIZE,
					  &rx_ring->rr_dtag,
					  (void **)&rx_ring->rr_desc,
					  &rx_ring->rr_paddr,
					  &rx_ring->rr_dmap);
		if (error) {
			device_printf(dev, "can't create DMA stuffs for "
				      "the %d RX ring\n", i);
			return error;
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	error = et_dma_mem_create(dev, ET_RXSTAT_RING_SIZE,
				  &rxst_ring->rsr_dtag,
				  (void **)&rxst_ring->rsr_stat,
				  &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap);
	if (error) {
		device_printf(dev, "can't create RX stat ring DMA stuffs\n");
		return error;
	}

	/*
	 * Create RX status DMA stuffs
	 */
	error = et_dma_mem_create(dev, sizeof(struct et_rxstatus),
				  &rxsd->rxsd_dtag,
				  (void **)&rxsd->rxsd_status,
				  &rxsd->rxsd_paddr, &rxsd->rxsd_dmap);
	if (error) {
		device_printf(dev, "can't create RX status DMA stuffs\n");
		return error;
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(dev);
	if (error)
		return error;

	/*
	 * Create jumbo buffer DMA stuffs
	 * NOTE: Allow it to fail
	 */
	if (et_jumbo_mem_alloc(dev) == 0)
		sc->sc_flags |= ET_FLAG_JUMBO;

	return 0;
}

/*
 * Release everything et_dma_alloc() created.  Each helper tolerates
 * NULL tags, so this is safe on a partially completed allocation.
 */
static void
et_dma_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc,
			   tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status,
			   txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc,
				   rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat,
			   rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status,
			   rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done);

	/*
	 * Destroy jumbo buffer DMA stuffs
	 */
	if (sc->sc_flags & ET_FLAG_JUMBO)
		et_jumbo_mem_free(dev);

	/*
	 * Destroy top level DMA tag
	 */
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);
}

/*
 * Create the mbuf DMA tag plus per-descriptor DMA maps for RX and TX
 * mbufs (and one spare RX map).  On partial failure the maps created
 * so far are torn down through et_dma_mbuf_destroy().
 */
static int
et_dma_mbuf_create(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create mbuf DMA tag
	 */
	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   ET_JUMBO_FRAMELEN, ET_NSEG_MAX,
				   BUS_SPACE_MAXSIZE_32BIT,
				   BUS_DMA_ALLOCNOW, &sc->sc_mbuf_dtag);
	if (error) {
		device_printf(dev, "can't create mbuf DMA tag\n");
		return error;
	}

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, &sc->sc_mbuf_tmp_dmap);
	if (error) {
		device_printf(dev, "can't create spare mbuf DMA map\n");
		bus_dma_tag_destroy(sc->sc_mbuf_dtag);
		sc->sc_mbuf_dtag = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
						  &rbd->rbd_buf[j].rb_dmap);
			if (error) {
				device_printf(dev, "can't create %d RX mbuf "
					      "for %d RX ring\n", j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(dev, 0, rx_done);
				return error;
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
					  &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			device_printf(dev, "can't create %d TX mbuf "
				      "DMA map\n", i);
			et_dma_mbuf_destroy(dev, i, rx_done);
			return error;
		}
	}

	return 0;
}

/*
 * Destroy the mbuf DMA maps and tag.  tx_done / rx_done[] bound how
 * many maps were successfully created per direction/ring, allowing
 * partial teardown from et_dma_mbuf_create()'s error paths.
 */
static void
et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[])
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	if (sc->sc_mbuf_dtag == NULL)
		return;

	/*
	 * Destroy DMA maps for RX mbufs
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < rx_done[i]; ++j) {
			struct et_rxbuf *rb = &rbd->rbd_buf[j];

			KASSERT(rb->rb_mbuf == NULL,
				("RX mbuf in %d RX ring is not freed yet\n", i));
			bus_dmamap_destroy(sc->sc_mbuf_dtag, rb->rb_dmap);
		}
	}

	/*
	 * Destroy DMA maps for TX mbufs
	 */
	for (i = 0; i < tx_done; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n"));
		bus_dmamap_destroy(sc->sc_mbuf_dtag, tb->tb_dmap);
	}

	/*
	 * Destroy spare mbuf DMA map
	 */
	bus_dmamap_destroy(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap);

	/*
	 * Destroy mbuf DMA tag
	 */
	bus_dma_tag_destroy(sc->sc_mbuf_dtag);
	sc->sc_mbuf_dtag = NULL;
}

/*
 * Allocate a single contiguous DMA memory block: create a tag, allocate
 * zeroed memory and load it, returning the tag, KVA, bus address and
 * map through the out parameters.  Cleans up after itself on failure.
 */
static int
et_dma_mem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
		  void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
{
	struct et_softc *sc = device_get_softc(dev);
	int error;

	error = bus_dma_tag_create(sc->sc_dtag, ET_ALIGN, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   size, 1, BUS_SPACE_MAXSIZE_32BIT,
				   0, dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 dmap);
	if (error) {
		device_printf(dev, "can't allocate DMA mem\n");
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return error;
	}

	error = bus_dmamap_load(*dtag, *dmap, *addr, size,
				et_dma_ring_addr, paddr, BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "can't load DMA mem\n");
		bus_dmamem_free(*dtag, *addr, *dmap);
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return error;
	}
	return 0;
}

/*
 * Counterpart of et_dma_mem_create(); no-op if the tag was never created.
 */
static void
et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	if (dtag != NULL) {
		bus_dmamap_unload(dtag, dmap);
		bus_dmamem_free(dtag, addr, dmap);
		bus_dma_tag_destroy(dtag);
	}
}

/*
 * bus_dmamap_load() callback: store the (single) segment's bus address
 * into the bus_addr_t pointed to by 'arg'.
 */
static void
et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	KASSERT(nseg == 1, ("too many segments\n"));
	*((bus_addr_t *)arg) = seg->ds_addr;
}

/*
 * Minimal one-time hardware setup done at attach: disable loopback,
 * reset the MAC, program half-duplex parameters, reset the MII and
 * enable the memory controllers.
 */
static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

/*
 * Interrupt handler.  Runs under the interface serializer; masks all
 * interrupts while processing, dispatches RX/TX completion, restarts
 * the TX timer if it fired, then re-enables the usual sources.
 */
static void
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t intrs;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	et_disable_intrs(sc);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc, 1);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);
}

/*
 * (Re)initialize the interface: stop it, select the RX buffer policy
 * based on the current MTU, set up rings, program the chip, enable
 * TX/RX and interrupts, and start the tick callout.  On any error the
 * interface is stopped again.
 */
static void
et_init(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const struct et_bsize *arr;
	int error, i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	et_stop(sc);

	/* Pick standard or jumbo buffer policy depending on frame size */
	arr = ET_FRAMELEN(ifp->if_mtu) < MCLBYTES ?
	      et_bufsize_std : et_bufsize_jumbo;
	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
		sc->sc_rx_data[i].rbd_jumbo = arr[i].jumbo;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc, 1);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
back:
	if (error)
		et_stop(sc);
}

/*
 * ioctl handler: interface flags, media, multicast membership and MTU
 * changes.  MTU changes beyond what the current buffer configuration
 * supports are rejected; a valid change reinitializes the interface.
 */
static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, max_framelen;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/* Only reprogram filters on filter-flag change */
				if ((ifp->if_flags ^ sc->sc_if_flags) &
				    (IFF_ALLMULTI | IFF_PROMISC))
					et_setmulti(sc);
			} else {
				et_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			et_setmulti(sc);
		break;

	case SIOCSIFMTU:
		if (sc->sc_flags & ET_FLAG_JUMBO)
			max_framelen = ET_JUMBO_FRAMELEN;
		else
			max_framelen = MCLBYTES - 1;

		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
			error = EOPNOTSUPP;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		if (ifp->if_flags & IFF_RUNNING)
			et_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

/*
 * if_start: drain the send queue into the TX ring.  Keeps ET_NSEG_SPARE
 * descriptors in reserve; when the ring looks full, TX completions are
 * reaped once (et_txeof) before declaring OACTIVE.
 */
static void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int trans, oactive;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	oactive = 0;
	trans = 0;
	for (;;) {
		struct mbuf *m;
		int error;

		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			if (oactive) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}

			et_txeof(sc, 0);
			oactive = 1;
			continue;
		}

		m = ifq_dequeue(&ifp->if_snd, NULL);
		if (m == NULL)
			break;

		error = et_encap(sc, &m);
		if (error) {
			ifp->if_oerrors++;
			KKASSERT(m == NULL);

			if (error == EFBIG) {
				/*
				 * Excessive fragmented packets
				 */
				if (oactive) {
					ifp->if_flags |= IFF_OACTIVE;
					break;
				}
				et_txeof(sc, 0);
				oactive = 1;
			}
			continue;
		} else {
			oactive = 0;
		}
		trans = 1;

		BPF_MTAP(ifp, m);
	}

	if (trans)
		ifp->if_timer = 5;
}

/*
 * TX watchdog: fired when the TX timer set in et_start() expires
 * without completions; reinitialize and restart transmission.
 */
static void
et_watchdog(struct ifnet *ifp)
{
	ASSERT_SERIALIZED(ifp->if_serializer);

	if_printf(ifp, "watchdog timed out\n");

	ifp->if_init(ifp->if_softc);
	if_devstart(ifp);
}

/*
 * Halt the RX DMA engine and wait briefly for it to acknowledge.
 */
static int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
		    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		if_printf(&sc->arpcom.ac_if, "can't stop RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

/*
 * Halt the TX DMA engine.  No completion poll is done here; always
 * returns 0.
 */
static int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
		    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

/*
 * Unload and free all mbufs held in the TX ring, then clear and sync
 * the descriptor memory.
 */
static void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
			BUS_DMASYNC_PREWRITE);
}

/*
 * Free all mbufs held in every RX ring.  Jumbo buffers are not DMA
 * mapped per-descriptor, so only non-jumbo buffers are unloaded.
 */
static void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				if (!rbd->rbd_jumbo) {
					bus_dmamap_unload(sc->sc_mbuf_dtag,
							  rb->rb_dmap);
				}
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}

		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
		bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
				BUS_DMASYNC_PREWRITE);
	}
}

/*
 * Program the RX packet filter (broadcast/multicast/unicast bits and
 * the multicast hash) from the interface's address list.
 */
static void
et_setmulti(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ifmultiaddr *ifma;
	int i, count;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if
(ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1392 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT; 1393 goto back; 1394 } 1395 1396 count = 0; 1397 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1398 uint32_t *hp, h; 1399 1400 if (ifma->ifma_addr->sa_family != AF_LINK) 1401 continue; 1402 1403 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 1404 ifma->ifma_addr), ETHER_ADDR_LEN); 1405 h = (h & 0x3f800000) >> 23; 1406 1407 hp = &hash[0]; 1408 if (h >= 32 && h < 64) { 1409 h -= 32; 1410 hp = &hash[1]; 1411 } else if (h >= 64 && h < 96) { 1412 h -= 64; 1413 hp = &hash[2]; 1414 } else if (h >= 96) { 1415 h -= 96; 1416 hp = &hash[3]; 1417 } 1418 *hp |= (1 << h); 1419 1420 ++count; 1421 } 1422 1423 for (i = 0; i < 4; ++i) 1424 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]); 1425 1426 if (count > 0) 1427 pktfilt |= ET_PKTFILT_MCAST; 1428 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT; 1429 back: 1430 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt); 1431 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl); 1432 } 1433 1434 static int 1435 et_chip_init(struct et_softc *sc) 1436 { 1437 struct ifnet *ifp = &sc->arpcom.ac_if; 1438 uint32_t rxq_end; 1439 int error, frame_len, rxmem_size; 1440 1441 /* 1442 * Split 16Kbytes internal memory between TX and RX 1443 * according to frame length. 
1444 */ 1445 frame_len = ET_FRAMELEN(ifp->if_mtu); 1446 if (frame_len < 2048) { 1447 rxmem_size = ET_MEM_RXSIZE_DEFAULT; 1448 } else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) { 1449 rxmem_size = ET_MEM_SIZE / 2; 1450 } else { 1451 rxmem_size = ET_MEM_SIZE - 1452 roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT); 1453 } 1454 rxq_end = ET_QUEUE_ADDR(rxmem_size); 1455 1456 CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START); 1457 CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end); 1458 CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1); 1459 CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END); 1460 1461 /* No loopback */ 1462 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1463 1464 /* Clear MSI configure */ 1465 CSR_WRITE_4(sc, ET_MSI_CFG, 0); 1466 1467 /* Disable timer */ 1468 CSR_WRITE_4(sc, ET_TIMER, 0); 1469 1470 /* Initialize MAC */ 1471 et_init_mac(sc); 1472 1473 /* Enable memory controllers */ 1474 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1475 1476 /* Initialize RX MAC */ 1477 et_init_rxmac(sc); 1478 1479 /* Initialize TX MAC */ 1480 et_init_txmac(sc); 1481 1482 /* Initialize RX DMA engine */ 1483 error = et_init_rxdma(sc); 1484 if (error) 1485 return error; 1486 1487 /* Initialize TX DMA engine */ 1488 error = et_init_txdma(sc); 1489 if (error) 1490 return error; 1491 1492 return 0; 1493 } 1494 1495 static int 1496 et_init_tx_ring(struct et_softc *sc) 1497 { 1498 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1499 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1500 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1501 1502 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1503 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 1504 BUS_DMASYNC_PREWRITE); 1505 1506 tbd->tbd_start_index = 0; 1507 tbd->tbd_start_wrap = 0; 1508 tbd->tbd_used = 0; 1509 1510 bzero(txsd->txsd_status, sizeof(uint32_t)); 1511 bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap, 1512 BUS_DMASYNC_PREWRITE); 1513 return 0; 1514 } 1515 1516 static int 1517 et_init_rx_ring(struct et_softc *sc) 1518 { 1519 struct 
et_rxstatus_data *rxsd = &sc->sc_rx_status; 1520 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1521 int n; 1522 1523 for (n = 0; n < ET_RX_NRING; ++n) { 1524 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1525 int i, error; 1526 1527 for (i = 0; i < ET_RX_NDESC; ++i) { 1528 error = rbd->rbd_newbuf(rbd, i, 1); 1529 if (error) { 1530 if_printf(&sc->arpcom.ac_if, "%d ring %d buf, " 1531 "newbuf failed: %d\n", n, i, error); 1532 return error; 1533 } 1534 } 1535 } 1536 1537 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus)); 1538 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap, 1539 BUS_DMASYNC_PREWRITE); 1540 1541 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE); 1542 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap, 1543 BUS_DMASYNC_PREWRITE); 1544 1545 return 0; 1546 } 1547 1548 static void 1549 et_dma_buf_addr(void *xctx, bus_dma_segment_t *segs, int nsegs, 1550 bus_size_t mapsz __unused, int error) 1551 { 1552 struct et_dmamap_ctx *ctx = xctx; 1553 int i; 1554 1555 if (error) 1556 return; 1557 1558 if (nsegs > ctx->nsegs) { 1559 ctx->nsegs = 0; 1560 return; 1561 } 1562 1563 ctx->nsegs = nsegs; 1564 for (i = 0; i < nsegs; ++i) 1565 ctx->segs[i] = segs[i]; 1566 } 1567 1568 static int 1569 et_init_rxdma(struct et_softc *sc) 1570 { 1571 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1572 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1573 struct et_rxdesc_ring *rx_ring; 1574 int error; 1575 1576 error = et_stop_rxdma(sc); 1577 if (error) { 1578 if_printf(&sc->arpcom.ac_if, "can't init RX DMA engine\n"); 1579 return error; 1580 } 1581 1582 /* 1583 * Install RX status 1584 */ 1585 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr)); 1586 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr)); 1587 1588 /* 1589 * Install RX stat ring 1590 */ 1591 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr)); 1592 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr)); 1593 CSR_WRITE_4(sc, ET_RXSTAT_CNT, 
ET_RX_NSTAT - 1); 1594 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0); 1595 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1); 1596 1597 /* Match ET_RXSTAT_POS */ 1598 rxst_ring->rsr_index = 0; 1599 rxst_ring->rsr_wrap = 0; 1600 1601 /* 1602 * Install the 2nd RX descriptor ring 1603 */ 1604 rx_ring = &sc->sc_rx_ring[1]; 1605 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1606 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1607 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1); 1608 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP); 1609 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1610 1611 /* Match ET_RX_RING1_POS */ 1612 rx_ring->rr_index = 0; 1613 rx_ring->rr_wrap = 1; 1614 1615 /* 1616 * Install the 1st RX descriptor ring 1617 */ 1618 rx_ring = &sc->sc_rx_ring[0]; 1619 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1620 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1621 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1); 1622 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP); 1623 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1624 1625 /* Match ET_RX_RING0_POS */ 1626 rx_ring->rr_index = 0; 1627 rx_ring->rr_wrap = 1; 1628 1629 /* 1630 * RX intr moderation 1631 */ 1632 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts); 1633 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay); 1634 1635 return 0; 1636 } 1637 1638 static int 1639 et_init_txdma(struct et_softc *sc) 1640 { 1641 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1642 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1643 int error; 1644 1645 error = et_stop_txdma(sc); 1646 if (error) { 1647 if_printf(&sc->arpcom.ac_if, "can't init TX DMA engine\n"); 1648 return error; 1649 } 1650 1651 /* 1652 * Install TX descriptor ring 1653 */ 1654 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr)); 1655 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr)); 
1656 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1); 1657 1658 /* 1659 * Install TX status 1660 */ 1661 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr)); 1662 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr)); 1663 1664 CSR_WRITE_4(sc, ET_TX_READY_POS, 0); 1665 1666 /* Match ET_TX_READY_POS */ 1667 tx_ring->tr_ready_index = 0; 1668 tx_ring->tr_ready_wrap = 0; 1669 1670 return 0; 1671 } 1672 1673 static void 1674 et_init_mac(struct et_softc *sc) 1675 { 1676 struct ifnet *ifp = &sc->arpcom.ac_if; 1677 const uint8_t *eaddr = IF_LLADDR(ifp); 1678 uint32_t val; 1679 1680 /* Reset MAC */ 1681 CSR_WRITE_4(sc, ET_MAC_CFG1, 1682 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1683 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1684 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1685 1686 /* 1687 * Setup inter packet gap 1688 */ 1689 val = __SHIFTIN(56, ET_IPG_NONB2B_1) | 1690 __SHIFTIN(88, ET_IPG_NONB2B_2) | 1691 __SHIFTIN(80, ET_IPG_MINIFG) | 1692 __SHIFTIN(96, ET_IPG_B2B); 1693 CSR_WRITE_4(sc, ET_IPG, val); 1694 1695 /* 1696 * Setup half duplex mode 1697 */ 1698 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 1699 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 1700 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 1701 ET_MAC_HDX_EXC_DEFER; 1702 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1703 1704 /* Clear MAC control */ 1705 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1706 1707 /* Reset MII */ 1708 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1709 1710 /* 1711 * Set MAC address 1712 */ 1713 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24); 1714 CSR_WRITE_4(sc, ET_MAC_ADDR1, val); 1715 val = (eaddr[0] << 16) | (eaddr[1] << 24); 1716 CSR_WRITE_4(sc, ET_MAC_ADDR2, val); 1717 1718 /* Set max frame length */ 1719 CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu)); 1720 1721 /* Bring MAC out of reset state */ 1722 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1723 } 1724 1725 static void 1726 et_init_rxmac(struct et_softc *sc) 1727 { 1728 struct ifnet *ifp = 
&sc->arpcom.ac_if; 1729 const uint8_t *eaddr = IF_LLADDR(ifp); 1730 uint32_t val; 1731 int i; 1732 1733 /* Disable RX MAC and WOL */ 1734 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE); 1735 1736 /* 1737 * Clear all WOL related registers 1738 */ 1739 for (i = 0; i < 3; ++i) 1740 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0); 1741 for (i = 0; i < 20; ++i) 1742 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0); 1743 1744 /* 1745 * Set WOL source address. XXX is this necessary? 1746 */ 1747 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5]; 1748 CSR_WRITE_4(sc, ET_WOL_SA_LO, val); 1749 val = (eaddr[0] << 8) | eaddr[1]; 1750 CSR_WRITE_4(sc, ET_WOL_SA_HI, val); 1751 1752 /* Clear packet filters */ 1753 CSR_WRITE_4(sc, ET_PKTFILT, 0); 1754 1755 /* No ucast filtering */ 1756 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0); 1757 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0); 1758 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0); 1759 1760 if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) { 1761 /* 1762 * In order to transmit jumbo packets greater than 1763 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between 1764 * RX MAC and RX DMA needs to be reduced in size to 1765 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen). In 1766 * order to implement this, we must use "cut through" 1767 * mode in the RX MAC, which chops packets down into 1768 * segments. In this case we selected 256 bytes, 1769 * since this is the size of the PCI-Express TLP's 1770 * that the ET1310 uses. 
1771 */ 1772 val = __SHIFTIN(ET_RXMAC_SEGSZ(256), ET_RXMAC_MC_SEGSZ_MAX) | 1773 ET_RXMAC_MC_SEGSZ_ENABLE; 1774 } else { 1775 val = 0; 1776 } 1777 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val); 1778 1779 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0); 1780 1781 /* Initialize RX MAC management register */ 1782 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0); 1783 1784 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0); 1785 1786 CSR_WRITE_4(sc, ET_RXMAC_MGT, 1787 ET_RXMAC_MGT_PASS_ECRC | 1788 ET_RXMAC_MGT_PASS_ELEN | 1789 ET_RXMAC_MGT_PASS_ETRUNC | 1790 ET_RXMAC_MGT_CHECK_PKT); 1791 1792 /* 1793 * Configure runt filtering (may not work on certain chip generation) 1794 */ 1795 val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG; 1796 CSR_WRITE_4(sc, ET_PKTFILT, val); 1797 1798 /* Enable RX MAC but leave WOL disabled */ 1799 CSR_WRITE_4(sc, ET_RXMAC_CTRL, 1800 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE); 1801 1802 /* 1803 * Setup multicast hash and allmulti/promisc mode 1804 */ 1805 et_setmulti(sc); 1806 } 1807 1808 static void 1809 et_init_txmac(struct et_softc *sc) 1810 { 1811 /* Disable TX MAC and FC(?) */ 1812 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE); 1813 1814 /* No flow control yet */ 1815 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0); 1816 1817 /* Enable TX MAC but leave FC(?) 
diabled */ 1818 CSR_WRITE_4(sc, ET_TXMAC_CTRL, 1819 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE); 1820 } 1821 1822 static int 1823 et_start_rxdma(struct et_softc *sc) 1824 { 1825 uint32_t val = 0; 1826 1827 val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize, 1828 ET_RXDMA_CTRL_RING0_SIZE) | 1829 ET_RXDMA_CTRL_RING0_ENABLE; 1830 val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize, 1831 ET_RXDMA_CTRL_RING1_SIZE) | 1832 ET_RXDMA_CTRL_RING1_ENABLE; 1833 1834 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val); 1835 1836 DELAY(5); 1837 1838 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) { 1839 if_printf(&sc->arpcom.ac_if, "can't start RX DMA engine\n"); 1840 return ETIMEDOUT; 1841 } 1842 return 0; 1843 } 1844 1845 static int 1846 et_start_txdma(struct et_softc *sc) 1847 { 1848 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT); 1849 return 0; 1850 } 1851 1852 static int 1853 et_enable_txrx(struct et_softc *sc, int media_upd) 1854 { 1855 struct ifnet *ifp = &sc->arpcom.ac_if; 1856 uint32_t val; 1857 int i, error; 1858 1859 val = CSR_READ_4(sc, ET_MAC_CFG1); 1860 val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN; 1861 val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW | 1862 ET_MAC_CFG1_LOOPBACK); 1863 CSR_WRITE_4(sc, ET_MAC_CFG1, val); 1864 1865 if (media_upd) 1866 et_ifmedia_upd(ifp); 1867 else 1868 et_setmedia(sc); 1869 1870 #define NRETRY 100 1871 1872 for (i = 0; i < NRETRY; ++i) { 1873 val = CSR_READ_4(sc, ET_MAC_CFG1); 1874 if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) == 1875 (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) 1876 break; 1877 1878 DELAY(10); 1879 } 1880 if (i == NRETRY) { 1881 if_printf(ifp, "can't enable RX/TX\n"); 1882 return 0; 1883 } 1884 sc->sc_flags |= ET_FLAG_TXRX_ENABLED; 1885 1886 #undef NRETRY 1887 1888 /* 1889 * Start TX/RX DMA engine 1890 */ 1891 error = et_start_rxdma(sc); 1892 if (error) 1893 return error; 1894 1895 error = et_start_txdma(sc); 1896 if (error) 1897 return error; 1898 1899 return 0; 1900 } 1901 1902 static void 1903 
et_rxeof(struct et_softc *sc) 1904 { 1905 struct ifnet *ifp = &sc->arpcom.ac_if; 1906 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1907 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1908 uint32_t rxs_stat_ring; 1909 int rxst_wrap, rxst_index; 1910 struct mbuf_chain chain[MAXCPU]; 1911 1912 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) 1913 return; 1914 1915 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap, 1916 BUS_DMASYNC_POSTREAD); 1917 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap, 1918 BUS_DMASYNC_POSTREAD); 1919 1920 rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring; 1921 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0; 1922 rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX); 1923 1924 ether_input_chain_init(chain); 1925 1926 while (rxst_index != rxst_ring->rsr_index || 1927 rxst_wrap != rxst_ring->rsr_wrap) { 1928 struct et_rxbuf_data *rbd; 1929 struct et_rxdesc_ring *rx_ring; 1930 struct et_rxstat *st; 1931 struct mbuf *m; 1932 int buflen, buf_idx, ring_idx; 1933 uint32_t rxstat_pos, rxring_pos; 1934 1935 KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT); 1936 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index]; 1937 1938 buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN); 1939 buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX); 1940 ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX); 1941 1942 if (++rxst_ring->rsr_index == ET_RX_NSTAT) { 1943 rxst_ring->rsr_index = 0; 1944 rxst_ring->rsr_wrap ^= 1; 1945 } 1946 rxstat_pos = __SHIFTIN(rxst_ring->rsr_index, 1947 ET_RXSTAT_POS_INDEX); 1948 if (rxst_ring->rsr_wrap) 1949 rxstat_pos |= ET_RXSTAT_POS_WRAP; 1950 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos); 1951 1952 if (ring_idx >= ET_RX_NRING) { 1953 ifp->if_ierrors++; 1954 if_printf(ifp, "invalid ring index %d\n", ring_idx); 1955 continue; 1956 } 1957 if (buf_idx >= ET_RX_NDESC) { 1958 ifp->if_ierrors++; 1959 if_printf(ifp, "invalid buf index %d\n", buf_idx); 1960 continue; 1961 } 1962 1963 rbd = 
&sc->sc_rx_data[ring_idx]; 1964 m = rbd->rbd_buf[buf_idx].rb_mbuf; 1965 1966 if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) { 1967 if (buflen < ETHER_CRC_LEN) { 1968 m_freem(m); 1969 ifp->if_ierrors++; 1970 } else { 1971 m->m_pkthdr.len = m->m_len = buflen; 1972 m->m_pkthdr.rcvif = ifp; 1973 1974 m_adj(m, -ETHER_CRC_LEN); 1975 1976 ifp->if_ipackets++; 1977 ether_input_chain(ifp, m, chain); 1978 } 1979 } else { 1980 ifp->if_ierrors++; 1981 } 1982 m = NULL; /* Catch invalid reference */ 1983 1984 rx_ring = &sc->sc_rx_ring[ring_idx]; 1985 1986 if (buf_idx != rx_ring->rr_index) { 1987 if_printf(ifp, "WARNING!! ring %d, " 1988 "buf_idx %d, rr_idx %d\n", 1989 ring_idx, buf_idx, rx_ring->rr_index); 1990 } 1991 1992 KKASSERT(rx_ring->rr_index < ET_RX_NDESC); 1993 if (++rx_ring->rr_index == ET_RX_NDESC) { 1994 rx_ring->rr_index = 0; 1995 rx_ring->rr_wrap ^= 1; 1996 } 1997 rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX); 1998 if (rx_ring->rr_wrap) 1999 rxring_pos |= ET_RX_RING_POS_WRAP; 2000 CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos); 2001 } 2002 2003 ether_input_dispatch(chain); 2004 } 2005 2006 static int 2007 et_encap(struct et_softc *sc, struct mbuf **m0) 2008 { 2009 struct mbuf *m = *m0; 2010 bus_dma_segment_t segs[ET_NSEG_MAX]; 2011 struct et_dmamap_ctx ctx; 2012 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 2013 struct et_txbuf_data *tbd = &sc->sc_tx_data; 2014 struct et_txdesc *td; 2015 bus_dmamap_t map; 2016 int error, maxsegs, first_idx, last_idx, i; 2017 uint32_t tx_ready_pos, last_td_ctrl2; 2018 2019 maxsegs = ET_TX_NDESC - tbd->tbd_used; 2020 if (maxsegs > ET_NSEG_MAX) 2021 maxsegs = ET_NSEG_MAX; 2022 KASSERT(maxsegs >= ET_NSEG_SPARE, 2023 ("not enough spare TX desc (%d)\n", maxsegs)); 2024 2025 KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC); 2026 first_idx = tx_ring->tr_ready_index; 2027 map = tbd->tbd_buf[first_idx].tb_dmap; 2028 2029 ctx.nsegs = maxsegs; 2030 ctx.segs = segs; 2031 error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m, 
2032 et_dma_buf_addr, &ctx, BUS_DMA_NOWAIT); 2033 if (!error && ctx.nsegs == 0) { 2034 bus_dmamap_unload(sc->sc_mbuf_dtag, map); 2035 error = EFBIG; 2036 } 2037 if (error && error != EFBIG) { 2038 if_printf(&sc->arpcom.ac_if, "can't load TX mbuf, error %d\n", 2039 error); 2040 goto back; 2041 } 2042 if (error) { /* error == EFBIG */ 2043 struct mbuf *m_new; 2044 2045 m_new = m_defrag(m, MB_DONTWAIT); 2046 if (m_new == NULL) { 2047 if_printf(&sc->arpcom.ac_if, "can't defrag TX mbuf\n"); 2048 error = ENOBUFS; 2049 goto back; 2050 } else { 2051 *m0 = m = m_new; 2052 } 2053 2054 ctx.nsegs = maxsegs; 2055 ctx.segs = segs; 2056 error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m, 2057 et_dma_buf_addr, &ctx, 2058 BUS_DMA_NOWAIT); 2059 if (error || ctx.nsegs == 0) { 2060 if (ctx.nsegs == 0) { 2061 bus_dmamap_unload(sc->sc_mbuf_dtag, map); 2062 error = EFBIG; 2063 } 2064 if_printf(&sc->arpcom.ac_if, 2065 "can't load defraged TX mbuf\n"); 2066 goto back; 2067 } 2068 } 2069 2070 bus_dmamap_sync(sc->sc_mbuf_dtag, map, BUS_DMASYNC_PREWRITE); 2071 2072 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG; 2073 sc->sc_tx += ctx.nsegs; 2074 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) { 2075 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs; 2076 last_td_ctrl2 |= ET_TDCTRL2_INTR; 2077 } 2078 2079 last_idx = -1; 2080 for (i = 0; i < ctx.nsegs; ++i) { 2081 int idx; 2082 2083 idx = (first_idx + i) % ET_TX_NDESC; 2084 td = &tx_ring->tr_desc[idx]; 2085 td->td_addr_hi = ET_ADDR_HI(segs[i].ds_addr); 2086 td->td_addr_lo = ET_ADDR_LO(segs[i].ds_addr); 2087 td->td_ctrl1 = __SHIFTIN(segs[i].ds_len, ET_TDCTRL1_LEN); 2088 2089 if (i == ctx.nsegs - 1) { /* Last frag */ 2090 td->td_ctrl2 = last_td_ctrl2; 2091 last_idx = idx; 2092 } 2093 2094 KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC); 2095 if (++tx_ring->tr_ready_index == ET_TX_NDESC) { 2096 tx_ring->tr_ready_index = 0; 2097 tx_ring->tr_ready_wrap ^= 1; 2098 } 2099 } 2100 td = &tx_ring->tr_desc[first_idx]; 2101 td->td_ctrl2 |= 
ET_TDCTRL2_FIRST_FRAG; /* First frag */ 2102 2103 KKASSERT(last_idx >= 0); 2104 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap; 2105 tbd->tbd_buf[last_idx].tb_dmap = map; 2106 tbd->tbd_buf[last_idx].tb_mbuf = m; 2107 2108 tbd->tbd_used += ctx.nsegs; 2109 KKASSERT(tbd->tbd_used <= ET_TX_NDESC); 2110 2111 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 2112 BUS_DMASYNC_PREWRITE); 2113 2114 tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index, 2115 ET_TX_READY_POS_INDEX); 2116 if (tx_ring->tr_ready_wrap) 2117 tx_ready_pos |= ET_TX_READY_POS_WRAP; 2118 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos); 2119 2120 error = 0; 2121 back: 2122 if (error) { 2123 m_freem(m); 2124 *m0 = NULL; 2125 } 2126 return error; 2127 } 2128 2129 static void 2130 et_txeof(struct et_softc *sc, int start) 2131 { 2132 struct ifnet *ifp = &sc->arpcom.ac_if; 2133 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 2134 struct et_txbuf_data *tbd = &sc->sc_tx_data; 2135 uint32_t tx_done; 2136 int end, wrap; 2137 2138 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) 2139 return; 2140 2141 if (tbd->tbd_used == 0) 2142 return; 2143 2144 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS); 2145 end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX); 2146 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 
1 : 0; 2147 2148 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) { 2149 struct et_txbuf *tb; 2150 2151 KKASSERT(tbd->tbd_start_index < ET_TX_NDESC); 2152 tb = &tbd->tbd_buf[tbd->tbd_start_index]; 2153 2154 bzero(&tx_ring->tr_desc[tbd->tbd_start_index], 2155 sizeof(struct et_txdesc)); 2156 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 2157 BUS_DMASYNC_PREWRITE); 2158 2159 if (tb->tb_mbuf != NULL) { 2160 bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap); 2161 m_freem(tb->tb_mbuf); 2162 tb->tb_mbuf = NULL; 2163 ifp->if_opackets++; 2164 } 2165 2166 if (++tbd->tbd_start_index == ET_TX_NDESC) { 2167 tbd->tbd_start_index = 0; 2168 tbd->tbd_start_wrap ^= 1; 2169 } 2170 2171 KKASSERT(tbd->tbd_used > 0); 2172 tbd->tbd_used--; 2173 } 2174 2175 if (tbd->tbd_used == 0) 2176 ifp->if_timer = 0; 2177 if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC) 2178 ifp->if_flags &= ~IFF_OACTIVE; 2179 2180 if (start) 2181 if_devstart(ifp); 2182 } 2183 2184 static void 2185 et_tick(void *xsc) 2186 { 2187 struct et_softc *sc = xsc; 2188 struct ifnet *ifp = &sc->arpcom.ac_if; 2189 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2190 2191 lwkt_serialize_enter(ifp->if_serializer); 2192 2193 mii_tick(mii); 2194 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 && 2195 (mii->mii_media_status & IFM_ACTIVE) && 2196 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2197 if_printf(ifp, "Link up, enable TX/RX\n"); 2198 if (et_enable_txrx(sc, 0) == 0) 2199 if_devstart(ifp); 2200 } 2201 callout_reset(&sc->sc_tick, hz, et_tick, sc); 2202 2203 lwkt_serialize_exit(ifp->if_serializer); 2204 } 2205 2206 static int 2207 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init) 2208 { 2209 return et_newbuf(rbd, buf_idx, init, MCLBYTES); 2210 } 2211 2212 static int 2213 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init) 2214 { 2215 return et_newbuf(rbd, buf_idx, init, MHLEN); 2216 } 2217 2218 static int 2219 et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int 
init, int len0)
{
	/*
	 * (Tail of the standard-size RX buffer allocator; the function
	 * head is above this chunk.)  Allocate a regular RX mbuf of at
	 * most 'len0' bytes, DMA-load it through a temporary map, and
	 * install it at ring slot 'buf_idx'.  On any failure after init
	 * time we fall through to 'back' so the descriptor is re-armed
	 * with the old buffer's physical address and RX can continue.
	 */
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	struct et_dmamap_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t dmap;
	int error, len;

	KASSERT(!rbd->rbd_jumbo, ("calling %s with jumbo ring\n", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	/* May sleep only at attach/init time (MB_WAIT vs MB_DONTWAIT). */
	m = m_getl(len0, init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR, &len);
	if (m == NULL) {
		error = ENOBUFS;

		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "m_getl failed, size %d\n", len0);
			return error;
		} else {
			/* Keep the old mbuf; just rewrite the descriptor. */
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try load RX mbuf into temporary DMA tag
	 */
	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap, m,
				     et_dma_buf_addr, &ctx,
				     init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error || ctx.nsegs == 0) {
		if (!error) {
			/* Loaded, but into more than one segment: reject. */
			bus_dmamap_unload(sc->sc_mbuf_dtag,
					  sc->sc_mbuf_tmp_dmap);
			error = EFBIG;
			if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
		}
		m_freem(m);

		if (init) {
			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
			return error;
		} else {
			goto back;
		}
	}

	if (!init) {
		/* Tear down the DMA state of the buffer being replaced. */
		bus_dmamap_sync(sc->sc_mbuf_dtag, rb->rb_dmap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_mbuf_dtag, rb->rb_dmap);
	}
	rb->rb_mbuf = m;
	rb->rb_paddr = seg.ds_addr;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	/* Always re-arm the descriptor with whatever buffer rb holds now. */
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}

/*
 * Sysctl handler for hw.etX.rx_intr_npkts: number of received packets
 * that must accumulate before an RX interrupt is raised.  Rejects
 * non-positive values; pushes the new value to the chip only while the
 * interface is running.  Serialized against the interrupt handler via
 * the ifnet serializer.
 */
static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_npkts;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;	/* read-only access, or copyin error */
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_npkts != v) {
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
		sc->sc_rx_intr_npkts = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

/*
 * Sysctl handler for hw.etX.rx_intr_delay: RX interrupt moderation
 * delay.  Same validation and locking pattern as
 * et_sysctl_rx_intr_npkts() above.
 */
static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_delay;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;	/* read-only access, or copyin error */
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_delay != v) {
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
		sc->sc_rx_intr_delay = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

/*
 * Program the MAC (MAC_CFG2/MAC_CTRL) to match the media the PHY
 * negotiated: GMII mode for 1000baseT, MII mode otherwise, and
 * full- vs half-duplex from the active media flags.  CTRL is written
 * before CFG2, preserving the original ordering.
 */
static void
et_setmedia(struct et_softc *sc)
{
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t cfg2, ctrl;

	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
		  ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	/* Always enable length check, CRC append and pad-with-CRC. */
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
		__SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;	/* gigabit half-duplex bit */

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}

/*
 * Allocate one contiguous ET_JUMBO_MEM_SIZE DMA area and carve it into
 * ET_JSLOTS jumbo buffer slots of ET_JLEN bytes each; all slots start
 * on the free list.  Returns 0 on success or the et_dma_mem_create()
 * error.  Slot bookkeeping is protected by jd_serializer.
 */
static int
et_jumbo_mem_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;
	bus_addr_t paddr;
	uint8_t *buf;
	int error, i;

	error = et_dma_mem_create(dev, ET_JUMBO_MEM_SIZE, &jd->jd_dtag,
				  &jd->jd_buf, &paddr, &jd->jd_dmap);
	if (error) {
		device_printf(dev, "can't create jumbo DMA stuffs\n");
		return error;
	}

	jd->jd_slots = kmalloc(sizeof(*jd->jd_slots) * ET_JSLOTS, M_DEVBUF,
			       M_WAITOK | M_ZERO);
	lwkt_serialize_init(&jd->jd_serializer);
	SLIST_INIT(&jd->jd_free_slots);

	/* Walk the area, recording each slot's KVA and bus address. */
	buf = jd->jd_buf;
	for (i = 0; i < ET_JSLOTS; ++i) {
		struct et_jslot *jslot = &jd->jd_slots[i];

		jslot->jslot_data = jd;
		jslot->jslot_buf = buf;
		jslot->jslot_paddr = paddr;
		jslot->jslot_inuse = 0;
		jslot->jslot_index = i;
		SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot, jslot_link);

		buf += ET_JLEN;
		paddr += ET_JLEN;
	}
	return 0;
}

/*
 * Release the jumbo slot array and the backing DMA memory.  Must only
 * be called when jumbo support was set up (asserted via ET_FLAG_JUMBO).
 */
static void
et_jumbo_mem_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;

	KKASSERT(sc->sc_flags & ET_FLAG_JUMBO);

	kfree(jd->jd_slots, M_DEVBUF);
	et_dma_mem_destroy(jd->jd_dtag, jd->jd_buf, jd->jd_dmap);
}

/*
 * Pop a jumbo slot off the free list and mark it in use (refcount 1).
 * Returns NULL when no slot is available.
 */
static struct et_jslot *
et_jalloc(struct et_jumbo_data *jd)
{
	struct et_jslot *jslot;

	lwkt_serialize_enter(&jd->jd_serializer);

	jslot = SLIST_FIRST(&jd->jd_free_slots);
	if (jslot) {
		SLIST_REMOVE_HEAD(&jd->jd_free_slots, jslot_link);
		jslot->jslot_inuse = 1;
	}

	lwkt_serialize_exit(&jd->jd_serializer);
	return jslot;
}

/*
 * mbuf external-storage free callback (m_ext.ext_free): drop one
 * reference on the jumbo slot and return it to the free list when the
 * last reference is gone.  Panics on a stale or already-free slot.
 *
 * NOTE(review): jslot_inuse is read here before the serializer is
 * entered, while et_jref() bumps it with an atomic op and no
 * serializer — the sanity pre-check can race with a concurrent
 * ref/free.  Presumably benign given how the network stack uses these
 * callbacks, but worth confirming.
 */
static void
et_jfree(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	if (&jd->jd_slots[jslot->jslot_index] != jslot) {
		panic("%s wrong jslot!?\n", __func__);
	} else if (jslot->jslot_inuse == 0) {
		panic("%s jslot already freed\n", __func__);
	} else {
		lwkt_serialize_enter(&jd->jd_serializer);

		atomic_subtract_int(&jslot->jslot_inuse, 1);
		if (jslot->jslot_inuse == 0) {
			SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot,
					  jslot_link);
		}

		lwkt_serialize_exit(&jd->jd_serializer);
	}
}

/*
 * mbuf external-storage reference callback (m_ext.ext_ref): take an
 * additional reference on an in-use jumbo slot.  Panics on a stale or
 * already-free slot.
 */
static void
et_jref(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	if (&jd->jd_slots[jslot->jslot_index] != jslot)
		panic("%s wrong jslot!?\n", __func__);
	else if (jslot->jslot_inuse == 0)
		panic("%s jslot already freed\n", __func__);
	else
		atomic_add_int(&jslot->jslot_inuse, 1);
}

/*
 * Allocate a jumbo RX buffer for ring slot 'buf_idx': get an mbuf
 * header, attach a jumbo slot as M_EXT external storage (freed via
 * et_jfree/et_jref), and record its bus address.  Like the standard
 * path, failures after init time fall through to 'back' so the
 * descriptor is re-armed with the previous buffer.
 */
static int
et_newbuf_jumbo(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	struct et_jslot *jslot;
	int error;

	KASSERT(rbd->rbd_jumbo, ("calling %s with non-jumbo ring\n", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	error = ENOBUFS;

	MGETHDR(m, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL) {
		if (init) {
			if_printf(&sc->arpcom.ac_if, "MGETHDR failed\n");
			return error;
		} else {
			goto back;
		}
	}

	jslot = et_jalloc(&sc->sc_jumbo_data);
	if (jslot == NULL) {
		m_freem(m);

		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "jslot allocation failed\n");
			return error;
		} else {
			goto back;
		}
	}

	/* Attach the jumbo slot as external storage. */
	m->m_ext.ext_arg = jslot;
	m->m_ext.ext_buf = jslot->jslot_buf;
	m->m_ext.ext_free = et_jfree;
	m->m_ext.ext_ref = et_jref;
	m->m_ext.ext_size = ET_JUMBO_FRAMELEN;
	m->m_flags |= M_EXT;
	m->m_data = m->m_ext.ext_buf;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	rb->rb_mbuf = m;
	rb->rb_paddr = jslot->jslot_paddr;

	error = 0;
back:
	/* Always re-arm the descriptor with whatever buffer rb holds now. */
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}

/*
 * Write RX descriptor 'buf_idx' of the given ring: split 'paddr' into
 * hi/lo halves, stamp the buffer index into the control word, and sync
 * the descriptor ring for device read (PREWRITE).
 */
static void
et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr)
{
	struct et_rxdesc_ring *rx_ring = rbd->rbd_ring;
	struct et_rxdesc *desc;

	KKASSERT(buf_idx < ET_RX_NDESC);
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(paddr);
	desc->rd_addr_lo = ET_ADDR_LO(paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);

	bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
			BUS_DMASYNC_PREWRITE);
}