1 /* 2 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Sepherosa Ziehau <sepherosa@gmail.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 35 #include <sys/param.h> 36 #include <sys/bitops.h> 37 #include <sys/endian.h> 38 #include <sys/kernel.h> 39 #include <sys/bus.h> 40 #include <sys/interrupt.h> 41 #include <sys/malloc.h> 42 #include <sys/proc.h> 43 #include <sys/rman.h> 44 #include <sys/serialize.h> 45 #include <sys/socket.h> 46 #include <sys/sockio.h> 47 #include <sys/sysctl.h> 48 49 #include <net/ethernet.h> 50 #include <net/if.h> 51 #include <net/bpf.h> 52 #include <net/if_arp.h> 53 #include <net/if_dl.h> 54 #include <net/if_media.h> 55 #include <net/ifq_var.h> 56 #include <net/vlan/if_vlan_var.h> 57 58 #include <dev/netif/mii_layer/miivar.h> 59 60 #include <bus/pci/pcireg.h> 61 #include <bus/pci/pcivar.h> 62 #include <bus/pci/pcidevs.h> 63 64 #include <dev/netif/et/if_etreg.h> 65 #include <dev/netif/et/if_etvar.h> 66 67 #include "miibus_if.h" 68 69 static int et_probe(device_t); 70 static int et_attach(device_t); 71 static int et_detach(device_t); 72 static int et_shutdown(device_t); 73 74 static int et_miibus_readreg(device_t, int, int); 75 static int et_miibus_writereg(device_t, int, int, int); 76 static void et_miibus_statchg(device_t); 77 78 static void et_init(void *); 79 static int et_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 80 static void et_start(struct ifnet *); 81 static void et_watchdog(struct ifnet *); 82 static int et_ifmedia_upd(struct ifnet *); 83 static void et_ifmedia_sts(struct ifnet *, struct ifmediareq *); 84 85 static int et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS); 86 static int et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS); 87 88 static void et_intr(void *); 89 static void et_enable_intrs(struct et_softc *, uint32_t); 90 static void et_disable_intrs(struct et_softc *); 91 static void et_rxeof(struct et_softc *); 92 static void et_txeof(struct et_softc *, int); 93 94 static int et_dma_alloc(device_t); 95 static void et_dma_free(device_t); 96 static void et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t); 97 static int et_dma_mbuf_create(device_t); 98 static void et_dma_mbuf_destroy(device_t, int, const int[]); 99 static int et_jumbo_mem_alloc(device_t); 100 static void et_jumbo_mem_free(device_t); 101 static int et_init_tx_ring(struct et_softc *); 102 static int et_init_rx_ring(struct et_softc *); 103 static void et_free_tx_ring(struct et_softc *); 104 static void et_free_rx_ring(struct et_softc *); 105 static int et_encap(struct et_softc *, struct mbuf **); 106 static struct et_jslot * 107 et_jalloc(struct et_jumbo_data *); 108 static void et_jfree(void *); 109 static void et_jref(void *); 110 static int et_newbuf(struct et_rxbuf_data *, int, int, int); 111 static int et_newbuf_cluster(struct et_rxbuf_data *, int, int); 112 static int et_newbuf_hdr(struct et_rxbuf_data *, int, int); 113 static int et_newbuf_jumbo(struct et_rxbuf_data *, int, int); 114 115 static void et_stop(struct et_softc *); 116 static int et_chip_init(struct et_softc *); 117 static void et_chip_attach(struct et_softc *); 118 static void et_init_mac(struct et_softc *); 119 static void et_init_rxmac(struct et_softc *); 120 static void et_init_txmac(struct et_softc *); 121 static int et_init_rxdma(struct et_softc *); 122 static int et_init_txdma(struct et_softc *); 123 static int et_start_rxdma(struct et_softc *); 124 static int et_start_txdma(struct et_softc *); 125 static int et_stop_rxdma(struct et_softc *); 126 static int et_stop_txdma(struct et_softc *); 127 static int et_enable_txrx(struct et_softc *, int); 128 static void et_reset(struct et_softc *); 129 static int et_bus_config(device_t); 130 static void et_get_eaddr(device_t, uint8_t[]); 131 static void et_setmulti(struct et_softc *); 132 static void et_tick(void *); 133 static void et_setmedia(struct et_softc *); 134 static void et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t); 135 136 static const struct et_dev { 137 uint16_t vid; 138 uint16_t did; 139 const char *desc; 140 } et_devices[] = { 141 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310, 142 "Agere ET1310 Gigabit Ethernet" }, 143 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST, 144 "Agere ET1310 Fast Ethernet" }, 145 { 0, 0, NULL } 146 }; 147 148 static device_method_t et_methods[] = { 149 DEVMETHOD(device_probe, et_probe), 150 DEVMETHOD(device_attach, et_attach), 151 DEVMETHOD(device_detach, et_detach), 152 DEVMETHOD(device_shutdown, et_shutdown), 153 #if 0 154 DEVMETHOD(device_suspend, et_suspend), 155 DEVMETHOD(device_resume, et_resume), 156 #endif 157 158 DEVMETHOD(bus_print_child, bus_generic_print_child), 159 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 160 161 DEVMETHOD(miibus_readreg, et_miibus_readreg), 162 DEVMETHOD(miibus_writereg, et_miibus_writereg), 163 DEVMETHOD(miibus_statchg, et_miibus_statchg), 164 165 { 0, 0 } 166 }; 167 168 static driver_t et_driver = { 169 "et", 170 et_methods, 171 sizeof(struct et_softc) 172 }; 173 174 static devclass_t et_devclass; 175 176 DECLARE_DUMMY_MODULE(if_et); 177 MODULE_DEPEND(if_et, miibus, 1, 1, 1); 178 DRIVER_MODULE(if_et, pci, et_driver, et_devclass, NULL, NULL); 179 DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, NULL, NULL); 180 181 static int et_rx_intr_npkts = 129; 182 static int et_rx_intr_delay = 25; /* x4 usec */ 183 static int et_tx_intr_nsegs = 256; 184 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */ 185 186 TUNABLE_INT("hw.et.timer", &et_timer); 187 TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts); 188 TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay); 189 TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs); 190 191 struct et_bsize { 192 int bufsize; 193 int jumbo; 194 et_newbuf_t newbuf; 195 }; 196 197 static const struct et_bsize et_bufsize_std[ET_RX_NRING] = { 198 { .bufsize = ET_RXDMA_CTRL_RING0_128, .jumbo = 0, 199 .newbuf = et_newbuf_hdr }, 200 { .bufsize = ET_RXDMA_CTRL_RING1_2048, .jumbo = 0, 201 .newbuf = et_newbuf_cluster }, 202 }; 203 204 static const struct et_bsize et_bufsize_jumbo[ET_RX_NRING] = { 205 { .bufsize = ET_RXDMA_CTRL_RING0_128, .jumbo = 0, 206 .newbuf = et_newbuf_hdr }, 207 { .bufsize = ET_RXDMA_CTRL_RING1_16384, .jumbo = 1, 208 .newbuf = et_newbuf_jumbo }, 209 }; 210 211 static int 212 et_probe(device_t dev) 213 { 214 const struct et_dev *d; 215 uint16_t did, vid; 216 217 vid = pci_get_vendor(dev); 218 did = pci_get_device(dev); 219 220 for (d = et_devices; d->desc != NULL; ++d) { 221 if (vid == d->vid && did == d->did) { 222 device_set_desc(dev, d->desc); 223 return 0; 224 } 225 } 226 return ENXIO; 227 } 228 229 static int 230 et_attach(device_t dev) 231 { 232 struct et_softc *sc = device_get_softc(dev); 233 struct ifnet *ifp = &sc->arpcom.ac_if; 234 uint8_t eaddr[ETHER_ADDR_LEN]; 235 int error; 236 237 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 238 callout_init(&sc->sc_tick); 239 240 /* 241 * Initialize tunables 242 */ 243 sc->sc_rx_intr_npkts = et_rx_intr_npkts; 244 sc->sc_rx_intr_delay = et_rx_intr_delay; 245 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs; 246 sc->sc_timer = et_timer; 247 248 #ifndef BURN_BRIDGES 249 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 250 uint32_t irq, mem; 251 252 irq = pci_read_config(dev, PCIR_INTLINE, 4); 253 mem = pci_read_config(dev, ET_PCIR_BAR, 4); 254 255 device_printf(dev, "chip is in D%d power mode " 256 "-- setting to D0\n", pci_get_powerstate(dev)); 257 258 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 259 260 pci_write_config(dev, PCIR_INTLINE, irq, 4); 261 pci_write_config(dev, ET_PCIR_BAR, mem, 4); 262 } 263 #endif /* !BURN_BRIDGE */ 264 265 /* Enable bus mastering */ 266 pci_enable_busmaster(dev); 267 268 /* 269 * Allocate IO memory 270 */ 271 sc->sc_mem_rid = ET_PCIR_BAR; 272 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 273 &sc->sc_mem_rid, RF_ACTIVE); 274 if (sc->sc_mem_res == NULL) { 275 device_printf(dev, "can't allocate IO memory\n"); 276 return ENXIO; 277 } 278 sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res); 279 sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res); 280 281 /* 282 * Allocate IRQ 283 */ 284 sc->sc_irq_rid = 0; 285 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 286 &sc->sc_irq_rid, 287 RF_SHAREABLE | RF_ACTIVE); 288 if (sc->sc_irq_res == NULL) { 289 device_printf(dev, "can't allocate irq\n"); 290 error = ENXIO; 291 goto fail; 292 } 293 294 /* 295 * Create sysctl tree 296 */ 297 sysctl_ctx_init(&sc->sc_sysctl_ctx); 298 sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx, 299 SYSCTL_STATIC_CHILDREN(_hw), 300 OID_AUTO, 301 device_get_nameunit(dev), 302 CTLFLAG_RD, 0, ""); 303 if (sc->sc_sysctl_tree == NULL) { 304 device_printf(dev, "can't add sysctl node\n"); 305 error = ENXIO; 306 goto fail; 307 } 308 309 SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx, 310 SYSCTL_CHILDREN(sc->sc_sysctl_tree), 311 OID_AUTO, "rx_intr_npkts", CTLTYPE_INT | CTLFLAG_RW, 312 sc, 0, et_sysctl_rx_intr_npkts, "I", 313 "RX IM, # packets per RX interrupt"); 314 SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx, 315 SYSCTL_CHILDREN(sc->sc_sysctl_tree), 316 OID_AUTO, "rx_intr_delay", CTLTYPE_INT | CTLFLAG_RW, 317 sc, 0, et_sysctl_rx_intr_delay, "I", 318 "RX IM, RX interrupt delay (x10 usec)"); 319 SYSCTL_ADD_INT(&sc->sc_sysctl_ctx, 320 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO, 321 "tx_intr_nsegs", CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0, 322 "TX IM, # segments per TX interrupt"); 323 SYSCTL_ADD_UINT(&sc->sc_sysctl_ctx, 324 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO, 325 "timer", CTLFLAG_RW, &sc->sc_timer, 0, 326 "TX timer"); 327 328 error = et_bus_config(dev); 329 if (error) 330 goto fail; 331 332 et_get_eaddr(dev, eaddr); 333 334 CSR_WRITE_4(sc, ET_PM, 335 ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE); 336 337 et_reset(sc); 338 339 et_disable_intrs(sc); 340 341 error = et_dma_alloc(dev); 342 if (error) 343 goto fail; 344 345 ifp->if_softc = sc; 346 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 347 ifp->if_init = et_init; 348 ifp->if_ioctl = et_ioctl; 349 ifp->if_start = et_start; 350 ifp->if_watchdog = et_watchdog; 351 ifp->if_mtu = ETHERMTU; 352 ifp->if_capabilities = IFCAP_VLAN_MTU; 353 ifp->if_capenable = ifp->if_capabilities; 354 ifq_set_maxlen(&ifp->if_snd, ET_TX_NDESC); 355 ifq_set_ready(&ifp->if_snd); 356 357 et_chip_attach(sc); 358 359 error = mii_phy_probe(dev, &sc->sc_miibus, 360 et_ifmedia_upd, et_ifmedia_sts); 361 if (error) { 362 device_printf(dev, "can't probe any PHY\n"); 363 goto fail; 364 } 365 366 ether_ifattach(ifp, eaddr, NULL); 367 368 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, et_intr, sc, 369 &sc->sc_irq_handle, ifp->if_serializer); 370 if (error) { 371 ether_ifdetach(ifp); 372 device_printf(dev, "can't setup intr\n"); 373 goto fail; 374 } 375 376 ifp->if_cpuid = rman_get_cpuid(sc->sc_irq_res); 377 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 378 379 return 0; 380 fail: 381 et_detach(dev); 382 return error; 383 } 384 385 static int 386 et_detach(device_t dev) 387 { 388 struct et_softc *sc = device_get_softc(dev); 389 390 if (device_is_attached(dev)) { 391 struct ifnet *ifp = &sc->arpcom.ac_if; 392 393 lwkt_serialize_enter(ifp->if_serializer); 394 et_stop(sc); 395 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle); 396 lwkt_serialize_exit(ifp->if_serializer); 397 398 ether_ifdetach(ifp); 399 } 400 401 if (sc->sc_sysctl_tree != NULL) 402 sysctl_ctx_free(&sc->sc_sysctl_ctx); 403 404 if (sc->sc_miibus != NULL) 405 device_delete_child(dev, sc->sc_miibus); 406 bus_generic_detach(dev); 407 408 if (sc->sc_irq_res != NULL) { 409 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, 410 sc->sc_irq_res); 411 } 412 413 if (sc->sc_mem_res != NULL) { 414 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, 415 sc->sc_mem_res); 416 } 417 418 et_dma_free(dev); 419 420 return 0; 421 } 422 423 static int 424 et_shutdown(device_t dev) 425 { 426 struct et_softc *sc = device_get_softc(dev); 427 struct ifnet *ifp = &sc->arpcom.ac_if; 428 429 lwkt_serialize_enter(ifp->if_serializer); 430 et_stop(sc); 431 lwkt_serialize_exit(ifp->if_serializer); 432 return 0; 433 } 434 435 static int 436 et_miibus_readreg(device_t dev, int phy, int reg) 437 { 438 struct et_softc *sc = device_get_softc(dev); 439 uint32_t val; 440 int i, ret; 441 442 /* Stop any pending operations */ 443 CSR_WRITE_4(sc, ET_MII_CMD, 0); 444 445 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 446 __SHIFTIN(reg, ET_MII_ADDR_REG); 447 CSR_WRITE_4(sc, ET_MII_ADDR, val); 448 449 /* Start reading */ 450 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ); 451 452 #define NRETRY 50 453 454 for (i = 0; i < NRETRY; ++i) { 455 val = CSR_READ_4(sc, ET_MII_IND); 456 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0) 457 break; 458 DELAY(50); 459 } 460 if (i == NRETRY) { 461 if_printf(&sc->arpcom.ac_if, 462 "read phy %d, reg %d timed out\n", phy, reg); 463 ret = 0; 464 goto back; 465 } 466 467 #undef NRETRY 468 469 val = CSR_READ_4(sc, ET_MII_STAT); 470 ret = __SHIFTOUT(val, ET_MII_STAT_VALUE); 471 472 back: 473 /* Make sure that the current operation is stopped */ 474 CSR_WRITE_4(sc, ET_MII_CMD, 0); 475 return ret; 476 } 477 478 static int 479 et_miibus_writereg(device_t dev, int phy, int reg, int val0) 480 { 481 struct et_softc *sc = device_get_softc(dev); 482 uint32_t val; 483 int i; 484 485 /* Stop any pending operations */ 486 CSR_WRITE_4(sc, ET_MII_CMD, 0); 487 488 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 489 __SHIFTIN(reg, ET_MII_ADDR_REG); 490 CSR_WRITE_4(sc, ET_MII_ADDR, val); 491 492 /* Start writing */ 493 CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE)); 494 495 #define NRETRY 100 496 497 for (i = 0; i < NRETRY; ++i) { 498 val = CSR_READ_4(sc, ET_MII_IND); 499 if ((val & ET_MII_IND_BUSY) == 0) 500 break; 501 DELAY(50); 502 } 503 if (i == NRETRY) { 504 if_printf(&sc->arpcom.ac_if, 505 "write phy %d, reg %d timed out\n", phy, reg); 506 et_miibus_readreg(dev, phy, reg); 507 } 508 509 #undef NRETRY 510 511 /* Make sure that the current operation is stopped */ 512 CSR_WRITE_4(sc, ET_MII_CMD, 0); 513 return 0; 514 } 515 516 static void 517 et_miibus_statchg(device_t dev) 518 { 519 et_setmedia(device_get_softc(dev)); 520 } 521 522 static int 523 et_ifmedia_upd(struct ifnet *ifp) 524 { 525 struct et_softc *sc = ifp->if_softc; 526 struct mii_data *mii = device_get_softc(sc->sc_miibus); 527 528 if (mii->mii_instance != 0) { 529 struct mii_softc *miisc; 530 531 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 532 mii_phy_reset(miisc); 533 } 534 mii_mediachg(mii); 535 536 return 0; 537 } 538 539 static void 540 et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 541 { 542 struct et_softc *sc = ifp->if_softc; 543 struct mii_data *mii = device_get_softc(sc->sc_miibus); 544 545 mii_pollstat(mii); 546 ifmr->ifm_active = mii->mii_media_active; 547 ifmr->ifm_status = mii->mii_media_status; 548 } 549 550 static void 551 et_stop(struct et_softc *sc) 552 { 553 struct ifnet *ifp = &sc->arpcom.ac_if; 554 555 ASSERT_SERIALIZED(ifp->if_serializer); 556 557 callout_stop(&sc->sc_tick); 558 559 et_stop_rxdma(sc); 560 et_stop_txdma(sc); 561 562 et_disable_intrs(sc); 563 564 et_free_tx_ring(sc); 565 et_free_rx_ring(sc); 566 567 et_reset(sc); 568 569 sc->sc_tx = 0; 570 sc->sc_tx_intr = 0; 571 sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED; 572 573 ifp->if_timer = 0; 574 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 575 } 576 577 static int 578 et_bus_config(device_t dev) 579 { 580 uint32_t val, max_plsz; 581 uint16_t ack_latency, replay_timer; 582 583 /* 584 * Test whether EEPROM is valid 585 * NOTE: Read twice to get the correct value 586 */ 587 pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1); 588 val = pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1); 589 if (val & ET_PCIM_EEPROM_STATUS_ERROR) { 590 device_printf(dev, "EEPROM status error 0x%02x\n", val); 591 return ENXIO; 592 } 593 594 /* TODO: LED */ 595 596 /* 597 * Configure ACK latency and replay timer according to 598 * max playload size 599 */ 600 val = pci_read_config(dev, ET_PCIR_DEVICE_CAPS, 4); 601 max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ; 602 603 switch (max_plsz) { 604 case ET_PCIV_DEVICE_CAPS_PLSZ_128: 605 ack_latency = ET_PCIV_ACK_LATENCY_128; 606 replay_timer = ET_PCIV_REPLAY_TIMER_128; 607 break; 608 609 case ET_PCIV_DEVICE_CAPS_PLSZ_256: 610 ack_latency = ET_PCIV_ACK_LATENCY_256; 611 replay_timer = ET_PCIV_REPLAY_TIMER_256; 612 break; 613 614 default: 615 ack_latency = pci_read_config(dev, ET_PCIR_ACK_LATENCY, 2); 616 replay_timer = pci_read_config(dev, ET_PCIR_REPLAY_TIMER, 2); 617 device_printf(dev, "ack latency %u, replay timer %u\n", 618 ack_latency, replay_timer); 619 break; 620 } 621 if (ack_latency != 0) { 622 pci_write_config(dev, ET_PCIR_ACK_LATENCY, ack_latency, 2); 623 pci_write_config(dev, ET_PCIR_REPLAY_TIMER, replay_timer, 2); 624 } 625 626 /* 627 * Set L0s and L1 latency timer to 2us 628 */ 629 val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2); 630 pci_write_config(dev, ET_PCIR_L0S_L1_LATENCY, val, 1); 631 632 /* 633 * Set max read request size to 2048 bytes 634 */ 635 val = pci_read_config(dev, ET_PCIR_DEVICE_CTRL, 2); 636 val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ; 637 val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K; 638 pci_write_config(dev, ET_PCIR_DEVICE_CTRL, val, 2); 639 640 return 0; 641 } 642 643 static void 644 et_get_eaddr(device_t dev, uint8_t eaddr[]) 645 { 646 uint32_t val; 647 int i; 648 649 val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4); 650 for (i = 0; i < 4; ++i) 651 eaddr[i] = (val >> (8 * i)) & 0xff; 652 653 val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2); 654 for (; i < ETHER_ADDR_LEN; ++i) 655 eaddr[i] = (val >> (8 * (i - 4))) & 0xff; 656 } 657 658 static void 659 et_reset(struct et_softc *sc) 660 { 661 CSR_WRITE_4(sc, ET_MAC_CFG1, 662 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 663 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 664 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 665 666 CSR_WRITE_4(sc, ET_SWRST, 667 ET_SWRST_TXDMA | ET_SWRST_RXDMA | 668 ET_SWRST_TXMAC | ET_SWRST_RXMAC | 669 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC); 670 671 CSR_WRITE_4(sc, ET_MAC_CFG1, 672 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 673 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC); 674 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 675 } 676 677 static void 678 et_disable_intrs(struct et_softc *sc) 679 { 680 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 681 } 682 683 static void 684 et_enable_intrs(struct et_softc *sc, uint32_t intrs) 685 { 686 CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs); 687 } 688 689 static int 690 et_dma_alloc(device_t dev) 691 { 692 struct et_softc *sc = device_get_softc(dev); 693 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 694 struct et_txstatus_data *txsd = &sc->sc_tx_status; 695 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 696 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 697 int i, error; 698 699 /* 700 * Create top level DMA tag 701 */ 702 error = bus_dma_tag_create(NULL, 1, 0, 703 BUS_SPACE_MAXADDR, 704 BUS_SPACE_MAXADDR, 705 NULL, NULL, 706 BUS_SPACE_MAXSIZE_32BIT, 707 0, 708 BUS_SPACE_MAXSIZE_32BIT, 709 0, &sc->sc_dtag); 710 if (error) { 711 device_printf(dev, "can't create DMA tag\n"); 712 return error; 713 } 714 715 /* 716 * Create TX ring DMA stuffs 717 */ 718 tx_ring->tr_desc = bus_dmamem_coherent_any(sc->sc_dtag, 719 ET_ALIGN, ET_TX_RING_SIZE, 720 BUS_DMA_WAITOK | BUS_DMA_ZERO, 721 &tx_ring->tr_dtag, &tx_ring->tr_dmap, 722 &tx_ring->tr_paddr); 723 if (tx_ring->tr_desc == NULL) { 724 device_printf(dev, "can't create TX ring DMA stuffs\n"); 725 return ENOMEM; 726 } 727 728 /* 729 * Create TX status DMA stuffs 730 */ 731 txsd->txsd_status = bus_dmamem_coherent_any(sc->sc_dtag, 732 ET_ALIGN, sizeof(uint32_t), 733 BUS_DMA_WAITOK | BUS_DMA_ZERO, 734 &txsd->txsd_dtag, &txsd->txsd_dmap, 735 &txsd->txsd_paddr); 736 if (txsd->txsd_status == NULL) { 737 device_printf(dev, "can't create TX status DMA stuffs\n"); 738 return ENOMEM; 739 } 740 741 /* 742 * Create DMA stuffs for RX rings 743 */ 744 for (i = 0; i < ET_RX_NRING; ++i) { 745 static const uint32_t rx_ring_posreg[ET_RX_NRING] = 746 { ET_RX_RING0_POS, ET_RX_RING1_POS }; 747 748 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 749 750 rx_ring->rr_desc = bus_dmamem_coherent_any(sc->sc_dtag, 751 ET_ALIGN, ET_RX_RING_SIZE, 752 BUS_DMA_WAITOK | BUS_DMA_ZERO, 753 &rx_ring->rr_dtag, &rx_ring->rr_dmap, 754 &rx_ring->rr_paddr); 755 if (rx_ring->rr_desc == NULL) { 756 device_printf(dev, "can't create DMA stuffs for " 757 "the %d RX ring\n", i); 758 return ENOMEM; 759 } 760 rx_ring->rr_posreg = rx_ring_posreg[i]; 761 } 762 763 /* 764 * Create RX stat ring DMA stuffs 765 */ 766 rxst_ring->rsr_stat = bus_dmamem_coherent_any(sc->sc_dtag, 767 ET_ALIGN, ET_RXSTAT_RING_SIZE, 768 BUS_DMA_WAITOK | BUS_DMA_ZERO, 769 &rxst_ring->rsr_dtag, &rxst_ring->rsr_dmap, 770 &rxst_ring->rsr_paddr); 771 if (rxst_ring->rsr_stat == NULL) { 772 device_printf(dev, "can't create RX stat ring DMA stuffs\n"); 773 return ENOMEM; 774 } 775 776 /* 777 * Create RX status DMA stuffs 778 */ 779 rxsd->rxsd_status = bus_dmamem_coherent_any(sc->sc_dtag, 780 ET_ALIGN, sizeof(struct et_rxstatus), 781 BUS_DMA_WAITOK | BUS_DMA_ZERO, 782 &rxsd->rxsd_dtag, &rxsd->rxsd_dmap, 783 &rxsd->rxsd_paddr); 784 if (rxsd->rxsd_status == NULL) { 785 device_printf(dev, "can't create RX status DMA stuffs\n"); 786 return ENOMEM; 787 } 788 789 /* 790 * Create mbuf DMA stuffs 791 */ 792 error = et_dma_mbuf_create(dev); 793 if (error) 794 return error; 795 796 /* 797 * Create jumbo buffer DMA stuffs 798 * NOTE: Allow it to fail 799 */ 800 if (et_jumbo_mem_alloc(dev) == 0) 801 sc->sc_flags |= ET_FLAG_JUMBO; 802 803 return 0; 804 } 805 806 static void 807 et_dma_free(device_t dev) 808 { 809 struct et_softc *sc = device_get_softc(dev); 810 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 811 struct et_txstatus_data *txsd = &sc->sc_tx_status; 812 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 813 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 814 int i, rx_done[ET_RX_NRING]; 815 816 /* 817 * Destroy TX ring DMA stuffs 818 */ 819 et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc, 820 tx_ring->tr_dmap); 821 822 /* 823 * Destroy TX status DMA stuffs 824 */ 825 et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status, 826 txsd->txsd_dmap); 827 828 /* 829 * Destroy DMA stuffs for RX rings 830 */ 831 for (i = 0; i < ET_RX_NRING; ++i) { 832 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 833 834 et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc, 835 rx_ring->rr_dmap); 836 } 837 838 /* 839 * Destroy RX stat ring DMA stuffs 840 */ 841 et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat, 842 rxst_ring->rsr_dmap); 843 844 /* 845 * Destroy RX status DMA stuffs 846 */ 847 et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status, 848 rxsd->rxsd_dmap); 849 850 /* 851 * Destroy mbuf DMA stuffs 852 */ 853 for (i = 0; i < ET_RX_NRING; ++i) 854 rx_done[i] = ET_RX_NDESC; 855 et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done); 856 857 /* 858 * Destroy jumbo buffer DMA stuffs 859 */ 860 if (sc->sc_flags & ET_FLAG_JUMBO) 861 et_jumbo_mem_free(dev); 862 863 /* 864 * Destroy top level DMA tag 865 */ 866 if (sc->sc_dtag != NULL) 867 bus_dma_tag_destroy(sc->sc_dtag); 868 } 869 870 static int 871 et_dma_mbuf_create(device_t dev) 872 { 873 struct et_softc *sc = device_get_softc(dev); 874 struct et_txbuf_data *tbd = &sc->sc_tx_data; 875 int i, error, rx_done[ET_RX_NRING]; 876 877 /* 878 * Create RX mbuf DMA tag 879 */ 880 error = bus_dma_tag_create(sc->sc_dtag, 1, 0, 881 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 882 NULL, NULL, 883 MCLBYTES, 1, MCLBYTES, 884 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, 885 &sc->sc_rxbuf_dtag); 886 if (error) { 887 device_printf(dev, "can't create RX mbuf DMA tag\n"); 888 return error; 889 } 890 891 /* 892 * Create spare DMA map for RX mbufs 893 */ 894 error = bus_dmamap_create(sc->sc_rxbuf_dtag, BUS_DMA_WAITOK, 895 &sc->sc_rxbuf_tmp_dmap); 896 if (error) { 897 device_printf(dev, "can't create spare mbuf DMA map\n"); 898 bus_dma_tag_destroy(sc->sc_rxbuf_dtag); 899 sc->sc_rxbuf_dtag = NULL; 900 return error; 901 } 902 903 /* 904 * Create DMA maps for RX mbufs 905 */ 906 bzero(rx_done, sizeof(rx_done)); 907 for (i = 0; i < ET_RX_NRING; ++i) { 908 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 909 int j; 910 911 for (j = 0; j < ET_RX_NDESC; ++j) { 912 error = bus_dmamap_create(sc->sc_rxbuf_dtag, 913 BUS_DMA_WAITOK, 914 &rbd->rbd_buf[j].rb_dmap); 915 if (error) { 916 device_printf(dev, "can't create %d RX mbuf " 917 "for %d RX ring\n", j, i); 918 rx_done[i] = j; 919 et_dma_mbuf_destroy(dev, 0, rx_done); 920 return error; 921 } 922 } 923 rx_done[i] = ET_RX_NDESC; 924 925 rbd->rbd_softc = sc; 926 rbd->rbd_ring = &sc->sc_rx_ring[i]; 927 } 928 929 /* 930 * Create TX mbuf DMA tag 931 */ 932 error = bus_dma_tag_create(sc->sc_dtag, 1, 0, 933 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 934 NULL, NULL, 935 ET_JUMBO_FRAMELEN, ET_NSEG_MAX, MCLBYTES, 936 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | 937 BUS_DMA_ONEBPAGE, 938 &sc->sc_txbuf_dtag); 939 if (error) { 940 device_printf(dev, "can't create TX mbuf DMA tag\n"); 941 return error; 942 } 943 944 /* 945 * Create DMA maps for TX mbufs 946 */ 947 for (i = 0; i < ET_TX_NDESC; ++i) { 948 error = bus_dmamap_create(sc->sc_txbuf_dtag, 949 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 950 &tbd->tbd_buf[i].tb_dmap); 951 if (error) { 952 device_printf(dev, "can't create %d TX mbuf " 953 "DMA map\n", i); 954 et_dma_mbuf_destroy(dev, i, rx_done); 955 return error; 956 } 957 } 958 959 return 0; 960 } 961 962 static void 963 et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[]) 964 { 965 struct et_softc *sc = device_get_softc(dev); 966 struct et_txbuf_data *tbd = &sc->sc_tx_data; 967 int i; 968 969 /* 970 * Destroy DMA tag and maps for RX mbufs 971 */ 972 if (sc->sc_rxbuf_dtag) { 973 for (i = 0; i < ET_RX_NRING; ++i) { 974 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 975 int j; 976 977 for (j = 0; j < rx_done[i]; ++j) { 978 struct et_rxbuf *rb = &rbd->rbd_buf[j]; 979 980 KASSERT(rb->rb_mbuf == NULL, 981 ("RX mbuf in %d RX ring is " 982 "not freed yet", i)); 983 bus_dmamap_destroy(sc->sc_rxbuf_dtag, 984 rb->rb_dmap); 985 } 986 } 987 bus_dmamap_destroy(sc->sc_rxbuf_dtag, sc->sc_rxbuf_tmp_dmap); 988 bus_dma_tag_destroy(sc->sc_rxbuf_dtag); 989 sc->sc_rxbuf_dtag = NULL; 990 } 991 992 /* 993 * Destroy DMA tag and maps for TX mbufs 994 */ 995 if (sc->sc_txbuf_dtag) { 996 for (i = 0; i < tx_done; ++i) { 997 struct et_txbuf *tb = &tbd->tbd_buf[i]; 998 999 KASSERT(tb->tb_mbuf == NULL, 1000 ("TX mbuf is not freed yet")); 1001 bus_dmamap_destroy(sc->sc_txbuf_dtag, tb->tb_dmap); 1002 } 1003 bus_dma_tag_destroy(sc->sc_txbuf_dtag); 1004 sc->sc_txbuf_dtag = NULL; 1005 } 1006 } 1007 1008 static void 1009 et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap) 1010 { 1011 if (dtag != NULL) { 1012 bus_dmamap_unload(dtag, dmap); 1013 bus_dmamem_free(dtag, addr, dmap); 1014 bus_dma_tag_destroy(dtag); 1015 } 1016 } 1017 1018 static void 1019 et_chip_attach(struct et_softc *sc) 1020 { 1021 uint32_t val; 1022 1023 /* 1024 * Perform minimal initialization 1025 */ 1026 1027 /* Disable loopback */ 1028 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1029 1030 /* Reset MAC */ 1031 CSR_WRITE_4(sc, ET_MAC_CFG1, 1032 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1033 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1034 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1035 1036 /* 1037 * Setup half duplex mode 1038 */ 1039 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 1040 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 1041 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 1042 ET_MAC_HDX_EXC_DEFER; 1043 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1044 1045 /* Clear MAC control */ 1046 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1047 1048 /* Reset MII */ 1049 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1050 1051 /* Bring MAC out of reset state */ 1052 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1053 1054 /* Enable memory controllers */ 1055 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1056 } 1057 1058 static void 1059 et_intr(void *xsc) 1060 { 1061 struct et_softc *sc = xsc; 1062 struct ifnet *ifp = &sc->arpcom.ac_if; 1063 uint32_t intrs; 1064 1065 ASSERT_SERIALIZED(ifp->if_serializer); 1066 1067 if ((ifp->if_flags & IFF_RUNNING) == 0) 1068 return; 1069 1070 et_disable_intrs(sc); 1071 1072 intrs = CSR_READ_4(sc, ET_INTR_STATUS); 1073 intrs &= ET_INTRS; 1074 if (intrs == 0) /* Not interested */ 1075 goto back; 1076 1077 if (intrs & ET_INTR_RXEOF) 1078 et_rxeof(sc); 1079 if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER)) 1080 et_txeof(sc, 1); 1081 if (intrs & ET_INTR_TIMER) 1082 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1083 back: 1084 et_enable_intrs(sc, ET_INTRS); 1085 } 1086 1087 static void 1088 et_init(void *xsc) 1089 { 1090 struct et_softc *sc = xsc; 1091 struct ifnet *ifp = &sc->arpcom.ac_if; 1092 const struct et_bsize *arr; 1093 int error, i; 1094 1095 ASSERT_SERIALIZED(ifp->if_serializer); 1096 1097 et_stop(sc); 1098 1099 arr = ET_FRAMELEN(ifp->if_mtu) < MCLBYTES ? 1100 et_bufsize_std : et_bufsize_jumbo; 1101 for (i = 0; i < ET_RX_NRING; ++i) { 1102 sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize; 1103 sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf; 1104 sc->sc_rx_data[i].rbd_jumbo = arr[i].jumbo; 1105 } 1106 1107 error = et_init_tx_ring(sc); 1108 if (error) 1109 goto back; 1110 1111 error = et_init_rx_ring(sc); 1112 if (error) 1113 goto back; 1114 1115 error = et_chip_init(sc); 1116 if (error) 1117 goto back; 1118 1119 error = et_enable_txrx(sc, 1); 1120 if (error) 1121 goto back; 1122 1123 et_enable_intrs(sc, ET_INTRS); 1124 1125 callout_reset(&sc->sc_tick, hz, et_tick, sc); 1126 1127 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1128 1129 ifp->if_flags |= IFF_RUNNING; 1130 ifp->if_flags &= ~IFF_OACTIVE; 1131 back: 1132 if (error) 1133 et_stop(sc); 1134 } 1135 1136 static int 1137 et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr) 1138 { 1139 struct et_softc *sc = ifp->if_softc; 1140 struct mii_data *mii = device_get_softc(sc->sc_miibus); 1141 struct ifreq *ifr = (struct ifreq *)data; 1142 int error = 0, max_framelen; 1143 1144 ASSERT_SERIALIZED(ifp->if_serializer); 1145 1146 switch (cmd) { 1147 case SIOCSIFFLAGS: 1148 if (ifp->if_flags & IFF_UP) { 1149 if (ifp->if_flags & IFF_RUNNING) { 1150 if ((ifp->if_flags ^ sc->sc_if_flags) & 1151 (IFF_ALLMULTI | IFF_PROMISC)) 1152 et_setmulti(sc); 1153 } else { 1154 et_init(sc); 1155 } 1156 } else { 1157 if (ifp->if_flags & IFF_RUNNING) 1158 et_stop(sc); 1159 } 1160 sc->sc_if_flags = ifp->if_flags; 1161 break; 1162 1163 case SIOCSIFMEDIA: 1164 case SIOCGIFMEDIA: 1165 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1166 break; 1167 1168 case SIOCADDMULTI: 1169 case SIOCDELMULTI: 1170 if (ifp->if_flags & IFF_RUNNING) 1171 et_setmulti(sc); 1172 break; 1173 1174 case SIOCSIFMTU: 1175 if (sc->sc_flags & ET_FLAG_JUMBO) 1176 max_framelen = ET_JUMBO_FRAMELEN; 1177 else 1178 max_framelen = MCLBYTES - 1; 1179 1180 if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) { 1181 error = EOPNOTSUPP; 1182 break; 1183 } 1184 1185 ifp->if_mtu = ifr->ifr_mtu; 1186 if (ifp->if_flags & IFF_RUNNING) 1187 et_init(sc); 1188 break; 1189 1190 default: 1191 error = ether_ioctl(ifp, cmd, data); 1192 break; 1193 } 1194 return error; 1195 } 1196 1197 static void 1198 et_start(struct ifnet *ifp) 1199 { 1200 struct et_softc *sc = ifp->if_softc; 1201 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1202 int trans, oactive; 1203 1204 ASSERT_SERIALIZED(ifp->if_serializer); 1205 1206 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) { 1207 ifq_purge(&ifp->if_snd); 1208 return; 1209 } 1210 1211 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1212 return; 1213 1214 oactive = 0; 1215 trans = 0; 1216 for (;;) { 1217 struct mbuf *m; 1218 int error; 1219 1220 if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) { 1221 if (oactive) { 1222 ifp->if_flags |= IFF_OACTIVE; 1223 break; 1224 } 1225 1226 et_txeof(sc, 0); 1227 oactive = 1; 1228 continue; 1229 } 1230 1231 m = ifq_dequeue(&ifp->if_snd, NULL); 1232 if (m == NULL) 1233 break; 1234 1235 error = et_encap(sc, &m); 1236 if (error) { 1237 ifp->if_oerrors++; 1238 KKASSERT(m == NULL); 1239 1240 if (error == EFBIG) { 1241 /* 1242 * Excessive fragmented packets 1243 */ 1244 if (oactive) { 1245 ifp->if_flags |= IFF_OACTIVE; 1246 break; 1247 } 1248 et_txeof(sc, 0); 1249 oactive = 1; 1250 } 1251 continue; 1252 } else { 1253 oactive = 0; 1254 } 1255 trans = 1; 1256 1257 BPF_MTAP(ifp, m); 1258 } 1259 1260 if (trans) 1261 ifp->if_timer = 5; 1262 } 1263 1264 static void 1265 et_watchdog(struct ifnet *ifp) 1266 { 1267 ASSERT_SERIALIZED(ifp->if_serializer); 1268 1269 if_printf(ifp, "watchdog timed out\n"); 1270 1271 ifp->if_init(ifp->if_softc); 1272 if_devstart(ifp); 1273 } 1274 1275 static int 1276 et_stop_rxdma(struct et_softc *sc) 1277 { 1278 CSR_WRITE_4(sc, ET_RXDMA_CTRL, 1279 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE); 1280 1281 DELAY(5); 1282 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) { 1283 if_printf(&sc->arpcom.ac_if, "can't stop RX DMA engine\n"); 1284 return ETIMEDOUT; 1285 } 1286 return 0; 1287 } 1288 1289 static int 1290 et_stop_txdma(struct et_softc *sc) 1291 { 1292 CSR_WRITE_4(sc, ET_TXDMA_CTRL, 1293 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT); 1294 return 0; 1295 } 1296 1297 static void 1298 et_free_tx_ring(struct et_softc *sc) 1299 { 1300 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1301 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1302 int i; 1303 1304 for (i = 0; i < ET_TX_NDESC; ++i) { 1305 struct et_txbuf *tb = &tbd->tbd_buf[i]; 1306 1307 if (tb->tb_mbuf != NULL) { 1308 bus_dmamap_unload(sc->sc_txbuf_dtag, tb->tb_dmap); 1309 m_freem(tb->tb_mbuf); 1310 tb->tb_mbuf = NULL; 1311 } 1312 } 1313 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1314 } 1315 1316 static void 1317 et_free_rx_ring(struct et_softc *sc) 1318 { 1319 int n; 1320 1321 for (n = 0; n < ET_RX_NRING; ++n) { 1322 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1323 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n]; 1324 int i; 1325 1326 for (i = 0; i < ET_RX_NDESC; ++i) { 1327 struct et_rxbuf *rb = &rbd->rbd_buf[i]; 1328 1329 if (rb->rb_mbuf != NULL) { 1330 if (!rbd->rbd_jumbo) { 1331 bus_dmamap_unload(sc->sc_rxbuf_dtag, 1332 rb->rb_dmap); 1333 } 1334 m_freem(rb->rb_mbuf); 1335 rb->rb_mbuf = NULL; 1336 } 1337 } 1338 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE); 1339 } 1340 } 1341 1342 static void 1343 et_setmulti(struct et_softc *sc) 1344 { 1345 struct ifnet *ifp = &sc->arpcom.ac_if; 1346 uint32_t hash[4] = { 0, 0, 0, 0 }; 1347 uint32_t rxmac_ctrl, pktfilt; 1348 struct ifmultiaddr *ifma; 1349 int i, count; 1350 1351 pktfilt = CSR_READ_4(sc, ET_PKTFILT); 1352 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL); 1353 1354 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST); 1355 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1356 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT; 1357 goto back; 1358 } 1359 1360 count = 0; 1361 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1362 uint32_t *hp, h; 1363 1364 if (ifma->ifma_addr->sa_family != AF_LINK) 1365 continue; 1366 1367 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 1368 ifma->ifma_addr), ETHER_ADDR_LEN); 1369 h = (h & 0x3f800000) >> 23; 1370 1371 hp = &hash[0]; 1372 if (h >= 32 && h < 64) { 1373 h -= 32; 1374 hp = &hash[1]; 1375 } else if (h >= 64 && h < 96) { 1376 h -= 64; 1377 hp = &hash[2]; 1378 } else if (h >= 96) { 1379 h -= 96; 1380 hp = &hash[3]; 1381 } 1382 *hp |= (1 << h); 1383 1384 ++count; 1385 } 1386 1387 for (i = 0; i < 4; ++i) 1388 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]); 1389 1390 if (count > 0) 1391 pktfilt |= ET_PKTFILT_MCAST; 1392 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT; 1393 back: 1394 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt); 1395 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl); 1396 } 1397 1398 static int 1399 et_chip_init(struct et_softc *sc) 1400 { 1401 struct ifnet *ifp = &sc->arpcom.ac_if; 1402 uint32_t rxq_end; 1403 int error, frame_len, rxmem_size; 1404 1405 /* 1406 * Split 16Kbytes internal memory between TX and RX 1407 * according to frame length. 1408 */ 1409 frame_len = ET_FRAMELEN(ifp->if_mtu); 1410 if (frame_len < 2048) { 1411 rxmem_size = ET_MEM_RXSIZE_DEFAULT; 1412 } else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) { 1413 rxmem_size = ET_MEM_SIZE / 2; 1414 } else { 1415 rxmem_size = ET_MEM_SIZE - 1416 roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT); 1417 } 1418 rxq_end = ET_QUEUE_ADDR(rxmem_size); 1419 1420 CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START); 1421 CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end); 1422 CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1); 1423 CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END); 1424 1425 /* No loopback */ 1426 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1427 1428 /* Clear MSI configure */ 1429 CSR_WRITE_4(sc, ET_MSI_CFG, 0); 1430 1431 /* Disable timer */ 1432 CSR_WRITE_4(sc, ET_TIMER, 0); 1433 1434 /* Initialize MAC */ 1435 et_init_mac(sc); 1436 1437 /* Enable memory controllers */ 1438 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1439 1440 /* Initialize RX MAC */ 1441 et_init_rxmac(sc); 1442 1443 /* Initialize TX MAC */ 1444 et_init_txmac(sc); 1445 1446 /* Initialize RX DMA engine */ 1447 error = et_init_rxdma(sc); 1448 if (error) 1449 return error; 1450 1451 /* Initialize TX DMA engine */ 1452 error = et_init_txdma(sc); 1453 if (error) 1454 return error; 1455 1456 return 0; 1457 } 1458 1459 static int 1460 et_init_tx_ring(struct et_softc *sc) 1461 { 1462 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1463 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1464 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1465 1466 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1467 1468 tbd->tbd_start_index = 0; 1469 tbd->tbd_start_wrap = 0; 1470 tbd->tbd_used = 0; 1471 1472 bzero(txsd->txsd_status, sizeof(uint32_t)); 1473 1474 return 0; 1475 } 1476 1477 static int 1478 et_init_rx_ring(struct et_softc *sc) 1479 { 1480 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1481 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1482 int n; 1483 1484 for (n = 0; n < ET_RX_NRING; ++n) { 1485 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1486 int i, error; 1487 1488 for (i = 0; i < ET_RX_NDESC; ++i) { 1489 error = rbd->rbd_newbuf(rbd, i, 1); 1490 if (error) { 1491 if_printf(&sc->arpcom.ac_if, "%d ring %d buf, " 1492 "newbuf failed: %d\n", n, i, error); 1493 return error; 1494 } 1495 } 1496 } 1497 1498 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus)); 1499 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE); 1500 1501 return 0; 1502 } 1503 1504 static int 1505 et_init_rxdma(struct et_softc *sc) 1506 { 1507 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1508 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1509 struct et_rxdesc_ring *rx_ring; 1510 int error; 1511 1512 error = et_stop_rxdma(sc); 1513 if (error) { 1514 if_printf(&sc->arpcom.ac_if, "can't init RX DMA engine\n"); 1515 return error; 1516 } 1517 1518 /* 1519 * Install RX status 1520 */ 1521 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr)); 1522 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr)); 1523 1524 /* 1525 * Install RX stat ring 1526 */ 1527 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr)); 1528 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr)); 1529 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1); 1530 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0); 1531 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1); 1532 1533 /* Match ET_RXSTAT_POS */ 1534 rxst_ring->rsr_index = 0; 1535 rxst_ring->rsr_wrap = 0; 1536 1537 /* 1538 * Install the 2nd RX descriptor ring 1539 */ 1540 rx_ring = &sc->sc_rx_ring[1]; 1541 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1542 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1543 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1); 1544 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP); 1545 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1546 1547 /* Match ET_RX_RING1_POS */ 1548 rx_ring->rr_index = 0; 1549 rx_ring->rr_wrap = 1; 1550 1551 /* 1552 * Install the 1st RX descriptor ring 1553 */ 1554 rx_ring = &sc->sc_rx_ring[0]; 1555 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1556 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1557 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1); 1558 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP); 1559 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1560 1561 /* Match ET_RX_RING0_POS */ 1562 rx_ring->rr_index = 0; 1563 rx_ring->rr_wrap = 1; 1564 1565 /* 1566 * RX intr moderation 1567 */ 1568 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts); 1569 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay); 1570 1571 return 0; 1572 } 1573 1574 static int 1575 et_init_txdma(struct et_softc *sc) 1576 { 1577 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1578 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1579 int error; 1580 1581 error = et_stop_txdma(sc); 1582 if (error) { 1583 if_printf(&sc->arpcom.ac_if, "can't init TX DMA engine\n"); 1584 return error; 1585 } 1586 1587 /* 1588 * Install TX descriptor ring 1589 */ 1590 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr)); 1591 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr)); 1592 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1); 1593 1594 /* 1595 * Install TX status 1596 */ 1597 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr)); 1598 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr)); 1599 1600 CSR_WRITE_4(sc, ET_TX_READY_POS, 0); 1601 1602 /* Match ET_TX_READY_POS */ 1603 tx_ring->tr_ready_index = 0; 1604 tx_ring->tr_ready_wrap = 0; 1605 1606 return 0; 1607 } 1608 1609 static void 1610 et_init_mac(struct et_softc *sc) 1611 { 1612 struct ifnet *ifp = &sc->arpcom.ac_if; 1613 const uint8_t *eaddr = IF_LLADDR(ifp); 1614 uint32_t val; 1615 1616 /* Reset MAC */ 1617 CSR_WRITE_4(sc, ET_MAC_CFG1, 1618 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1619 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1620 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1621 1622 /* 1623 * Setup inter packet gap 1624 */ 1625 val = __SHIFTIN(56, ET_IPG_NONB2B_1) | 1626 __SHIFTIN(88, ET_IPG_NONB2B_2) | 1627 __SHIFTIN(80, ET_IPG_MINIFG) | 1628 __SHIFTIN(96, ET_IPG_B2B); 1629 CSR_WRITE_4(sc, ET_IPG, val); 1630 1631 /* 1632 * Setup half duplex mode 1633 */ 1634 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 1635 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 1636 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 1637 ET_MAC_HDX_EXC_DEFER; 1638 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1639 1640 /* Clear MAC control */ 1641 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1642 1643 /* Reset MII */ 1644 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1645 1646 /* 1647 * Set MAC address 1648 */ 1649 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24); 1650 CSR_WRITE_4(sc, ET_MAC_ADDR1, val); 1651 val = (eaddr[0] << 16) | (eaddr[1] << 24); 1652 CSR_WRITE_4(sc, ET_MAC_ADDR2, val); 1653 1654 /* Set max frame length */ 1655 CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu)); 1656 1657 /* Bring MAC out of reset state */ 1658 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1659 } 1660 1661 static void 1662 et_init_rxmac(struct et_softc *sc) 1663 { 1664 struct ifnet *ifp = &sc->arpcom.ac_if; 1665 const uint8_t *eaddr = IF_LLADDR(ifp); 1666 uint32_t val; 1667 int i; 1668 1669 /* Disable RX MAC and WOL */ 1670 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE); 1671 1672 /* 1673 * Clear all WOL related registers 1674 */ 1675 for (i = 0; i < 3; ++i) 1676 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0); 1677 for (i = 0; i < 20; ++i) 1678 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0); 1679 1680 /* 1681 * Set WOL source address. XXX is this necessary? 1682 */ 1683 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5]; 1684 CSR_WRITE_4(sc, ET_WOL_SA_LO, val); 1685 val = (eaddr[0] << 8) | eaddr[1]; 1686 CSR_WRITE_4(sc, ET_WOL_SA_HI, val); 1687 1688 /* Clear packet filters */ 1689 CSR_WRITE_4(sc, ET_PKTFILT, 0); 1690 1691 /* No ucast filtering */ 1692 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0); 1693 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0); 1694 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0); 1695 1696 if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) { 1697 /* 1698 * In order to transmit jumbo packets greater than 1699 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between 1700 * RX MAC and RX DMA needs to be reduced in size to 1701 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen). In 1702 * order to implement this, we must use "cut through" 1703 * mode in the RX MAC, which chops packets down into 1704 * segments. In this case we selected 256 bytes, 1705 * since this is the size of the PCI-Express TLP's 1706 * that the ET1310 uses. 1707 */ 1708 val = __SHIFTIN(ET_RXMAC_SEGSZ(256), ET_RXMAC_MC_SEGSZ_MAX) | 1709 ET_RXMAC_MC_SEGSZ_ENABLE; 1710 } else { 1711 val = 0; 1712 } 1713 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val); 1714 1715 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0); 1716 1717 /* Initialize RX MAC management register */ 1718 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0); 1719 1720 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0); 1721 1722 CSR_WRITE_4(sc, ET_RXMAC_MGT, 1723 ET_RXMAC_MGT_PASS_ECRC | 1724 ET_RXMAC_MGT_PASS_ELEN | 1725 ET_RXMAC_MGT_PASS_ETRUNC | 1726 ET_RXMAC_MGT_CHECK_PKT); 1727 1728 /* 1729 * Configure runt filtering (may not work on certain chip generation) 1730 */ 1731 val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG; 1732 CSR_WRITE_4(sc, ET_PKTFILT, val); 1733 1734 /* Enable RX MAC but leave WOL disabled */ 1735 CSR_WRITE_4(sc, ET_RXMAC_CTRL, 1736 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE); 1737 1738 /* 1739 * Setup multicast hash and allmulti/promisc mode 1740 */ 1741 et_setmulti(sc); 1742 } 1743 1744 static void 1745 et_init_txmac(struct et_softc *sc) 1746 { 1747 /* Disable TX MAC and FC(?) */ 1748 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE); 1749 1750 /* No flow control yet */ 1751 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0); 1752 1753 /* Enable TX MAC but leave FC(?) diabled */ 1754 CSR_WRITE_4(sc, ET_TXMAC_CTRL, 1755 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE); 1756 } 1757 1758 static int 1759 et_start_rxdma(struct et_softc *sc) 1760 { 1761 uint32_t val = 0; 1762 1763 val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize, 1764 ET_RXDMA_CTRL_RING0_SIZE) | 1765 ET_RXDMA_CTRL_RING0_ENABLE; 1766 val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize, 1767 ET_RXDMA_CTRL_RING1_SIZE) | 1768 ET_RXDMA_CTRL_RING1_ENABLE; 1769 1770 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val); 1771 1772 DELAY(5); 1773 1774 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) { 1775 if_printf(&sc->arpcom.ac_if, "can't start RX DMA engine\n"); 1776 return ETIMEDOUT; 1777 } 1778 return 0; 1779 } 1780 1781 static int 1782 et_start_txdma(struct et_softc *sc) 1783 { 1784 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT); 1785 return 0; 1786 } 1787 1788 static int 1789 et_enable_txrx(struct et_softc *sc, int media_upd) 1790 { 1791 struct ifnet *ifp = &sc->arpcom.ac_if; 1792 uint32_t val; 1793 int i, error; 1794 1795 val = CSR_READ_4(sc, ET_MAC_CFG1); 1796 val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN; 1797 val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW | 1798 ET_MAC_CFG1_LOOPBACK); 1799 CSR_WRITE_4(sc, ET_MAC_CFG1, val); 1800 1801 if (media_upd) 1802 et_ifmedia_upd(ifp); 1803 else 1804 et_setmedia(sc); 1805 1806 #define NRETRY 100 1807 1808 for (i = 0; i < NRETRY; ++i) { 1809 val = CSR_READ_4(sc, ET_MAC_CFG1); 1810 if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) == 1811 (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) 1812 break; 1813 1814 DELAY(10); 1815 } 1816 if (i == NRETRY) { 1817 if_printf(ifp, "can't enable RX/TX\n"); 1818 return 0; 1819 } 1820 sc->sc_flags |= ET_FLAG_TXRX_ENABLED; 1821 1822 #undef NRETRY 1823 1824 /* 1825 * Start TX/RX DMA engine 1826 */ 1827 error = et_start_rxdma(sc); 1828 if (error) 1829 return error; 1830 1831 error = et_start_txdma(sc); 1832 if (error) 1833 return error; 1834 1835 return 0; 1836 } 1837 1838 static void 1839 et_rxeof(struct et_softc *sc) 1840 { 1841 struct ifnet *ifp = &sc->arpcom.ac_if; 1842 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1843 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1844 uint32_t rxs_stat_ring; 1845 int rxst_wrap, rxst_index; 1846 1847 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) 1848 return; 1849 1850 rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring; 1851 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0; 1852 rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX); 1853 1854 while (rxst_index != rxst_ring->rsr_index || 1855 rxst_wrap != rxst_ring->rsr_wrap) { 1856 struct et_rxbuf_data *rbd; 1857 struct et_rxdesc_ring *rx_ring; 1858 struct et_rxstat *st; 1859 struct mbuf *m; 1860 int buflen, buf_idx, ring_idx; 1861 uint32_t rxstat_pos, rxring_pos; 1862 1863 KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT); 1864 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index]; 1865 1866 buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN); 1867 buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX); 1868 ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX); 1869 1870 if (++rxst_ring->rsr_index == ET_RX_NSTAT) { 1871 rxst_ring->rsr_index = 0; 1872 rxst_ring->rsr_wrap ^= 1; 1873 } 1874 rxstat_pos = __SHIFTIN(rxst_ring->rsr_index, 1875 ET_RXSTAT_POS_INDEX); 1876 if (rxst_ring->rsr_wrap) 1877 rxstat_pos |= ET_RXSTAT_POS_WRAP; 1878 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos); 1879 1880 if (ring_idx >= ET_RX_NRING) { 1881 ifp->if_ierrors++; 1882 if_printf(ifp, "invalid ring index %d\n", ring_idx); 1883 continue; 1884 } 1885 if (buf_idx >= ET_RX_NDESC) { 1886 ifp->if_ierrors++; 1887 if_printf(ifp, "invalid buf index %d\n", buf_idx); 1888 continue; 1889 } 1890 1891 rbd = &sc->sc_rx_data[ring_idx]; 1892 m = rbd->rbd_buf[buf_idx].rb_mbuf; 1893 1894 if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) { 1895 if (buflen < ETHER_CRC_LEN) { 1896 m_freem(m); 1897 ifp->if_ierrors++; 1898 } else { 1899 m->m_pkthdr.len = m->m_len = buflen; 1900 m->m_pkthdr.rcvif = ifp; 1901 1902 m_adj(m, -ETHER_CRC_LEN); 1903 1904 ifp->if_ipackets++; 1905 ifp->if_input(ifp, m); 1906 } 1907 } else { 1908 ifp->if_ierrors++; 1909 } 1910 m = NULL; /* Catch invalid reference */ 1911 1912 rx_ring = &sc->sc_rx_ring[ring_idx]; 1913 1914 if (buf_idx != rx_ring->rr_index) { 1915 if_printf(ifp, "WARNING!! ring %d, " 1916 "buf_idx %d, rr_idx %d\n", 1917 ring_idx, buf_idx, rx_ring->rr_index); 1918 } 1919 1920 KKASSERT(rx_ring->rr_index < ET_RX_NDESC); 1921 if (++rx_ring->rr_index == ET_RX_NDESC) { 1922 rx_ring->rr_index = 0; 1923 rx_ring->rr_wrap ^= 1; 1924 } 1925 rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX); 1926 if (rx_ring->rr_wrap) 1927 rxring_pos |= ET_RX_RING_POS_WRAP; 1928 CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos); 1929 } 1930 } 1931 1932 static int 1933 et_encap(struct et_softc *sc, struct mbuf **m0) 1934 { 1935 bus_dma_segment_t segs[ET_NSEG_MAX]; 1936 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1937 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1938 struct et_txdesc *td; 1939 bus_dmamap_t map; 1940 int error, maxsegs, nsegs, first_idx, last_idx, i; 1941 uint32_t tx_ready_pos, last_td_ctrl2; 1942 1943 maxsegs = ET_TX_NDESC - tbd->tbd_used; 1944 if (maxsegs > ET_NSEG_MAX) 1945 maxsegs = ET_NSEG_MAX; 1946 KASSERT(maxsegs >= ET_NSEG_SPARE, 1947 ("not enough spare TX desc (%d)", maxsegs)); 1948 1949 KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC); 1950 first_idx = tx_ring->tr_ready_index; 1951 map = tbd->tbd_buf[first_idx].tb_dmap; 1952 1953 error = bus_dmamap_load_mbuf_defrag(sc->sc_txbuf_dtag, map, m0, 1954 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 1955 if (error) 1956 goto back; 1957 bus_dmamap_sync(sc->sc_txbuf_dtag, map, BUS_DMASYNC_PREWRITE); 1958 1959 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG; 1960 sc->sc_tx += nsegs; 1961 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) { 1962 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs; 1963 last_td_ctrl2 |= ET_TDCTRL2_INTR; 1964 } 1965 1966 last_idx = -1; 1967 for (i = 0; i < nsegs; ++i) { 1968 int idx; 1969 1970 idx = (first_idx + i) % ET_TX_NDESC; 1971 td = &tx_ring->tr_desc[idx]; 1972 td->td_addr_hi = ET_ADDR_HI(segs[i].ds_addr); 1973 td->td_addr_lo = ET_ADDR_LO(segs[i].ds_addr); 1974 td->td_ctrl1 = __SHIFTIN(segs[i].ds_len, ET_TDCTRL1_LEN); 1975 1976 if (i == nsegs - 1) { /* Last frag */ 1977 td->td_ctrl2 = last_td_ctrl2; 1978 last_idx = idx; 1979 } 1980 1981 KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC); 1982 if (++tx_ring->tr_ready_index == ET_TX_NDESC) { 1983 tx_ring->tr_ready_index = 0; 1984 tx_ring->tr_ready_wrap ^= 1; 1985 } 1986 } 1987 td = &tx_ring->tr_desc[first_idx]; 1988 td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG; /* First frag */ 1989 1990 KKASSERT(last_idx >= 0); 1991 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap; 1992 tbd->tbd_buf[last_idx].tb_dmap = map; 1993 tbd->tbd_buf[last_idx].tb_mbuf = *m0; 1994 1995 tbd->tbd_used += nsegs; 1996 KKASSERT(tbd->tbd_used <= ET_TX_NDESC); 1997 1998 tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index, 1999 ET_TX_READY_POS_INDEX); 2000 if (tx_ring->tr_ready_wrap) 2001 tx_ready_pos |= ET_TX_READY_POS_WRAP; 2002 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos); 2003 2004 error = 0; 2005 back: 2006 if (error) { 2007 m_freem(*m0); 2008 *m0 = NULL; 2009 } 2010 return error; 2011 } 2012 2013 static void 2014 et_txeof(struct et_softc *sc, int start) 2015 { 2016 struct ifnet *ifp = &sc->arpcom.ac_if; 2017 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 2018 struct et_txbuf_data *tbd = &sc->sc_tx_data; 2019 uint32_t tx_done; 2020 int end, wrap; 2021 2022 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) 2023 return; 2024 2025 if (tbd->tbd_used == 0) 2026 return; 2027 2028 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS); 2029 end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX); 2030 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0; 2031 2032 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) { 2033 struct et_txbuf *tb; 2034 2035 KKASSERT(tbd->tbd_start_index < ET_TX_NDESC); 2036 tb = &tbd->tbd_buf[tbd->tbd_start_index]; 2037 2038 bzero(&tx_ring->tr_desc[tbd->tbd_start_index], 2039 sizeof(struct et_txdesc)); 2040 2041 if (tb->tb_mbuf != NULL) { 2042 bus_dmamap_unload(sc->sc_txbuf_dtag, tb->tb_dmap); 2043 m_freem(tb->tb_mbuf); 2044 tb->tb_mbuf = NULL; 2045 ifp->if_opackets++; 2046 } 2047 2048 if (++tbd->tbd_start_index == ET_TX_NDESC) { 2049 tbd->tbd_start_index = 0; 2050 tbd->tbd_start_wrap ^= 1; 2051 } 2052 2053 KKASSERT(tbd->tbd_used > 0); 2054 tbd->tbd_used--; 2055 } 2056 2057 if (tbd->tbd_used == 0) 2058 ifp->if_timer = 0; 2059 if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC) 2060 ifp->if_flags &= ~IFF_OACTIVE; 2061 2062 if (start) 2063 if_devstart(ifp); 2064 } 2065 2066 static void 2067 et_tick(void *xsc) 2068 { 2069 struct et_softc *sc = xsc; 2070 struct ifnet *ifp = &sc->arpcom.ac_if; 2071 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2072 2073 lwkt_serialize_enter(ifp->if_serializer); 2074 2075 mii_tick(mii); 2076 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 && 2077 (mii->mii_media_status & IFM_ACTIVE) && 2078 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2079 if_printf(ifp, "Link up, enable TX/RX\n"); 2080 if (et_enable_txrx(sc, 0) == 0) 2081 if_devstart(ifp); 2082 } 2083 callout_reset(&sc->sc_tick, hz, et_tick, sc); 2084 2085 lwkt_serialize_exit(ifp->if_serializer); 2086 } 2087 2088 static int 2089 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init) 2090 { 2091 return et_newbuf(rbd, buf_idx, init, MCLBYTES); 2092 } 2093 2094 static int 2095 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init) 2096 { 2097 return et_newbuf(rbd, buf_idx, init, MHLEN); 2098 } 2099 2100 static int 2101 et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0) 2102 { 2103 struct et_softc *sc = rbd->rbd_softc; 2104 struct et_rxbuf *rb; 2105 struct mbuf *m; 2106 bus_dma_segment_t seg; 2107 bus_dmamap_t dmap; 2108 int error, len, nseg; 2109 2110 KASSERT(!rbd->rbd_jumbo, ("calling %s with jumbo ring", __func__)); 2111 2112 KKASSERT(buf_idx < ET_RX_NDESC); 2113 rb = &rbd->rbd_buf[buf_idx]; 2114 2115 m = m_getl(len0, init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR, &len); 2116 if (m == NULL) { 2117 error = ENOBUFS; 2118 2119 if (init) { 2120 if_printf(&sc->arpcom.ac_if, 2121 "m_getl failed, size %d\n", len0); 2122 return error; 2123 } else { 2124 goto back; 2125 } 2126 } 2127 m->m_len = m->m_pkthdr.len = len; 2128 2129 /* 2130 * Try load RX mbuf into temporary DMA tag 2131 */ 2132 error = bus_dmamap_load_mbuf_segment(sc->sc_rxbuf_dtag, 2133 sc->sc_rxbuf_tmp_dmap, m, &seg, 1, &nseg, 2134 BUS_DMA_NOWAIT); 2135 if (error) { 2136 m_freem(m); 2137 if (init) { 2138 if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n"); 2139 return error; 2140 } else { 2141 goto back; 2142 } 2143 } 2144 2145 if (!init) { 2146 bus_dmamap_sync(sc->sc_rxbuf_dtag, rb->rb_dmap, 2147 BUS_DMASYNC_POSTREAD); 2148 bus_dmamap_unload(sc->sc_rxbuf_dtag, rb->rb_dmap); 2149 } 2150 rb->rb_mbuf = m; 2151 rb->rb_paddr = seg.ds_addr; 2152 2153 /* 2154 * Swap RX buf's DMA map with the loaded temporary one 2155 */ 2156 dmap = rb->rb_dmap; 2157 rb->rb_dmap = sc->sc_rxbuf_tmp_dmap; 2158 sc->sc_rxbuf_tmp_dmap = dmap; 2159 2160 error = 0; 2161 back: 2162 et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr); 2163 return error; 2164 } 2165 2166 static int 2167 et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS) 2168 { 2169 struct et_softc *sc = arg1; 2170 struct ifnet *ifp = &sc->arpcom.ac_if; 2171 int error = 0, v; 2172 2173 lwkt_serialize_enter(ifp->if_serializer); 2174 2175 v = sc->sc_rx_intr_npkts; 2176 error = sysctl_handle_int(oidp, &v, 0, req); 2177 if (error || req->newptr == NULL) 2178 goto back; 2179 if (v <= 0) { 2180 error = EINVAL; 2181 goto back; 2182 } 2183 2184 if (sc->sc_rx_intr_npkts != v) { 2185 if (ifp->if_flags & IFF_RUNNING) 2186 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v); 2187 sc->sc_rx_intr_npkts = v; 2188 } 2189 back: 2190 lwkt_serialize_exit(ifp->if_serializer); 2191 return error; 2192 } 2193 2194 static int 2195 et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS) 2196 { 2197 struct et_softc *sc = arg1; 2198 struct ifnet *ifp = &sc->arpcom.ac_if; 2199 int error = 0, v; 2200 2201 lwkt_serialize_enter(ifp->if_serializer); 2202 2203 v = sc->sc_rx_intr_delay; 2204 error = sysctl_handle_int(oidp, &v, 0, req); 2205 if (error || req->newptr == NULL) 2206 goto back; 2207 if (v <= 0) { 2208 error = EINVAL; 2209 goto back; 2210 } 2211 2212 if (sc->sc_rx_intr_delay != v) { 2213 if (ifp->if_flags & IFF_RUNNING) 2214 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v); 2215 sc->sc_rx_intr_delay = v; 2216 } 2217 back: 2218 lwkt_serialize_exit(ifp->if_serializer); 2219 return error; 2220 } 2221 2222 static void 2223 et_setmedia(struct et_softc *sc) 2224 { 2225 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2226 uint32_t cfg2, ctrl; 2227 2228 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2); 2229 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII | 2230 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM); 2231 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC | 2232 __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN); 2233 2234 ctrl = CSR_READ_4(sc, ET_MAC_CTRL); 2235 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII); 2236 2237 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 2238 cfg2 |= ET_MAC_CFG2_MODE_GMII; 2239 } else { 2240 cfg2 |= ET_MAC_CFG2_MODE_MII; 2241 ctrl |= ET_MAC_CTRL_MODE_MII; 2242 } 2243 2244 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) 2245 cfg2 |= ET_MAC_CFG2_FDX; 2246 else 2247 ctrl |= ET_MAC_CTRL_GHDX; 2248 2249 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl); 2250 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2); 2251 } 2252 2253 static int 2254 et_jumbo_mem_alloc(device_t dev) 2255 { 2256 struct et_softc *sc = device_get_softc(dev); 2257 struct et_jumbo_data *jd = &sc->sc_jumbo_data; 2258 bus_addr_t paddr; 2259 uint8_t *buf; 2260 int i; 2261 2262 jd->jd_buf = bus_dmamem_coherent_any(sc->sc_dtag, 2263 ET_JUMBO_ALIGN, ET_JUMBO_MEM_SIZE, BUS_DMA_WAITOK, 2264 &jd->jd_dtag, &jd->jd_dmap, &paddr); 2265 if (jd->jd_buf == NULL) { 2266 device_printf(dev, "can't create jumbo DMA stuffs\n"); 2267 return ENOMEM; 2268 } 2269 2270 jd->jd_slots = kmalloc(sizeof(*jd->jd_slots) * ET_JSLOTS, M_DEVBUF, 2271 M_WAITOK | M_ZERO); 2272 lwkt_serialize_init(&jd->jd_serializer); 2273 SLIST_INIT(&jd->jd_free_slots); 2274 2275 buf = jd->jd_buf; 2276 for (i = 0; i < ET_JSLOTS; ++i) { 2277 struct et_jslot *jslot = &jd->jd_slots[i]; 2278 2279 jslot->jslot_data = jd; 2280 jslot->jslot_buf = buf; 2281 jslot->jslot_paddr = paddr; 2282 jslot->jslot_inuse = 0; 2283 jslot->jslot_index = i; 2284 SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot, jslot_link); 2285 2286 buf += ET_JLEN; 2287 paddr += ET_JLEN; 2288 } 2289 return 0; 2290 } 2291 2292 static void 2293 et_jumbo_mem_free(device_t dev) 2294 { 2295 struct et_softc *sc = device_get_softc(dev); 2296 struct et_jumbo_data *jd = &sc->sc_jumbo_data; 2297 2298 KKASSERT(sc->sc_flags & ET_FLAG_JUMBO); 2299 2300 kfree(jd->jd_slots, M_DEVBUF); 2301 et_dma_mem_destroy(jd->jd_dtag, jd->jd_buf, jd->jd_dmap); 2302 } 2303 2304 static struct et_jslot * 2305 et_jalloc(struct et_jumbo_data *jd) 2306 { 2307 struct et_jslot *jslot; 2308 2309 lwkt_serialize_enter(&jd->jd_serializer); 2310 2311 jslot = SLIST_FIRST(&jd->jd_free_slots); 2312 if (jslot) { 2313 SLIST_REMOVE_HEAD(&jd->jd_free_slots, jslot_link); 2314 jslot->jslot_inuse = 1; 2315 } 2316 2317 lwkt_serialize_exit(&jd->jd_serializer); 2318 return jslot; 2319 } 2320 2321 static void 2322 et_jfree(void *xjslot) 2323 { 2324 struct et_jslot *jslot = xjslot; 2325 struct et_jumbo_data *jd = jslot->jslot_data; 2326 2327 if (&jd->jd_slots[jslot->jslot_index] != jslot) { 2328 panic("%s wrong jslot!?", __func__); 2329 } else if (jslot->jslot_inuse == 0) { 2330 panic("%s jslot already freed", __func__); 2331 } else { 2332 lwkt_serialize_enter(&jd->jd_serializer); 2333 2334 atomic_subtract_int(&jslot->jslot_inuse, 1); 2335 if (jslot->jslot_inuse == 0) { 2336 SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot, 2337 jslot_link); 2338 } 2339 2340 lwkt_serialize_exit(&jd->jd_serializer); 2341 } 2342 } 2343 2344 static void 2345 et_jref(void *xjslot) 2346 { 2347 struct et_jslot *jslot = xjslot; 2348 struct et_jumbo_data *jd = jslot->jslot_data; 2349 2350 if (&jd->jd_slots[jslot->jslot_index] != jslot) 2351 panic("%s wrong jslot!?", __func__); 2352 else if (jslot->jslot_inuse == 0) 2353 panic("%s jslot already freed", __func__); 2354 else 2355 atomic_add_int(&jslot->jslot_inuse, 1); 2356 } 2357 2358 static int 2359 et_newbuf_jumbo(struct et_rxbuf_data *rbd, int buf_idx, int init) 2360 { 2361 struct et_softc *sc = rbd->rbd_softc; 2362 struct et_rxbuf *rb; 2363 struct mbuf *m; 2364 struct et_jslot *jslot; 2365 int error; 2366 2367 KASSERT(rbd->rbd_jumbo, ("calling %s with non-jumbo ring", __func__)); 2368 2369 KKASSERT(buf_idx < ET_RX_NDESC); 2370 rb = &rbd->rbd_buf[buf_idx]; 2371 2372 error = ENOBUFS; 2373 2374 MGETHDR(m, init ? MB_WAIT : MB_DONTWAIT, MT_DATA); 2375 if (m == NULL) { 2376 if (init) { 2377 if_printf(&sc->arpcom.ac_if, "MGETHDR failed\n"); 2378 return error; 2379 } else { 2380 goto back; 2381 } 2382 } 2383 2384 jslot = et_jalloc(&sc->sc_jumbo_data); 2385 if (jslot == NULL) { 2386 m_freem(m); 2387 2388 if (init) { 2389 if_printf(&sc->arpcom.ac_if, 2390 "jslot allocation failed\n"); 2391 return error; 2392 } else { 2393 goto back; 2394 } 2395 } 2396 2397 m->m_ext.ext_arg = jslot; 2398 m->m_ext.ext_buf = jslot->jslot_buf; 2399 m->m_ext.ext_free = et_jfree; 2400 m->m_ext.ext_ref = et_jref; 2401 m->m_ext.ext_size = ET_JUMBO_FRAMELEN; 2402 m->m_flags |= M_EXT; 2403 m->m_data = m->m_ext.ext_buf; 2404 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 2405 2406 rb->rb_mbuf = m; 2407 rb->rb_paddr = jslot->jslot_paddr; 2408 2409 error = 0; 2410 back: 2411 et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr); 2412 return error; 2413 } 2414 2415 static void 2416 et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr) 2417 { 2418 struct et_rxdesc_ring *rx_ring = rbd->rbd_ring; 2419 struct et_rxdesc *desc; 2420 2421 KKASSERT(buf_idx < ET_RX_NDESC); 2422 desc = &rx_ring->rr_desc[buf_idx]; 2423 2424 desc->rd_addr_hi = ET_ADDR_HI(paddr); 2425 desc->rd_addr_lo = ET_ADDR_LO(paddr); 2426 desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX); 2427 } 2428