/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bitops.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include "pcidevs.h"

#include <dev/netif/et/if_etreg.h>
#include <dev/netif/et/if_etvar.h>

#include "miibus_if.h"

static int	et_probe(device_t);
static int	et_attach(device_t);
static int	et_detach(device_t);
static int	et_shutdown(device_t);

static int	et_miibus_readreg(device_t, int, int);
static int	et_miibus_writereg(device_t, int, int, int);
static void	et_miibus_statchg(device_t);

static void	et_init(void *);
static int	et_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	et_start(struct ifnet *, struct ifaltq_subque *);
static void	et_watchdog(struct ifnet *);
static int	et_ifmedia_upd(struct ifnet *);
static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);

static void	et_intr(void *);
static void	et_enable_intrs(struct et_softc *, uint32_t);
static void	et_disable_intrs(struct et_softc *);
static void	et_rxeof(struct et_softc *);
static void	et_txeof(struct et_softc *, int);

static int	et_dma_alloc(device_t);
static void	et_dma_free(device_t);
static void	et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
static int	et_dma_mbuf_create(device_t);
static void	et_dma_mbuf_destroy(device_t, int, const int[]);
static int	et_jumbo_mem_alloc(device_t);
static void	et_jumbo_mem_free(device_t);
static int	et_init_tx_ring(struct et_softc *);
static int	et_init_rx_ring(struct et_softc *);
static void	et_free_tx_ring(struct et_softc *);
static void	et_free_rx_ring(struct et_softc *);
static int	et_encap(struct et_softc *, struct mbuf **);
static struct et_jslot *
		et_jalloc(struct et_jumbo_data *);
static void	et_jfree(void *);
static void	et_jref(void *);
static int	et_newbuf(struct et_rxbuf_data *, int, int, int);
static int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
static int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);
static int	et_newbuf_jumbo(struct et_rxbuf_data *, int, int);

static void	et_stop(struct et_softc *);
static int	et_chip_init(struct et_softc *);
static void	et_chip_attach(struct et_softc *);
static void	et_init_mac(struct et_softc *);
static void	et_init_rxmac(struct et_softc *);
static void	et_init_txmac(struct et_softc *);
static int	et_init_rxdma(struct et_softc *);
static int	et_init_txdma(struct et_softc *);
static int	et_start_rxdma(struct et_softc *);
static int	et_start_txdma(struct et_softc *);
static int	et_stop_rxdma(struct et_softc *);
static int	et_stop_txdma(struct et_softc *);
static int	et_enable_txrx(struct et_softc *, int);
static void	et_reset(struct et_softc *);
static int	et_bus_config(device_t);
static void	et_get_eaddr(device_t, uint8_t[]);
static void	et_setmulti(struct et_softc *);
static void	et_tick(void *);
static void	et_setmedia(struct et_softc *);
static void	et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t);

static const struct et_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
	  "Agere ET1310 Gigabit Ethernet" },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
	  "Agere ET1310 Fast Ethernet" },
	{ 0, 0, NULL }
};

static device_method_t et_methods[] = {
	DEVMETHOD(device_probe,		et_probe),
	DEVMETHOD(device_attach,	et_attach),
	DEVMETHOD(device_detach,	et_detach),
	DEVMETHOD(device_shutdown,	et_shutdown),
#if 0
	DEVMETHOD(device_suspend,	et_suspend),
	DEVMETHOD(device_resume,	et_resume),
#endif

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
	DEVMETHOD(miibus_statchg,	et_miibus_statchg),

	DEVMETHOD_END
};

static driver_t et_driver = {
	"et",
	et_methods,
	sizeof(struct et_softc)
};

static devclass_t et_devclass;

DECLARE_DUMMY_MODULE(if_et);
MODULE_DEPEND(if_et, miibus, 1, 1, 1);
DRIVER_MODULE(if_et, pci, et_driver, et_devclass, NULL, NULL);
DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, NULL, NULL);

static int	et_rx_intr_npkts = 129;
static int	et_rx_intr_delay = 25;		/* x4 usec */
static int	et_tx_intr_nsegs = 256;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */
static int	et_msi_enable = 1;

TUNABLE_INT("hw.et.timer", &et_timer);
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
TUNABLE_INT("hw.et.msi.enable", &et_msi_enable);
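/*
 * NOTE: the hw.et.* knobs above are boot-time tunables read from the
 * kernel environment when the module loads, so they can be preset in
 * /boot/loader.conf.  Illustrative values, not recommendations:
 *
 *	hw.et.msi.enable="0"		# force legacy INTx instead of MSI
 *	hw.et.rx_intr_npkts="64"	# RX interrupt every 64 packets
 *
 * rx_intr_npkts and rx_intr_delay can also be changed at runtime
 * through the per-device sysctl tree created in et_attach().
 */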
struct et_bsize {
	int		bufsize;
	int		jumbo;
	et_newbuf_t	newbuf;
};

static const struct et_bsize	et_bufsize_std[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128, .jumbo = 0,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_2048, .jumbo = 0,
	  .newbuf = et_newbuf_cluster },
};

static const struct et_bsize	et_bufsize_jumbo[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128, .jumbo = 0,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_16384, .jumbo = 1,
	  .newbuf = et_newbuf_jumbo },
};
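/*
 * NOTE: the two tables above implement a split RX scheme.  Ring 0
 * always advertises tiny 128-byte buffers backed by plain mbufs
 * (et_newbuf_hdr), while ring 1 carries the bulk data: standard 2KB
 * clusters normally, or 16KB jumbo slots once the configured MTU
 * makes ET_FRAMELEN(if_mtu) >= MCLBYTES.  et_init() selects the
 * table accordingly.
 */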
static int
et_probe(device_t dev)
{
	const struct et_dev *d;
	uint16_t did, vid;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = et_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}

static int
et_attach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;
	u_int irq_flags;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	callout_init(&sc->sc_tick);

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, ET_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, ET_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->sc_mem_rid = ET_PCIR_BAR;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res);
	sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->sc_irq_type = pci_alloc_1intr(dev, et_msi_enable,
	    &sc->sc_irq_rid, &irq_flags);
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_irq_rid, irq_flags);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_intr_npkts", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, et_sysctl_rx_intr_npkts, "I",
	    "RX IM, # packets per RX interrupt");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_intr_delay", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, et_sysctl_rx_intr_delay, "I",
	    "RX IM, RX interrupt delay (x10 usec)");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "tx_intr_nsegs", CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
	    "TX IM, # segments per TX interrupt");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "timer", CTLFLAG_RW, &sc->sc_timer, 0,
	    "TX timer");

	error = et_bus_config(dev);
	if (error)
		goto fail;

	et_get_eaddr(dev, eaddr);

	CSR_WRITE_4(sc, ET_PM,
	    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(dev);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_watchdog = et_watchdog;
	ifp->if_mtu = ETHERMTU;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_nmbclusters = ET_RX_NDESC;
	ifq_set_maxlen(&ifp->if_snd, ET_TX_NDESC);
	ifq_set_ready(&ifp->if_snd);

	et_chip_attach(sc);

	error = mii_phy_probe(dev, &sc->sc_miibus,
	    et_ifmedia_upd, et_ifmedia_sts);
	if (error) {
		device_printf(dev, "can't probe any PHY\n");
		goto fail;
	}

	ether_ifattach(ifp, eaddr, NULL);

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->sc_irq_res));

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, et_intr, sc,
	    &sc->sc_irq_handle, ifp->if_serializer);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "can't setup intr\n");
		goto fail;
	}

	/* Increase non-cluster mbuf limit; used by tiny RX ring */
	mb_inclimit(ET_RX_NDESC);

	return 0;
fail:
	et_detach(dev);
	return error;
}

static int
et_detach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		et_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);

		/* Decrease non-cluster mbuf limit increased by us */
		mb_inclimit(-ET_RX_NDESC);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
		    sc->sc_irq_res);
	}
	if (sc->sc_irq_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
		    sc->sc_mem_res);
	}

	et_dma_free(dev);

	return 0;
}

static int
et_shutdown(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	et_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
	return 0;
}
static int
et_miibus_readreg(device_t dev, int phy, int reg)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	      __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(&sc->arpcom.ac_if,
			  "read phy %d, reg %d timed out\n", phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}

static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	      __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(&sc->arpcom.ac_if,
			  "write phy %d, reg %d timed out\n", phy, reg);
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return 0;
}

static void
et_miibus_statchg(device_t dev)
{
	et_setmedia(device_get_softc(dev));
}

static int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->sc_tick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;
	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}
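/*
 * NOTE: et_bus_config() below patches PCIe link parameters that
 * depend on the negotiated max payload size.  The idea, as far as
 * the register names tell: a 128- or 256-byte payload gets a
 * matching ACK latency and replay timer from fixed tables, while
 * any larger payload keeps whatever the EEPROM programmed and the
 * current values are merely reported.
 */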
static int
et_bus_config(device_t dev)
{
	uint32_t val, max_plsz;
	uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	val = pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		device_printf(dev, "EEPROM status error 0x%02x\n", val);
		return ENXIO;
	}

	/* TODO: LED */

	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CAPS, 4);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_read_config(dev, ET_PCIR_ACK_LATENCY, 2);
		replay_timer = pci_read_config(dev, ET_PCIR_REPLAY_TIMER, 2);
		device_printf(dev, "ack latency %u, replay timer %u\n",
			      ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_write_config(dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
		pci_write_config(dev, ET_PCIR_REPLAY_TIMER, replay_timer, 2);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_write_config(dev, ET_PCIR_L0S_L1_LATENCY, val, 1);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CTRL, 2);
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_write_config(dev, ET_PCIR_DEVICE_CTRL, val, 2);

	return 0;
}

static void
et_get_eaddr(device_t dev, uint8_t eaddr[])
{
	uint32_t val;
	int i;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
	for (i = 0; i < 4; ++i)
		eaddr[i] = (val >> (8 * i)) & 0xff;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
	for (; i < ETHER_ADDR_LEN; ++i)
		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
}

static void
et_reset(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
		    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
		    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
		    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

static void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

static void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}
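/*
 * NOTE: et_dma_alloc() below carves out every coherent DMA block the
 * chip needs: one TX descriptor ring plus a single 32-bit TX status
 * word, ET_RX_NRING RX descriptor rings, the RX stat ring the chip
 * posts completions to, and a small RX status block.  Per-mbuf DMA
 * maps and the optional jumbo pool are created afterwards; only the
 * jumbo pool is allowed to fail.
 */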
static int
et_dma_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create top level DMA tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &sc->sc_dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return error;
	}

	/*
	 * Create TX ring DMA stuffs
	 */
	tx_ring->tr_desc = bus_dmamem_coherent_any(sc->sc_dtag,
				ET_ALIGN, ET_TX_RING_SIZE,
				BUS_DMA_WAITOK | BUS_DMA_ZERO,
				&tx_ring->tr_dtag, &tx_ring->tr_dmap,
				&tx_ring->tr_paddr);
	if (tx_ring->tr_desc == NULL) {
		device_printf(dev, "can't create TX ring DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create TX status DMA stuffs
	 */
	txsd->txsd_status = bus_dmamem_coherent_any(sc->sc_dtag,
				ET_ALIGN, sizeof(uint32_t),
				BUS_DMA_WAITOK | BUS_DMA_ZERO,
				&txsd->txsd_dtag, &txsd->txsd_dmap,
				&txsd->txsd_paddr);
	if (txsd->txsd_status == NULL) {
		device_printf(dev, "can't create TX status DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		{ ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		rx_ring->rr_desc = bus_dmamem_coherent_any(sc->sc_dtag,
					ET_ALIGN, ET_RX_RING_SIZE,
					BUS_DMA_WAITOK | BUS_DMA_ZERO,
					&rx_ring->rr_dtag, &rx_ring->rr_dmap,
					&rx_ring->rr_paddr);
		if (rx_ring->rr_desc == NULL) {
			device_printf(dev, "can't create DMA stuffs for "
				      "the %d RX ring\n", i);
			return ENOMEM;
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	rxst_ring->rsr_stat = bus_dmamem_coherent_any(sc->sc_dtag,
				ET_ALIGN, ET_RXSTAT_RING_SIZE,
				BUS_DMA_WAITOK | BUS_DMA_ZERO,
				&rxst_ring->rsr_dtag, &rxst_ring->rsr_dmap,
				&rxst_ring->rsr_paddr);
	if (rxst_ring->rsr_stat == NULL) {
		device_printf(dev, "can't create RX stat ring DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create RX status DMA stuffs
	 */
	rxsd->rxsd_status = bus_dmamem_coherent_any(sc->sc_dtag,
				ET_ALIGN, sizeof(struct et_rxstatus),
				BUS_DMA_WAITOK | BUS_DMA_ZERO,
				&rxsd->rxsd_dtag, &rxsd->rxsd_dmap,
				&rxsd->rxsd_paddr);
	if (rxsd->rxsd_status == NULL) {
		device_printf(dev, "can't create RX status DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(dev);
	if (error)
		return error;

	/*
	 * Create jumbo buffer DMA stuffs
	 * NOTE: Allow it to fail
	 */
	if (et_jumbo_mem_alloc(dev) == 0)
		sc->sc_flags |= ET_FLAG_JUMBO;

	return 0;
}

static void
et_dma_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc,
			   tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status,
			   txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc,
				   rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat,
			   rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status,
			   rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done);

	/*
	 * Destroy jumbo buffer DMA stuffs
	 */
	if (sc->sc_flags & ET_FLAG_JUMBO)
		et_jumbo_mem_free(dev);

	/*
	 * Destroy top level DMA tag
	 */
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);
}
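/*
 * NOTE: the RX side uses the common spare-map trick.  A replacement
 * mbuf is first loaded into sc_rxbuf_tmp_dmap; only when that
 * succeeds does et_newbuf() swap the spare map with the ring slot's
 * map, so a failed allocation leaves the old, still-mapped buffer in
 * place for the hardware.
 */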
static int
et_dma_mbuf_create(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create RX mbuf DMA tag
	 */
	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
				   &sc->sc_rxbuf_dtag);
	if (error) {
		device_printf(dev, "can't create RX mbuf DMA tag\n");
		return error;
	}

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_rxbuf_dtag, BUS_DMA_WAITOK,
				  &sc->sc_rxbuf_tmp_dmap);
	if (error) {
		device_printf(dev, "can't create spare mbuf DMA map\n");
		bus_dma_tag_destroy(sc->sc_rxbuf_dtag);
		sc->sc_rxbuf_dtag = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_rxbuf_dtag,
						  BUS_DMA_WAITOK,
						  &rbd->rbd_buf[j].rb_dmap);
			if (error) {
				device_printf(dev, "can't create %d RX mbuf "
					      "for %d RX ring\n", j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(dev, 0, rx_done);
				return error;
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create TX mbuf DMA tag
	 */
	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   ET_JUMBO_FRAMELEN, ET_NSEG_MAX, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
				   BUS_DMA_ONEBPAGE,
				   &sc->sc_txbuf_dtag);
	if (error) {
		device_printf(dev, "can't create TX mbuf DMA tag\n");
		return error;
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_txbuf_dtag,
					  BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
					  &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			device_printf(dev, "can't create %d TX mbuf "
				      "DMA map\n", i);
			et_dma_mbuf_destroy(dev, i, rx_done);
			return error;
		}
	}

	return 0;
}

static void
et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[])
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	/*
	 * Destroy DMA tag and maps for RX mbufs
	 */
	if (sc->sc_rxbuf_dtag) {
		for (i = 0; i < ET_RX_NRING; ++i) {
			struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
			int j;

			for (j = 0; j < rx_done[i]; ++j) {
				struct et_rxbuf *rb = &rbd->rbd_buf[j];

				KASSERT(rb->rb_mbuf == NULL,
					("RX mbuf in %d RX ring is "
					 "not freed yet", i));
				bus_dmamap_destroy(sc->sc_rxbuf_dtag,
						   rb->rb_dmap);
			}
		}
		bus_dmamap_destroy(sc->sc_rxbuf_dtag, sc->sc_rxbuf_tmp_dmap);
		bus_dma_tag_destroy(sc->sc_rxbuf_dtag);
		sc->sc_rxbuf_dtag = NULL;
	}

	/*
	 * Destroy DMA tag and maps for TX mbufs
	 */
	if (sc->sc_txbuf_dtag) {
		for (i = 0; i < tx_done; ++i) {
			struct et_txbuf *tb = &tbd->tbd_buf[i];

			KASSERT(tb->tb_mbuf == NULL,
				("TX mbuf is not freed yet"));
			bus_dmamap_destroy(sc->sc_txbuf_dtag, tb->tb_dmap);
		}
		bus_dma_tag_destroy(sc->sc_txbuf_dtag);
		sc->sc_txbuf_dtag = NULL;
	}
}
static void
et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	if (dtag != NULL) {
		bus_dmamap_unload(dtag, dmap);
		bus_dmamem_free(dtag, addr, dmap);
		bus_dma_tag_destroy(dtag);
	}
}

static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

static void
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t intrs;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	et_disable_intrs(sc);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc, 1);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);
}
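/*
 * NOTE: et_init() below follows the usual bring-up order: stop the
 * chip, pick the RX buffer tables for the current MTU, initialize
 * the software ring state, program the chip (et_chip_init), enable
 * the MAC and DMA engines, then unmask interrupts and arm the TX
 * timer.  Any failure unwinds through et_stop().
 */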
static void
et_init(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const struct et_bsize *arr;
	int error, i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	et_stop(sc);

	arr = ET_FRAMELEN(ifp->if_mtu) < MCLBYTES ?
	      et_bufsize_std : et_bufsize_jumbo;
	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
		sc->sc_rx_data[i].rbd_jumbo = arr[i].jumbo;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc, 1);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
back:
	if (error)
		et_stop(sc);
}

static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, max_framelen;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->sc_if_flags) &
				    (IFF_ALLMULTI | IFF_PROMISC))
					et_setmulti(sc);
			} else {
				et_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			et_setmulti(sc);
		break;

	case SIOCSIFMTU:
		if (sc->sc_flags & ET_FLAG_JUMBO)
			max_framelen = ET_JUMBO_FRAMELEN;
		else
			max_framelen = MCLBYTES - 1;

		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
			error = EOPNOTSUPP;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		if (ifp->if_flags & IFF_RUNNING)
			et_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}
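/*
 * NOTE: et_start() below implements the usual oactive protocol.
 * When fewer than ET_NSEG_SPARE descriptors remain it first tries to
 * reclaim completed work with et_txeof(sc, 0); only if the ring is
 * still full on the second pass does it mark the queue oactive and
 * wait for a TX completion to restart transmission.
 */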
static void
et_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int trans, oactive;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	oactive = 0;
	trans = 0;
	for (;;) {
		struct mbuf *m;
		int error;

		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			if (oactive) {
				ifq_set_oactive(&ifp->if_snd);
				break;
			}

			et_txeof(sc, 0);
			oactive = 1;
			continue;
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		error = et_encap(sc, &m);
		if (error) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			KKASSERT(m == NULL);

			if (error == EFBIG) {
				/*
				 * Excessively fragmented packet
				 */
				if (oactive) {
					ifq_set_oactive(&ifp->if_snd);
					break;
				}
				et_txeof(sc, 0);
				oactive = 1;
			}
			continue;
		} else {
			oactive = 0;
		}
		trans = 1;

		BPF_MTAP(ifp, m);
	}

	if (trans)
		ifp->if_timer = 5;
}

static void
et_watchdog(struct ifnet *ifp)
{
	ASSERT_SERIALIZED(ifp->if_serializer);

	if_printf(ifp, "watchdog timed out\n");

	ifp->if_init(ifp->if_softc);
	if_devstart(ifp);
}

static int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
		    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		if_printf(&sc->arpcom.ac_if, "can't stop RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

static int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
		    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

static void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_txbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}
	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
}

static void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				if (!rbd->rbd_jumbo) {
					bus_dmamap_unload(sc->sc_rxbuf_dtag,
							  rb->rb_dmap);
				}
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}
		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
	}
}

static void
et_setmulti(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ifmultiaddr *ifma;
	int i, count;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	count = 0;
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		uint32_t *hp, h;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
				ifma->ifma_addr), ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
	}

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}
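/*
 * NOTE: a worked example for the hash above, using a hypothetical
 * address.  ether_crc32_be() yields a 32-bit CRC; the mask
 * 0x3f800000 keeps bits 29..23, giving a 7-bit index h into a
 * 128-bit table stored as four 32-bit registers.  For h == 75 the
 * code takes the h >= 64 && h < 96 branch, so bit 11 of hash[2]
 * (register ET_MULTI_HASH + 8) is set.
 */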
static int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t rxq_end;
	int error, frame_len, rxmem_size;

	/*
	 * Split the 16KB internal memory between TX and RX
	 * according to frame length.
	 */
	frame_len = ET_FRAMELEN(ifp->if_mtu);
	if (frame_len < 2048) {
		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
		rxmem_size = ET_MEM_SIZE / 2;
	} else {
		rxmem_size = ET_MEM_SIZE -
		roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
	}
	rxq_end = ET_QUEUE_ADDR(rxmem_size);

	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return error;

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return error;

	return 0;
}
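/*
 * NOTE: to make the split above concrete, assume a jumbo MTU whose
 * frame_len exceeds ET_RXMAC_CUT_THRU_FRMLEN.  Then rxmem_size =
 * ET_MEM_SIZE - roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT),
 * i.e. the TX queue keeps just enough room for one oversized frame
 * plus slack, and the region up to rxq_end belongs to RX.
 */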
static int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));

	return 0;
}

static int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				if_printf(&sc->arpcom.ac_if, "%d ring %d buf, "
					  "newbuf failed: %d\n", n, i, error);
				return error;
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);

	return 0;
}

static int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		if_printf(&sc->arpcom.ac_if, "can't init RX DMA engine\n");
		return error;
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return 0;
}

static int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		if_printf(&sc->arpcom.ac_if, "can't init TX DMA engine\n");
		return error;
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return 0;
}
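/*
 * NOTE: et_init_mac() below packs the station address into two
 * registers.  For a hypothetical 00:30:48:aa:bb:cc, ET_MAC_ADDR1
 * receives bytes 2..5 as 0xccbbaa48 and ET_MAC_ADDR2 carries bytes
 * 0..1 in its two high octets, i.e. 0x30000000.
 */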
static void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
	      __SHIFTIN(88, ET_IPG_NONB2B_2) |
	      __SHIFTIN(80, ET_IPG_MINIFG) |
	      __SHIFTIN(96, ET_IPG_B2B);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

static void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
		/*
		 * In order to transmit jumbo packets greater than
		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
		 * RX MAC and RX DMA needs to be reduced in size to
		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
		 * order to implement this, we must use "cut through"
		 * mode in the RX MAC, which chops packets down into
		 * segments.  In this case we selected 256 bytes,
		 * since this is the size of the PCI-Express TLP's
		 * that the ET1310 uses.
		 */
		val = __SHIFTIN(ET_RXMAC_SEGSZ(256), ET_RXMAC_MC_SEGSZ_MAX) |
		      ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
		    ET_RXMAC_MGT_PASS_ECRC |
		    ET_RXMAC_MGT_PASS_ELEN |
		    ET_RXMAC_MGT_PASS_ETRUNC |
		    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generations)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
		    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}
static void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
		    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

static int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
			 ET_RXDMA_CTRL_RING0_SIZE) |
	       ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
			 ET_RXDMA_CTRL_RING1_SIZE) |
	       ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		if_printf(&sc->arpcom.ac_if, "can't start RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

static int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

static int
et_enable_txrx(struct et_softc *sc, int media_upd)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;
	int i, error;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
		 ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	if (media_upd)
		et_ifmedia_upd(ifp);
	else
		et_setmedia(sc);

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	if (i == NRETRY) {
		if_printf(ifp, "can't enable RX/TX\n");
		return 0;
	}
	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

#undef NRETRY

	/*
	 * Start TX/RX DMA engine
	 */
	error = et_start_rxdma(sc);
	if (error)
		return error;

	error = et_start_txdma(sc);
	if (error)
		return error;

	return 0;
}
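/*
 * NOTE: RX completion is two-level.  The chip DMAs a small RX status
 * block (rxsd) holding the current stat-ring position, and appends
 * one et_rxstat entry per received frame to the stat ring itself.
 * et_rxeof() below walks the stat ring until its software index/wrap
 * pair matches the hardware's, acknowledging progress through
 * ET_RXSTAT_POS and recycling descriptor slots as it goes.
 */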
static void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	while (rxst_index != rxst_ring->rsr_index ||
	       rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
				       ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			if_printf(ifp, "invalid ring index %d\n", ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			if_printf(ifp, "invalid buf index %d\n", buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		m = rbd->rbd_buf[buf_idx].rb_mbuf;

		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				IFNET_STAT_INC(ifp, ierrors, 1);
			} else {
				m->m_pkthdr.len = m->m_len = buflen;
				m->m_pkthdr.rcvif = ifp;

				m_adj(m, -ETHER_CRC_LEN);

				IFNET_STAT_INC(ifp, ipackets, 1);
				ifp->if_input(ifp, m, NULL, -1);
			}
		} else {
			IFNET_STAT_INC(ifp, ierrors, 1);
		}
		m = NULL;	/* Catch invalid reference */

		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			if_printf(ifp, "WARNING!! ring %d, "
				  "buf_idx %d, rr_idx %d\n",
				  ring_idx, buf_idx, rx_ring->rr_index);
		}

		KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}
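/*
 * NOTE: TX interrupts are moderated by segment count in et_encap()
 * below.  sc_tx counts every DMA segment ever queued; a descriptor
 * gets ET_TDCTRL2_INTR only when sc_tx / sc_tx_intr_nsegs advances,
 * i.e. roughly one TX interrupt per sc_tx_intr_nsegs (default 256)
 * segments, with ET_INTR_TIMER covering the idle tail.
 */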
static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	bus_dma_segment_t segs[ET_NSEG_MAX];
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, nsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
		("not enough spare TX desc (%d)", maxsegs));

	KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf_defrag(sc->sc_txbuf_dtag, map, m0,
			segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(sc->sc_txbuf_dtag, map, BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	last_idx = -1;
	for (i = 0; i < nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(segs[i].ds_addr);
		td->td_ctrl1 = __SHIFTIN(segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	KKASSERT(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = *m0;

	tbd->tbd_used += nsegs;
	KKASSERT(tbd->tbd_used <= ET_TX_NDESC);

	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
				 ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(*m0);
		*m0 = NULL;
	}
	return error;
}
static void
et_txeof(struct et_softc *sc, int start)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		      sizeof(struct et_txdesc));

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_txbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
			IFNET_STAT_INC(ifp, opackets, 1);
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		KKASSERT(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0)
		ifp->if_timer = 0;
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifq_clr_oactive(&ifp->if_snd);

	if (start)
		if_devstart(ifp);
}

static void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		if_printf(ifp, "Link up, enable TX/RX\n");
		if (et_enable_txrx(sc, 0) == 0)
			if_devstart(ifp);
	}
	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}
static int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t dmap;
	int error, len, nseg;

	KASSERT(!rbd->rbd_jumbo, ("calling %s with jumbo ring", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	m = m_getl(len0, init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR, &len);
	if (m == NULL) {
		error = ENOBUFS;

		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "m_getl failed, size %d\n", len0);
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try to load the RX mbuf into the temporary DMA map
	 */
	error = bus_dmamap_load_mbuf_segment(sc->sc_rxbuf_dtag,
			sc->sc_rxbuf_tmp_dmap, m, &seg, 1, &nseg,
			BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init) {
			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
			return error;
		} else {
			goto back;
		}
	}

	if (!init) {
		bus_dmamap_sync(sc->sc_rxbuf_dtag, rb->rb_dmap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rxbuf_dtag, rb->rb_dmap);
	}
	rb->rb_mbuf = m;
	rb->rb_paddr = seg.ds_addr;

	/*
	 * Swap this RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_rxbuf_tmp_dmap;
	sc->sc_rxbuf_tmp_dmap = dmap;

	error = 0;
back:
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}

static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_npkts;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_npkts != v) {
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
		sc->sc_rx_intr_npkts = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_delay;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_delay != v) {
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
		sc->sc_rx_intr_delay = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static void
et_setmedia(struct et_softc *sc)
{
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t cfg2, ctrl;

	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
		  ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
		__SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}
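
/*
 * Carve the single coherent jumbo DMA allocation into ET_JSLOTS
 * fixed-size slots of ET_JLEN bytes each.  Slots are handed out
 * from a free list and reference counted (jslot_inuse), so a
 * jumbo frame can be attached to an mbuf as external storage
 * without per-packet DMA map load/unload churn.
 */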
static int
et_jumbo_mem_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;
	bus_addr_t paddr;
	uint8_t *buf;
	int i;

	jd->jd_buf = bus_dmamem_coherent_any(sc->sc_dtag,
	    ET_JUMBO_ALIGN, ET_JUMBO_MEM_SIZE, BUS_DMA_WAITOK,
	    &jd->jd_dtag, &jd->jd_dmap, &paddr);
	if (jd->jd_buf == NULL) {
		device_printf(dev, "can't allocate jumbo DMA memory\n");
		return ENOMEM;
	}

	jd->jd_slots = kmalloc(sizeof(*jd->jd_slots) * ET_JSLOTS, M_DEVBUF,
			       M_WAITOK | M_ZERO);
	lwkt_serialize_init(&jd->jd_serializer);
	SLIST_INIT(&jd->jd_free_slots);

	buf = jd->jd_buf;
	for (i = 0; i < ET_JSLOTS; ++i) {
		struct et_jslot *jslot = &jd->jd_slots[i];

		jslot->jslot_data = jd;
		jslot->jslot_buf = buf;
		jslot->jslot_paddr = paddr;
		jslot->jslot_inuse = 0;
		jslot->jslot_index = i;
		SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot, jslot_link);

		buf += ET_JLEN;
		paddr += ET_JLEN;
	}
	return 0;
}

static void
et_jumbo_mem_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;

	KKASSERT(sc->sc_flags & ET_FLAG_JUMBO);

	kfree(jd->jd_slots, M_DEVBUF);
	et_dma_mem_destroy(jd->jd_dtag, jd->jd_buf, jd->jd_dmap);
}

static struct et_jslot *
et_jalloc(struct et_jumbo_data *jd)
{
	struct et_jslot *jslot;

	lwkt_serialize_enter(&jd->jd_serializer);

	jslot = SLIST_FIRST(&jd->jd_free_slots);
	if (jslot) {
		SLIST_REMOVE_HEAD(&jd->jd_free_slots, jslot_link);
		jslot->jslot_inuse = 1;
	}

	lwkt_serialize_exit(&jd->jd_serializer);
	return jslot;
}

static void
et_jfree(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	if (&jd->jd_slots[jslot->jslot_index] != jslot) {
		panic("%s wrong jslot!?", __func__);
	} else if (jslot->jslot_inuse == 0) {
		panic("%s jslot already freed", __func__);
	} else {
		lwkt_serialize_enter(&jd->jd_serializer);

		atomic_subtract_int(&jslot->jslot_inuse, 1);
		if (jslot->jslot_inuse == 0) {
			SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot,
					  jslot_link);
		}

		lwkt_serialize_exit(&jd->jd_serializer);
	}
}

static void
et_jref(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	if (&jd->jd_slots[jslot->jslot_index] != jslot)
		panic("%s wrong jslot!?", __func__);
	else if (jslot->jslot_inuse == 0)
		panic("%s jslot already freed", __func__);
	else
		atomic_add_int(&jslot->jslot_inuse, 1);
}
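
/*
 * Refill a jumbo RX descriptor: allocate an mbuf header and attach
 * a jumbo slot to it as external storage, with et_jfree/et_jref as
 * the free/reference callbacks so the slot returns to the free list
 * once the last reference is dropped.
 */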
static int
et_newbuf_jumbo(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	struct et_jslot *jslot;
	int error;

	KASSERT(rbd->rbd_jumbo, ("calling %s with non-jumbo ring", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	error = ENOBUFS;

	MGETHDR(m, init ? M_WAITOK : M_NOWAIT, MT_DATA);
	if (m == NULL) {
		if (init) {
			if_printf(&sc->arpcom.ac_if, "MGETHDR failed\n");
			return error;
		} else {
			goto back;
		}
	}

	jslot = et_jalloc(&sc->sc_jumbo_data);
	if (jslot == NULL) {
		m_freem(m);

		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "jslot allocation failed\n");
			return error;
		} else {
			goto back;
		}
	}

	m->m_ext.ext_arg = jslot;
	m->m_ext.ext_buf = jslot->jslot_buf;
	m->m_ext.ext_free = et_jfree;
	m->m_ext.ext_ref = et_jref;
	m->m_ext.ext_size = ET_JUMBO_FRAMELEN;
	m->m_flags |= M_EXT;
	m->m_data = m->m_ext.ext_buf;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	rb->rb_mbuf = m;
	rb->rb_paddr = jslot->jslot_paddr;

	error = 0;
back:
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}

static void
et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr)
{
	struct et_rxdesc_ring *rx_ring = rbd->rbd_ring;
	struct et_rxdesc *desc;

	KKASSERT(buf_idx < ET_RX_NDESC);
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(paddr);
	desc->rd_addr_lo = ET_ADDR_LO(paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);
}
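
#if 0
/*
 * Illustrative sketch only, not part of the driver: the TX and RX
 * position registers written above (ET_TX_READY_POS and the
 * rr_posreg writes) share the same "index plus wrap bit" encoding,
 * which a hypothetical helper could compose like this.
 */
static __inline uint32_t
et_ring_pos(int index, int wrap, uint32_t index_mask, uint32_t wrap_bit)
{
	uint32_t pos;

	/* Shift the ring index into the register's index field. */
	pos = __SHIFTIN(index, index_mask);
	/* The wrap bit flips each time the index rolls over to 0. */
	if (wrap)
		pos |= wrap_bit;
	return pos;
}
#endif	/* 0 */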