1 /* 2 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Sepherosa Ziehau <sepherosa@gmail.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 35 #include <sys/param.h> 36 #include <sys/bitops.h> 37 #include <sys/endian.h> 38 #include <sys/kernel.h> 39 #include <sys/bus.h> 40 #include <sys/interrupt.h> 41 #include <sys/malloc.h> 42 #include <sys/proc.h> 43 #include <sys/rman.h> 44 #include <sys/serialize.h> 45 #include <sys/socket.h> 46 #include <sys/sockio.h> 47 #include <sys/sysctl.h> 48 49 #include <net/ethernet.h> 50 #include <net/if.h> 51 #include <net/bpf.h> 52 #include <net/if_arp.h> 53 #include <net/if_dl.h> 54 #include <net/if_media.h> 55 #include <net/ifq_var.h> 56 #include <net/vlan/if_vlan_var.h> 57 58 #include <dev/netif/mii_layer/miivar.h> 59 60 #include <bus/pci/pcireg.h> 61 #include <bus/pci/pcivar.h> 62 #include "pcidevs.h" 63 64 #include <dev/netif/et/if_etreg.h> 65 #include <dev/netif/et/if_etvar.h> 66 67 #include "miibus_if.h" 68 69 static int et_probe(device_t); 70 static int et_attach(device_t); 71 static int et_detach(device_t); 72 static int et_shutdown(device_t); 73 74 static int et_miibus_readreg(device_t, int, int); 75 static int et_miibus_writereg(device_t, int, int, int); 76 static void et_miibus_statchg(device_t); 77 78 static void et_init(void *); 79 static int et_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 80 static void et_start(struct ifnet *, struct ifaltq_subque *); 81 static void et_watchdog(struct ifnet *); 82 static int et_ifmedia_upd(struct ifnet *); 83 static void et_ifmedia_sts(struct ifnet *, struct ifmediareq *); 84 85 static int et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS); 86 static int et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS); 87 88 static void et_intr(void *); 89 static void et_enable_intrs(struct et_softc *, uint32_t); 90 static void et_disable_intrs(struct et_softc *); 91 static void et_rxeof(struct et_softc *); 92 static void et_txeof(struct et_softc *, int); 93 94 static int et_dma_alloc(device_t); 95 static void et_dma_free(device_t); 96 static void et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t); 97 static int et_dma_mbuf_create(device_t); 98 static void et_dma_mbuf_destroy(device_t, int, const int[]); 99 static int et_jumbo_mem_alloc(device_t); 100 static void et_jumbo_mem_free(device_t); 101 static int et_init_tx_ring(struct et_softc *); 102 static int et_init_rx_ring(struct et_softc *); 103 static void et_free_tx_ring(struct et_softc *); 104 static void et_free_rx_ring(struct et_softc *); 105 static int et_encap(struct et_softc *, struct mbuf **); 106 static struct et_jslot * 107 et_jalloc(struct et_jumbo_data *); 108 static void et_jfree(void *); 109 static void et_jref(void *); 110 static int et_newbuf(struct et_rxbuf_data *, int, int, int); 111 static int et_newbuf_cluster(struct et_rxbuf_data *, int, int); 112 static int et_newbuf_hdr(struct et_rxbuf_data *, int, int); 113 static int et_newbuf_jumbo(struct et_rxbuf_data *, int, int); 114 115 static void et_stop(struct et_softc *); 116 static int et_chip_init(struct et_softc *); 117 static void et_chip_attach(struct et_softc *); 118 static void et_init_mac(struct et_softc *); 119 static void et_init_rxmac(struct et_softc *); 120 static void et_init_txmac(struct et_softc *); 121 static int et_init_rxdma(struct et_softc *); 122 static int et_init_txdma(struct et_softc *); 123 static int et_start_rxdma(struct et_softc *); 124 static int et_start_txdma(struct et_softc *); 125 static int et_stop_rxdma(struct et_softc *); 126 static int et_stop_txdma(struct et_softc *); 127 static int et_enable_txrx(struct et_softc *, int); 128 static void et_reset(struct et_softc *); 129 static int et_bus_config(device_t); 130 static void et_get_eaddr(device_t, uint8_t[]); 131 static void et_setmulti(struct et_softc *); 132 static void et_tick(void *); 133 static void et_setmedia(struct et_softc *); 134 static void et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t); 135 136 static const struct et_dev { 137 uint16_t vid; 138 uint16_t did; 139 const char *desc; 140 } et_devices[] = { 141 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310, 142 "Agere ET1310 Gigabit Ethernet" }, 143 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST, 144 "Agere ET1310 Fast Ethernet" }, 145 { 0, 0, NULL } 146 }; 147 148 static device_method_t et_methods[] = { 149 DEVMETHOD(device_probe, et_probe), 150 DEVMETHOD(device_attach, et_attach), 151 DEVMETHOD(device_detach, et_detach), 152 DEVMETHOD(device_shutdown, et_shutdown), 153 #if 0 154 DEVMETHOD(device_suspend, et_suspend), 155 DEVMETHOD(device_resume, et_resume), 156 #endif 157 158 DEVMETHOD(bus_print_child, bus_generic_print_child), 159 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 160 161 DEVMETHOD(miibus_readreg, et_miibus_readreg), 162 DEVMETHOD(miibus_writereg, et_miibus_writereg), 163 DEVMETHOD(miibus_statchg, et_miibus_statchg), 164 165 DEVMETHOD_END 166 }; 167 168 static driver_t et_driver = { 169 "et", 170 et_methods, 171 sizeof(struct et_softc) 172 }; 173 174 static devclass_t et_devclass; 175 176 DECLARE_DUMMY_MODULE(if_et); 177 MODULE_DEPEND(if_et, miibus, 1, 1, 1); 178 DRIVER_MODULE(if_et, pci, et_driver, et_devclass, NULL, NULL); 179 DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, NULL, NULL); 180 181 static int et_rx_intr_npkts = 129; 182 static int et_rx_intr_delay = 25; /* x4 usec */ 183 static int et_tx_intr_nsegs = 256; 184 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */ 185 186 static int et_msi_enable = 1; 187 188 TUNABLE_INT("hw.et.timer", &et_timer); 189 TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts); 190 TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay); 191 TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs); 192 TUNABLE_INT("hw.et.msi.enable", &et_msi_enable); 193 194 struct et_bsize { 195 int bufsize; 196 int jumbo; 197 et_newbuf_t newbuf; 198 }; 199 200 static const struct et_bsize et_bufsize_std[ET_RX_NRING] = { 201 { .bufsize = ET_RXDMA_CTRL_RING0_128, .jumbo = 0, 202 .newbuf = et_newbuf_hdr }, 203 { .bufsize = ET_RXDMA_CTRL_RING1_2048, .jumbo = 0, 204 .newbuf = et_newbuf_cluster }, 205 }; 206 207 static const struct et_bsize et_bufsize_jumbo[ET_RX_NRING] = { 208 { .bufsize = ET_RXDMA_CTRL_RING0_128, .jumbo = 0, 209 .newbuf = et_newbuf_hdr }, 210 { .bufsize = ET_RXDMA_CTRL_RING1_16384, .jumbo = 1, 211 .newbuf = et_newbuf_jumbo }, 212 }; 213 214 static int 215 et_probe(device_t dev) 216 { 217 const struct et_dev *d; 218 uint16_t did, vid; 219 220 vid = pci_get_vendor(dev); 221 did = pci_get_device(dev); 222 223 for (d = et_devices; d->desc != NULL; ++d) { 224 if (vid == d->vid && did == d->did) { 225 device_set_desc(dev, d->desc); 226 return 0; 227 } 228 } 229 return ENXIO; 230 } 231 232 static int 233 et_attach(device_t dev) 234 { 235 struct et_softc *sc = device_get_softc(dev); 236 struct ifnet *ifp = &sc->arpcom.ac_if; 237 struct sysctl_ctx_list *ctx; 238 struct sysctl_oid *tree; 239 uint8_t eaddr[ETHER_ADDR_LEN]; 240 int error; 241 u_int irq_flags; 242 243 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 244 callout_init(&sc->sc_tick); 245 246 /* 247 * Initialize tunables 248 */ 249 sc->sc_rx_intr_npkts = et_rx_intr_npkts; 250 sc->sc_rx_intr_delay = et_rx_intr_delay; 251 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs; 252 sc->sc_timer = et_timer; 253 254 #ifndef BURN_BRIDGES 255 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 256 uint32_t irq, mem; 257 258 irq = pci_read_config(dev, PCIR_INTLINE, 4); 259 mem = pci_read_config(dev, ET_PCIR_BAR, 4); 260 261 device_printf(dev, "chip is in D%d power mode " 262 "-- setting to D0\n", pci_get_powerstate(dev)); 263 264 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 265 266 pci_write_config(dev, PCIR_INTLINE, irq, 4); 267 pci_write_config(dev, ET_PCIR_BAR, mem, 4); 268 } 269 #endif /* !BURN_BRIDGE */ 270 271 /* Enable bus mastering */ 272 pci_enable_busmaster(dev); 273 274 /* 275 * Allocate IO memory 276 */ 277 sc->sc_mem_rid = ET_PCIR_BAR; 278 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 279 &sc->sc_mem_rid, RF_ACTIVE); 280 if (sc->sc_mem_res == NULL) { 281 device_printf(dev, "can't allocate IO memory\n"); 282 return ENXIO; 283 } 284 sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res); 285 sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res); 286 287 /* 288 * Allocate IRQ 289 */ 290 sc->sc_irq_type = pci_alloc_1intr(dev, et_msi_enable, 291 &sc->sc_irq_rid, &irq_flags); 292 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 293 &sc->sc_irq_rid, irq_flags); 294 if (sc->sc_irq_res == NULL) { 295 device_printf(dev, "can't allocate irq\n"); 296 error = ENXIO; 297 goto fail; 298 } 299 300 /* 301 * Create sysctl tree 302 */ 303 ctx = device_get_sysctl_ctx(dev); 304 tree = device_get_sysctl_tree(dev); 305 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 306 OID_AUTO, "rx_intr_npkts", CTLTYPE_INT | CTLFLAG_RW, 307 sc, 0, et_sysctl_rx_intr_npkts, "I", 308 "RX IM, # packets per RX interrupt"); 309 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 310 OID_AUTO, "rx_intr_delay", CTLTYPE_INT | CTLFLAG_RW, 311 sc, 0, et_sysctl_rx_intr_delay, "I", 312 "RX IM, RX interrupt delay (x10 usec)"); 313 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 314 "tx_intr_nsegs", CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0, 315 "TX IM, # segments per TX interrupt"); 316 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 317 "timer", CTLFLAG_RW, &sc->sc_timer, 0, 318 "TX timer"); 319 320 error = et_bus_config(dev); 321 if (error) 322 goto fail; 323 324 et_get_eaddr(dev, eaddr); 325 326 CSR_WRITE_4(sc, ET_PM, 327 ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE); 328 329 et_reset(sc); 330 331 et_disable_intrs(sc); 332 333 error = et_dma_alloc(dev); 334 if (error) 335 goto fail; 336 337 ifp->if_softc = sc; 338 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 339 ifp->if_init = et_init; 340 ifp->if_ioctl = et_ioctl; 341 ifp->if_start = et_start; 342 ifp->if_watchdog = et_watchdog; 343 ifp->if_mtu = ETHERMTU; 344 ifp->if_capabilities = IFCAP_VLAN_MTU; 345 ifp->if_capenable = ifp->if_capabilities; 346 ifp->if_nmbclusters = ET_RX_NDESC; 347 ifq_set_maxlen(&ifp->if_snd, ET_TX_NDESC); 348 ifq_set_ready(&ifp->if_snd); 349 350 et_chip_attach(sc); 351 352 error = mii_phy_probe(dev, &sc->sc_miibus, 353 et_ifmedia_upd, et_ifmedia_sts); 354 if (error) { 355 device_printf(dev, "can't probe any PHY\n"); 356 goto fail; 357 } 358 359 ether_ifattach(ifp, eaddr, NULL); 360 361 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->sc_irq_res)); 362 363 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, et_intr, sc, 364 &sc->sc_irq_handle, ifp->if_serializer); 365 if (error) { 366 ether_ifdetach(ifp); 367 device_printf(dev, "can't setup intr\n"); 368 goto fail; 369 } 370 371 /* Increase non-cluster mbuf limit; used by tiny RX ring */ 372 mb_inclimit(ET_RX_NDESC); 373 374 return 0; 375 fail: 376 et_detach(dev); 377 return error; 378 } 379 380 static int 381 et_detach(device_t dev) 382 { 383 struct et_softc *sc = device_get_softc(dev); 384 385 if (device_is_attached(dev)) { 386 struct ifnet *ifp = &sc->arpcom.ac_if; 387 388 lwkt_serialize_enter(ifp->if_serializer); 389 et_stop(sc); 390 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle); 391 lwkt_serialize_exit(ifp->if_serializer); 392 393 ether_ifdetach(ifp); 394 395 /* Decrease non-cluster mbuf limit increased by us */ 396 mb_inclimit(-ET_RX_NDESC); 397 } 398 399 if (sc->sc_miibus != NULL) 400 device_delete_child(dev, sc->sc_miibus); 401 bus_generic_detach(dev); 402 403 if (sc->sc_irq_res != NULL) { 404 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, 405 sc->sc_irq_res); 406 } 407 if (sc->sc_irq_type == PCI_INTR_TYPE_MSI) 408 pci_release_msi(dev); 409 410 if (sc->sc_mem_res != NULL) { 411 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, 412 sc->sc_mem_res); 413 } 414 415 et_dma_free(dev); 416 417 return 0; 418 } 419 420 static int 421 et_shutdown(device_t dev) 422 { 423 struct et_softc *sc = device_get_softc(dev); 424 struct ifnet *ifp = &sc->arpcom.ac_if; 425 426 lwkt_serialize_enter(ifp->if_serializer); 427 et_stop(sc); 428 lwkt_serialize_exit(ifp->if_serializer); 429 return 0; 430 } 431 432 static int 433 et_miibus_readreg(device_t dev, int phy, int reg) 434 { 435 struct et_softc *sc = device_get_softc(dev); 436 uint32_t val; 437 int i, ret; 438 439 /* Stop any pending operations */ 440 CSR_WRITE_4(sc, ET_MII_CMD, 0); 441 442 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 443 __SHIFTIN(reg, ET_MII_ADDR_REG); 444 CSR_WRITE_4(sc, ET_MII_ADDR, val); 445 446 /* Start reading */ 447 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ); 448 449 #define NRETRY 50 450 451 for (i = 0; i < NRETRY; ++i) { 452 val = CSR_READ_4(sc, ET_MII_IND); 453 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0) 454 break; 455 DELAY(50); 456 } 457 if (i == NRETRY) { 458 if_printf(&sc->arpcom.ac_if, 459 "read phy %d, reg %d timed out\n", phy, reg); 460 ret = 0; 461 goto back; 462 } 463 464 #undef NRETRY 465 466 val = CSR_READ_4(sc, ET_MII_STAT); 467 ret = __SHIFTOUT(val, ET_MII_STAT_VALUE); 468 469 back: 470 /* Make sure that the current operation is stopped */ 471 CSR_WRITE_4(sc, ET_MII_CMD, 0); 472 return ret; 473 } 474 475 static int 476 et_miibus_writereg(device_t dev, int phy, int reg, int val0) 477 { 478 struct et_softc *sc = device_get_softc(dev); 479 uint32_t val; 480 int i; 481 482 /* Stop any pending operations */ 483 CSR_WRITE_4(sc, ET_MII_CMD, 0); 484 485 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 486 __SHIFTIN(reg, ET_MII_ADDR_REG); 487 CSR_WRITE_4(sc, ET_MII_ADDR, val); 488 489 /* Start writing */ 490 CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE)); 491 492 #define NRETRY 100 493 494 for (i = 0; i < NRETRY; ++i) { 495 val = CSR_READ_4(sc, ET_MII_IND); 496 if ((val & ET_MII_IND_BUSY) == 0) 497 break; 498 DELAY(50); 499 } 500 if (i == NRETRY) { 501 if_printf(&sc->arpcom.ac_if, 502 "write phy %d, reg %d timed out\n", phy, reg); 503 et_miibus_readreg(dev, phy, reg); 504 } 505 506 #undef NRETRY 507 508 /* Make sure that the current operation is stopped */ 509 CSR_WRITE_4(sc, ET_MII_CMD, 0); 510 return 0; 511 } 512 513 static void 514 et_miibus_statchg(device_t dev) 515 { 516 et_setmedia(device_get_softc(dev)); 517 } 518 519 static int 520 et_ifmedia_upd(struct ifnet *ifp) 521 { 522 struct et_softc *sc = ifp->if_softc; 523 struct mii_data *mii = device_get_softc(sc->sc_miibus); 524 525 if (mii->mii_instance != 0) { 526 struct mii_softc *miisc; 527 528 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 529 mii_phy_reset(miisc); 530 } 531 mii_mediachg(mii); 532 533 return 0; 534 } 535 536 static void 537 et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 538 { 539 struct et_softc *sc = ifp->if_softc; 540 struct mii_data *mii = device_get_softc(sc->sc_miibus); 541 542 mii_pollstat(mii); 543 ifmr->ifm_active = mii->mii_media_active; 544 ifmr->ifm_status = mii->mii_media_status; 545 } 546 547 static void 548 et_stop(struct et_softc *sc) 549 { 550 struct ifnet *ifp = &sc->arpcom.ac_if; 551 552 ASSERT_SERIALIZED(ifp->if_serializer); 553 554 callout_stop(&sc->sc_tick); 555 556 et_stop_rxdma(sc); 557 et_stop_txdma(sc); 558 559 et_disable_intrs(sc); 560 561 et_free_tx_ring(sc); 562 et_free_rx_ring(sc); 563 564 et_reset(sc); 565 566 sc->sc_tx = 0; 567 sc->sc_tx_intr = 0; 568 sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED; 569 570 ifp->if_timer = 0; 571 ifp->if_flags &= ~IFF_RUNNING; 572 ifq_clr_oactive(&ifp->if_snd); 573 } 574 575 static int 576 et_bus_config(device_t dev) 577 { 578 uint32_t val, max_plsz; 579 uint16_t ack_latency, replay_timer; 580 581 /* 582 * Test whether EEPROM is valid 583 * NOTE: Read twice to get the correct value 584 */ 585 pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1); 586 val = pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1); 587 if (val & ET_PCIM_EEPROM_STATUS_ERROR) { 588 device_printf(dev, "EEPROM status error 0x%02x\n", val); 589 return ENXIO; 590 } 591 592 /* TODO: LED */ 593 594 /* 595 * Configure ACK latency and replay timer according to 596 * max playload size 597 */ 598 val = pci_read_config(dev, ET_PCIR_DEVICE_CAPS, 4); 599 max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ; 600 601 switch (max_plsz) { 602 case ET_PCIV_DEVICE_CAPS_PLSZ_128: 603 ack_latency = ET_PCIV_ACK_LATENCY_128; 604 replay_timer = ET_PCIV_REPLAY_TIMER_128; 605 break; 606 607 case ET_PCIV_DEVICE_CAPS_PLSZ_256: 608 ack_latency = ET_PCIV_ACK_LATENCY_256; 609 replay_timer = ET_PCIV_REPLAY_TIMER_256; 610 break; 611 612 default: 613 ack_latency = pci_read_config(dev, ET_PCIR_ACK_LATENCY, 2); 614 replay_timer = pci_read_config(dev, ET_PCIR_REPLAY_TIMER, 2); 615 device_printf(dev, "ack latency %u, replay timer %u\n", 616 ack_latency, replay_timer); 617 break; 618 } 619 if (ack_latency != 0) { 620 pci_write_config(dev, ET_PCIR_ACK_LATENCY, ack_latency, 2); 621 pci_write_config(dev, ET_PCIR_REPLAY_TIMER, replay_timer, 2); 622 } 623 624 /* 625 * Set L0s and L1 latency timer to 2us 626 */ 627 val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2); 628 pci_write_config(dev, ET_PCIR_L0S_L1_LATENCY, val, 1); 629 630 /* 631 * Set max read request size to 2048 bytes 632 */ 633 val = pci_read_config(dev, ET_PCIR_DEVICE_CTRL, 2); 634 val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ; 635 val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K; 636 pci_write_config(dev, ET_PCIR_DEVICE_CTRL, val, 2); 637 638 return 0; 639 } 640 641 static void 642 et_get_eaddr(device_t dev, uint8_t eaddr[]) 643 { 644 uint32_t val; 645 int i; 646 647 val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4); 648 for (i = 0; i < 4; ++i) 649 eaddr[i] = (val >> (8 * i)) & 0xff; 650 651 val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2); 652 for (; i < ETHER_ADDR_LEN; ++i) 653 eaddr[i] = (val >> (8 * (i - 4))) & 0xff; 654 } 655 656 static void 657 et_reset(struct et_softc *sc) 658 { 659 CSR_WRITE_4(sc, ET_MAC_CFG1, 660 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 661 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 662 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 663 664 CSR_WRITE_4(sc, ET_SWRST, 665 ET_SWRST_TXDMA | ET_SWRST_RXDMA | 666 ET_SWRST_TXMAC | ET_SWRST_RXMAC | 667 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC); 668 669 CSR_WRITE_4(sc, ET_MAC_CFG1, 670 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 671 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC); 672 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 673 } 674 675 static void 676 et_disable_intrs(struct et_softc *sc) 677 { 678 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 679 } 680 681 static void 682 et_enable_intrs(struct et_softc *sc, uint32_t intrs) 683 { 684 CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs); 685 } 686 687 static int 688 et_dma_alloc(device_t dev) 689 { 690 struct et_softc *sc = device_get_softc(dev); 691 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 692 struct et_txstatus_data *txsd = &sc->sc_tx_status; 693 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 694 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 695 int i, error; 696 697 /* 698 * Create top level DMA tag 699 */ 700 error = bus_dma_tag_create(NULL, 1, 0, 701 BUS_SPACE_MAXADDR, 702 BUS_SPACE_MAXADDR, 703 BUS_SPACE_MAXSIZE_32BIT, 704 0, 705 BUS_SPACE_MAXSIZE_32BIT, 706 0, &sc->sc_dtag); 707 if (error) { 708 device_printf(dev, "can't create DMA tag\n"); 709 return error; 710 } 711 712 /* 713 * Create TX ring DMA stuffs 714 */ 715 tx_ring->tr_desc = bus_dmamem_coherent_any(sc->sc_dtag, 716 ET_ALIGN, ET_TX_RING_SIZE, 717 BUS_DMA_WAITOK | BUS_DMA_ZERO, 718 &tx_ring->tr_dtag, &tx_ring->tr_dmap, 719 &tx_ring->tr_paddr); 720 if (tx_ring->tr_desc == NULL) { 721 device_printf(dev, "can't create TX ring DMA stuffs\n"); 722 return ENOMEM; 723 } 724 725 /* 726 * Create TX status DMA stuffs 727 */ 728 txsd->txsd_status = bus_dmamem_coherent_any(sc->sc_dtag, 729 ET_ALIGN, sizeof(uint32_t), 730 BUS_DMA_WAITOK | BUS_DMA_ZERO, 731 &txsd->txsd_dtag, &txsd->txsd_dmap, 732 &txsd->txsd_paddr); 733 if (txsd->txsd_status == NULL) { 734 device_printf(dev, "can't create TX status DMA stuffs\n"); 735 return ENOMEM; 736 } 737 738 /* 739 * Create DMA stuffs for RX rings 740 */ 741 for (i = 0; i < ET_RX_NRING; ++i) { 742 static const uint32_t rx_ring_posreg[ET_RX_NRING] = 743 { ET_RX_RING0_POS, ET_RX_RING1_POS }; 744 745 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 746 747 rx_ring->rr_desc = bus_dmamem_coherent_any(sc->sc_dtag, 748 ET_ALIGN, ET_RX_RING_SIZE, 749 BUS_DMA_WAITOK | BUS_DMA_ZERO, 750 &rx_ring->rr_dtag, &rx_ring->rr_dmap, 751 &rx_ring->rr_paddr); 752 if (rx_ring->rr_desc == NULL) { 753 device_printf(dev, "can't create DMA stuffs for " 754 "the %d RX ring\n", i); 755 return ENOMEM; 756 } 757 rx_ring->rr_posreg = rx_ring_posreg[i]; 758 } 759 760 /* 761 * Create RX stat ring DMA stuffs 762 */ 763 rxst_ring->rsr_stat = bus_dmamem_coherent_any(sc->sc_dtag, 764 ET_ALIGN, ET_RXSTAT_RING_SIZE, 765 BUS_DMA_WAITOK | BUS_DMA_ZERO, 766 &rxst_ring->rsr_dtag, &rxst_ring->rsr_dmap, 767 &rxst_ring->rsr_paddr); 768 if (rxst_ring->rsr_stat == NULL) { 769 device_printf(dev, "can't create RX stat ring DMA stuffs\n"); 770 return ENOMEM; 771 } 772 773 /* 774 * Create RX status DMA stuffs 775 */ 776 rxsd->rxsd_status = bus_dmamem_coherent_any(sc->sc_dtag, 777 ET_ALIGN, sizeof(struct et_rxstatus), 778 BUS_DMA_WAITOK | BUS_DMA_ZERO, 779 &rxsd->rxsd_dtag, &rxsd->rxsd_dmap, 780 &rxsd->rxsd_paddr); 781 if (rxsd->rxsd_status == NULL) { 782 device_printf(dev, "can't create RX status DMA stuffs\n"); 783 return ENOMEM; 784 } 785 786 /* 787 * Create mbuf DMA stuffs 788 */ 789 error = et_dma_mbuf_create(dev); 790 if (error) 791 return error; 792 793 /* 794 * Create jumbo buffer DMA stuffs 795 * NOTE: Allow it to fail 796 */ 797 if (et_jumbo_mem_alloc(dev) == 0) 798 sc->sc_flags |= ET_FLAG_JUMBO; 799 800 return 0; 801 } 802 803 static void 804 et_dma_free(device_t dev) 805 { 806 struct et_softc *sc = device_get_softc(dev); 807 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 808 struct et_txstatus_data *txsd = &sc->sc_tx_status; 809 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 810 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 811 int i, rx_done[ET_RX_NRING]; 812 813 /* 814 * Destroy TX ring DMA stuffs 815 */ 816 et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc, 817 tx_ring->tr_dmap); 818 819 /* 820 * Destroy TX status DMA stuffs 821 */ 822 et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status, 823 txsd->txsd_dmap); 824 825 /* 826 * Destroy DMA stuffs for RX rings 827 */ 828 for (i = 0; i < ET_RX_NRING; ++i) { 829 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 830 831 et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc, 832 rx_ring->rr_dmap); 833 } 834 835 /* 836 * Destroy RX stat ring DMA stuffs 837 */ 838 et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat, 839 rxst_ring->rsr_dmap); 840 841 /* 842 * Destroy RX status DMA stuffs 843 */ 844 et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status, 845 rxsd->rxsd_dmap); 846 847 /* 848 * Destroy mbuf DMA stuffs 849 */ 850 for (i = 0; i < ET_RX_NRING; ++i) 851 rx_done[i] = ET_RX_NDESC; 852 et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done); 853 854 /* 855 * Destroy jumbo buffer DMA stuffs 856 */ 857 if (sc->sc_flags & ET_FLAG_JUMBO) 858 et_jumbo_mem_free(dev); 859 860 /* 861 * Destroy top level DMA tag 862 */ 863 if (sc->sc_dtag != NULL) 864 bus_dma_tag_destroy(sc->sc_dtag); 865 } 866 867 static int 868 et_dma_mbuf_create(device_t dev) 869 { 870 struct et_softc *sc = device_get_softc(dev); 871 struct et_txbuf_data *tbd = &sc->sc_tx_data; 872 int i, error, rx_done[ET_RX_NRING]; 873 874 /* 875 * Create RX mbuf DMA tag 876 */ 877 error = bus_dma_tag_create(sc->sc_dtag, 1, 0, 878 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 879 MCLBYTES, 1, MCLBYTES, 880 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, 881 &sc->sc_rxbuf_dtag); 882 if (error) { 883 device_printf(dev, "can't create RX mbuf DMA tag\n"); 884 return error; 885 } 886 887 /* 888 * Create spare DMA map for RX mbufs 889 */ 890 error = bus_dmamap_create(sc->sc_rxbuf_dtag, BUS_DMA_WAITOK, 891 &sc->sc_rxbuf_tmp_dmap); 892 if (error) { 893 device_printf(dev, "can't create spare mbuf DMA map\n"); 894 bus_dma_tag_destroy(sc->sc_rxbuf_dtag); 895 sc->sc_rxbuf_dtag = NULL; 896 return error; 897 } 898 899 /* 900 * Create DMA maps for RX mbufs 901 */ 902 bzero(rx_done, sizeof(rx_done)); 903 for (i = 0; i < ET_RX_NRING; ++i) { 904 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 905 int j; 906 907 for (j = 0; j < ET_RX_NDESC; ++j) { 908 error = bus_dmamap_create(sc->sc_rxbuf_dtag, 909 BUS_DMA_WAITOK, 910 &rbd->rbd_buf[j].rb_dmap); 911 if (error) { 912 device_printf(dev, "can't create %d RX mbuf " 913 "for %d RX ring\n", j, i); 914 rx_done[i] = j; 915 et_dma_mbuf_destroy(dev, 0, rx_done); 916 return error; 917 } 918 } 919 rx_done[i] = ET_RX_NDESC; 920 921 rbd->rbd_softc = sc; 922 rbd->rbd_ring = &sc->sc_rx_ring[i]; 923 } 924 925 /* 926 * Create TX mbuf DMA tag 927 */ 928 error = bus_dma_tag_create(sc->sc_dtag, 1, 0, 929 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 930 ET_JUMBO_FRAMELEN, ET_NSEG_MAX, MCLBYTES, 931 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | 932 BUS_DMA_ONEBPAGE, 933 &sc->sc_txbuf_dtag); 934 if (error) { 935 device_printf(dev, "can't create TX mbuf DMA tag\n"); 936 return error; 937 } 938 939 /* 940 * Create DMA maps for TX mbufs 941 */ 942 for (i = 0; i < ET_TX_NDESC; ++i) { 943 error = bus_dmamap_create(sc->sc_txbuf_dtag, 944 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 945 &tbd->tbd_buf[i].tb_dmap); 946 if (error) { 947 device_printf(dev, "can't create %d TX mbuf " 948 "DMA map\n", i); 949 et_dma_mbuf_destroy(dev, i, rx_done); 950 return error; 951 } 952 } 953 954 return 0; 955 } 956 957 static void 958 et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[]) 959 { 960 struct et_softc *sc = device_get_softc(dev); 961 struct et_txbuf_data *tbd = &sc->sc_tx_data; 962 int i; 963 964 /* 965 * Destroy DMA tag and maps for RX mbufs 966 */ 967 if (sc->sc_rxbuf_dtag) { 968 for (i = 0; i < ET_RX_NRING; ++i) { 969 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 970 int j; 971 972 for (j = 0; j < rx_done[i]; ++j) { 973 struct et_rxbuf *rb = &rbd->rbd_buf[j]; 974 975 KASSERT(rb->rb_mbuf == NULL, 976 ("RX mbuf in %d RX ring is " 977 "not freed yet", i)); 978 bus_dmamap_destroy(sc->sc_rxbuf_dtag, 979 rb->rb_dmap); 980 } 981 } 982 bus_dmamap_destroy(sc->sc_rxbuf_dtag, sc->sc_rxbuf_tmp_dmap); 983 bus_dma_tag_destroy(sc->sc_rxbuf_dtag); 984 sc->sc_rxbuf_dtag = NULL; 985 } 986 987 /* 988 * Destroy DMA tag and maps for TX mbufs 989 */ 990 if (sc->sc_txbuf_dtag) { 991 for (i = 0; i < tx_done; ++i) { 992 struct et_txbuf *tb = &tbd->tbd_buf[i]; 993 994 KASSERT(tb->tb_mbuf == NULL, 995 ("TX mbuf is not freed yet")); 996 bus_dmamap_destroy(sc->sc_txbuf_dtag, tb->tb_dmap); 997 } 998 bus_dma_tag_destroy(sc->sc_txbuf_dtag); 999 sc->sc_txbuf_dtag = NULL; 1000 } 1001 } 1002 1003 static void 1004 et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap) 1005 { 1006 if (dtag != NULL) { 1007 bus_dmamap_unload(dtag, dmap); 1008 bus_dmamem_free(dtag, addr, dmap); 1009 bus_dma_tag_destroy(dtag); 1010 } 1011 } 1012 1013 static void 1014 et_chip_attach(struct et_softc *sc) 1015 { 1016 uint32_t val; 1017 1018 /* 1019 * Perform minimal initialization 1020 */ 1021 1022 /* Disable loopback */ 1023 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1024 1025 /* Reset MAC */ 1026 CSR_WRITE_4(sc, ET_MAC_CFG1, 1027 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1028 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1029 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1030 1031 /* 1032 * Setup half duplex mode 1033 */ 1034 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 1035 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 1036 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 1037 ET_MAC_HDX_EXC_DEFER; 1038 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1039 1040 /* Clear MAC control */ 1041 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1042 1043 /* Reset MII */ 1044 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1045 1046 /* Bring MAC out of reset state */ 1047 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1048 1049 /* Enable memory controllers */ 1050 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1051 } 1052 1053 static void 1054 et_intr(void *xsc) 1055 { 1056 struct et_softc *sc = xsc; 1057 struct ifnet *ifp = &sc->arpcom.ac_if; 1058 uint32_t intrs; 1059 1060 ASSERT_SERIALIZED(ifp->if_serializer); 1061 1062 if ((ifp->if_flags & IFF_RUNNING) == 0) 1063 return; 1064 1065 et_disable_intrs(sc); 1066 1067 intrs = CSR_READ_4(sc, ET_INTR_STATUS); 1068 intrs &= ET_INTRS; 1069 if (intrs == 0) /* Not interested */ 1070 goto back; 1071 1072 if (intrs & ET_INTR_RXEOF) 1073 et_rxeof(sc); 1074 if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER)) 1075 et_txeof(sc, 1); 1076 if (intrs & ET_INTR_TIMER) 1077 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1078 back: 1079 et_enable_intrs(sc, ET_INTRS); 1080 } 1081 1082 static void 1083 et_init(void *xsc) 1084 { 1085 struct et_softc *sc = xsc; 1086 struct ifnet *ifp = &sc->arpcom.ac_if; 1087 const struct et_bsize *arr; 1088 int error, i; 1089 1090 ASSERT_SERIALIZED(ifp->if_serializer); 1091 1092 et_stop(sc); 1093 1094 arr = ET_FRAMELEN(ifp->if_mtu) < MCLBYTES ? 1095 et_bufsize_std : et_bufsize_jumbo; 1096 for (i = 0; i < ET_RX_NRING; ++i) { 1097 sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize; 1098 sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf; 1099 sc->sc_rx_data[i].rbd_jumbo = arr[i].jumbo; 1100 } 1101 1102 error = et_init_tx_ring(sc); 1103 if (error) 1104 goto back; 1105 1106 error = et_init_rx_ring(sc); 1107 if (error) 1108 goto back; 1109 1110 error = et_chip_init(sc); 1111 if (error) 1112 goto back; 1113 1114 error = et_enable_txrx(sc, 1); 1115 if (error) 1116 goto back; 1117 1118 et_enable_intrs(sc, ET_INTRS); 1119 1120 callout_reset(&sc->sc_tick, hz, et_tick, sc); 1121 1122 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1123 1124 ifp->if_flags |= IFF_RUNNING; 1125 ifq_clr_oactive(&ifp->if_snd); 1126 back: 1127 if (error) 1128 et_stop(sc); 1129 } 1130 1131 static int 1132 et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr) 1133 { 1134 struct et_softc *sc = ifp->if_softc; 1135 struct mii_data *mii = device_get_softc(sc->sc_miibus); 1136 struct ifreq *ifr = (struct ifreq *)data; 1137 int error = 0, max_framelen; 1138 1139 ASSERT_SERIALIZED(ifp->if_serializer); 1140 1141 switch (cmd) { 1142 case SIOCSIFFLAGS: 1143 if (ifp->if_flags & IFF_UP) { 1144 if (ifp->if_flags & IFF_RUNNING) { 1145 if ((ifp->if_flags ^ sc->sc_if_flags) & 1146 (IFF_ALLMULTI | IFF_PROMISC)) 1147 et_setmulti(sc); 1148 } else { 1149 et_init(sc); 1150 } 1151 } else { 1152 if (ifp->if_flags & IFF_RUNNING) 1153 et_stop(sc); 1154 } 1155 sc->sc_if_flags = ifp->if_flags; 1156 break; 1157 1158 case SIOCSIFMEDIA: 1159 case SIOCGIFMEDIA: 1160 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1161 break; 1162 1163 case SIOCADDMULTI: 1164 case SIOCDELMULTI: 1165 if (ifp->if_flags & IFF_RUNNING) 1166 et_setmulti(sc); 1167 break; 1168 1169 case SIOCSIFMTU: 1170 if (sc->sc_flags & ET_FLAG_JUMBO) 1171 max_framelen = ET_JUMBO_FRAMELEN; 1172 else 1173 max_framelen = MCLBYTES - 1; 1174 1175 if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) { 1176 error = EOPNOTSUPP; 1177 break; 1178 } 1179 1180 ifp->if_mtu = ifr->ifr_mtu; 1181 if (ifp->if_flags & IFF_RUNNING) 1182 et_init(sc); 1183 break; 1184 1185 default: 1186 error = ether_ioctl(ifp, cmd, data); 1187 break; 1188 } 1189 return error; 1190 } 1191 1192 static void 1193 et_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1194 { 1195 struct et_softc *sc = ifp->if_softc; 1196 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1197 int trans, oactive; 1198 1199 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 1200 ASSERT_SERIALIZED(ifp->if_serializer); 1201 1202 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) { 1203 ifq_purge(&ifp->if_snd); 1204 return; 1205 } 1206 1207 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd)) 1208 return; 1209 1210 oactive = 0; 1211 trans = 0; 1212 for (;;) { 1213 struct mbuf *m; 1214 int error; 1215 1216 if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) { 1217 if (oactive) { 1218 ifq_set_oactive(&ifp->if_snd); 1219 break; 1220 } 1221 1222 et_txeof(sc, 0); 1223 oactive = 1; 1224 continue; 1225 } 1226 1227 m = ifq_dequeue(&ifp->if_snd); 1228 if (m == NULL) 1229 break; 1230 1231 error = et_encap(sc, &m); 1232 if (error) { 1233 IFNET_STAT_INC(ifp, oerrors, 1); 1234 KKASSERT(m == NULL); 1235 1236 if (error == EFBIG) { 1237 /* 1238 * Excessive fragmented packets 1239 */ 1240 if (oactive) { 1241 ifq_set_oactive(&ifp->if_snd); 1242 break; 1243 } 1244 et_txeof(sc, 0); 1245 oactive = 1; 1246 } 1247 continue; 1248 } else { 1249 oactive = 0; 1250 } 1251 trans = 1; 1252 1253 BPF_MTAP(ifp, m); 1254 } 1255 1256 if (trans) 1257 ifp->if_timer = 5; 1258 } 1259 1260 static void 1261 et_watchdog(struct ifnet *ifp) 1262 { 1263 ASSERT_SERIALIZED(ifp->if_serializer); 1264 1265 if_printf(ifp, "watchdog timed out\n"); 1266 1267 ifp->if_init(ifp->if_softc); 1268 if_devstart(ifp); 1269 } 1270 1271 static int 1272 et_stop_rxdma(struct et_softc *sc) 1273 { 1274 CSR_WRITE_4(sc, ET_RXDMA_CTRL, 1275 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE); 1276 1277 DELAY(5); 1278 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) { 1279 if_printf(&sc->arpcom.ac_if, "can't stop RX DMA engine\n"); 1280 return ETIMEDOUT; 1281 } 1282 return 0; 1283 } 1284 1285 static int 1286 et_stop_txdma(struct et_softc *sc) 1287 { 1288 CSR_WRITE_4(sc, ET_TXDMA_CTRL, 1289 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT); 1290 return 0; 1291 } 1292 1293 static void 1294 et_free_tx_ring(struct et_softc *sc) 1295 { 1296 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1297 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1298 int i; 1299 1300 for (i = 0; i < ET_TX_NDESC; ++i) { 1301 struct et_txbuf *tb = &tbd->tbd_buf[i]; 1302 1303 if (tb->tb_mbuf != NULL) { 1304 bus_dmamap_unload(sc->sc_txbuf_dtag, tb->tb_dmap); 1305 m_freem(tb->tb_mbuf); 1306 tb->tb_mbuf = NULL; 1307 } 1308 } 1309 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1310 } 1311 1312 static void 1313 et_free_rx_ring(struct et_softc *sc) 1314 { 1315 int n; 1316 1317 for (n = 0; n < ET_RX_NRING; ++n) { 1318 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1319 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n]; 1320 int i; 1321 1322 for (i = 0; i < ET_RX_NDESC; ++i) { 1323 struct et_rxbuf *rb = &rbd->rbd_buf[i]; 1324 1325 if (rb->rb_mbuf != NULL) { 1326 if (!rbd->rbd_jumbo) { 1327 bus_dmamap_unload(sc->sc_rxbuf_dtag, 1328 rb->rb_dmap); 1329 } 1330 m_freem(rb->rb_mbuf); 1331 rb->rb_mbuf = NULL; 1332 } 1333 } 1334 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE); 1335 } 1336 } 1337 1338 static void 1339 et_setmulti(struct et_softc *sc) 1340 { 1341 struct ifnet *ifp = &sc->arpcom.ac_if; 1342 uint32_t hash[4] = { 0, 0, 0, 0 }; 1343 uint32_t rxmac_ctrl, pktfilt; 1344 struct ifmultiaddr *ifma; 1345 int i, count; 1346 1347 pktfilt = CSR_READ_4(sc, ET_PKTFILT); 1348 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL); 1349 1350 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST); 1351 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1352 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT; 1353 goto back; 1354 } 1355 1356 count = 0; 1357 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1358 uint32_t *hp, h; 1359 1360 if (ifma->ifma_addr->sa_family != AF_LINK) 1361 continue; 1362 1363 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 1364 ifma->ifma_addr), ETHER_ADDR_LEN); 1365 h = (h & 0x3f800000) >> 23; 1366 1367 hp = &hash[0]; 1368 if (h >= 32 && h < 64) { 1369 h -= 32; 1370 hp = &hash[1]; 1371 } else if (h >= 64 && h < 96) { 1372 h -= 64; 1373 hp = &hash[2]; 1374 } else if (h >= 96) { 1375 h -= 96; 1376 hp = &hash[3]; 1377 } 1378 *hp |= (1 << h); 1379 1380 ++count; 1381 } 1382 1383 for (i = 0; i < 4; ++i) 1384 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]); 1385 1386 if (count > 0) 1387 pktfilt |= ET_PKTFILT_MCAST; 1388 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT; 1389 back: 1390 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt); 1391 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl); 1392 } 1393 1394 static int 1395 et_chip_init(struct et_softc *sc) 1396 { 1397 struct ifnet *ifp = &sc->arpcom.ac_if; 1398 uint32_t rxq_end; 1399 int error, frame_len, rxmem_size; 1400 1401 /* 1402 * Split 16Kbytes internal memory between TX and RX 1403 * according to frame length. 1404 */ 1405 frame_len = ET_FRAMELEN(ifp->if_mtu); 1406 if (frame_len < 2048) { 1407 rxmem_size = ET_MEM_RXSIZE_DEFAULT; 1408 } else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) { 1409 rxmem_size = ET_MEM_SIZE / 2; 1410 } else { 1411 rxmem_size = ET_MEM_SIZE - 1412 roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT); 1413 } 1414 rxq_end = ET_QUEUE_ADDR(rxmem_size); 1415 1416 CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START); 1417 CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end); 1418 CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1); 1419 CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END); 1420 1421 /* No loopback */ 1422 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1423 1424 /* Clear MSI configure */ 1425 CSR_WRITE_4(sc, ET_MSI_CFG, 0); 1426 1427 /* Disable timer */ 1428 CSR_WRITE_4(sc, ET_TIMER, 0); 1429 1430 /* Initialize MAC */ 1431 et_init_mac(sc); 1432 1433 /* Enable memory controllers */ 1434 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1435 1436 /* Initialize RX MAC */ 1437 et_init_rxmac(sc); 1438 1439 /* Initialize TX MAC */ 1440 et_init_txmac(sc); 1441 1442 /* Initialize RX DMA engine */ 1443 error = et_init_rxdma(sc); 1444 if (error) 1445 return error; 1446 1447 /* Initialize TX DMA engine */ 1448 error = et_init_txdma(sc); 1449 if (error) 1450 return error; 1451 1452 return 0; 1453 } 1454 1455 static int 1456 et_init_tx_ring(struct et_softc *sc) 1457 { 1458 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1459 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1460 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1461 1462 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1463 1464 tbd->tbd_start_index = 0; 1465 tbd->tbd_start_wrap = 0; 1466 tbd->tbd_used = 0; 1467 1468 bzero(txsd->txsd_status, sizeof(uint32_t)); 1469 1470 return 0; 1471 } 1472 1473 static int 1474 et_init_rx_ring(struct et_softc *sc) 1475 { 1476 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1477 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1478 int n; 1479 1480 for (n = 0; n < ET_RX_NRING; ++n) { 1481 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1482 int i, error; 1483 1484 for (i = 0; i < ET_RX_NDESC; ++i) { 1485 error = rbd->rbd_newbuf(rbd, i, 1); 1486 if (error) { 1487 if_printf(&sc->arpcom.ac_if, "%d ring %d buf, " 1488 "newbuf failed: %d\n", n, i, error); 1489 return error; 1490 } 1491 } 1492 } 1493 1494 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus)); 1495 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE); 1496 1497 return 0; 1498 } 1499 1500 static int 1501 et_init_rxdma(struct et_softc *sc) 1502 { 1503 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1504 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1505 struct et_rxdesc_ring *rx_ring; 1506 int error; 1507 1508 error = et_stop_rxdma(sc); 1509 if (error) { 1510 if_printf(&sc->arpcom.ac_if, "can't init RX DMA engine\n"); 1511 return error; 1512 } 1513 1514 /* 1515 * Install RX status 1516 */ 1517 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr)); 1518 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr)); 1519 1520 /* 1521 * Install RX stat ring 1522 */ 1523 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr)); 1524 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr)); 1525 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1); 1526 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0); 1527 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1); 1528 1529 /* Match ET_RXSTAT_POS */ 1530 rxst_ring->rsr_index = 0; 1531 rxst_ring->rsr_wrap = 0; 1532 1533 /* 1534 * Install the 2nd RX descriptor ring 1535 */ 1536 rx_ring = &sc->sc_rx_ring[1]; 1537 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1538 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1539 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1); 1540 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP); 1541 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1542 1543 /* Match ET_RX_RING1_POS */ 1544 rx_ring->rr_index = 0; 1545 rx_ring->rr_wrap = 1; 1546 1547 /* 1548 * Install the 1st RX descriptor ring 1549 */ 1550 rx_ring = &sc->sc_rx_ring[0]; 1551 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1552 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1553 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1); 1554 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP); 1555 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1556 1557 /* Match ET_RX_RING0_POS */ 1558 rx_ring->rr_index = 0; 1559 rx_ring->rr_wrap = 1; 1560 1561 /* 1562 * RX intr moderation 1563 */ 1564 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts); 1565 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay); 1566 1567 return 0; 1568 } 1569 1570 static int 1571 et_init_txdma(struct et_softc *sc) 1572 { 1573 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1574 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1575 int error; 1576 1577 error = et_stop_txdma(sc); 1578 if (error) { 1579 if_printf(&sc->arpcom.ac_if, "can't init TX DMA engine\n"); 1580 return error; 1581 } 1582 1583 /* 1584 * Install TX descriptor ring 1585 */ 1586 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr)); 1587 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr)); 1588 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1); 1589 1590 /* 1591 * Install TX status 1592 */ 1593 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr)); 1594 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr)); 1595 1596 CSR_WRITE_4(sc, ET_TX_READY_POS, 0); 1597 1598 /* Match ET_TX_READY_POS */ 1599 tx_ring->tr_ready_index = 0; 1600 tx_ring->tr_ready_wrap = 0; 1601 1602 return 0; 1603 } 1604 1605 static void 1606 et_init_mac(struct et_softc *sc) 1607 { 1608 struct ifnet *ifp = &sc->arpcom.ac_if; 1609 const uint8_t *eaddr = IF_LLADDR(ifp); 1610 uint32_t val; 1611 1612 /* Reset MAC */ 1613 CSR_WRITE_4(sc, ET_MAC_CFG1, 1614 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1615 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1616 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1617 1618 /* 1619 * Setup inter packet gap 1620 */ 1621 val = __SHIFTIN(56, ET_IPG_NONB2B_1) | 1622 __SHIFTIN(88, ET_IPG_NONB2B_2) | 1623 __SHIFTIN(80, ET_IPG_MINIFG) | 1624 __SHIFTIN(96, ET_IPG_B2B); 1625 CSR_WRITE_4(sc, ET_IPG, val); 1626 1627 /* 1628 * Setup half duplex mode 1629 */ 1630 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 1631 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 1632 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 1633 ET_MAC_HDX_EXC_DEFER; 1634 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1635 1636 /* Clear MAC control */ 1637 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1638 1639 /* Reset MII */ 1640 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1641 1642 /* 1643 * Set MAC address 1644 */ 1645 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24); 1646 CSR_WRITE_4(sc, ET_MAC_ADDR1, val); 1647 val = (eaddr[0] << 16) | (eaddr[1] << 24); 1648 CSR_WRITE_4(sc, ET_MAC_ADDR2, val); 1649 1650 /* Set max frame length */ 1651 CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu)); 1652 1653 /* Bring MAC out of reset state */ 1654 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1655 } 1656 1657 static void 1658 et_init_rxmac(struct et_softc *sc) 1659 { 1660 struct ifnet *ifp = &sc->arpcom.ac_if; 1661 const uint8_t *eaddr = IF_LLADDR(ifp); 1662 uint32_t val; 1663 int i; 1664 1665 /* Disable RX MAC and WOL */ 1666 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE); 1667 1668 /* 1669 * Clear all WOL related registers 1670 */ 1671 for (i = 0; i < 3; ++i) 1672 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0); 1673 for (i = 0; i < 20; ++i) 1674 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0); 1675 1676 /* 1677 * Set WOL source address. XXX is this necessary? 1678 */ 1679 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5]; 1680 CSR_WRITE_4(sc, ET_WOL_SA_LO, val); 1681 val = (eaddr[0] << 8) | eaddr[1]; 1682 CSR_WRITE_4(sc, ET_WOL_SA_HI, val); 1683 1684 /* Clear packet filters */ 1685 CSR_WRITE_4(sc, ET_PKTFILT, 0); 1686 1687 /* No ucast filtering */ 1688 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0); 1689 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0); 1690 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0); 1691 1692 if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) { 1693 /* 1694 * In order to transmit jumbo packets greater than 1695 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between 1696 * RX MAC and RX DMA needs to be reduced in size to 1697 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen). In 1698 * order to implement this, we must use "cut through" 1699 * mode in the RX MAC, which chops packets down into 1700 * segments. In this case we selected 256 bytes, 1701 * since this is the size of the PCI-Express TLP's 1702 * that the ET1310 uses. 1703 */ 1704 val = __SHIFTIN(ET_RXMAC_SEGSZ(256), ET_RXMAC_MC_SEGSZ_MAX) | 1705 ET_RXMAC_MC_SEGSZ_ENABLE; 1706 } else { 1707 val = 0; 1708 } 1709 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val); 1710 1711 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0); 1712 1713 /* Initialize RX MAC management register */ 1714 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0); 1715 1716 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0); 1717 1718 CSR_WRITE_4(sc, ET_RXMAC_MGT, 1719 ET_RXMAC_MGT_PASS_ECRC | 1720 ET_RXMAC_MGT_PASS_ELEN | 1721 ET_RXMAC_MGT_PASS_ETRUNC | 1722 ET_RXMAC_MGT_CHECK_PKT); 1723 1724 /* 1725 * Configure runt filtering (may not work on certain chip generation) 1726 */ 1727 val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG; 1728 CSR_WRITE_4(sc, ET_PKTFILT, val); 1729 1730 /* Enable RX MAC but leave WOL disabled */ 1731 CSR_WRITE_4(sc, ET_RXMAC_CTRL, 1732 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE); 1733 1734 /* 1735 * Setup multicast hash and allmulti/promisc mode 1736 */ 1737 et_setmulti(sc); 1738 } 1739 1740 static void 1741 et_init_txmac(struct et_softc *sc) 1742 { 1743 /* Disable TX MAC and FC(?) */ 1744 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE); 1745 1746 /* No flow control yet */ 1747 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0); 1748 1749 /* Enable TX MAC but leave FC(?) diabled */ 1750 CSR_WRITE_4(sc, ET_TXMAC_CTRL, 1751 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE); 1752 } 1753 1754 static int 1755 et_start_rxdma(struct et_softc *sc) 1756 { 1757 uint32_t val = 0; 1758 1759 val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize, 1760 ET_RXDMA_CTRL_RING0_SIZE) | 1761 ET_RXDMA_CTRL_RING0_ENABLE; 1762 val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize, 1763 ET_RXDMA_CTRL_RING1_SIZE) | 1764 ET_RXDMA_CTRL_RING1_ENABLE; 1765 1766 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val); 1767 1768 DELAY(5); 1769 1770 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) { 1771 if_printf(&sc->arpcom.ac_if, "can't start RX DMA engine\n"); 1772 return ETIMEDOUT; 1773 } 1774 return 0; 1775 } 1776 1777 static int 1778 et_start_txdma(struct et_softc *sc) 1779 { 1780 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT); 1781 return 0; 1782 } 1783 1784 static int 1785 et_enable_txrx(struct et_softc *sc, int media_upd) 1786 { 1787 struct ifnet *ifp = &sc->arpcom.ac_if; 1788 uint32_t val; 1789 int i, error; 1790 1791 val = CSR_READ_4(sc, ET_MAC_CFG1); 1792 val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN; 1793 val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW | 1794 ET_MAC_CFG1_LOOPBACK); 1795 CSR_WRITE_4(sc, ET_MAC_CFG1, val); 1796 1797 if (media_upd) 1798 et_ifmedia_upd(ifp); 1799 else 1800 et_setmedia(sc); 1801 1802 #define NRETRY 100 1803 1804 for (i = 0; i < NRETRY; ++i) { 1805 val = CSR_READ_4(sc, ET_MAC_CFG1); 1806 if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) == 1807 (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) 1808 break; 1809 1810 DELAY(10); 1811 } 1812 if (i == NRETRY) { 1813 if_printf(ifp, "can't enable RX/TX\n"); 1814 return 0; 1815 } 1816 sc->sc_flags |= ET_FLAG_TXRX_ENABLED; 1817 1818 #undef NRETRY 1819 1820 /* 1821 * Start TX/RX DMA engine 1822 */ 1823 error = et_start_rxdma(sc); 1824 if (error) 1825 return error; 1826 1827 error = et_start_txdma(sc); 1828 if (error) 1829 return error; 1830 1831 return 0; 1832 } 1833 1834 static void 1835 et_rxeof(struct et_softc *sc) 1836 { 1837 struct ifnet *ifp = &sc->arpcom.ac_if; 1838 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1839 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1840 uint32_t rxs_stat_ring; 1841 int rxst_wrap, rxst_index; 1842 1843 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) 1844 return; 1845 1846 rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring; 1847 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0; 1848 rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX); 1849 1850 while (rxst_index != rxst_ring->rsr_index || 1851 rxst_wrap != rxst_ring->rsr_wrap) { 1852 struct et_rxbuf_data *rbd; 1853 struct et_rxdesc_ring *rx_ring; 1854 struct et_rxstat *st; 1855 struct mbuf *m; 1856 int buflen, buf_idx, ring_idx; 1857 uint32_t rxstat_pos, rxring_pos; 1858 1859 KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT); 1860 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index]; 1861 1862 buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN); 1863 buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX); 1864 ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX); 1865 1866 if (++rxst_ring->rsr_index == ET_RX_NSTAT) { 1867 rxst_ring->rsr_index = 0; 1868 rxst_ring->rsr_wrap ^= 1; 1869 } 1870 rxstat_pos = __SHIFTIN(rxst_ring->rsr_index, 1871 ET_RXSTAT_POS_INDEX); 1872 if (rxst_ring->rsr_wrap) 1873 rxstat_pos |= ET_RXSTAT_POS_WRAP; 1874 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos); 1875 1876 if (ring_idx >= ET_RX_NRING) { 1877 IFNET_STAT_INC(ifp, ierrors, 1); 1878 if_printf(ifp, "invalid ring index %d\n", ring_idx); 1879 continue; 1880 } 1881 if (buf_idx >= ET_RX_NDESC) { 1882 IFNET_STAT_INC(ifp, ierrors, 1); 1883 if_printf(ifp, "invalid buf index %d\n", buf_idx); 1884 continue; 1885 } 1886 1887 rbd = &sc->sc_rx_data[ring_idx]; 1888 m = rbd->rbd_buf[buf_idx].rb_mbuf; 1889 1890 if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) { 1891 if (buflen < ETHER_CRC_LEN) { 1892 m_freem(m); 1893 IFNET_STAT_INC(ifp, ierrors, 1); 1894 } else { 1895 m->m_pkthdr.len = m->m_len = buflen; 1896 m->m_pkthdr.rcvif = ifp; 1897 1898 m_adj(m, -ETHER_CRC_LEN); 1899 1900 IFNET_STAT_INC(ifp, ipackets, 1); 1901 ifp->if_input(ifp, m, NULL, -1); 1902 } 1903 } else { 1904 IFNET_STAT_INC(ifp, ierrors, 1); 1905 } 1906 m = NULL; /* Catch invalid reference */ 1907 1908 rx_ring = &sc->sc_rx_ring[ring_idx]; 1909 1910 if (buf_idx != rx_ring->rr_index) { 1911 if_printf(ifp, "WARNING!! ring %d, " 1912 "buf_idx %d, rr_idx %d\n", 1913 ring_idx, buf_idx, rx_ring->rr_index); 1914 } 1915 1916 KKASSERT(rx_ring->rr_index < ET_RX_NDESC); 1917 if (++rx_ring->rr_index == ET_RX_NDESC) { 1918 rx_ring->rr_index = 0; 1919 rx_ring->rr_wrap ^= 1; 1920 } 1921 rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX); 1922 if (rx_ring->rr_wrap) 1923 rxring_pos |= ET_RX_RING_POS_WRAP; 1924 CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos); 1925 } 1926 } 1927 1928 static int 1929 et_encap(struct et_softc *sc, struct mbuf **m0) 1930 { 1931 bus_dma_segment_t segs[ET_NSEG_MAX]; 1932 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1933 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1934 struct et_txdesc *td; 1935 bus_dmamap_t map; 1936 int error, maxsegs, nsegs, first_idx, last_idx, i; 1937 uint32_t tx_ready_pos, last_td_ctrl2; 1938 1939 maxsegs = ET_TX_NDESC - tbd->tbd_used; 1940 if (maxsegs > ET_NSEG_MAX) 1941 maxsegs = ET_NSEG_MAX; 1942 KASSERT(maxsegs >= ET_NSEG_SPARE, 1943 ("not enough spare TX desc (%d)", maxsegs)); 1944 1945 KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC); 1946 first_idx = tx_ring->tr_ready_index; 1947 map = tbd->tbd_buf[first_idx].tb_dmap; 1948 1949 error = bus_dmamap_load_mbuf_defrag(sc->sc_txbuf_dtag, map, m0, 1950 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 1951 if (error) 1952 goto back; 1953 bus_dmamap_sync(sc->sc_txbuf_dtag, map, BUS_DMASYNC_PREWRITE); 1954 1955 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG; 1956 sc->sc_tx += nsegs; 1957 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) { 1958 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs; 1959 last_td_ctrl2 |= ET_TDCTRL2_INTR; 1960 } 1961 1962 last_idx = -1; 1963 for (i = 0; i < nsegs; ++i) { 1964 int idx; 1965 1966 idx = (first_idx + i) % ET_TX_NDESC; 1967 td = &tx_ring->tr_desc[idx]; 1968 td->td_addr_hi = ET_ADDR_HI(segs[i].ds_addr); 1969 td->td_addr_lo = ET_ADDR_LO(segs[i].ds_addr); 1970 td->td_ctrl1 = __SHIFTIN(segs[i].ds_len, ET_TDCTRL1_LEN); 1971 1972 if (i == nsegs - 1) { /* Last frag */ 1973 td->td_ctrl2 = last_td_ctrl2; 1974 last_idx = idx; 1975 } 1976 1977 KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC); 1978 if (++tx_ring->tr_ready_index == ET_TX_NDESC) { 1979 tx_ring->tr_ready_index = 0; 1980 tx_ring->tr_ready_wrap ^= 1; 1981 } 1982 } 1983 td = &tx_ring->tr_desc[first_idx]; 1984 td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG; /* First frag */ 1985 1986 KKASSERT(last_idx >= 0); 1987 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap; 1988 tbd->tbd_buf[last_idx].tb_dmap = map; 1989 tbd->tbd_buf[last_idx].tb_mbuf = *m0; 1990 1991 tbd->tbd_used += nsegs; 1992 KKASSERT(tbd->tbd_used <= ET_TX_NDESC); 1993 1994 tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index, 1995 ET_TX_READY_POS_INDEX); 1996 if (tx_ring->tr_ready_wrap) 1997 tx_ready_pos |= ET_TX_READY_POS_WRAP; 1998 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos); 1999 2000 error = 0; 2001 back: 2002 if (error) { 2003 m_freem(*m0); 2004 *m0 = NULL; 2005 } 2006 return error; 2007 } 2008 2009 static void 2010 et_txeof(struct et_softc *sc, int start) 2011 { 2012 struct ifnet *ifp = &sc->arpcom.ac_if; 2013 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 2014 struct et_txbuf_data *tbd = &sc->sc_tx_data; 2015 uint32_t tx_done; 2016 int end, wrap; 2017 2018 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) 2019 return; 2020 2021 if (tbd->tbd_used == 0) 2022 return; 2023 2024 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS); 2025 end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX); 2026 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0; 2027 2028 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) { 2029 struct et_txbuf *tb; 2030 2031 KKASSERT(tbd->tbd_start_index < ET_TX_NDESC); 2032 tb = &tbd->tbd_buf[tbd->tbd_start_index]; 2033 2034 bzero(&tx_ring->tr_desc[tbd->tbd_start_index], 2035 sizeof(struct et_txdesc)); 2036 2037 if (tb->tb_mbuf != NULL) { 2038 bus_dmamap_unload(sc->sc_txbuf_dtag, tb->tb_dmap); 2039 m_freem(tb->tb_mbuf); 2040 tb->tb_mbuf = NULL; 2041 IFNET_STAT_INC(ifp, opackets, 1); 2042 } 2043 2044 if (++tbd->tbd_start_index == ET_TX_NDESC) { 2045 tbd->tbd_start_index = 0; 2046 tbd->tbd_start_wrap ^= 1; 2047 } 2048 2049 KKASSERT(tbd->tbd_used > 0); 2050 tbd->tbd_used--; 2051 } 2052 2053 if (tbd->tbd_used == 0) 2054 ifp->if_timer = 0; 2055 if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC) 2056 ifq_clr_oactive(&ifp->if_snd); 2057 2058 if (start) 2059 if_devstart(ifp); 2060 } 2061 2062 static void 2063 et_tick(void *xsc) 2064 { 2065 struct et_softc *sc = xsc; 2066 struct ifnet *ifp = &sc->arpcom.ac_if; 2067 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2068 2069 lwkt_serialize_enter(ifp->if_serializer); 2070 2071 mii_tick(mii); 2072 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 && 2073 (mii->mii_media_status & IFM_ACTIVE) && 2074 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2075 if_printf(ifp, "Link up, enable TX/RX\n"); 2076 if (et_enable_txrx(sc, 0) == 0) 2077 if_devstart(ifp); 2078 } 2079 callout_reset(&sc->sc_tick, hz, et_tick, sc); 2080 2081 lwkt_serialize_exit(ifp->if_serializer); 2082 } 2083 2084 static int 2085 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init) 2086 { 2087 return et_newbuf(rbd, buf_idx, init, MCLBYTES); 2088 } 2089 2090 static int 2091 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init) 2092 { 2093 return et_newbuf(rbd, buf_idx, init, MHLEN); 2094 } 2095 2096 static int 2097 et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0) 2098 { 2099 struct et_softc *sc = rbd->rbd_softc; 2100 struct et_rxbuf *rb; 2101 struct mbuf *m; 2102 bus_dma_segment_t seg; 2103 bus_dmamap_t dmap; 2104 int error, len, nseg; 2105 2106 KASSERT(!rbd->rbd_jumbo, ("calling %s with jumbo ring", __func__)); 2107 2108 KKASSERT(buf_idx < ET_RX_NDESC); 2109 rb = &rbd->rbd_buf[buf_idx]; 2110 2111 m = m_getl(len0, init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR, &len); 2112 if (m == NULL) { 2113 error = ENOBUFS; 2114 2115 if (init) { 2116 if_printf(&sc->arpcom.ac_if, 2117 "m_getl failed, size %d\n", len0); 2118 return error; 2119 } else { 2120 goto back; 2121 } 2122 } 2123 m->m_len = m->m_pkthdr.len = len; 2124 2125 /* 2126 * Try load RX mbuf into temporary DMA tag 2127 */ 2128 error = bus_dmamap_load_mbuf_segment(sc->sc_rxbuf_dtag, 2129 sc->sc_rxbuf_tmp_dmap, m, &seg, 1, &nseg, 2130 BUS_DMA_NOWAIT); 2131 if (error) { 2132 m_freem(m); 2133 if (init) { 2134 if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n"); 2135 return error; 2136 } else { 2137 goto back; 2138 } 2139 } 2140 2141 if (!init) { 2142 bus_dmamap_sync(sc->sc_rxbuf_dtag, rb->rb_dmap, 2143 BUS_DMASYNC_POSTREAD); 2144 bus_dmamap_unload(sc->sc_rxbuf_dtag, rb->rb_dmap); 2145 } 2146 rb->rb_mbuf = m; 2147 rb->rb_paddr = seg.ds_addr; 2148 2149 /* 2150 * Swap RX buf's DMA map with the loaded temporary one 2151 */ 2152 dmap = rb->rb_dmap; 2153 rb->rb_dmap = sc->sc_rxbuf_tmp_dmap; 2154 sc->sc_rxbuf_tmp_dmap = dmap; 2155 2156 error = 0; 2157 back: 2158 et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr); 2159 return error; 2160 } 2161 2162 static int 2163 et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS) 2164 { 2165 struct et_softc *sc = arg1; 2166 struct ifnet *ifp = &sc->arpcom.ac_if; 2167 int error = 0, v; 2168 2169 lwkt_serialize_enter(ifp->if_serializer); 2170 2171 v = sc->sc_rx_intr_npkts; 2172 error = sysctl_handle_int(oidp, &v, 0, req); 2173 if (error || req->newptr == NULL) 2174 goto back; 2175 if (v <= 0) { 2176 error = EINVAL; 2177 goto back; 2178 } 2179 2180 if (sc->sc_rx_intr_npkts != v) { 2181 if (ifp->if_flags & IFF_RUNNING) 2182 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v); 2183 sc->sc_rx_intr_npkts = v; 2184 } 2185 back: 2186 lwkt_serialize_exit(ifp->if_serializer); 2187 return error; 2188 } 2189 2190 static int 2191 et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS) 2192 { 2193 struct et_softc *sc = arg1; 2194 struct ifnet *ifp = &sc->arpcom.ac_if; 2195 int error = 0, v; 2196 2197 lwkt_serialize_enter(ifp->if_serializer); 2198 2199 v = sc->sc_rx_intr_delay; 2200 error = sysctl_handle_int(oidp, &v, 0, req); 2201 if (error || req->newptr == NULL) 2202 goto back; 2203 if (v <= 0) { 2204 error = EINVAL; 2205 goto back; 2206 } 2207 2208 if (sc->sc_rx_intr_delay != v) { 2209 if (ifp->if_flags & IFF_RUNNING) 2210 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v); 2211 sc->sc_rx_intr_delay = v; 2212 } 2213 back: 2214 lwkt_serialize_exit(ifp->if_serializer); 2215 return error; 2216 } 2217 2218 static void 2219 et_setmedia(struct et_softc *sc) 2220 { 2221 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2222 uint32_t cfg2, ctrl; 2223 2224 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2); 2225 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII | 2226 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM); 2227 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC | 2228 __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN); 2229 2230 ctrl = CSR_READ_4(sc, ET_MAC_CTRL); 2231 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII); 2232 2233 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 2234 cfg2 |= ET_MAC_CFG2_MODE_GMII; 2235 } else { 2236 cfg2 |= ET_MAC_CFG2_MODE_MII; 2237 ctrl |= ET_MAC_CTRL_MODE_MII; 2238 } 2239 2240 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) 2241 cfg2 |= ET_MAC_CFG2_FDX; 2242 else 2243 ctrl |= ET_MAC_CTRL_GHDX; 2244 2245 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl); 2246 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2); 2247 } 2248 2249 static int 2250 et_jumbo_mem_alloc(device_t dev) 2251 { 2252 struct et_softc *sc = device_get_softc(dev); 2253 struct et_jumbo_data *jd = &sc->sc_jumbo_data; 2254 bus_addr_t paddr; 2255 uint8_t *buf; 2256 int i; 2257 2258 jd->jd_buf = bus_dmamem_coherent_any(sc->sc_dtag, 2259 ET_JUMBO_ALIGN, ET_JUMBO_MEM_SIZE, BUS_DMA_WAITOK, 2260 &jd->jd_dtag, &jd->jd_dmap, &paddr); 2261 if (jd->jd_buf == NULL) { 2262 device_printf(dev, "can't create jumbo DMA stuffs\n"); 2263 return ENOMEM; 2264 } 2265 2266 jd->jd_slots = kmalloc(sizeof(*jd->jd_slots) * ET_JSLOTS, M_DEVBUF, 2267 M_WAITOK | M_ZERO); 2268 lwkt_serialize_init(&jd->jd_serializer); 2269 SLIST_INIT(&jd->jd_free_slots); 2270 2271 buf = jd->jd_buf; 2272 for (i = 0; i < ET_JSLOTS; ++i) { 2273 struct et_jslot *jslot = &jd->jd_slots[i]; 2274 2275 jslot->jslot_data = jd; 2276 jslot->jslot_buf = buf; 2277 jslot->jslot_paddr = paddr; 2278 jslot->jslot_inuse = 0; 2279 jslot->jslot_index = i; 2280 SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot, jslot_link); 2281 2282 buf += ET_JLEN; 2283 paddr += ET_JLEN; 2284 } 2285 return 0; 2286 } 2287 2288 static void 2289 et_jumbo_mem_free(device_t dev) 2290 { 2291 struct et_softc *sc = device_get_softc(dev); 2292 struct et_jumbo_data *jd = &sc->sc_jumbo_data; 2293 2294 KKASSERT(sc->sc_flags & ET_FLAG_JUMBO); 2295 2296 kfree(jd->jd_slots, M_DEVBUF); 2297 et_dma_mem_destroy(jd->jd_dtag, jd->jd_buf, jd->jd_dmap); 2298 } 2299 2300 static struct et_jslot * 2301 et_jalloc(struct et_jumbo_data *jd) 2302 { 2303 struct et_jslot *jslot; 2304 2305 lwkt_serialize_enter(&jd->jd_serializer); 2306 2307 jslot = SLIST_FIRST(&jd->jd_free_slots); 2308 if (jslot) { 2309 SLIST_REMOVE_HEAD(&jd->jd_free_slots, jslot_link); 2310 jslot->jslot_inuse = 1; 2311 } 2312 2313 lwkt_serialize_exit(&jd->jd_serializer); 2314 return jslot; 2315 } 2316 2317 static void 2318 et_jfree(void *xjslot) 2319 { 2320 struct et_jslot *jslot = xjslot; 2321 struct et_jumbo_data *jd = jslot->jslot_data; 2322 2323 if (&jd->jd_slots[jslot->jslot_index] != jslot) { 2324 panic("%s wrong jslot!?", __func__); 2325 } else if (jslot->jslot_inuse == 0) { 2326 panic("%s jslot already freed", __func__); 2327 } else { 2328 lwkt_serialize_enter(&jd->jd_serializer); 2329 2330 atomic_subtract_int(&jslot->jslot_inuse, 1); 2331 if (jslot->jslot_inuse == 0) { 2332 SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot, 2333 jslot_link); 2334 } 2335 2336 lwkt_serialize_exit(&jd->jd_serializer); 2337 } 2338 } 2339 2340 static void 2341 et_jref(void *xjslot) 2342 { 2343 struct et_jslot *jslot = xjslot; 2344 struct et_jumbo_data *jd = jslot->jslot_data; 2345 2346 if (&jd->jd_slots[jslot->jslot_index] != jslot) 2347 panic("%s wrong jslot!?", __func__); 2348 else if (jslot->jslot_inuse == 0) 2349 panic("%s jslot already freed", __func__); 2350 else 2351 atomic_add_int(&jslot->jslot_inuse, 1); 2352 } 2353 2354 static int 2355 et_newbuf_jumbo(struct et_rxbuf_data *rbd, int buf_idx, int init) 2356 { 2357 struct et_softc *sc = rbd->rbd_softc; 2358 struct et_rxbuf *rb; 2359 struct mbuf *m; 2360 struct et_jslot *jslot; 2361 int error; 2362 2363 KASSERT(rbd->rbd_jumbo, ("calling %s with non-jumbo ring", __func__)); 2364 2365 KKASSERT(buf_idx < ET_RX_NDESC); 2366 rb = &rbd->rbd_buf[buf_idx]; 2367 2368 error = ENOBUFS; 2369 2370 MGETHDR(m, init ? M_WAITOK : M_NOWAIT, MT_DATA); 2371 if (m == NULL) { 2372 if (init) { 2373 if_printf(&sc->arpcom.ac_if, "MGETHDR failed\n"); 2374 return error; 2375 } else { 2376 goto back; 2377 } 2378 } 2379 2380 jslot = et_jalloc(&sc->sc_jumbo_data); 2381 if (jslot == NULL) { 2382 m_freem(m); 2383 2384 if (init) { 2385 if_printf(&sc->arpcom.ac_if, 2386 "jslot allocation failed\n"); 2387 return error; 2388 } else { 2389 goto back; 2390 } 2391 } 2392 2393 m->m_ext.ext_arg = jslot; 2394 m->m_ext.ext_buf = jslot->jslot_buf; 2395 m->m_ext.ext_free = et_jfree; 2396 m->m_ext.ext_ref = et_jref; 2397 m->m_ext.ext_size = ET_JUMBO_FRAMELEN; 2398 m->m_flags |= M_EXT; 2399 m->m_data = m->m_ext.ext_buf; 2400 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 2401 2402 rb->rb_mbuf = m; 2403 rb->rb_paddr = jslot->jslot_paddr; 2404 2405 error = 0; 2406 back: 2407 et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr); 2408 return error; 2409 } 2410 2411 static void 2412 et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr) 2413 { 2414 struct et_rxdesc_ring *rx_ring = rbd->rbd_ring; 2415 struct et_rxdesc *desc; 2416 2417 KKASSERT(buf_idx < ET_RX_NDESC); 2418 desc = &rx_ring->rr_desc[buf_idx]; 2419 2420 desc->rd_addr_hi = ET_ADDR_HI(paddr); 2421 desc->rd_addr_lo = ET_ADDR_LO(paddr); 2422 desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX); 2423 } 2424