1 /* 2 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Sepherosa Ziehau <sepherosa@gmail.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 */ 34 35 #include <sys/param.h> 36 #include <sys/bitops.h> 37 #include <sys/endian.h> 38 #include <sys/kernel.h> 39 #include <sys/bus.h> 40 #include <sys/interrupt.h> 41 #include <sys/malloc.h> 42 #include <sys/proc.h> 43 #include <sys/rman.h> 44 #include <sys/serialize.h> 45 #include <sys/socket.h> 46 #include <sys/sockio.h> 47 #include <sys/sysctl.h> 48 49 #include <net/ethernet.h> 50 #include <net/if.h> 51 #include <net/bpf.h> 52 #include <net/if_arp.h> 53 #include <net/if_dl.h> 54 #include <net/if_media.h> 55 #include <net/ifq_var.h> 56 #include <net/vlan/if_vlan_var.h> 57 58 #include <dev/netif/mii_layer/miivar.h> 59 60 #include <bus/pci/pcireg.h> 61 #include <bus/pci/pcivar.h> 62 #include "pcidevs.h" 63 64 #include <dev/netif/et/if_etreg.h> 65 #include <dev/netif/et/if_etvar.h> 66 67 #include "miibus_if.h" 68 69 static int et_probe(device_t); 70 static int et_attach(device_t); 71 static int et_detach(device_t); 72 static int et_shutdown(device_t); 73 74 static int et_miibus_readreg(device_t, int, int); 75 static int et_miibus_writereg(device_t, int, int, int); 76 static void et_miibus_statchg(device_t); 77 78 static void et_init(void *); 79 static int et_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 80 static void et_start(struct ifnet *, struct ifaltq_subque *); 81 static void et_watchdog(struct ifnet *); 82 static int et_ifmedia_upd(struct ifnet *); 83 static void et_ifmedia_sts(struct ifnet *, struct ifmediareq *); 84 85 static int et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS); 86 static int et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS); 87 88 static void et_intr(void *); 89 static void et_enable_intrs(struct et_softc *, uint32_t); 90 static void et_disable_intrs(struct et_softc *); 91 static void et_rxeof(struct et_softc *); 92 static void et_txeof(struct et_softc *, int); 93 94 static int et_dma_alloc(device_t); 95 static void et_dma_free(device_t); 96 static void et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t); 97 static int 
et_dma_mbuf_create(device_t);
static void	et_dma_mbuf_destroy(device_t, int, const int[]);
static int	et_jumbo_mem_alloc(device_t);
static void	et_jumbo_mem_free(device_t);
static int	et_init_tx_ring(struct et_softc *);
static int	et_init_rx_ring(struct et_softc *);
static void	et_free_tx_ring(struct et_softc *);
static void	et_free_rx_ring(struct et_softc *);
static int	et_encap(struct et_softc *, struct mbuf **);
static struct et_jslot *
		et_jalloc(struct et_jumbo_data *);
static void	et_jfree(void *);
static void	et_jref(void *);
static int	et_newbuf(struct et_rxbuf_data *, int, int, int);
static int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
static int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);
static int	et_newbuf_jumbo(struct et_rxbuf_data *, int, int);

static void	et_stop(struct et_softc *);
static int	et_chip_init(struct et_softc *);
static void	et_chip_attach(struct et_softc *);
static void	et_init_mac(struct et_softc *);
static void	et_init_rxmac(struct et_softc *);
static void	et_init_txmac(struct et_softc *);
static int	et_init_rxdma(struct et_softc *);
static int	et_init_txdma(struct et_softc *);
static int	et_start_rxdma(struct et_softc *);
static int	et_start_txdma(struct et_softc *);
static int	et_stop_rxdma(struct et_softc *);
static int	et_stop_txdma(struct et_softc *);
static int	et_enable_txrx(struct et_softc *, int);
static void	et_reset(struct et_softc *);
static int	et_bus_config(device_t);
static void	et_get_eaddr(device_t, uint8_t[]);
static void	et_setmulti(struct et_softc *);
static void	et_tick(void *);
static void	et_setmedia(struct et_softc *);
static void	et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t);

/*
 * PCI vendor/device IDs this driver attaches to; NULL desc terminates.
 */
static const struct et_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} et_devices[] = {
	{ PCI_VENDOR_LUCENT,
PCI_PRODUCT_LUCENT_ET1310,
	  "Agere ET1310 Gigabit Ethernet" },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
	  "Agere ET1310 Fast Ethernet" },
	{ 0, 0, NULL }	/* terminator */
};

static device_method_t et_methods[] = {
	DEVMETHOD(device_probe,		et_probe),
	DEVMETHOD(device_attach,	et_attach),
	DEVMETHOD(device_detach,	et_detach),
	DEVMETHOD(device_shutdown,	et_shutdown),
#if 0
	DEVMETHOD(device_suspend,	et_suspend),
	DEVMETHOD(device_resume,	et_resume),
#endif

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
	DEVMETHOD(miibus_statchg,	et_miibus_statchg),

	DEVMETHOD_END
};

static driver_t et_driver = {
	"et",
	et_methods,
	sizeof(struct et_softc)
};

static devclass_t et_devclass;

DECLARE_DUMMY_MODULE(if_et);
MODULE_DEPEND(if_et, miibus, 1, 1, 1);
DRIVER_MODULE(if_et, pci, et_driver, et_devclass, NULL, NULL);
DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, NULL, NULL);

/*
 * Interrupt moderation / timer defaults; copied into the softc at
 * attach time, overridable through loader tunables below.
 */
static int	et_rx_intr_npkts = 129;
static int	et_rx_intr_delay = 25;	/* x4 usec */
static int	et_tx_intr_nsegs = 256;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

static int	et_msi_enable = 1;

/*
 * NOTE(review): et_timer is uint32_t but TUNABLE_INT expects an int
 * variable; this relies on the two sharing a representation -- confirm.
 */
TUNABLE_INT("hw.et.timer", &et_timer);
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
TUNABLE_INT("hw.et.msi.enable", &et_msi_enable);

/*
 * Per-RX-ring buffer configuration: hardware buffer size code, whether
 * the ring uses jumbo buffers, and the mbuf refill callback.
 */
struct et_bsize {
	int	bufsize;
	int	jumbo;
	et_newbuf_t newbuf;
};

/* Standard-MTU configuration: 128-byte header ring + 2K cluster ring. */
static const struct et_bsize et_bufsize_std[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128, .jumbo = 0,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_2048, .jumbo = 0,
	  .newbuf =
et_newbuf_cluster },
};

/* Jumbo-MTU configuration: ring 1 switches to 16K jumbo buffers. */
static const struct et_bsize et_bufsize_jumbo[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128, .jumbo = 0,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_16384, .jumbo = 1,
	  .newbuf = et_newbuf_jumbo },
};

/*
 * Match the PCI vendor/device pair against et_devices[]; set the device
 * description and accept on a hit, ENXIO otherwise.
 */
static int
et_probe(device_t dev)
{
	const struct et_dev *d;
	uint16_t did, vid;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = et_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}

/*
 * Attach: allocate BAR and IRQ resources, create the sysctl tree,
 * configure PCIe parameters, allocate DMA memory, probe the PHY and
 * hook up the network interface.  On any failure we jump to `fail`,
 * which funnels through et_detach() for full cleanup.
 */
static int
et_attach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;
	u_int irq_flags;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	callout_init(&sc->sc_tick);

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

#ifndef BURN_BRIDGES
	/*
	 * Wake the chip from a low power state; save/restore the BAR and
	 * interrupt line, which are lost on the D-state transition.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, ET_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, ET_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->sc_mem_rid = ET_PCIR_BAR;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res);
	sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res);

	/*
	 * Allocate IRQ (MSI if enabled and available, else legacy INTx)
	 */
	sc->sc_irq_type = pci_alloc_1intr(dev, et_msi_enable,
	    &sc->sc_irq_rid, &irq_flags);
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_irq_rid, irq_flags);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_intr_npkts", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, et_sysctl_rx_intr_npkts, "I",
	    "RX IM, # packets per RX interrupt");
	/*
	 * NOTE(review): description says "x10 usec" while the tunable
	 * default above is commented as "x4 usec" -- confirm the unit.
	 */
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_intr_delay", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, et_sysctl_rx_intr_delay, "I",
	    "RX IM, RX interrupt delay (x10 usec)");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "tx_intr_nsegs", CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
	    "TX IM, # segments per TX interrupt");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "timer", CTLFLAG_RW, &sc->sc_timer, 0,
	    "TX timer");

	error = et_bus_config(dev);
	if (error)
		goto fail;

	et_get_eaddr(dev, eaddr);

	/* Gate the clocks until the interface is brought up. */
	CSR_WRITE_4(sc, ET_PM,
	    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(dev);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_watchdog = et_watchdog;
	ifp->if_mtu = ETHERMTU;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifq_set_maxlen(&ifp->if_snd, ET_TX_NDESC);
	ifq_set_ready(&ifp->if_snd);

	et_chip_attach(sc);

	error = mii_phy_probe(dev, &sc->sc_miibus,
	    et_ifmedia_upd, et_ifmedia_sts);
	if (error) {
		device_printf(dev, "can't probe any PHY\n");
		goto fail;
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Run if_start on the CPU that services the interrupt. */
	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->sc_irq_res));

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, et_intr, sc,
	    &sc->sc_irq_handle, ifp->if_serializer);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "can't setup intr\n");
		goto fail;
	}

	return 0;
fail:
	et_detach(dev);
	return error;
}

/*
 * Detach: stop the chip under the serializer, tear down the interrupt,
 * then release all resources.  Safe to call on a partially attached
 * device (used as the failure path of et_attach()).
 */
static int
et_detach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		et_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
		    sc->sc_irq_res);
	}
	if (sc->sc_irq_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
		    sc->sc_mem_res);
	}

	et_dma_free(dev);

	return 0;
}

/* Shutdown hook: quiesce the chip so it stops DMA before reboot. */
static int
et_shutdown(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	et_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
	return 0;
}

/*
 * Read a PHY register through the MAC's MII interface.  Polls up to
 * NRETRY times for completion; returns 0 on timeout (indistinguishable
 * from a legitimate zero read -- inherent to the miibus contract).
 */
static int
et_miibus_readreg(device_t dev, int phy, int reg)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(&sc->arpcom.ac_if,
			  "read phy %d, reg %d timed out\n", phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}

/*
 * Write a PHY register through the MAC's MII interface.  On timeout a
 * dummy read is issued to force the controller to abandon the write.
 */
static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(&sc->arpcom.ac_if,
			  "write phy %d, reg %d timed out\n", phy, reg);
		/* Dummy read to kick the stuck write operation. */
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return 0;
}

/* miibus link state change callback: reprogram MAC for the new media. */
static void
et_miibus_statchg(device_t dev)
{
	et_setmedia(device_get_softc(dev));
}

/*
 * ifmedia "set" handler: reset all PHYs (if more than one instance)
 * and kick off media (re)negotiation.
 */
static int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct
mii_data *mii = device_get_softc(sc->sc_miibus);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

/* ifmedia "status" handler: report current media and link status. */
static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Stop the interface: cancel the tick callout, halt both DMA engines,
 * mask interrupts, free the rings and reset the chip.  Caller must hold
 * the interface serializer.
 */
static void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->sc_tick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;
	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}

/*
 * One-time PCI/PCIe configuration: validate the EEPROM, program ACK
 * latency and replay timer to match the negotiated max payload size,
 * set L0s/L1 exit latencies and the max read request size.
 */
static int
et_bus_config(device_t dev)
{
	uint32_t val, max_plsz;
	uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	val = pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		device_printf(dev, "EEPROM status error 0x%02x\n", val);
		return ENXIO;
	}

	/* TODO: LED */

	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CAPS, 4);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		/* Unexpected payload size: keep whatever is programmed. */
		ack_latency = pci_read_config(dev, ET_PCIR_ACK_LATENCY, 2);
		replay_timer = pci_read_config(dev, ET_PCIR_REPLAY_TIMER, 2);
		device_printf(dev, "ack latency %u, replay timer %u\n",
			      ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_write_config(dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
		pci_write_config(dev, ET_PCIR_REPLAY_TIMER, replay_timer, 2);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_write_config(dev, ET_PCIR_L0S_L1_LATENCY, val, 1);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CTRL, 2);
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_write_config(dev, ET_PCIR_DEVICE_CTRL, val, 2);

	return 0;
}

/*
 * Extract the station address from PCI config space: first 4 bytes from
 * MAC_ADDR0, remaining 2 from MAC_ADDR1, little-endian byte order.
 */
static void
et_get_eaddr(device_t dev, uint8_t eaddr[])
{
	uint32_t val;
	int i;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
	for (i = 0; i < 4; ++i)
		eaddr[i] = (val >> (8 * i)) & 0xff;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
	for (; i < ETHER_ADDR_LEN; ++i)
		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
}

/*
 * Hard-reset the MAC and the on-chip DMA/MMC units, then bring the MAC
 * back out of reset.  Register write order matters here.
 */
static void
et_reset(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
		    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
		    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
		    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

static void
et_disable_intrs(struct et_softc *sc)
{
	/* All ones in the mask register masks every interrupt source. */
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

/* Unmask exactly the interrupt sources in `intrs` (mask is inverted). */
static void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}

/*
 * Allocate all DMA memory: the top-level tag, TX descriptor ring, TX
 * status word, the RX descriptor rings, RX stat ring, RX status block,
 * per-mbuf maps, and (optionally) the jumbo buffer pool.  On failure
 * the caller is expected to unwind via et_dma_free().
 */
static int
et_dma_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create top level DMA tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &sc->sc_dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return error;
	}

	/*
	 * Create TX ring DMA stuffs
	 */
	tx_ring->tr_desc = bus_dmamem_coherent_any(sc->sc_dtag,
				ET_ALIGN, ET_TX_RING_SIZE,
				BUS_DMA_WAITOK | BUS_DMA_ZERO,
				&tx_ring->tr_dtag, &tx_ring->tr_dmap,
				&tx_ring->tr_paddr);
	if (tx_ring->tr_desc == NULL) {
		device_printf(dev, "can't create TX ring DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create TX status DMA stuffs
	 */
	txsd->txsd_status = bus_dmamem_coherent_any(sc->sc_dtag,
				ET_ALIGN, sizeof(uint32_t),
				BUS_DMA_WAITOK | BUS_DMA_ZERO,
				&txsd->txsd_dtag, &txsd->txsd_dmap,
				&txsd->txsd_paddr);
	if (txsd->txsd_status == NULL) {
		device_printf(dev, "can't create TX status DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		{ ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		rx_ring->rr_desc = bus_dmamem_coherent_any(sc->sc_dtag,
					ET_ALIGN, ET_RX_RING_SIZE,
					BUS_DMA_WAITOK | BUS_DMA_ZERO,
					&rx_ring->rr_dtag, &rx_ring->rr_dmap,
					&rx_ring->rr_paddr);
		if (rx_ring->rr_desc == NULL) {
			device_printf(dev, "can't create DMA stuffs for "
				      "the %d RX ring\n", i);
			return ENOMEM;
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	rxst_ring->rsr_stat = bus_dmamem_coherent_any(sc->sc_dtag,
				ET_ALIGN, ET_RXSTAT_RING_SIZE,
				BUS_DMA_WAITOK | BUS_DMA_ZERO,
				&rxst_ring->rsr_dtag, &rxst_ring->rsr_dmap,
				&rxst_ring->rsr_paddr);
	if (rxst_ring->rsr_stat == NULL) {
		device_printf(dev, "can't create RX stat ring DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create RX status DMA stuffs
	 */
	rxsd->rxsd_status = bus_dmamem_coherent_any(sc->sc_dtag,
				ET_ALIGN, sizeof(struct et_rxstatus),
				BUS_DMA_WAITOK | BUS_DMA_ZERO,
				&rxsd->rxsd_dtag, &rxsd->rxsd_dmap,
				&rxsd->rxsd_paddr);
	if (rxsd->rxsd_status == NULL) {
		device_printf(dev, "can't create RX status DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(dev);
	if (error)
		return error;

	/*
	 * Create jumbo buffer DMA stuffs
	 * NOTE: Allow it to fail
	 */
	if (et_jumbo_mem_alloc(dev) == 0)
		sc->sc_flags |= ET_FLAG_JUMBO;

	return 0;
}

/*
 * Free everything et_dma_alloc() created.  Each helper tolerates a
 * NULL/absent resource, so this is safe on partial allocations.
 */
static void
et_dma_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc,
			   tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status,
			   txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc,
				   rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat,
			   rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status,
			   rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done);

	/*
	 * Destroy jumbo buffer DMA stuffs
	 */
	if (sc->sc_flags & ET_FLAG_JUMBO)
		et_jumbo_mem_free(dev);

	/*
	 * Destroy top level DMA tag
	 */
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);
}

/*
 * Create the RX/TX mbuf DMA tags and one DMA map per descriptor, plus a
 * spare RX map used for lossless buffer swaps.  On partial failure the
 * already-created maps are destroyed before returning.
 */
static int
et_dma_mbuf_create(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create RX mbuf DMA tag
	 */
	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
				   &sc->sc_rxbuf_dtag);
	if (error) {
		device_printf(dev, "can't create RX mbuf DMA tag\n");
		return error;
	}

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_rxbuf_dtag, BUS_DMA_WAITOK,
				  &sc->sc_rxbuf_tmp_dmap);
	if (error) {
		device_printf(dev, "can't create spare mbuf DMA map\n");
		bus_dma_tag_destroy(sc->sc_rxbuf_dtag);
		sc->sc_rxbuf_dtag = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_rxbuf_dtag,
						  BUS_DMA_WAITOK,
						  &rbd->rbd_buf[j].rb_dmap);
			if (error) {
				device_printf(dev, "can't create %d RX mbuf "
					      "for %d RX ring\n", j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(dev, 0, rx_done);
				return error;
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create TX mbuf DMA tag
	 */
	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   ET_JUMBO_FRAMELEN, ET_NSEG_MAX, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
				   BUS_DMA_ONEBPAGE,
				   &sc->sc_txbuf_dtag);
	if (error) {
		device_printf(dev, "can't create TX mbuf DMA tag\n");
		return error;
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_txbuf_dtag,
					  BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
					  &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			device_printf(dev, "can't create %d TX mbuf "
				      "DMA map\n", i);
			et_dma_mbuf_destroy(dev, i, rx_done);
			return error;
		}
	}

	return 0;
}

/*
 * Destroy the first `tx_done` TX maps and, per ring, the first
 * rx_done[ring] RX maps, then the tags themselves.  All mbufs must
 * already have been freed (asserted below).
 */
static void
et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[])
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	/*
	 * Destroy DMA tag and maps for RX mbufs
	 */
	if (sc->sc_rxbuf_dtag) {
		for (i = 0; i < ET_RX_NRING; ++i) {
			struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
			int j;

			for (j = 0; j < rx_done[i]; ++j) {
				struct et_rxbuf *rb = &rbd->rbd_buf[j];

				KASSERT(rb->rb_mbuf == NULL,
					("RX mbuf in %d RX ring is "
					 "not freed yet", i));
				bus_dmamap_destroy(sc->sc_rxbuf_dtag,
						   rb->rb_dmap);
			}
		}
		bus_dmamap_destroy(sc->sc_rxbuf_dtag, sc->sc_rxbuf_tmp_dmap);
		bus_dma_tag_destroy(sc->sc_rxbuf_dtag);
		sc->sc_rxbuf_dtag = NULL;
	}

	/*
	 * Destroy DMA tag and maps for TX mbufs
	 */
	if (sc->sc_txbuf_dtag) {
		for (i = 0; i < tx_done; ++i) {
			struct et_txbuf *tb = &tbd->tbd_buf[i];

			KASSERT(tb->tb_mbuf == NULL,
				("TX mbuf is not freed yet"));
			bus_dmamap_destroy(sc->sc_txbuf_dtag, tb->tb_dmap);
		}
		bus_dma_tag_destroy(sc->sc_txbuf_dtag);
		sc->sc_txbuf_dtag = NULL;
	}
}

/* Unload, free and destroy one coherent DMA allocation; NULL tag = no-op. */
static void
et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	if (dtag != NULL) {
		bus_dmamap_unload(dtag, dmap);
		bus_dmamem_free(dtag, addr, dmap);
		bus_dma_tag_destroy(dtag);
	}
}

/*
 * Minimal chip bring-up done once at attach: disable loopback, reset
 * the MAC, program half-duplex parameters, reset the MII interface and
 * enable the memory controllers.
 */
static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

/*
 * Interrupt handler: runs under the interface serializer; masks chip
 * interrupts while servicing RX/TX completions, then re-enables them.
 */
static void
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t intrs;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	et_disable_intrs(sc);
intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc, 1);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);	/* rearm timer */
back:
	et_enable_intrs(sc, ET_INTRS);
}

/*
 * (Re)initialize the interface: choose the RX buffer configuration by
 * MTU (standard vs jumbo), set up the rings, program the chip, enable
 * TX/RX and interrupts, and start the tick callout.  Any failure tears
 * everything back down via et_stop().
 */
static void
et_init(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const struct et_bsize *arr;
	int error, i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	et_stop(sc);

	arr = ET_FRAMELEN(ifp->if_mtu) < MCLBYTES ?
	      et_bufsize_std : et_bufsize_jumbo;
	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
		sc->sc_rx_data[i].rbd_jumbo = arr[i].jumbo;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc, 1);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
back:
	if (error)
		et_stop(sc);
}

/*
 * ioctl handler.  MTU changes are bounded by whether the jumbo pool was
 * successfully allocated at attach; flag changes avoid a full reinit
 * when only ALLMULTI/PROMISC toggled.
 */
static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, max_framelen;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/* Only refilter if those bits changed. */
				if ((ifp->if_flags ^ sc->sc_if_flags) &
				    (IFF_ALLMULTI | IFF_PROMISC))
					et_setmulti(sc);
			} else {
				et_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			et_setmulti(sc);
		break;

	case SIOCSIFMTU:
		if (sc->sc_flags & ET_FLAG_JUMBO)
			max_framelen = ET_JUMBO_FRAMELEN;
		else
			max_framelen = MCLBYTES - 1;

		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
			error = EOPNOTSUPP;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		if (ifp->if_flags & IFF_RUNNING)
			et_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

/*
 * if_start: dequeue and encapsulate packets until the TX ring cannot
 * hold a worst-case packet.  When the ring is nearly full we reclaim
 * completed descriptors once (et_txeof) before declaring oactive.
 */
static void
et_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int trans, oactive;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	oactive = 0;
	trans = 0;
	for (;;) {
		struct mbuf *m;
		int error;

		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			if (oactive) {
				/* Reclaim already tried; really full. */
				ifq_set_oactive(&ifp->if_snd);
				break;
			}

			et_txeof(sc, 0);
			oactive = 1;
			continue;
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		error = et_encap(sc, &m);
		if (error) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			/* et_encap() always consumes the mbuf on error. */
			KKASSERT(m == NULL);

			if (error == EFBIG) {
				/*
				 * Excessive fragmented packets
				 */
				if (oactive) {
					ifq_set_oactive(&ifp->if_snd);
					break;
				}
				et_txeof(sc, 0);
				oactive = 1;
			}
			continue;
		} else {
			oactive = 0;
		}
		trans = 1;

		BPF_MTAP(ifp, m);
	}

	if (trans)
		ifp->if_timer = 5;	/* arm the TX watchdog */
}

/* TX watchdog: log the hang, reinitialize and restart transmission. */
static void
et_watchdog(struct ifnet *ifp)
{
	ASSERT_SERIALIZED(ifp->if_serializer);

	if_printf(ifp, "watchdog timed out\n");

	ifp->if_init(ifp->if_softc);
	if_devstart(ifp);
}

/* Halt the RX DMA engine; returns ETIMEDOUT if it refuses to stop. */
static int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
		    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		if_printf(&sc->arpcom.ac_if, "can't stop RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

/* Halt the TX DMA engine (no completion poll needed). */
static int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
		    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

/* Release all in-flight TX mbufs and clear the TX descriptor ring. */
static void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_txbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}
	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
}

/*
 * Release all RX mbufs and clear the RX descriptor rings.  Jumbo
 * buffers have no per-mbuf DMA map, so only cluster/header buffers
 * are unloaded before freeing.
 */
static void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				if (!rbd->rbd_jumbo) {
					bus_dmamap_unload(sc->sc_rxbuf_dtag,
					    rb->rb_dmap);
				}
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}
		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
	}
}

/*
 * Program the RX packet filter: in promiscuous/allmulti mode bypass
 * filtering entirely; otherwise build the 4 x 32-bit multicast hash
 * table from the interface's multicast list (top 7 bits of the
 * big-endian CRC32 of each address select the hash bit).
 */
static void
et_setmulti(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ifmultiaddr *ifma;
	int i, count;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		/* Accept everything; no hash programming needed */
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	count = 0;
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		uint32_t *hp, h;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		/* Bits 23..29 of the CRC select one of 128 hash bits */
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
	}

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}

/*
 * Full chip initialization: partition internal packet memory between
 * the TX and RX queues according to frame length, then bring up the
 * MAC, RX/TX MACs and both DMA engines in order.
 */
static int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t rxq_end;
	int error, frame_len, rxmem_size;

	/*
	 * Split 16Kbytes internal memory between TX and RX
	 * according to frame length.
	 */
	frame_len = ET_FRAMELEN(ifp->if_mtu);
	if (frame_len < 2048) {
		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
		rxmem_size = ET_MEM_SIZE / 2;
	} else {
		rxmem_size = ET_MEM_SIZE -
		    roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
	}
	rxq_end = ET_QUEUE_ADDR(rxmem_size);

	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return error;

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return error;

	return 0;
}

/*
 * Reset TX ring software state: clear descriptors, ring bookkeeping
 * and the DMA'd TX status word.
 */
static int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));

	return 0;
}

/*
 * Populate every descriptor of every RX ring with a fresh buffer via
 * the ring's rbd_newbuf callback, then clear the RX status block and
 * the RX stat ring.
 */
static int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			/* init=1: failures here are fatal for bring-up */
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				if_printf(&sc->arpcom.ac_if, "%d ring %d buf, "
					  "newbuf failed: %d\n", n, i, error);
				return error;
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);

	return 0;
}

/*
 * Program the RX DMA engine: install the RX status block, the RX stat
 * ring and both RX descriptor rings, keep software index/wrap state in
 * sync with the position registers, and set interrupt moderation.
 */
static int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		if_printf(&sc->arpcom.ac_if, "can't init RX DMA engine\n");
		return error;
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return 0;
}

/*
 * Program the TX DMA engine: install the TX descriptor ring and the
 * TX status block, and reset the ready position (hardware register
 * and software shadow together).
 */
static int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		if_printf(&sc->arpcom.ac_if, "can't init TX DMA engine\n");
		return error;
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return 0;
}

/*
 * Reset and configure the MAC core: inter-packet gap, half-duplex
 * parameters, station address and max frame length, finishing by
 * taking the MAC out of reset.
 */
static void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
	      __SHIFTIN(88, ET_IPG_NONB2B_2) |
	      __SHIFTIN(80, ET_IPG_MINIFG) |
	      __SHIFTIN(96, ET_IPG_B2B);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

/*
 * Configure the RX MAC: disable WOL, clear WOL/filter registers,
 * enable cut-through segmentation for jumbo frame lengths, set up
 * management/runt filtering and finally re-enable the RX MAC and
 * install the multicast filter.
 */
static void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
		/*
		 * In order to transmit jumbo packets greater than
		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
		 * RX MAC and RX DMA needs to be reduced in size to
		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
		 * order to implement this, we must use "cut through"
		 * mode in the RX MAC, which chops packets down into
		 * segments.  In this case we selected 256 bytes,
		 * since this is the size of the PCI-Express TLP's
		 * that the ET1310 uses.
		 */
		val = __SHIFTIN(ET_RXMAC_SEGSZ(256), ET_RXMAC_MC_SEGSZ_MAX) |
		      ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
		    ET_RXMAC_MGT_PASS_ECRC |
		    ET_RXMAC_MGT_PASS_ELEN |
		    ET_RXMAC_MGT_PASS_ETRUNC |
		    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generation)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
		    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

/*
 * Configure the TX MAC: disable flow control, then enable the TX MAC
 * with flow control still off.
 */
static void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
		    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

/*
 * Enable the RX DMA engine with both descriptor rings' buffer sizes;
 * returns ETIMEDOUT if the engine still reports HALTED afterwards.
 */
static int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
			 ET_RXDMA_CTRL_RING0_SIZE) |
	       ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
			 ET_RXDMA_CTRL_RING1_SIZE) |
	       ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		if_printf(&sc->arpcom.ac_if, "can't start RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

/* Enable the TX DMA engine (single end-of-packet mode). */
static int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

/*
 * Enable MAC TX/RX and wait for the enables to take effect, then
 * start both DMA engines.  media_upd selects whether to renegotiate
 * media (et_ifmedia_upd) or just reprogram MAC for the current media
 * (et_setmedia).
 */
static int
et_enable_txrx(struct et_softc *sc, int media_upd)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;
	int i, error;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
		 ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	if (media_upd)
		et_ifmedia_upd(ifp);
	else
		et_setmedia(sc);

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	if (i == NRETRY) {
		/*
		 * NOTE(review): returns 0 (success) without setting
		 * ET_FLAG_TXRX_ENABLED.  Presumably deliberate so that
		 * et_init() completes while the link is down and
		 * et_tick() retries the enable once the link comes up
		 * (it checks ET_FLAG_TXRX_ENABLED) — confirm.
		 */
		if_printf(ifp, "can't enable RX/TX\n");
		return 0;
	}
	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

#undef NRETRY

	/*
	 * Start TX/RX DMA engine
	 */
	error = et_start_rxdma(sc);
	if (error)
		return error;

	error = et_start_txdma(sc);
	if (error)
		return error;

	return 0;
}

/*
 * RX completion: walk the RX stat ring entries the hardware has
 * produced since last time, replenish each consumed buffer, hand good
 * frames to the stack, and advance both the stat-ring and descriptor-
 * ring position registers (keeping the index/wrap shadows in sync).
 */
static void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	/* Hardware's current stat ring position, from the status block */
	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	while (rxst_index != rxst_ring->rsr_index ||
	       rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		/* Consume this stat entry and tell the hardware */
		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
				       ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			if_printf(ifp, "invalid ring index %d\n", ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			if_printf(ifp, "invalid buf index %d\n", buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		m = rbd->rbd_buf[buf_idx].rb_mbuf;

		/* Only pass the mbuf up if its slot could be refilled */
		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				IFNET_STAT_INC(ifp, ierrors, 1);
			} else {
				m->m_pkthdr.len = m->m_len = buflen;
				m->m_pkthdr.rcvif = ifp;

				m_adj(m, -ETHER_CRC_LEN);

				IFNET_STAT_INC(ifp, ipackets, 1);
				ifp->if_input(ifp, m, NULL, -1);
			}
		} else {
			IFNET_STAT_INC(ifp, ierrors, 1);
		}
		m = NULL;	/* Catch invalid reference */

		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			if_printf(ifp, "WARNING!! ring %d, "
				  "buf_idx %d, rr_idx %d\n",
				  ring_idx, buf_idx, rx_ring->rr_index);
		}

		/* Advance the descriptor ring position as well */
		KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}

/*
 * Encapsulate *m0 into the TX ring.  On success ownership of the mbuf
 * passes to the ring; on failure the mbuf is freed and *m0 is NULLed.
 * Requests a TX interrupt roughly every sc_tx_intr_nsegs segments.
 */
static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	bus_dma_segment_t segs[ET_NSEG_MAX];
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, nsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
		("not enough spare TX desc (%d)", maxsegs));

	KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf_defrag(sc->sc_txbuf_dtag, map, m0,
					    segs, maxsegs, &nsegs,
					    BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(sc->sc_txbuf_dtag, map, BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += nsegs;
	/* Request a TX interrupt whenever another nsegs quota is crossed */
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	last_idx = -1;
	for (i = 0; i < nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(segs[i].ds_addr);
		td->td_ctrl1 = __SHIFTIN(segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	/*
	 * The loaded map must end up on the LAST descriptor's buffer,
	 * since that is where the mbuf is freed at completion time;
	 * swap it with the map parked at the first slot.
	 */
	KKASSERT(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = *m0;

	tbd->tbd_used += nsegs;
	KKASSERT(tbd->tbd_used <= ET_TX_NDESC);

	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
				 ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(*m0);
		*m0 = NULL;
	}
	return error;
}

/*
 * TX completion: reap descriptors up to the hardware's DONE position,
 * freeing mbufs attached to completed slots.  Clears the watchdog when
 * the ring drains, clears oactive when spare room exists, and (when
 * start != 0, i.e. called from the interrupt path) restarts transmit.
 */
static void
et_txeof(struct et_softc *sc, int start)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		      sizeof(struct et_txdesc));

		/* Only the last descriptor of a packet carries the mbuf */
		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_txbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
			IFNET_STAT_INC(ifp, opackets, 1);
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		KKASSERT(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0)
		ifp->if_timer = 0;
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifq_clr_oactive(&ifp->if_snd);

	if (start)
		if_devstart(ifp);
}

/*
 * Periodic (1 Hz) callout: drive the MII state machine and, if the
 * link has come up while TX/RX were still disabled, enable them and
 * kick the transmit path.
 */
static void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		if_printf(ifp, "Link up, enable TX/RX\n");
		if (et_enable_txrx(sc, 0) == 0)
			if_devstart(ifp);
	}
	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

/* rbd_newbuf callback: refill a slot with a standard cluster mbuf. */
static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

/* rbd_newbuf callback: refill a slot with a header-sized mbuf. */
static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}

/*
 * Allocate and DMA-load a replacement RX mbuf of at least len0 bytes
 * for slot buf_idx (non-jumbo rings only).  On failure outside of
 * initialization the old buffer is recycled: the descriptor is simply
 * rewritten with the existing rb_paddr.  The freshly loaded temporary
 * DMA map is swapped with the slot's map so the temp map is reusable.
 */
static int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t dmap;
	int error, len, nseg;

	KASSERT(!rbd->rbd_jumbo, ("calling %s with jumbo ring", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	m = m_getl(len0, init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR, &len);
	if (m == NULL) {
		error = ENOBUFS;

		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "m_getl failed, size %d\n", len0);
			return error;
		} else {
			/* Recycle the old buffer */
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try load RX mbuf into temporary DMA tag
	 */
	error = bus_dmamap_load_mbuf_segment(sc->sc_rxbuf_dtag,
					     sc->sc_rxbuf_tmp_dmap, m, &seg,
					     1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init) {
			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
			return error;
		} else {
			goto back;
		}
	}

	if (!init) {
		bus_dmamap_sync(sc->sc_rxbuf_dtag, rb->rb_dmap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rxbuf_dtag, rb->rb_dmap);
	}
	rb->rb_mbuf = m;
	rb->rb_paddr = seg.ds_addr;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_rxbuf_tmp_dmap;
	sc->sc_rxbuf_tmp_dmap = dmap;

	error = 0;
back:
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}

/*
 * Sysctl handler for the RX interrupt packet-count moderation knob;
 * validates the value and reprograms the register when running.
 */
static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_npkts;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_npkts != v) {
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
		sc->sc_rx_intr_npkts = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

/*
 * Sysctl handler for the RX interrupt delay moderation knob;
 * validates the value and reprograms the register when running.
 */
static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_delay;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_delay != v) {
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
		sc->sc_rx_intr_delay = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

/*
 * Program MAC_CFG2/MAC_CTRL to match the currently active MII media:
 * GMII vs MII interface mode and full vs half duplex.
 */
static void
et_setmedia(struct et_softc *sc)
{
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t cfg2, ctrl;

	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
		  ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
		__SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}

/*
 * Allocate one coherent DMA area for all jumbo buffers and carve it
 * into ET_JSLOTS fixed-size (ET_JLEN) slots on a free list, each
 * remembering its virtual/physical addresses.
 */
static int
et_jumbo_mem_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;
	bus_addr_t paddr;
	uint8_t *buf;
	int i;

	jd->jd_buf = bus_dmamem_coherent_any(sc->sc_dtag,
	    ET_JUMBO_ALIGN, ET_JUMBO_MEM_SIZE, BUS_DMA_WAITOK,
	    &jd->jd_dtag, &jd->jd_dmap, &paddr);
	if (jd->jd_buf == NULL) {
		device_printf(dev, "can't create jumbo DMA stuffs\n");
		return ENOMEM;
	}

	jd->jd_slots = kmalloc(sizeof(*jd->jd_slots) * ET_JSLOTS, M_DEVBUF,
			       M_WAITOK | M_ZERO);
	lwkt_serialize_init(&jd->jd_serializer);
	SLIST_INIT(&jd->jd_free_slots);

	buf = jd->jd_buf;
	for (i = 0; i < ET_JSLOTS; ++i) {
		struct et_jslot *jslot = &jd->jd_slots[i];

		jslot->jslot_data = jd;
		jslot->jslot_buf = buf;
		jslot->jslot_paddr = paddr;
		jslot->jslot_inuse = 0;
		jslot->jslot_index = i;
		SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot, jslot_link);

		buf += ET_JLEN;
		paddr += ET_JLEN;
	}
	return 0;
}

/* Free the jumbo slot array and the backing coherent DMA memory. */
static void
et_jumbo_mem_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;

	KKASSERT(sc->sc_flags & ET_FLAG_JUMBO);

	kfree(jd->jd_slots, M_DEVBUF);
	et_dma_mem_destroy(jd->jd_dtag, jd->jd_buf, jd->jd_dmap);
}

/*
 * Take a jumbo slot off the free list (or NULL if exhausted), marking
 * it in use.  Protected by the jumbo data serializer.
 */
static struct et_jslot *
et_jalloc(struct et_jumbo_data *jd)
{
	struct et_jslot *jslot;

	lwkt_serialize_enter(&jd->jd_serializer);

	jslot = SLIST_FIRST(&jd->jd_free_slots);
	if (jslot) {
		SLIST_REMOVE_HEAD(&jd->jd_free_slots, jslot_link);
		jslot->jslot_inuse = 1;
	}

	lwkt_serialize_exit(&jd->jd_serializer);
	return jslot;
}

/*
 * mbuf external-buffer free callback: drop one reference and return
 * the slot to the free list when the count reaches zero.  Sanity
 * panics guard against a stale or already-free slot.
 */
static void
et_jfree(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	if (&jd->jd_slots[jslot->jslot_index] != jslot) {
		panic("%s wrong jslot!?", __func__);
	} else if (jslot->jslot_inuse == 0) {
		panic("%s jslot already freed", __func__);
	} else {
		lwkt_serialize_enter(&jd->jd_serializer);

		atomic_subtract_int(&jslot->jslot_inuse, 1);
		if (jslot->jslot_inuse == 0) {
			SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot,
					  jslot_link);
		}

		lwkt_serialize_exit(&jd->jd_serializer);
	}
}

/*
 * mbuf external-buffer reference callback: add one reference to an
 * in-use jumbo slot.
 */
static void
et_jref(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	if (&jd->jd_slots[jslot->jslot_index] != jslot)
		panic("%s wrong jslot!?", __func__);
	else if (jslot->jslot_inuse == 0)
		panic("%s jslot already freed", __func__);
	else
		atomic_add_int(&jslot->jslot_inuse, 1);
}

/*
 * rbd_newbuf callback for the jumbo ring: attach a jumbo slot to a
 * fresh mbuf header as external storage.  On failure outside of
 * initialization the old buffer is recycled via the existing
 * rb_paddr, as in et_newbuf().
 */
static int
et_newbuf_jumbo(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	struct et_jslot *jslot;
	int error;

	KASSERT(rbd->rbd_jumbo, ("calling %s with non-jumbo ring", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	error = ENOBUFS;

	MGETHDR(m, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL) {
		if (init) {
			if_printf(&sc->arpcom.ac_if, "MGETHDR failed\n");
			return error;
		} else {
			goto back;
		}
	}

	jslot = et_jalloc(&sc->sc_jumbo_data);
	if (jslot == NULL) {
		m_freem(m);

		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "jslot allocation failed\n");
			return error;
		} else {
			goto back;
		}
	}

	/* Hook the jumbo slot up as the mbuf's external storage */
	m->m_ext.ext_arg = jslot;
	m->m_ext.ext_buf = jslot->jslot_buf;
	m->m_ext.ext_free = et_jfree;
	m->m_ext.ext_ref = et_jref;
	m->m_ext.ext_size = ET_JUMBO_FRAMELEN;
	m->m_flags |= M_EXT;
	m->m_data = m->m_ext.ext_buf;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	rb->rb_mbuf = m;
	rb->rb_paddr = jslot->jslot_paddr;

	error = 0;
back:
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}

/*
 * Write one RX descriptor: buffer physical address split into hi/lo
 * words plus the buffer index in the control word.
 */
static void
et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr)
{
	struct et_rxdesc_ring *rx_ring = rbd->rbd_ring;
	struct et_rxdesc *desc;

	KKASSERT(buf_idx < ET_RX_NDESC);
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(paddr);
	desc->rd_addr_lo = ET_ADDR_LO(paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);
}