/*-
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/vte/if_vtereg.h>
#include <dev/vte/if_vtevar.h>

/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(vte, pci, 1, 1, 1);
MODULE_DEPEND(vte, ether, 1, 1, 1);
MODULE_DEPEND(vte, miibus, 1, 1, 1);

/* Tunables. */
static int tx_deep_copy = 1;
TUNABLE_INT("hw.vte.tx_deep_copy", &tx_deep_copy);

/*
 * Devices supported by this driver.
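 * Entries are matched against the PCI vendor and device ID in
 * vte_find_ident(); the table is terminated by an all-zero entry.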
 */
static const struct vte_ident vte_ident_table[] = {
	{ VENDORID_RDC, DEVICEID_RDC_R6040, "RDC R6040 FastEthernet"},
	{ 0, 0, NULL}
};

static int	vte_attach(device_t);
static int	vte_detach(device_t);
static int	vte_dma_alloc(struct vte_softc *);
static void	vte_dma_free(struct vte_softc *);
static void	vte_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static struct vte_txdesc *
		vte_encap(struct vte_softc *, struct mbuf **);
static const struct vte_ident *
		vte_find_ident(device_t);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
		vte_fixup_rx(struct ifnet *, struct mbuf *);
#endif
static void	vte_get_macaddr(struct vte_softc *);
static void	vte_init(void *);
static void	vte_init_locked(struct vte_softc *);
static int	vte_init_rx_ring(struct vte_softc *);
static int	vte_init_tx_ring(struct vte_softc *);
static void	vte_intr(void *);
static int	vte_ioctl(struct ifnet *, u_long, caddr_t);
static void	vte_mac_config(struct vte_softc *);
static int	vte_miibus_readreg(device_t, int, int);
static void	vte_miibus_statchg(device_t);
static int	vte_miibus_writereg(device_t, int, int, int);
static int	vte_mediachange(struct ifnet *);
static int	vte_mediachange_locked(struct ifnet *);
static void	vte_mediastatus(struct ifnet *, struct ifmediareq *);
static int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *);
static int	vte_probe(device_t);
static void	vte_reset(struct vte_softc *);
static int	vte_resume(device_t);
static void	vte_rxeof(struct vte_softc *);
static void	vte_rxfilter(struct vte_softc *);
static int	vte_shutdown(device_t);
static void	vte_start(struct ifnet *);
static void	vte_start_locked(struct vte_softc *);
static void	vte_start_mac(struct vte_softc *);
static void	vte_stats_clear(struct vte_softc *);
static void	vte_stats_update(struct vte_softc *);
static void	vte_stop(struct vte_softc *);
static void	vte_stop_mac(struct vte_softc *);
static int	vte_suspend(device_t);
static void	vte_sysctl_node(struct vte_softc *);
static void	vte_tick(void *);
static void	vte_txeof(struct vte_softc *);
static void	vte_watchdog(struct vte_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t vte_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		vte_probe),
	DEVMETHOD(device_attach,	vte_attach),
	DEVMETHOD(device_detach,	vte_detach),
	DEVMETHOD(device_shutdown,	vte_shutdown),
	DEVMETHOD(device_suspend,	vte_suspend),
	DEVMETHOD(device_resume,	vte_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	vte_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vte_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vte_miibus_statchg),

	DEVMETHOD_END
};

static driver_t vte_driver = {
	"vte",
	vte_methods,
	sizeof(struct vte_softc)
};

static devclass_t vte_devclass;

DRIVER_MODULE(vte, pci, vte_driver, vte_devclass, 0, 0);
DRIVER_MODULE(miibus, vte, miibus_driver, miibus_devclass, 0, 0);

static int
vte_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vte_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->vte_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

static int
vte_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct vte_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->vte_dev, "phy write timeout : %d\n", reg);

	return (0);
}

static void
vte_miibus_statchg(device_t dev)
{
	struct vte_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint16_t val;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->vte_miibus);
	ifp = sc->vte_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
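		 * For example, TIMER = 18 at 100Mbps waits
		 * (63 + 18 * 64) / 25MHz = 48.6us, and TIMER = 1 at
		 * 10Mbps waits (63 + 1 * 64) / 2.5MHz = 50.8us, which
		 * are the values programmed below.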
		 */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}

static void
vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		VTE_UNLOCK(sc);
		return;
	}
	mii = device_get_softc(sc->vte_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	VTE_UNLOCK(sc);
}

static int
vte_mediachange(struct ifnet *ifp)
{
	struct vte_softc *sc;
	int error;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	error = vte_mediachange_locked(ifp);
	VTE_UNLOCK(sc);
	return (error);
}

static int
vte_mediachange_locked(struct ifnet *ifp)
{
	struct vte_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vte_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);

	return (error);
}

static const struct vte_ident *
vte_find_ident(device_t dev)
{
	const struct vte_ident *ident;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (ident = vte_ident_table; ident->name != NULL; ident++) {
		if (vendor == ident->vendorid && devid == ident->deviceid)
			return (ident);
	}

	return (NULL);
}

static int
vte_probe(device_t dev)
{
	const struct vte_ident *ident;

	ident = vte_find_ident(dev);
	if (ident != NULL) {
		device_set_desc(dev, ident->name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address; it
	 * is supposed to be set by the BIOS.
	 */
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

static int
vte_attach(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;
	uint16_t macid;
	int error, rid;

	error = 0;
	sc = device_get_softc(dev);
	sc->vte_dev = dev;

	mtx_init(&sc->vte_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vte_tick_ch, &sc->vte_mtx, 0);
	sc->vte_ident = vte_find_ident(dev);

	/* Map the device.
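	 * Prefer the memory BAR (PCIR_BAR(1)) and fall back to the
	 * I/O BAR (PCIR_BAR(0)) if memory space cannot be mapped.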
	 */
	pci_enable_busmaster(dev);
	sc->vte_res_id = PCIR_BAR(1);
	sc->vte_res_type = SYS_RES_MEMORY;
	sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
	    &sc->vte_res_id, RF_ACTIVE);
	if (sc->vte_res == NULL) {
		sc->vte_res_id = PCIR_BAR(0);
		sc->vte_res_type = SYS_RES_IOPORT;
		sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
		    &sc->vte_res_id, RF_ACTIVE);
		if (sc->vte_res == NULL) {
			device_printf(dev, "cannot map memory/ports.\n");
			mtx_destroy(&sc->vte_mtx);
			return (ENXIO);
		}
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    sc->vte_res_type == SYS_RES_MEMORY ? "memory" : "I/O");
		device_printf(dev, "MAC Identifier : 0x%04x\n",
		    CSR_READ_2(sc, VTE_MACID));
		macid = CSR_READ_2(sc, VTE_MACID_REV);
		device_printf(dev, "MAC Id. 0x%02x, Rev. 0x%02x\n",
		    (macid & VTE_MACID_MASK) >> VTE_MACID_SHIFT,
		    (macid & VTE_MACID_REV_MASK) >> VTE_MACID_REV_SHIFT);
	}

	rid = 0;
	sc->vte_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->vte_irq == NULL) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the ethernet controller. */
	vte_reset(sc);

	if ((error = vte_dma_alloc(sc)) != 0)
		goto fail;

	/* Create device sysctl node. */
	vte_sysctl_node(sc);

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = sc->vte_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ioctl;
	ifp->if_start = vte_start;
	ifp->if_init = vte_init;
	ifp->if_snd.ifq_drv_maxlen = VTE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Set up MII bus.
	 * The BIOS should have initialized VTE_MPSCCR to catch PHY
	 * status changes, so the driver may be able to extract the
	 * configured PHY address.  Since it is common to see a BIOS
	 * that fails to initialize the register (including the sample
	 * board I have), let mii(4) probe it.  This is more reliable
	 * than relying on the BIOS's initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	error = mii_attach(dev, &sc->vte_miibus, ifp, vte_mediachange,
	    vte_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->vte_eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	/* Tell the upper layer we support VLAN over-sized frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->vte_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, vte_intr, sc, &sc->vte_intrhand);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		vte_detach(dev);

	return (error);
}

static int
vte_detach(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	ifp = sc->vte_ifp;
	if (device_is_attached(dev)) {
		VTE_LOCK(sc);
		vte_stop(sc);
		VTE_UNLOCK(sc);
		callout_drain(&sc->vte_tick_ch);
		ether_ifdetach(ifp);
	}

	if (sc->vte_miibus != NULL) {
		device_delete_child(dev, sc->vte_miibus);
		sc->vte_miibus = NULL;
	}
	bus_generic_detach(dev);

	if (sc->vte_intrhand != NULL) {
		bus_teardown_intr(dev, sc->vte_irq, sc->vte_intrhand);
		sc->vte_intrhand = NULL;
	}
	if (sc->vte_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vte_irq);
		sc->vte_irq = NULL;
	}
	if (sc->vte_res != NULL) {
		bus_release_resource(dev, sc->vte_res_type, sc->vte_res_id,
		    sc->vte_res);
		sc->vte_res = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->vte_ifp = NULL;
	}
	vte_dma_free(sc);
	mtx_destroy(&sc->vte_mtx);

	return (0);
}

#define	VTE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
vte_sysctl_node(struct vte_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct vte_hw_stats *stats;
	int error;

	stats = &sc->vte_stats;
	ctx = device_get_sysctl_ctx(sc->vte_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vte_dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->vte_int_rx_mod, 0,
	    sysctl_hw_vte_int_mod, "I", "vte RX interrupt moderation");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->vte_int_tx_mod, 0,
	    sysctl_hw_vte_int_mod, "I", "vte TX interrupt moderation");
	/* Pull in device tunables. */
	sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
	error = resource_int_value(device_get_name(sc->vte_dev),
	    device_get_unit(sc->vte_dev), "int_rx_mod", &sc->vte_int_rx_mod);
	if (error == 0) {
		if (sc->vte_int_rx_mod < VTE_IM_BUNDLE_MIN ||
		    sc->vte_int_rx_mod > VTE_IM_BUNDLE_MAX) {
			device_printf(sc->vte_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    VTE_IM_RX_BUNDLE_DEFAULT);
			sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
		}
	}

	sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
	error = resource_int_value(device_get_name(sc->vte_dev),
	    device_get_unit(sc->vte_dev), "int_tx_mod", &sc->vte_int_tx_mod);
	if (error == 0) {
		if (sc->vte_int_tx_mod < VTE_IM_BUNDLE_MIN ||
		    sc->vte_int_tx_mod > VTE_IM_BUNDLE_MAX) {
			device_printf(sc->vte_dev, "int_tx_mod value out of "
			    "range; using default: %d\n",
			    VTE_IM_TX_BUNDLE_DEFAULT);
			sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
		}
	}

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "VTE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* RX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "RX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "runt",
	    &stats->rx_runts, "Too short frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "long_frames",
	    &stats->rx_long_frames,
	    "Frames that have longer length than maximum packet length");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "fifo_full",
	    &stats->rx_fifo_full, "FIFO full");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "desc_unavail",
	    &stats->rx_desc_unavail, "Descriptor unavailable frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause control frames");

	/* TX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "TX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underruns, "FIFO underruns");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
}

#undef VTE_SYSCTL_STAT_ADD32

struct vte_dmamap_arg {
	bus_addr_t	vte_busaddr;
};

static void
vte_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct vte_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct vte_dmamap_arg *)arg;
	ctx->vte_busaddr = segs[0].ds_addr;
}

static int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	struct vte_dmamap_arg ctx;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_parent_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for TX descriptor ring. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create TX ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX free descriptor ring. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create RX ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for TX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_tx_ring_tag,
	    (void **)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for TX ring.\n");
		goto fail;
	}
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring,
	    VTE_TX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for TX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_tx_ring_paddr = ctx.vte_busaddr;

	/* Allocate DMA'able memory and load the DMA map for RX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_rx_ring_tag,
	    (void **)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for RX ring.\n");
		goto fail;
	}
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring,
	    VTE_RX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for RX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_rx_ring_paddr = ctx.vte_busaddr;

	/* Create TX buffer parent tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_buffer_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for TX buffers. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create TX DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX buffers. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    VTE_RX_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create RX DMA tag.\n");
		goto fail;
	}
	/* Create DMA maps for TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create TX dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for RX buffers. */
	if ((error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
	    &sc->vte_cdata.vte_rx_sparemap)) != 0) {
		device_printf(sc->vte_dev,
		    "could not create spare RX dmamap.\n");
		goto fail;
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create RX dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	if (sc->vte_cdata.vte_tx_tag != NULL) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			txd = &sc->vte_cdata.vte_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_tag);
		sc->vte_cdata.vte_tx_tag = NULL;
	}
	/* RX buffers */
	if (sc->vte_cdata.vte_rx_tag != NULL) {
		for (i = 0; i < VTE_RX_RING_CNT; i++) {
			rxd = &sc->vte_cdata.vte_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vte_cdata.vte_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
			    sc->vte_cdata.vte_rx_sparemap);
			sc->vte_cdata.vte_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_tag);
		sc->vte_cdata.vte_rx_tag = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_tx_ring_paddr != 0)
			bus_dmamap_unload(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring_map);
		if (sc->vte_cdata.vte_tx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring,
			    sc->vte_cdata.vte_tx_ring_map);
		sc->vte_cdata.vte_tx_ring = NULL;
		sc->vte_cdata.vte_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_ring_tag);
		sc->vte_cdata.vte_tx_ring_tag = NULL;
	}
	/* RX ring. */
	if (sc->vte_cdata.vte_rx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_rx_ring_paddr != 0)
			bus_dmamap_unload(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring_map);
		if (sc->vte_cdata.vte_rx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring,
			    sc->vte_cdata.vte_rx_ring_map);
		sc->vte_cdata.vte_rx_ring = NULL;
		sc->vte_cdata.vte_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_ring_tag);
		sc->vte_cdata.vte_rx_ring_tag = NULL;
	}
	if (sc->vte_cdata.vte_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_buffer_tag);
		sc->vte_cdata.vte_buffer_tag = NULL;
	}
	if (sc->vte_cdata.vte_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_parent_tag);
		sc->vte_cdata.vte_parent_tag = NULL;
	}
}

static int
vte_shutdown(device_t dev)
{

	return (vte_suspend(dev));
}

static int
vte_suspend(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		vte_stop(sc);
	VTE_UNLOCK(sc);

	return (0);
}

static int
vte_resume(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	if ((ifp->if_flags & IFF_UP) != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vte_init_locked(sc);
	}
	VTE_UNLOCK(sc);

	return (0);
}

static struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	bus_dma_segment_t txsegs[1];
	int copy, error, nsegs, padlen;

	VTE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * Controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * Controller does not support multi-fragmented TX buffers.
	 * Controller spends most of its TX processing time in
	 * de-fragmenting TX buffers.  Either faster CPU or more
	 * advanced controller DMA engine is required to speed up
	 * TX path processing.
	 * To mitigate the de-fragmenting issue, perform deep copy
	 * from fragmented mbuf chains to a pre-allocated mbuf
	 * cluster with extra cost of kernel memory.  For frames
	 * that are composed of a single TX buffer, the deep copy is
	 * bypassed.
	 */
	if (tx_deep_copy != 0) {
		copy = 0;
		if (m->m_next != NULL)
			copy++;
		if (padlen > 0 && (M_WRITABLE(m) == 0 ||
		    padlen > M_TRAILINGSPACE(m)))
			copy++;
		if (copy != 0) {
			/* Avoid expensive m_defrag(9) and do deep copy. */
			n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
			m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
			n->m_pkthdr.len = m->m_pkthdr.len;
			n->m_len = m->m_pkthdr.len;
			m = n;
			txd->tx_flags |= VTE_TXMBUF;
		}

		if (padlen > 0) {
			/* Zero out the bytes in the pad area. */
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	} else {
		if (M_WRITABLE(m) == 0) {
			if (m->m_next != NULL || padlen > 0) {
				/* Get a writable copy. */
				m = m_dup(*m_head, M_NOWAIT);
				/* Release original mbuf chains. */
				m_freem(*m_head);
				if (m == NULL) {
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
		}

		if (m->m_next != NULL) {
			m = m_defrag(*m_head, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (NULL);
			}
			*m_head = m;
		}

		if (padlen > 0) {
			if (M_TRAILINGSPACE(m) < padlen) {
				m = m_defrag(*m_head, M_NOWAIT);
				if (m == NULL) {
					m_freem(*m_head);
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
			/* Zero out the bytes in the pad area. */
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	}

	error = bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_tx_tag,
	    txd->tx_dmamap, m, txsegs, &nsegs, 0);
	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen = htole16(VTE_TX_LEN(txsegs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txsegs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

static void
vte_start(struct ifnet *ifp)
{
	struct vte_softc *sc;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	vte_start_locked(sc);
	VTE_UNLOCK(sc);
}

static void
vte_start_locked(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq;

	ifp = sc->vte_ifp;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->vte_flags & VTE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
		/* Free consumed TX frame.
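		 * When the deep-copy path was taken the data already
		 * lives in the pre-allocated cluster, so the original
		 * chain is no longer needed.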
		 */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
		    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		sc->vte_watchdog_timer = VTE_TX_TIMEOUT;
	}
}

static void
vte_watchdog(struct vte_softc *sc)
{
	struct ifnet *ifp;

	VTE_LOCK_ASSERT(sc);

	if (sc->vte_watchdog_timer == 0 || --sc->vte_watchdog_timer)
		return;

	ifp = sc->vte_ifp;
	if_printf(sc->vte_ifp, "watchdog timeout -- resetting\n");
	ifp->if_oerrors++;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vte_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vte_start_locked(sc);
}

static int
vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		VTE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->vte_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				vte_rxfilter(sc);
			else
				vte_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vte_stop(sc);
		sc->vte_if_flags = ifp->if_flags;
		VTE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VTE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vte_rxfilter(sc);
		VTE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->vte_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frames, so just
		 * enable the MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

static void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

static void
vte_stats_update(struct vte_softc *sc)
{
	struct vte_hw_stats *stat;
	struct ifnet *ifp;
	uint16_t value;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;
	stat = &sc->vte_stats;

	CSR_READ_2(sc, VTE_MECISR);
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);

	/* Update ifp counters. */
	ifp->if_opackets = stat->tx_frames;
	ifp->if_collisions = stat->tx_late_colls;
	ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
	ifp->if_ipackets = stat->rx_frames;
	ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
	    stat->rx_long_frames + stat->rx_fifo_full;
}

static void
vte_intr(void *arg)
{
	struct vte_softc *sc;
	struct ifnet *ifp;
	uint16_t status;
	int n;

	sc = (struct vte_softc *)arg;
	VTE_LOCK(sc);

	ifp = sc->vte_ifp;
	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0) {
		/* Not ours. */
		VTE_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL)) != 0)
			vte_rxeof(sc);
		if ((status & MISR_TX_DONE) != 0)
			vte_txeof(sc);
		if ((status & MISR_EVENT_CNT_OFLOW) != 0)
			vte_stats_update(sc);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			vte_start_locked(sc);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		/* Re-enable interrupts. */
		CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	}
	VTE_UNLOCK(sc);
}

static void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = le16toh(txd->tx_desc->dtst);
		if ((status & VTE_DTST_TX_OWN) != 0)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		prog++;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm watchdog timer only when there are no pending
		 * frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			sc->vte_watchdog_timer = 0;
	}
}

static int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint32_t));

	if (bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_rx_tag,
	    sc->vte_cdata.vte_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;
	bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(segs[0].ds_addr);
	rxd->rx_desc->drlen = htole16(VTE_RX_LEN(segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

/*
 * This controller is not expected to show up on strict-alignment
 * architectures, but make it work there for completeness.
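 * vte_newbuf() offsets the RX buffer by sizeof(uint32_t), so the
 * 14 byte Ethernet header leaves the IP header 2 bytes off a 4 byte
 * boundary; copy the frame down by ETHER_ALIGN bytes so the upper
 * layer payload ends up aligned.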
 */
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
vte_fixup_rx(struct ifnet *ifp, struct mbuf *m)
{
	uint16_t *src, *dst;
	int i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;
	m->m_data -= ETHER_ALIGN;
	return (m);
}
#endif

static void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_rx_cons;
	ifp = sc->vte_ifp;
	for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = le16toh(rxd->rx_desc->drst);
		if ((status & VTE_DRST_RX_OWN) != 0)
			break;
		total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;
#ifndef __NO_STRICT_ALIGNMENT
		vte_fixup_rx(ifp, m);
#endif
		VTE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VTE_LOCK(sc);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync updated RX descriptors so that the controller
		 * sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
		    sc->vte_cdata.vte_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update residue counter.  The controller does not keep
		 * track of the number of available RX descriptors, so
		 * the driver has to update VTE_MRDCR to tell the
		 * controller how many free RX descriptors were added.
		 * This is a mechanism similar to the one used in VIA
		 * velocity controllers, and it indicates the controller
		 * just polls the OWN bit of the current RX descriptor
		 * pointer.  A couple of severe issues were seen on a
		 * sample board where the controller continuously emitted
		 * TX pause frames once the RX pause threshold was
		 * crossed.  Once triggered it never recovered from that
		 * state; at least I could not find a way to bring it
		 * back to a working state.  This issue effectively
		 * disconnected the system from the network.  Also, the
		 * controller used 00:00:00:00:00:00 as the source
		 * station address of the TX pause frames.  This is
		 * probably one of the reasons why the vendor recommends
		 * not enabling flow control on the R6040 controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}

static void
vte_tick(void *arg)
{
	struct vte_softc *sc;
	struct mii_data *mii;

	sc = (struct vte_softc *)arg;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mii_tick(mii);
	vte_stats_update(sc);
	vte_txeof(sc);
	vte_watchdog(sc);
	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
}

static void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr);
	/*
	 * Follow the vendor's recommended way to reset the MAC.  The
	 * vendor confirms that relying on MCR1_MAC_RESET of VTE_MCR1
	 * is not reliable, so manually reset the internal state
	 * machine.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);
}

static void
vte_init(void *xsc)
{
	struct vte_softc *sc;

	sc = (struct vte_softc *)xsc;
	VTE_LOCK(sc);
	vte_init_locked(sc);
	VTE_UNLOCK(sc);
}

static void
vte_init_locked(struct vte_softc *sc)
{
	struct ifnet *ifp;
	bus_addr_t paddr;
	uint8_t *eaddr;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	if (vte_init_rx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for RX buffers.\n");
		vte_stop(sc);
		return;
	}
	if (vte_init_tx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for TX buffers.\n");
		vte_stop(sc);
		return;
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs
	 * the first station address as its own ethernet address and
	 * configures the remaining three addresses as perfect
	 * multicast addresses.
	 */
	eaddr = IF_LLADDR(sc->vte_ifp);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize RX descriptor residue counter and set RX
	 * pause threshold to 20% of available RX descriptors.
	 * See comments on vte_rxeof() for details on flow control
	 * issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise received frames that are longer than
	 * the vte(4) MTU would be silently dropped by the controller.
	 * This would break path-MTU discovery as the sender would
	 * never get any response from the receiver.  The RX buffer
	 * size should be a multiple of 4.
	 * Note that jumbo frames are silently ignored by the
	 * controller and even the MAC counters do not detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure TX/RX MACs.  Actual resolved duplex and flow
	 * control configuration is done after detecting a valid
	 * link.  Note that we don't generate an early interrupt
	 * here either, since FreeBSD does not have the interrupt
	 * latency problems Windows has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to
	 * configure resolved duplex and flow control since only
	 * duplex configuration can be automatically reflected to
	 * MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_rxfilter(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange_locked(ifp);

	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static void
vte_stop(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	VTE_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->vte_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->vte_flags &= ~VTE_FLAG_LINK;
	callout_stop(&sc->vte_tick_ch);
	sc->vte_watchdog_timer = 0;
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

static void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB)) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			device_printf(sc->vte_dev,
			    "could not enable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			device_printf(sc->vte_dev,
			    "could not disable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy.
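	 * vte_encap() copies fragmented mbuf chains into these
	 * clusters (instead of calling m_defrag(9)) while the
	 * hw.vte.tx_deep_copy tunable is non-zero.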
	 */
	if (tx_deep_copy != 0) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_NOWAIT,
			    MT_DATA, M_PKTHDR);
			if (sc->vte_cdata.vte_txmbufs[i] == NULL)
				return (ENOBUFS);
			sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
			sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
		}
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	return (0);
}

static int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
vte_rxfilter(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint8_t *eaddr;
	uint32_t crc;
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4], mcr;
	int i, nperf;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	bzero(mchash, sizeof(mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		rxfilt_perf[i][0] = 0xFFFF;
		rxfilt_perf[i][1] = 0xFFFF;
		rxfilt_perf[i][2] = 0xFFFF;
	}

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_MULTICAST);
	mcr |= MCR0_BROADCAST_DIS;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		mcr &= ~MCR0_BROADCAST_DIS;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mcr |= MCR0_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			mcr |= MCR0_MULTICAST;
		mchash[0] = 0xFFFF;
		mchash[1] = 0xFFFF;
		mchash[2] = 0xFFFF;
		mchash[3] = 0xFFFF;
		goto chipit;
	}

	nperf = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &sc->vte_ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/*
		 * Program the first 3 multicast groups into
		 * the perfect filter.  For all others, use the
		 * hash table.
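		 * The hash is the big-endian CRC32 of the address; the
		 * top two bits select one of the four MAR registers and
		 * the next four bits select the bit within it.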
		 */
		if (nperf < VTE_RXFILT_PERFECT_CNT) {
			eaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			rxfilt_perf[nperf][0] = eaddr[1] << 8 | eaddr[0];
			rxfilt_perf[nperf][1] = eaddr[3] << 8 | eaddr[2];
			rxfilt_perf[nperf][2] = eaddr[5] << 8 | eaddr[4];
			nperf++;
			continue;
		}
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);
	}
	if_maddr_runlock(ifp);
	if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
	    mchash[3] != 0)
		mcr |= MCR0_MULTICAST;

chipit:
	/* Program multicast hash table. */
	CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
	/* Program perfect filter table. */
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    rxfilt_perf[i][2]);
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	CSR_READ_2(sc, VTE_MCR0);
}

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    VTE_IM_BUNDLE_MIN, VTE_IM_BUNDLE_MAX));
}