1 /* $OpenBSD: if_vr.c,v 1.157 2020/12/12 11:48:53 jan Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 * 34 * $FreeBSD: src/sys/pci/if_vr.c,v 1.73 2003/08/22 07:13:22 imp Exp $ 35 */ 36 37 /* 38 * VIA Rhine fast ethernet PCI NIC driver 39 * 40 * Supports various network adapters based on the VIA Rhine 41 * and Rhine II PCI controllers, including the D-Link DFE530TX. 42 * Datasheets are available at ftp://ftp.vtbridge.org/Docs/LAN/. 43 * 44 * Written by Bill Paul <wpaul@ctr.columbia.edu> 45 * Electrical Engineering Department 46 * Columbia University, New York City 47 */ 48 49 /* 50 * The VIA Rhine controllers are similar in some respects to the 51 * the DEC tulip chips, except less complicated. The controller 52 * uses an MII bus and an external physical layer interface. The 53 * receiver has a one entry perfect filter and a 64-bit hash table 54 * multicast filter. Transmit and receive descriptors are similar 55 * to the tulip. 56 * 57 * Early Rhine has a serious flaw in its transmit DMA mechanism: 58 * transmit buffers must be longword aligned. Unfortunately, 59 * OpenBSD doesn't guarantee that mbufs will be filled in starting 60 * at longword boundaries, so we have to do a buffer copy before 61 * transmission. 62 */ 63 64 #include "bpfilter.h" 65 #include "vlan.h" 66 67 #include <sys/param.h> 68 #include <sys/systm.h> 69 #include <sys/sockio.h> 70 #include <sys/mbuf.h> 71 #include <sys/kernel.h> 72 #include <sys/timeout.h> 73 #include <sys/socket.h> 74 75 #include <net/if.h> 76 #include <sys/device.h> 77 #include <netinet/in.h> 78 #include <netinet/if_ether.h> 79 #include <net/if_media.h> 80 81 #if NBPFILTER > 0 82 #include <net/bpf.h> 83 #endif 84 85 #include <machine/bus.h> 86 87 #include <dev/mii/miivar.h> 88 89 #include <dev/pci/pcireg.h> 90 #include <dev/pci/pcivar.h> 91 #include <dev/pci/pcidevs.h> 92 93 #define VR_USEIOSPACE 94 95 #include <dev/pci/if_vrreg.h> 96 97 int vr_probe(struct device *, void *, void *); 98 int vr_quirks(struct pci_attach_args *); 99 void vr_attach(struct device *, struct device *, void *); 100 int vr_activate(struct device *, int); 101 102 struct cfattach vr_ca = { 103 sizeof(struct vr_softc), vr_probe, vr_attach, NULL, 104 vr_activate 105 }; 106 struct cfdriver vr_cd = { 107 NULL, "vr", DV_IFNET 108 }; 109 110 int vr_encap(struct vr_softc *, struct vr_chain **, struct mbuf *); 111 void vr_rxeof(struct vr_softc *); 112 void vr_rxeoc(struct vr_softc *); 113 void vr_txeof(struct vr_softc *); 114 void vr_tick(void *); 115 void vr_rxtick(void *); 116 int vr_intr(void *); 117 int vr_dmamem_alloc(struct vr_softc *, struct vr_dmamem *, 118 bus_size_t, u_int); 119 void vr_dmamem_free(struct vr_softc *, struct vr_dmamem *); 120 void vr_start(struct ifnet *); 121 int vr_ioctl(struct ifnet *, u_long, caddr_t); 122 void vr_chipinit(struct vr_softc *); 123 void vr_init(void *); 124 void vr_stop(struct vr_softc *); 125 void vr_watchdog(struct ifnet *); 126 int vr_ifmedia_upd(struct ifnet *); 127 void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *); 128 129 int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *); 130 int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *); 131 int vr_miibus_readreg(struct device *, int, int); 132 void vr_miibus_writereg(struct device *, int, int, int); 133 void vr_miibus_statchg(struct device *); 134 135 void vr_setcfg(struct vr_softc *, uint64_t); 136 void vr_iff(struct vr_softc *); 137 void vr_reset(struct vr_softc *); 138 int vr_list_rx_init(struct vr_softc *); 139 void vr_fill_rx_ring(struct vr_softc *); 140 int vr_list_tx_init(struct vr_softc *); 141 #ifndef SMALL_KERNEL 142 int vr_wol(struct ifnet *, int); 143 #endif 144 145 int vr_alloc_mbuf(struct vr_softc *, struct vr_chain_onefrag *); 146 147 /* 148 * Supported devices & quirks 149 */ 150 #define VR_Q_NEEDALIGN (1<<0) 151 #define VR_Q_CSUM (1<<1) 152 #define VR_Q_CAM (1<<2) 153 #define VR_Q_HWTAG (1<<3) 154 #define VR_Q_INTDISABLE (1<<4) 155 #define VR_Q_BABYJUMBO (1<<5) /* others may work too */ 156 157 struct vr_type { 158 pci_vendor_id_t vr_vid; 159 pci_product_id_t vr_pid; 160 int vr_quirks; 161 } vr_devices[] = { 162 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINE, 163 VR_Q_NEEDALIGN }, 164 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII, 165 VR_Q_NEEDALIGN }, 166 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII_2, 167 VR_Q_BABYJUMBO }, 168 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105, 169 VR_Q_BABYJUMBO }, 170 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105M, 171 VR_Q_CSUM | VR_Q_CAM | VR_Q_HWTAG | VR_Q_INTDISABLE | 172 VR_Q_BABYJUMBO }, 173 { PCI_VENDOR_DELTA, PCI_PRODUCT_DELTA_RHINEII, 174 VR_Q_NEEDALIGN }, 175 { PCI_VENDOR_ADDTRON, PCI_PRODUCT_ADDTRON_RHINEII, 176 VR_Q_NEEDALIGN } 177 }; 178 179 #define VR_SETBIT(sc, reg, x) \ 180 CSR_WRITE_1(sc, reg, \ 181 CSR_READ_1(sc, reg) | (x)) 182 183 #define VR_CLRBIT(sc, reg, x) \ 184 CSR_WRITE_1(sc, reg, \ 185 CSR_READ_1(sc, reg) & ~(x)) 186 187 #define VR_SETBIT16(sc, reg, x) \ 188 CSR_WRITE_2(sc, reg, \ 189 CSR_READ_2(sc, reg) | (x)) 190 191 #define VR_CLRBIT16(sc, reg, x) \ 192 CSR_WRITE_2(sc, reg, \ 193 CSR_READ_2(sc, reg) & ~(x)) 194 195 #define VR_SETBIT32(sc, reg, x) \ 196 CSR_WRITE_4(sc, reg, \ 197 CSR_READ_4(sc, reg) | (x)) 198 199 #define VR_CLRBIT32(sc, reg, x) \ 200 CSR_WRITE_4(sc, reg, \ 201 CSR_READ_4(sc, reg) & ~(x)) 202 203 #define SIO_SET(x) \ 204 CSR_WRITE_1(sc, VR_MIICMD, \ 205 CSR_READ_1(sc, VR_MIICMD) | (x)) 206 207 #define SIO_CLR(x) \ 208 CSR_WRITE_1(sc, VR_MIICMD, \ 209 CSR_READ_1(sc, VR_MIICMD) & ~(x)) 210 211 /* 212 * Read an PHY register through the MII. 213 */ 214 int 215 vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame) 216 { 217 int s, i; 218 219 s = splnet(); 220 221 /* Set the PHY-address */ 222 CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)| 223 frame->mii_phyaddr); 224 225 /* Set the register-address */ 226 CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr); 227 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB); 228 229 for (i = 0; i < 10000; i++) { 230 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0) 231 break; 232 DELAY(1); 233 } 234 235 frame->mii_data = CSR_READ_2(sc, VR_MIIDATA); 236 237 splx(s); 238 239 return(0); 240 } 241 242 /* 243 * Write to a PHY register through the MII. 244 */ 245 int 246 vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame) 247 { 248 int s, i; 249 250 s = splnet(); 251 252 /* Set the PHY-address */ 253 CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)| 254 frame->mii_phyaddr); 255 256 /* Set the register-address and data to write */ 257 CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr); 258 CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data); 259 260 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB); 261 262 for (i = 0; i < 10000; i++) { 263 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0) 264 break; 265 DELAY(1); 266 } 267 268 splx(s); 269 270 return(0); 271 } 272 273 int 274 vr_miibus_readreg(struct device *dev, int phy, int reg) 275 { 276 struct vr_softc *sc = (struct vr_softc *)dev; 277 struct vr_mii_frame frame; 278 279 switch (sc->vr_revid) { 280 case REV_ID_VT6102_APOLLO: 281 case REV_ID_VT6103: 282 if (phy != 1) 283 return 0; 284 default: 285 break; 286 } 287 288 bzero(&frame, sizeof(frame)); 289 290 frame.mii_phyaddr = phy; 291 frame.mii_regaddr = reg; 292 vr_mii_readreg(sc, &frame); 293 294 return(frame.mii_data); 295 } 296 297 void 298 vr_miibus_writereg(struct device *dev, int phy, int reg, int data) 299 { 300 struct vr_softc *sc = (struct vr_softc *)dev; 301 struct vr_mii_frame frame; 302 303 switch (sc->vr_revid) { 304 case REV_ID_VT6102_APOLLO: 305 case REV_ID_VT6103: 306 if (phy != 1) 307 return; 308 default: 309 break; 310 } 311 312 bzero(&frame, sizeof(frame)); 313 314 frame.mii_phyaddr = phy; 315 frame.mii_regaddr = reg; 316 frame.mii_data = data; 317 318 vr_mii_writereg(sc, &frame); 319 } 320 321 void 322 vr_miibus_statchg(struct device *dev) 323 { 324 struct vr_softc *sc = (struct vr_softc *)dev; 325 326 vr_setcfg(sc, sc->sc_mii.mii_media_active); 327 } 328 329 void 330 vr_iff(struct vr_softc *sc) 331 { 332 struct arpcom *ac = &sc->arpcom; 333 struct ifnet *ifp = &sc->arpcom.ac_if; 334 int h = 0; 335 u_int32_t hashes[2]; 336 struct ether_multi *enm; 337 struct ether_multistep step; 338 u_int8_t rxfilt; 339 340 rxfilt = CSR_READ_1(sc, VR_RXCFG); 341 rxfilt &= ~(VR_RXCFG_RX_BROAD | VR_RXCFG_RX_MULTI | 342 VR_RXCFG_RX_PROMISC); 343 ifp->if_flags &= ~IFF_ALLMULTI; 344 345 /* 346 * Always accept broadcast frames. 347 */ 348 rxfilt |= VR_RXCFG_RX_BROAD; 349 350 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) { 351 ifp->if_flags |= IFF_ALLMULTI; 352 rxfilt |= VR_RXCFG_RX_MULTI; 353 if (ifp->if_flags & IFF_PROMISC) 354 rxfilt |= VR_RXCFG_RX_PROMISC; 355 hashes[0] = hashes[1] = 0xFFFFFFFF; 356 } else { 357 /* Program new filter. */ 358 rxfilt |= VR_RXCFG_RX_MULTI; 359 bzero(hashes, sizeof(hashes)); 360 361 ETHER_FIRST_MULTI(step, ac, enm); 362 while (enm != NULL) { 363 h = ether_crc32_be(enm->enm_addrlo, 364 ETHER_ADDR_LEN) >> 26; 365 366 if (h < 32) 367 hashes[0] |= (1 << h); 368 else 369 hashes[1] |= (1 << (h - 32)); 370 371 ETHER_NEXT_MULTI(step, enm); 372 } 373 } 374 375 CSR_WRITE_4(sc, VR_MAR0, hashes[0]); 376 CSR_WRITE_4(sc, VR_MAR1, hashes[1]); 377 CSR_WRITE_1(sc, VR_RXCFG, rxfilt); 378 } 379 380 /* 381 * In order to fiddle with the 382 * 'full-duplex' and '100Mbps' bits in the netconfig register, we 383 * first have to put the transmit and/or receive logic in the idle state. 384 */ 385 void 386 vr_setcfg(struct vr_softc *sc, uint64_t media) 387 { 388 int i; 389 390 if (sc->sc_mii.mii_media_status & IFM_ACTIVE && 391 IFM_SUBTYPE(sc->sc_mii.mii_media_active) != IFM_NONE) { 392 sc->vr_link = 1; 393 394 if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) 395 VR_CLRBIT16(sc, VR_COMMAND, 396 (VR_CMD_TX_ON|VR_CMD_RX_ON)); 397 398 if ((media & IFM_GMASK) == IFM_FDX) 399 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); 400 else 401 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); 402 403 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON); 404 } else { 405 sc->vr_link = 0; 406 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON)); 407 for (i = VR_TIMEOUT; i > 0; i--) { 408 DELAY(10); 409 if (!(CSR_READ_2(sc, VR_COMMAND) & 410 (VR_CMD_TX_ON|VR_CMD_RX_ON))) 411 break; 412 } 413 if (i == 0) { 414 #ifdef VR_DEBUG 415 printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname); 416 #endif 417 sc->vr_flags |= VR_F_RESTART; 418 } 419 } 420 } 421 422 void 423 vr_reset(struct vr_softc *sc) 424 { 425 int i; 426 427 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET); 428 429 for (i = 0; i < VR_TIMEOUT; i++) { 430 DELAY(10); 431 if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET)) 432 break; 433 } 434 if (i == VR_TIMEOUT) { 435 if (sc->vr_revid < REV_ID_VT3065_A) 436 printf("%s: reset never completed!\n", 437 sc->sc_dev.dv_xname); 438 else { 439 #ifdef VR_DEBUG 440 /* Use newer force reset command */ 441 printf("%s: Using force reset command.\n", 442 sc->sc_dev.dv_xname); 443 #endif 444 VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST); 445 } 446 } 447 448 /* Wait a little while for the chip to get its brains in order. */ 449 DELAY(1000); 450 } 451 452 /* 453 * Probe for a VIA Rhine chip. 454 */ 455 int 456 vr_probe(struct device *parent, void *match, void *aux) 457 { 458 const struct vr_type *vr; 459 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 460 int i, nent = nitems(vr_devices); 461 462 for (i = 0, vr = vr_devices; i < nent; i++, vr++) 463 if (PCI_VENDOR(pa->pa_id) == vr->vr_vid && 464 PCI_PRODUCT(pa->pa_id) == vr->vr_pid) 465 return(1); 466 467 return(0); 468 } 469 470 int 471 vr_quirks(struct pci_attach_args *pa) 472 { 473 const struct vr_type *vr; 474 int i, nent = nitems(vr_devices); 475 476 for (i = 0, vr = vr_devices; i < nent; i++, vr++) 477 if (PCI_VENDOR(pa->pa_id) == vr->vr_vid && 478 PCI_PRODUCT(pa->pa_id) == vr->vr_pid) 479 return(vr->vr_quirks); 480 481 return(0); 482 } 483 484 int 485 vr_dmamem_alloc(struct vr_softc *sc, struct vr_dmamem *vrm, 486 bus_size_t size, u_int align) 487 { 488 vrm->vrm_size = size; 489 490 if (bus_dmamap_create(sc->sc_dmat, vrm->vrm_size, 1, 491 vrm->vrm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 492 &vrm->vrm_map) != 0) 493 return (1); 494 if (bus_dmamem_alloc(sc->sc_dmat, vrm->vrm_size, 495 align, 0, &vrm->vrm_seg, 1, &vrm->vrm_nsegs, 496 BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0) 497 goto destroy; 498 if (bus_dmamem_map(sc->sc_dmat, &vrm->vrm_seg, vrm->vrm_nsegs, 499 vrm->vrm_size, &vrm->vrm_kva, BUS_DMA_WAITOK) != 0) 500 goto free; 501 if (bus_dmamap_load(sc->sc_dmat, vrm->vrm_map, vrm->vrm_kva, 502 vrm->vrm_size, NULL, BUS_DMA_WAITOK) != 0) 503 goto unmap; 504 505 return (0); 506 unmap: 507 bus_dmamem_unmap(sc->sc_dmat, vrm->vrm_kva, vrm->vrm_size); 508 free: 509 bus_dmamem_free(sc->sc_dmat, &vrm->vrm_seg, 1); 510 destroy: 511 bus_dmamap_destroy(sc->sc_dmat, vrm->vrm_map); 512 return (1); 513 } 514 515 void 516 vr_dmamem_free(struct vr_softc *sc, struct vr_dmamem *vrm) 517 { 518 bus_dmamap_unload(sc->sc_dmat, vrm->vrm_map); 519 bus_dmamem_unmap(sc->sc_dmat, vrm->vrm_kva, vrm->vrm_size); 520 bus_dmamem_free(sc->sc_dmat, &vrm->vrm_seg, 1); 521 bus_dmamap_destroy(sc->sc_dmat, vrm->vrm_map); 522 } 523 524 /* 525 * Attach the interface. Allocate softc structures, do ifmedia 526 * setup and ethernet/BPF attach. 527 */ 528 void 529 vr_attach(struct device *parent, struct device *self, void *aux) 530 { 531 int i; 532 struct vr_softc *sc = (struct vr_softc *)self; 533 struct pci_attach_args *pa = aux; 534 pci_chipset_tag_t pc = pa->pa_pc; 535 pci_intr_handle_t ih; 536 const char *intrstr = NULL; 537 struct ifnet *ifp = &sc->arpcom.ac_if; 538 bus_size_t size; 539 540 pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0); 541 542 /* 543 * Map control/status registers. 544 */ 545 546 #ifdef VR_USEIOSPACE 547 if (pci_mapreg_map(pa, VR_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0, 548 &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) { 549 printf(": can't map i/o space\n"); 550 return; 551 } 552 #else 553 if (pci_mapreg_map(pa, VR_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0, 554 &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) { 555 printf(": can't map mem space\n"); 556 return; 557 } 558 #endif 559 560 /* Allocate interrupt */ 561 if (pci_intr_map(pa, &ih)) { 562 printf(": can't map interrupt\n"); 563 goto fail; 564 } 565 intrstr = pci_intr_string(pc, ih); 566 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, vr_intr, sc, 567 self->dv_xname); 568 if (sc->sc_ih == NULL) { 569 printf(": can't establish interrupt"); 570 if (intrstr != NULL) 571 printf(" at %s", intrstr); 572 printf("\n"); 573 goto fail; 574 } 575 printf(": %s", intrstr); 576 577 sc->vr_revid = PCI_REVISION(pa->pa_class); 578 sc->sc_pc = pa->pa_pc; 579 sc->sc_tag = pa->pa_tag; 580 581 vr_chipinit(sc); 582 583 /* 584 * Get station address. The way the Rhine chips work, 585 * you're not allowed to directly access the EEPROM once 586 * they've been programmed a special way. Consequently, 587 * we need to read the node address from the PAR0 and PAR1 588 * registers. 589 */ 590 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); 591 DELAY(1000); 592 for (i = 0; i < ETHER_ADDR_LEN; i++) 593 sc->arpcom.ac_enaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); 594 595 /* 596 * A Rhine chip was detected. Inform the world. 597 */ 598 printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr)); 599 600 sc->sc_dmat = pa->pa_dmat; 601 if (vr_dmamem_alloc(sc, &sc->sc_zeromap, 64, PAGE_SIZE) != 0) { 602 printf(": failed to allocate zero pad memory\n"); 603 return; 604 } 605 bzero(sc->sc_zeromap.vrm_kva, 64); 606 bus_dmamap_sync(sc->sc_dmat, sc->sc_zeromap.vrm_map, 0, 607 sc->sc_zeromap.vrm_map->dm_mapsize, BUS_DMASYNC_PREREAD); 608 if (vr_dmamem_alloc(sc, &sc->sc_listmap, sizeof(struct vr_list_data), 609 PAGE_SIZE) != 0) { 610 printf(": failed to allocate dma map\n"); 611 goto free_zero; 612 } 613 614 sc->vr_ldata = (struct vr_list_data *)sc->sc_listmap.vrm_kva; 615 sc->vr_quirks = vr_quirks(pa); 616 617 ifp = &sc->arpcom.ac_if; 618 ifp->if_softc = sc; 619 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 620 ifp->if_ioctl = vr_ioctl; 621 ifp->if_start = vr_start; 622 ifp->if_watchdog = vr_watchdog; 623 if (sc->vr_quirks & VR_Q_BABYJUMBO) 624 ifp->if_hardmtu = VR_RXLEN_BABYJUMBO - 625 ETHER_HDR_LEN - ETHER_CRC_LEN; 626 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 627 628 ifp->if_capabilities = IFCAP_VLAN_MTU; 629 630 if (sc->vr_quirks & VR_Q_CSUM) 631 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | 632 IFCAP_CSUM_UDPv4; 633 634 #if NVLAN > 0 635 /* if the hardware can do VLAN tagging, say so. */ 636 if (sc->vr_quirks & VR_Q_HWTAG) 637 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 638 #endif 639 640 #ifndef SMALL_KERNEL 641 if (sc->vr_revid >= REV_ID_VT3065_A) { 642 ifp->if_capabilities |= IFCAP_WOL; 643 ifp->if_wol = vr_wol; 644 vr_wol(ifp, 0); 645 } 646 #endif 647 648 /* 649 * Do MII setup. 650 */ 651 sc->sc_mii.mii_ifp = ifp; 652 sc->sc_mii.mii_readreg = vr_miibus_readreg; 653 sc->sc_mii.mii_writereg = vr_miibus_writereg; 654 sc->sc_mii.mii_statchg = vr_miibus_statchg; 655 ifmedia_init(&sc->sc_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts); 656 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 657 0); 658 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 659 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 660 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 661 } else 662 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 663 timeout_set(&sc->sc_to, vr_tick, sc); 664 timeout_set(&sc->sc_rxto, vr_rxtick, sc); 665 666 /* 667 * Call MI attach routines. 668 */ 669 if_attach(ifp); 670 ether_ifattach(ifp); 671 return; 672 673 free_zero: 674 bus_dmamap_sync(sc->sc_dmat, sc->sc_zeromap.vrm_map, 0, 675 sc->sc_zeromap.vrm_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 676 vr_dmamem_free(sc, &sc->sc_zeromap); 677 fail: 678 bus_space_unmap(sc->vr_btag, sc->vr_bhandle, size); 679 } 680 681 int 682 vr_activate(struct device *self, int act) 683 { 684 struct vr_softc *sc = (struct vr_softc *)self; 685 struct ifnet *ifp = &sc->arpcom.ac_if; 686 int rv = 0; 687 688 switch (act) { 689 case DVACT_SUSPEND: 690 if (ifp->if_flags & IFF_RUNNING) 691 vr_stop(sc); 692 rv = config_activate_children(self, act); 693 break; 694 case DVACT_RESUME: 695 if (ifp->if_flags & IFF_UP) 696 vr_init(sc); 697 break; 698 default: 699 rv = config_activate_children(self, act); 700 break; 701 } 702 return (rv); 703 } 704 705 /* 706 * Initialize the transmit descriptors. 707 */ 708 int 709 vr_list_tx_init(struct vr_softc *sc) 710 { 711 struct vr_chain_data *cd; 712 struct vr_list_data *ld; 713 int i; 714 715 cd = &sc->vr_cdata; 716 ld = sc->vr_ldata; 717 718 cd->vr_tx_cnt = cd->vr_tx_pkts = 0; 719 720 for (i = 0; i < VR_TX_LIST_CNT; i++) { 721 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i]; 722 cd->vr_tx_chain[i].vr_paddr = 723 sc->sc_listmap.vrm_map->dm_segs[0].ds_addr + 724 offsetof(struct vr_list_data, vr_tx_list[i]); 725 726 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, VR_MAXFRAGS, 727 MCLBYTES, 0, BUS_DMA_NOWAIT, &cd->vr_tx_chain[i].vr_map)) 728 return (ENOBUFS); 729 730 if (i == (VR_TX_LIST_CNT - 1)) 731 cd->vr_tx_chain[i].vr_nextdesc = 732 &cd->vr_tx_chain[0]; 733 else 734 cd->vr_tx_chain[i].vr_nextdesc = 735 &cd->vr_tx_chain[i + 1]; 736 } 737 738 cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0]; 739 740 return (0); 741 } 742 743 744 /* 745 * Initialize the RX descriptors and allocate mbufs for them. Note that 746 * we arrange the descriptors in a closed ring, so that the last descriptor 747 * points back to the first. 748 */ 749 int 750 vr_list_rx_init(struct vr_softc *sc) 751 { 752 struct vr_chain_data *cd; 753 struct vr_list_data *ld; 754 struct vr_desc *d; 755 int i, nexti; 756 757 cd = &sc->vr_cdata; 758 ld = sc->vr_ldata; 759 760 for (i = 0; i < VR_RX_LIST_CNT; i++) { 761 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 762 0, BUS_DMA_NOWAIT | BUS_DMA_READ, 763 &cd->vr_rx_chain[i].vr_map)) 764 return (ENOBUFS); 765 766 d = (struct vr_desc *)&ld->vr_rx_list[i]; 767 cd->vr_rx_chain[i].vr_ptr = d; 768 cd->vr_rx_chain[i].vr_paddr = 769 sc->sc_listmap.vrm_map->dm_segs[0].ds_addr + 770 offsetof(struct vr_list_data, vr_rx_list[i]); 771 772 if (i == (VR_RX_LIST_CNT - 1)) 773 nexti = 0; 774 else 775 nexti = i + 1; 776 777 cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[nexti]; 778 ld->vr_rx_list[i].vr_next = 779 htole32(sc->sc_listmap.vrm_map->dm_segs[0].ds_addr + 780 offsetof(struct vr_list_data, vr_rx_list[nexti])); 781 } 782 783 cd->vr_rx_prod = cd->vr_rx_cons = &cd->vr_rx_chain[0]; 784 if_rxr_init(&sc->sc_rxring, 2, VR_RX_LIST_CNT - 1); 785 vr_fill_rx_ring(sc); 786 787 return (0); 788 } 789 790 void 791 vr_fill_rx_ring(struct vr_softc *sc) 792 { 793 struct vr_chain_data *cd; 794 struct vr_list_data *ld; 795 u_int slots; 796 797 cd = &sc->vr_cdata; 798 ld = sc->vr_ldata; 799 800 for (slots = if_rxr_get(&sc->sc_rxring, VR_RX_LIST_CNT); 801 slots > 0; slots--) { 802 if (vr_alloc_mbuf(sc, cd->vr_rx_prod)) 803 break; 804 805 cd->vr_rx_prod = cd->vr_rx_prod->vr_nextdesc; 806 } 807 808 if_rxr_put(&sc->sc_rxring, slots); 809 if (if_rxr_inuse(&sc->sc_rxring) == 0) 810 timeout_add(&sc->sc_rxto, 0); 811 } 812 813 /* 814 * A frame has been uploaded: pass the resulting mbuf chain up to 815 * the higher level protocols. 816 */ 817 void 818 vr_rxeof(struct vr_softc *sc) 819 { 820 struct mbuf *m; 821 struct mbuf_list ml = MBUF_LIST_INITIALIZER(); 822 struct ifnet *ifp; 823 struct vr_chain_onefrag *cur_rx; 824 int total_len = 0; 825 u_int32_t rxstat, rxctl; 826 827 ifp = &sc->arpcom.ac_if; 828 829 while (if_rxr_inuse(&sc->sc_rxring) > 0) { 830 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 831 0, sc->sc_listmap.vrm_map->dm_mapsize, 832 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 833 rxstat = letoh32(sc->vr_cdata.vr_rx_cons->vr_ptr->vr_status); 834 if (rxstat & VR_RXSTAT_OWN) 835 break; 836 837 rxctl = letoh32(sc->vr_cdata.vr_rx_cons->vr_ptr->vr_ctl); 838 839 cur_rx = sc->vr_cdata.vr_rx_cons; 840 m = cur_rx->vr_mbuf; 841 cur_rx->vr_mbuf = NULL; 842 sc->vr_cdata.vr_rx_cons = cur_rx->vr_nextdesc; 843 if_rxr_put(&sc->sc_rxring, 1); 844 845 /* 846 * If an error occurs, update stats, clear the 847 * status word and leave the mbuf cluster in place: 848 * it should simply get re-used next time this descriptor 849 * comes up in the ring. 850 */ 851 if ((rxstat & VR_RXSTAT_RX_OK) == 0) { 852 ifp->if_ierrors++; 853 #ifdef VR_DEBUG 854 printf("%s: rx error (%02x):", 855 sc->sc_dev.dv_xname, rxstat & 0x000000ff); 856 if (rxstat & VR_RXSTAT_CRCERR) 857 printf(" crc error"); 858 if (rxstat & VR_RXSTAT_FRAMEALIGNERR) 859 printf(" frame alignment error"); 860 if (rxstat & VR_RXSTAT_FIFOOFLOW) 861 printf(" FIFO overflow"); 862 if (rxstat & VR_RXSTAT_GIANT) 863 printf(" received giant packet"); 864 if (rxstat & VR_RXSTAT_RUNT) 865 printf(" received runt packet"); 866 if (rxstat & VR_RXSTAT_BUSERR) 867 printf(" system bus error"); 868 if (rxstat & VR_RXSTAT_BUFFERR) 869 printf(" rx buffer error"); 870 printf("\n"); 871 #endif 872 873 m_freem(m); 874 continue; 875 } 876 877 /* No errors; receive the packet. */ 878 total_len = VR_RXBYTES(letoh32(cur_rx->vr_ptr->vr_status)); 879 880 bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0, 881 cur_rx->vr_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 882 bus_dmamap_unload(sc->sc_dmat, cur_rx->vr_map); 883 884 /* 885 * The VIA Rhine chip includes the CRC with every 886 * received frame, and there's no way to turn this 887 * behavior off so trim the CRC manually. 888 */ 889 total_len -= ETHER_CRC_LEN; 890 891 #ifdef __STRICT_ALIGNMENT 892 { 893 struct mbuf *m0; 894 m0 = m_devget(mtod(m, caddr_t), total_len, ETHER_ALIGN); 895 m_freem(m); 896 if (m0 == NULL) { 897 ifp->if_ierrors++; 898 continue; 899 } 900 m = m0; 901 } 902 #else 903 m->m_pkthdr.len = m->m_len = total_len; 904 #endif 905 906 if (sc->vr_quirks & VR_Q_CSUM && 907 (rxstat & VR_RXSTAT_FRAG) == 0 && 908 (rxctl & VR_RXCTL_IP) != 0) { 909 /* Checksum is valid for non-fragmented IP packets. */ 910 if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) 911 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; 912 if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP) && 913 ((rxctl & VR_RXCTL_TCPUDPOK) != 0)) 914 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | 915 M_UDP_CSUM_IN_OK; 916 } 917 918 #if NVLAN > 0 919 /* 920 * If there's a tagged packet, the 802.1q header will be at the 921 * 4-byte boundary following the CRC. There will be 2 bytes 922 * TPID (0x8100) and 2 bytes TCI (including VLAN ID). 923 * This isn't in the data sheet. 924 */ 925 if (rxctl & VR_RXCTL_TAG) { 926 int offset = ((total_len + 3) & ~3) + ETHER_CRC_LEN + 2; 927 m->m_pkthdr.ether_vtag = htons(*(u_int16_t *) 928 ((u_int8_t *)m->m_data + offset)); 929 m->m_flags |= M_VLANTAG; 930 } 931 #endif 932 933 ml_enqueue(&ml, m); 934 } 935 936 if (ifiq_input(&ifp->if_rcv, &ml)) 937 if_rxr_livelocked(&sc->sc_rxring); 938 939 vr_fill_rx_ring(sc); 940 941 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 942 0, sc->sc_listmap.vrm_map->dm_mapsize, 943 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 944 } 945 946 void 947 vr_rxeoc(struct vr_softc *sc) 948 { 949 struct ifnet *ifp; 950 int i; 951 952 ifp = &sc->arpcom.ac_if; 953 954 ifp->if_ierrors++; 955 956 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); 957 DELAY(10000); 958 959 for (i = 0x400; 960 i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON); 961 i--) 962 ; /* Wait for receiver to stop */ 963 964 if (!i) { 965 printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname); 966 sc->vr_flags |= VR_F_RESTART; 967 return; 968 } 969 970 vr_rxeof(sc); 971 972 CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_cons->vr_paddr); 973 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); 974 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO); 975 } 976 977 /* 978 * A frame was downloaded to the chip. It's safe for us to clean up 979 * the list buffers. 980 */ 981 982 void 983 vr_txeof(struct vr_softc *sc) 984 { 985 struct vr_chain *cur_tx; 986 struct ifnet *ifp; 987 988 ifp = &sc->arpcom.ac_if; 989 990 /* 991 * Go through our tx list and free mbufs for those 992 * frames that have been transmitted. 993 */ 994 cur_tx = sc->vr_cdata.vr_tx_cons; 995 while (cur_tx != sc->vr_cdata.vr_tx_prod) { 996 u_int32_t txstat, txctl; 997 int i; 998 999 txstat = letoh32(cur_tx->vr_ptr->vr_status); 1000 txctl = letoh32(cur_tx->vr_ptr->vr_ctl); 1001 1002 if ((txstat & VR_TXSTAT_ABRT) || 1003 (txstat & VR_TXSTAT_UDF)) { 1004 for (i = 0x400; 1005 i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON); 1006 i--) 1007 ; /* Wait for chip to shutdown */ 1008 if (!i) { 1009 printf("%s: tx shutdown timeout\n", 1010 sc->sc_dev.dv_xname); 1011 sc->vr_flags |= VR_F_RESTART; 1012 break; 1013 } 1014 cur_tx->vr_ptr->vr_status = htole32(VR_TXSTAT_OWN); 1015 CSR_WRITE_4(sc, VR_TXADDR, cur_tx->vr_paddr); 1016 break; 1017 } 1018 1019 if (txstat & VR_TXSTAT_OWN) 1020 break; 1021 1022 sc->vr_cdata.vr_tx_cnt--; 1023 /* Only the first descriptor in the chain is valid. */ 1024 if ((txctl & VR_TXCTL_FIRSTFRAG) == 0) 1025 goto next; 1026 1027 if (txstat & VR_TXSTAT_ERRSUM) { 1028 ifp->if_oerrors++; 1029 if (txstat & VR_TXSTAT_DEFER) 1030 ifp->if_collisions++; 1031 if (txstat & VR_TXSTAT_LATECOLL) 1032 ifp->if_collisions++; 1033 } 1034 1035 ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3; 1036 1037 if (cur_tx->vr_map != NULL && cur_tx->vr_map->dm_nsegs > 0) 1038 bus_dmamap_unload(sc->sc_dmat, cur_tx->vr_map); 1039 1040 m_freem(cur_tx->vr_mbuf); 1041 cur_tx->vr_mbuf = NULL; 1042 ifq_clr_oactive(&ifp->if_snd); 1043 1044 next: 1045 cur_tx = cur_tx->vr_nextdesc; 1046 } 1047 1048 sc->vr_cdata.vr_tx_cons = cur_tx; 1049 if (sc->vr_cdata.vr_tx_cnt == 0) 1050 ifp->if_timer = 0; 1051 } 1052 1053 void 1054 vr_tick(void *xsc) 1055 { 1056 struct vr_softc *sc = xsc; 1057 int s; 1058 1059 s = splnet(); 1060 if (sc->vr_flags & VR_F_RESTART) { 1061 printf("%s: restarting\n", sc->sc_dev.dv_xname); 1062 vr_init(sc); 1063 sc->vr_flags &= ~VR_F_RESTART; 1064 } 1065 1066 mii_tick(&sc->sc_mii); 1067 timeout_add_sec(&sc->sc_to, 1); 1068 splx(s); 1069 } 1070 1071 void 1072 vr_rxtick(void *xsc) 1073 { 1074 struct vr_softc *sc = xsc; 1075 int s; 1076 1077 s = splnet(); 1078 if (if_rxr_inuse(&sc->sc_rxring) == 0) { 1079 vr_fill_rx_ring(sc); 1080 if (if_rxr_inuse(&sc->sc_rxring) == 0) 1081 timeout_add(&sc->sc_rxto, 1); 1082 } 1083 splx(s); 1084 } 1085 1086 int 1087 vr_intr(void *arg) 1088 { 1089 struct vr_softc *sc; 1090 struct ifnet *ifp; 1091 u_int16_t status; 1092 int claimed = 0; 1093 1094 sc = arg; 1095 ifp = &sc->arpcom.ac_if; 1096 1097 /* Suppress unwanted interrupts. */ 1098 if (!(ifp->if_flags & IFF_UP)) { 1099 vr_stop(sc); 1100 return 0; 1101 } 1102 1103 status = CSR_READ_2(sc, VR_ISR); 1104 if (status) 1105 CSR_WRITE_2(sc, VR_ISR, status); 1106 1107 if (status & VR_INTRS) { 1108 claimed = 1; 1109 1110 if (status & VR_ISR_RX_OK) 1111 vr_rxeof(sc); 1112 1113 if (status & VR_ISR_RX_DROPPED) { 1114 #ifdef VR_DEBUG 1115 printf("%s: rx packet lost\n", sc->sc_dev.dv_xname); 1116 #endif 1117 ifp->if_ierrors++; 1118 } 1119 1120 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) || 1121 (status & VR_ISR_RX_OFLOW)) { 1122 #ifdef VR_DEBUG 1123 printf("%s: receive error (%04x)", 1124 sc->sc_dev.dv_xname, status); 1125 if (status & VR_ISR_RX_NOBUF) 1126 printf(" no buffers"); 1127 if (status & VR_ISR_RX_OFLOW) 1128 printf(" overflow"); 1129 printf("\n"); 1130 #endif 1131 vr_rxeoc(sc); 1132 } 1133 1134 if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) { 1135 if (status & VR_ISR_BUSERR) 1136 printf("%s: PCI bus error\n", 1137 sc->sc_dev.dv_xname); 1138 if (status & VR_ISR_TX_UNDERRUN) 1139 printf("%s: transmit underrun\n", 1140 sc->sc_dev.dv_xname); 1141 vr_init(sc); 1142 status = 0; 1143 } 1144 1145 if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) || 1146 (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) { 1147 vr_txeof(sc); 1148 if ((status & VR_ISR_UDFI) || 1149 (status & VR_ISR_TX_ABRT2) || 1150 (status & VR_ISR_TX_ABRT)) { 1151 #ifdef VR_DEBUG 1152 if (status & (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2)) 1153 printf("%s: transmit aborted\n", 1154 sc->sc_dev.dv_xname); 1155 if (status & VR_ISR_UDFI) 1156 printf("%s: transmit underflow\n", 1157 sc->sc_dev.dv_xname); 1158 #endif 1159 ifp->if_oerrors++; 1160 if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) { 1161 VR_SETBIT16(sc, VR_COMMAND, 1162 VR_CMD_TX_ON); 1163 VR_SETBIT16(sc, VR_COMMAND, 1164 VR_CMD_TX_GO); 1165 } 1166 } 1167 } 1168 } 1169 1170 if (!ifq_empty(&ifp->if_snd)) 1171 vr_start(ifp); 1172 1173 return (claimed); 1174 } 1175 1176 /* 1177 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1178 * pointers to the fragment pointers. 1179 */ 1180 int 1181 vr_encap(struct vr_softc *sc, struct vr_chain **cp, struct mbuf *m) 1182 { 1183 struct vr_chain *c = *cp; 1184 struct vr_desc *f = NULL; 1185 u_int32_t vr_ctl = 0, vr_status = 0, intdisable = 0; 1186 bus_dmamap_t txmap; 1187 int i, runt = 0; 1188 int error; 1189 1190 if (sc->vr_quirks & VR_Q_CSUM) { 1191 if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) 1192 vr_ctl |= VR_TXCTL_IPCSUM; 1193 if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) 1194 vr_ctl |= VR_TXCTL_TCPCSUM; 1195 if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) 1196 vr_ctl |= VR_TXCTL_UDPCSUM; 1197 } 1198 1199 if (sc->vr_quirks & VR_Q_NEEDALIGN) { 1200 /* Deep copy for chips that need alignment */ 1201 error = EFBIG; 1202 } else { 1203 error = bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m, 1204 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 1205 } 1206 1207 switch (error) { 1208 case 0: 1209 break; 1210 case EFBIG: 1211 if (m_defrag(m, M_DONTWAIT) == 0 && 1212 bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m, 1213 BUS_DMA_NOWAIT) == 0) 1214 break; 1215 1216 /* FALLTHROUGH */ 1217 default: 1218 return (ENOBUFS); 1219 } 1220 1221 bus_dmamap_sync(sc->sc_dmat, c->vr_map, 0, c->vr_map->dm_mapsize, 1222 BUS_DMASYNC_PREWRITE); 1223 if (c->vr_map->dm_mapsize < VR_MIN_FRAMELEN) 1224 runt = 1; 1225 1226 #if NVLAN > 0 1227 /* 1228 * Tell chip to insert VLAN tag if needed. 1229 * This chip expects the VLAN ID (0x0FFF) and the PCP (0xE000) 1230 * in only 15 bits without the gap at 0x1000 (reserved for DEI). 1231 * Therefore we need to de- / re-construct the VLAN header. 1232 */ 1233 if (m->m_flags & M_VLANTAG) { 1234 u_int32_t vtag = m->m_pkthdr.ether_vtag; 1235 vtag = EVL_VLANOFTAG(vtag) | EVL_PRIOFTAG(vtag) << 12; 1236 vr_status |= vtag << VR_TXSTAT_PQSHIFT; 1237 vr_ctl |= htole32(VR_TXCTL_INSERTTAG); 1238 } 1239 #endif 1240 1241 /* 1242 * We only want TX completion interrupts on every Nth packet. 1243 * We need to set VR_TXNEXT_INTDISABLE on every descriptor except 1244 * for the last discriptor of every Nth packet, where we set 1245 * VR_TXCTL_FINT. The former is in the specs for only some chips. 1246 * present: VT6102 VT6105M VT8235M 1247 * not present: VT86C100 6105LOM 1248 */ 1249 if (++sc->vr_cdata.vr_tx_pkts % VR_TX_INTR_THRESH != 0 && 1250 sc->vr_quirks & VR_Q_INTDISABLE) 1251 intdisable = VR_TXNEXT_INTDISABLE; 1252 1253 c->vr_mbuf = m; 1254 txmap = c->vr_map; 1255 for (i = 0; i < txmap->dm_nsegs; i++) { 1256 if (i != 0) 1257 *cp = c = c->vr_nextdesc; 1258 f = c->vr_ptr; 1259 f->vr_ctl = htole32(txmap->dm_segs[i].ds_len | VR_TXCTL_TLINK | 1260 vr_ctl); 1261 if (i == 0) 1262 f->vr_ctl |= htole32(VR_TXCTL_FIRSTFRAG); 1263 f->vr_status = htole32(vr_status); 1264 f->vr_data = htole32(txmap->dm_segs[i].ds_addr); 1265 f->vr_next = htole32(c->vr_nextdesc->vr_paddr | intdisable); 1266 sc->vr_cdata.vr_tx_cnt++; 1267 } 1268 1269 /* Pad runt frames */ 1270 if (runt) { 1271 *cp = c = c->vr_nextdesc; 1272 f = c->vr_ptr; 1273 f->vr_ctl = htole32((VR_MIN_FRAMELEN - txmap->dm_mapsize) | 1274 VR_TXCTL_TLINK | vr_ctl); 1275 f->vr_status = htole32(vr_status); 1276 f->vr_data = htole32(sc->sc_zeromap.vrm_map->dm_segs[0].ds_addr); 1277 f->vr_next = htole32(c->vr_nextdesc->vr_paddr | intdisable); 1278 sc->vr_cdata.vr_tx_cnt++; 1279 } 1280 1281 /* Set EOP on the last descriptor */ 1282 f->vr_ctl |= htole32(VR_TXCTL_LASTFRAG); 1283 1284 if (sc->vr_cdata.vr_tx_pkts % VR_TX_INTR_THRESH == 0) 1285 f->vr_ctl |= htole32(VR_TXCTL_FINT); 1286 1287 return (0); 1288 } 1289 1290 /* 1291 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1292 * to the mbuf data regions directly in the transmit lists. We also save a 1293 * copy of the pointers since the transmit list fragment pointers are 1294 * physical addresses. 1295 */ 1296 1297 void 1298 vr_start(struct ifnet *ifp) 1299 { 1300 struct vr_softc *sc; 1301 struct mbuf *m; 1302 struct vr_chain *cur_tx, *head_tx; 1303 unsigned int queued = 0; 1304 1305 sc = ifp->if_softc; 1306 1307 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd)) 1308 return; 1309 1310 if (sc->vr_link == 0) 1311 return; 1312 1313 cur_tx = sc->vr_cdata.vr_tx_prod; 1314 for (;;) { 1315 if (sc->vr_cdata.vr_tx_cnt + VR_MAXFRAGS >= 1316 VR_TX_LIST_CNT - 1) { 1317 ifq_set_oactive(&ifp->if_snd); 1318 break; 1319 } 1320 1321 m = ifq_dequeue(&ifp->if_snd); 1322 if (m == NULL) 1323 break; 1324 1325 /* Pack the data into the descriptor. */ 1326 head_tx = cur_tx; 1327 if (vr_encap(sc, &cur_tx, m)) { 1328 m_freem(m); 1329 ifp->if_oerrors++; 1330 continue; 1331 } 1332 queued++; 1333 1334 /* Only set ownership bit on first descriptor */ 1335 head_tx->vr_ptr->vr_status |= htole32(VR_TXSTAT_OWN); 1336 1337 #if NBPFILTER > 0 1338 /* 1339 * If there's a BPF listener, bounce a copy of this frame 1340 * to him. 1341 */ 1342 if (ifp->if_bpf) 1343 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1344 #endif 1345 cur_tx = cur_tx->vr_nextdesc; 1346 } 1347 if (queued > 0) { 1348 sc->vr_cdata.vr_tx_prod = cur_tx; 1349 1350 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 0, 1351 sc->sc_listmap.vrm_map->dm_mapsize, 1352 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1353 1354 /* Tell the chip to start transmitting. */ 1355 VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO); 1356 1357 /* Set a timeout in case the chip goes out to lunch. */ 1358 ifp->if_timer = 5; 1359 } 1360 } 1361 1362 void 1363 vr_chipinit(struct vr_softc *sc) 1364 { 1365 /* 1366 * Make sure it isn't suspended. 1367 */ 1368 if (pci_get_capability(sc->sc_pc, sc->sc_tag, 1369 PCI_CAP_PWRMGMT, NULL, NULL)) 1370 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1)); 1371 1372 /* Reset the adapter. */ 1373 vr_reset(sc); 1374 1375 /* 1376 * Turn on bit2 (MIION) in PCI configuration register 0x53 during 1377 * initialization and disable AUTOPOLL. 1378 */ 1379 pci_conf_write(sc->sc_pc, sc->sc_tag, VR_PCI_MODE, 1380 pci_conf_read(sc->sc_pc, sc->sc_tag, VR_PCI_MODE) | 1381 (VR_MODE3_MIION << 24)); 1382 VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL); 1383 } 1384 1385 void 1386 vr_init(void *xsc) 1387 { 1388 struct vr_softc *sc = xsc; 1389 struct ifnet *ifp = &sc->arpcom.ac_if; 1390 struct mii_data *mii = &sc->sc_mii; 1391 int s, i; 1392 1393 s = splnet(); 1394 1395 /* 1396 * Cancel pending I/O and free all RX/TX buffers. 1397 */ 1398 vr_stop(sc); 1399 vr_chipinit(sc); 1400 1401 /* 1402 * Set our station address. 1403 */ 1404 for (i = 0; i < ETHER_ADDR_LEN; i++) 1405 CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]); 1406 1407 /* Set DMA size */ 1408 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH); 1409 VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD); 1410 1411 /* 1412 * BCR0 and BCR1 can override the RXCFG and TXCFG registers, 1413 * so we must set both. 1414 */ 1415 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH); 1416 VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES); 1417 1418 VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH); 1419 VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD); 1420 1421 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); 1422 VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES); 1423 1424 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); 1425 VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD); 1426 1427 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) 1428 VR_SETBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN); 1429 1430 /* Init circular RX list. */ 1431 if (vr_list_rx_init(sc) == ENOBUFS) { 1432 printf("%s: initialization failed: no memory for rx buffers\n", 1433 sc->sc_dev.dv_xname); 1434 vr_stop(sc); 1435 splx(s); 1436 return; 1437 } 1438 1439 /* 1440 * Init tx descriptors. 1441 */ 1442 if (vr_list_tx_init(sc) == ENOBUFS) { 1443 printf("%s: initialization failed: no memory for tx buffers\n", 1444 sc->sc_dev.dv_xname); 1445 vr_stop(sc); 1446 splx(s); 1447 return; 1448 } 1449 1450 /* 1451 * Program promiscuous mode and multicast filters. 1452 */ 1453 vr_iff(sc); 1454 1455 /* 1456 * Load the address of the RX list. 1457 */ 1458 CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_cons->vr_paddr); 1459 1460 /* Enable receiver and transmitter. */ 1461 CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START| 1462 VR_CMD_TX_ON|VR_CMD_RX_ON| 1463 VR_CMD_RX_GO); 1464 1465 CSR_WRITE_4(sc, VR_TXADDR, sc->sc_listmap.vrm_map->dm_segs[0].ds_addr + 1466 offsetof(struct vr_list_data, vr_tx_list[0])); 1467 1468 /* 1469 * Enable interrupts. 1470 */ 1471 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 1472 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1473 1474 /* Restore state of BMCR */ 1475 sc->vr_link = 1; 1476 mii_mediachg(mii); 1477 1478 ifp->if_flags |= IFF_RUNNING; 1479 ifq_clr_oactive(&ifp->if_snd); 1480 1481 if (!timeout_pending(&sc->sc_to)) 1482 timeout_add_sec(&sc->sc_to, 1); 1483 1484 splx(s); 1485 } 1486 1487 /* 1488 * Set media options. 1489 */ 1490 int 1491 vr_ifmedia_upd(struct ifnet *ifp) 1492 { 1493 struct vr_softc *sc = ifp->if_softc; 1494 1495 if (ifp->if_flags & IFF_UP) 1496 vr_init(sc); 1497 1498 return (0); 1499 } 1500 1501 /* 1502 * Report current media status. 1503 */ 1504 void 1505 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1506 { 1507 struct vr_softc *sc = ifp->if_softc; 1508 struct mii_data *mii = &sc->sc_mii; 1509 1510 mii_pollstat(mii); 1511 ifmr->ifm_active = mii->mii_media_active; 1512 ifmr->ifm_status = mii->mii_media_status; 1513 } 1514 1515 int 1516 vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1517 { 1518 struct vr_softc *sc = ifp->if_softc; 1519 struct ifreq *ifr = (struct ifreq *) data; 1520 int s, error = 0; 1521 1522 s = splnet(); 1523 1524 switch(command) { 1525 case SIOCSIFADDR: 1526 ifp->if_flags |= IFF_UP; 1527 if (!(ifp->if_flags & IFF_RUNNING)) 1528 vr_init(sc); 1529 break; 1530 1531 case SIOCSIFFLAGS: 1532 if (ifp->if_flags & IFF_UP) { 1533 if (ifp->if_flags & IFF_RUNNING) 1534 error = ENETRESET; 1535 else 1536 vr_init(sc); 1537 } else { 1538 if (ifp->if_flags & IFF_RUNNING) 1539 vr_stop(sc); 1540 } 1541 break; 1542 1543 case SIOCGIFMEDIA: 1544 case SIOCSIFMEDIA: 1545 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1546 break; 1547 1548 case SIOCGIFRXR: 1549 error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data, 1550 NULL, MCLBYTES, &sc->sc_rxring); 1551 break; 1552 1553 default: 1554 error = ether_ioctl(ifp, &sc->arpcom, command, data); 1555 } 1556 1557 if (error == ENETRESET) { 1558 if (ifp->if_flags & IFF_RUNNING) 1559 vr_iff(sc); 1560 error = 0; 1561 } 1562 1563 splx(s); 1564 return(error); 1565 } 1566 1567 void 1568 vr_watchdog(struct ifnet *ifp) 1569 { 1570 struct vr_softc *sc; 1571 1572 sc = ifp->if_softc; 1573 1574 /* 1575 * Since we're only asking for completion interrupts only every 1576 * few packets, occasionally the watchdog will fire when we have 1577 * some TX descriptors to reclaim, so check for that first. 1578 */ 1579 vr_txeof(sc); 1580 if (sc->vr_cdata.vr_tx_cnt == 0) 1581 return; 1582 1583 ifp->if_oerrors++; 1584 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 1585 vr_init(sc); 1586 1587 if (!ifq_empty(&ifp->if_snd)) 1588 vr_start(ifp); 1589 } 1590 1591 /* 1592 * Stop the adapter and free any mbufs allocated to the 1593 * RX and TX lists. 1594 */ 1595 void 1596 vr_stop(struct vr_softc *sc) 1597 { 1598 int i; 1599 struct ifnet *ifp; 1600 bus_dmamap_t map; 1601 1602 ifp = &sc->arpcom.ac_if; 1603 ifp->if_timer = 0; 1604 1605 timeout_del(&sc->sc_to); 1606 1607 ifp->if_flags &= ~IFF_RUNNING; 1608 ifq_clr_oactive(&ifp->if_snd); 1609 1610 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP); 1611 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON)); 1612 1613 /* wait for xfers to shutdown */ 1614 for (i = VR_TIMEOUT; i > 0; i--) { 1615 DELAY(10); 1616 if (!(CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON))) 1617 break; 1618 } 1619 #ifdef VR_DEBUG 1620 if (i == 0) 1621 printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname); 1622 #endif 1623 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1624 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); 1625 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); 1626 1627 /* 1628 * Free data in the RX lists. 1629 */ 1630 for (i = 0; i < VR_RX_LIST_CNT; i++) { 1631 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) { 1632 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf); 1633 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL; 1634 } 1635 map = sc->vr_cdata.vr_rx_chain[i].vr_map; 1636 if (map != NULL) { 1637 if (map->dm_nsegs > 0) 1638 bus_dmamap_unload(sc->sc_dmat, map); 1639 bus_dmamap_destroy(sc->sc_dmat, map); 1640 sc->vr_cdata.vr_rx_chain[i].vr_map = NULL; 1641 } 1642 } 1643 bzero(&sc->vr_ldata->vr_rx_list, sizeof(sc->vr_ldata->vr_rx_list)); 1644 1645 /* 1646 * Free the TX list buffers. 1647 */ 1648 for (i = 0; i < VR_TX_LIST_CNT; i++) { 1649 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) { 1650 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf); 1651 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL; 1652 ifp->if_oerrors++; 1653 } 1654 map = sc->vr_cdata.vr_tx_chain[i].vr_map; 1655 if (map != NULL) { 1656 if (map->dm_nsegs > 0) 1657 bus_dmamap_unload(sc->sc_dmat, map); 1658 bus_dmamap_destroy(sc->sc_dmat, map); 1659 sc->vr_cdata.vr_tx_chain[i].vr_map = NULL; 1660 } 1661 } 1662 bzero(&sc->vr_ldata->vr_tx_list, sizeof(sc->vr_ldata->vr_tx_list)); 1663 } 1664 1665 #ifndef SMALL_KERNEL 1666 int 1667 vr_wol(struct ifnet *ifp, int enable) 1668 { 1669 struct vr_softc *sc = ifp->if_softc; 1670 1671 /* Clear WOL configuration */ 1672 CSR_WRITE_1(sc, VR_WOLCRCLR, 0xFF); 1673 1674 /* Clear event status bits. */ 1675 CSR_WRITE_1(sc, VR_PWRCSRCLR, 0xFF); 1676 1677 /* Disable PME# assertion upon wake event. */ 1678 VR_CLRBIT(sc, VR_STICKHW, VR_STICKHW_WOL_ENB); 1679 VR_SETBIT(sc, VR_WOLCFGCLR, VR_WOLCFG_PMEOVR); 1680 1681 if (enable) { 1682 VR_SETBIT(sc, VR_WOLCRSET, VR_WOLCR_MAGIC); 1683 1684 /* Enable PME# assertion upon wake event. */ 1685 VR_SETBIT(sc, VR_STICKHW, VR_STICKHW_WOL_ENB); 1686 VR_SETBIT(sc, VR_WOLCFGSET, VR_WOLCFG_PMEOVR); 1687 } 1688 1689 return (0); 1690 } 1691 #endif 1692 1693 int 1694 vr_alloc_mbuf(struct vr_softc *sc, struct vr_chain_onefrag *r) 1695 { 1696 struct vr_desc *d; 1697 struct mbuf *m; 1698 1699 if (r == NULL) 1700 return (EINVAL); 1701 1702 m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES); 1703 if (!m) 1704 return (ENOBUFS); 1705 1706 m->m_len = m->m_pkthdr.len = MCLBYTES; 1707 m_adj(m, sizeof(u_int64_t)); 1708 1709 if (bus_dmamap_load_mbuf(sc->sc_dmat, r->vr_map, m, BUS_DMA_NOWAIT)) { 1710 m_free(m); 1711 return (ENOBUFS); 1712 } 1713 1714 bus_dmamap_sync(sc->sc_dmat, r->vr_map, 0, r->vr_map->dm_mapsize, 1715 BUS_DMASYNC_PREREAD); 1716 1717 /* Reinitialize the RX descriptor */ 1718 r->vr_mbuf = m; 1719 d = r->vr_ptr; 1720 d->vr_data = htole32(r->vr_map->dm_segs[0].ds_addr); 1721 if (sc->vr_quirks & VR_Q_BABYJUMBO) 1722 d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN_BABYJUMBO); 1723 else 1724 d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN); 1725 1726 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 0, 1727 sc->sc_listmap.vrm_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 1728 1729 d->vr_status = htole32(VR_RXSTAT); 1730 1731 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 0, 1732 sc->sc_listmap.vrm_map->dm_mapsize, 1733 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1734 1735 return (0); 1736 } 1737