/*
 * Copyright (c) 1997, 1998
 *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/pci/if_vr.c,v 1.26.2.13 2003/02/06 04:46:20 silby Exp $
 * $DragonFly: src/sys/dev/netif/vr/if_vr.c,v 1.19 2005/02/21 18:40:37 joerg Exp $
 */

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated.  The controller
 * uses an MII bus and an external physical layer interface.  The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter.  Transmit and receive descriptors are similar
 * to the tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned.  Unfortunately,
 * FreeBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 */
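
/*
 * (The buffer copy mentioned above is done in vr_encap() below, which
 * also pads runt frames out to the minimum frame length, since the chip
 * does not auto-pad.)
 */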

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <vm/vm.h>              /* for vtophys */
#include <vm/pmap.h>            /* for vtophys */
#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#define VR_USEIOSPACE

#include <dev/netif/vr/if_vrreg.h>

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#undef VR_USESWSHIFT

/*
 * Various supported device vendors/types and their names.
 */
static struct vr_type vr_devs[] = {
        { VIA_VENDORID, VIA_DEVICEID_RHINE,
                "VIA VT3043 Rhine I 10/100BaseTX" },
        { VIA_VENDORID, VIA_DEVICEID_RHINE_II,
                "VIA VT86C100A Rhine II 10/100BaseTX" },
        { VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
                "VIA VT6102 Rhine II 10/100BaseTX" },
        { VIA_VENDORID, VIA_DEVICEID_RHINE_III,
                "VIA VT6105 Rhine III 10/100BaseTX" },
        { VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
                "VIA VT6105M Rhine III 10/100BaseTX" },
        { DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
                "Delta Electronics Rhine II 10/100BaseTX" },
        { ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
                "Addtron Technology Rhine II 10/100BaseTX" },
        { 0, 0, NULL }
};

static int vr_probe(device_t);
static int vr_attach(device_t);
static int vr_detach(device_t);

static int vr_newbuf(struct vr_softc *, struct vr_chain_onefrag *,
                     struct mbuf *);
static int vr_encap(struct vr_softc *, struct vr_chain *, struct mbuf *);

static void vr_rxeof(struct vr_softc *);
static void vr_rxeoc(struct vr_softc *);
static void vr_txeof(struct vr_softc *);
static void vr_txeoc(struct vr_softc *);
static void vr_tick(void *);
static void vr_intr(void *);
static void vr_start(struct ifnet *);
static int vr_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void vr_init(void *);
static void vr_stop(struct vr_softc *);
static void vr_watchdog(struct ifnet *);
static void vr_shutdown(device_t);
static int vr_ifmedia_upd(struct ifnet *);
static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);

#ifdef VR_USESWSHIFT
static void vr_mii_sync(struct vr_softc *);
static void vr_mii_send(struct vr_softc *, uint32_t, int);
#endif
static int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *);
static int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *);
static int vr_miibus_readreg(device_t, int, int);
static int vr_miibus_writereg(device_t, int, int, int);
static void vr_miibus_statchg(device_t);

static void vr_setcfg(struct vr_softc *, int);
static uint8_t vr_calchash(uint8_t *);
static void vr_setmulti(struct vr_softc *);
static void vr_reset(struct vr_softc *);
static int vr_list_rx_init(struct vr_softc *);
static int vr_list_tx_init(struct vr_softc *);
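
/*
 * VR_USEIOSPACE (defined above, before if_vrreg.h is pulled in) selects
 * whether the chip's registers are accessed through the I/O space BAR
 * (VR_PCI_LOIO) or the memory space BAR (VR_PCI_LOMEM); the resource
 * type and rid used in vr_attach() are chosen to match here.
 */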
#ifdef VR_USEIOSPACE
#define VR_RES SYS_RES_IOPORT
#define VR_RID VR_PCI_LOIO
#else
#define VR_RES SYS_RES_MEMORY
#define VR_RID VR_PCI_LOMEM
#endif

static device_method_t vr_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, vr_probe),
        DEVMETHOD(device_attach, vr_attach),
        DEVMETHOD(device_detach, vr_detach),
        DEVMETHOD(device_shutdown, vr_shutdown),

        /* bus interface */
        DEVMETHOD(bus_print_child, bus_generic_print_child),
        DEVMETHOD(bus_driver_added, bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg, vr_miibus_readreg),
        DEVMETHOD(miibus_writereg, vr_miibus_writereg),
        DEVMETHOD(miibus_statchg, vr_miibus_statchg),

        { 0, 0 }
};

static driver_t vr_driver = {
        "vr",
        vr_methods,
        sizeof(struct vr_softc)
};

static devclass_t vr_devclass;

DECLARE_DUMMY_MODULE(if_vr);
DRIVER_MODULE(if_vr, pci, vr_driver, vr_devclass, 0, 0);
DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);

#define VR_SETBIT(sc, reg, x) \
        CSR_WRITE_1(sc, reg, \
                CSR_READ_1(sc, reg) | (x))

#define VR_CLRBIT(sc, reg, x) \
        CSR_WRITE_1(sc, reg, \
                CSR_READ_1(sc, reg) & ~(x))

#define VR_SETBIT16(sc, reg, x) \
        CSR_WRITE_2(sc, reg, \
                CSR_READ_2(sc, reg) | (x))

#define VR_CLRBIT16(sc, reg, x) \
        CSR_WRITE_2(sc, reg, \
                CSR_READ_2(sc, reg) & ~(x))

#define VR_SETBIT32(sc, reg, x) \
        CSR_WRITE_4(sc, reg, \
                CSR_READ_4(sc, reg) | (x))

#define VR_CLRBIT32(sc, reg, x) \
        CSR_WRITE_4(sc, reg, \
                CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x) \
        CSR_WRITE_1(sc, VR_MIICMD, \
                CSR_READ_1(sc, VR_MIICMD) | (x))

#define SIO_CLR(x) \
        CSR_WRITE_1(sc, VR_MIICMD, \
                CSR_READ_1(sc, VR_MIICMD) & ~(x))

#ifdef VR_USESWSHIFT
/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
vr_mii_sync(struct vr_softc *sc)
{
        int i;

        SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);

        for (i = 0; i < 32; i++) {
                SIO_SET(VR_MIICMD_CLK);
                DELAY(1);
                SIO_CLR(VR_MIICMD_CLK);
                DELAY(1);
        }
}

/*
 * Clock a series of bits through the MII.
 */
static void
vr_mii_send(struct vr_softc *sc, uint32_t bits, int cnt)
{
        int i;

        SIO_CLR(VR_MIICMD_CLK);

        for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
                if (bits & i)
                        SIO_SET(VR_MIICMD_DATAIN);
                else
                        SIO_CLR(VR_MIICMD_DATAIN);
                DELAY(1);
                SIO_CLR(VR_MIICMD_CLK);
                DELAY(1);
                SIO_SET(VR_MIICMD_CLK);
        }
}
#endif

/*
 * Read a PHY register through the MII.
 */
static int
vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame)
#ifdef VR_USESWSHIFT
{
        int i, ack, s;

        s = splimp();

        /* Set up frame for RX. */
        frame->mii_stdelim = VR_MII_STARTDELIM;
        frame->mii_opcode = VR_MII_READOP;
        frame->mii_turnaround = 0;
        frame->mii_data = 0;

        CSR_WRITE_1(sc, VR_MIICMD, 0);
        VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

        /* Turn on data xmit. */
        SIO_SET(VR_MIICMD_DIR);

        vr_mii_sync(sc);

        /* Send command/address info. */
        vr_mii_send(sc, frame->mii_stdelim, 2);
        vr_mii_send(sc, frame->mii_opcode, 2);
        vr_mii_send(sc, frame->mii_phyaddr, 5);
        vr_mii_send(sc, frame->mii_regaddr, 5);

        /* Idle bit. */
        SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
        DELAY(1);
        SIO_SET(VR_MIICMD_CLK);
        DELAY(1);

        /* Turn off xmit. */
        SIO_CLR(VR_MIICMD_DIR);

        /* Check for ack. */
        SIO_CLR(VR_MIICMD_CLK);
        DELAY(1);
        ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
        SIO_SET(VR_MIICMD_CLK);
        DELAY(1);

        /*
         * Now try reading data bits.  If the ack failed, we still
         * need to clock through 16 cycles to keep the PHY(s) in sync.
         */
        if (ack) {
                for (i = 0; i < 16; i++) {
                        SIO_CLR(VR_MIICMD_CLK);
                        DELAY(1);
                        SIO_SET(VR_MIICMD_CLK);
                        DELAY(1);
                }
                goto fail;
        }

        for (i = 0x8000; i; i >>= 1) {
                SIO_CLR(VR_MIICMD_CLK);
                DELAY(1);
                if (!ack) {
                        if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
                                frame->mii_data |= i;
                        DELAY(1);
                }
                SIO_SET(VR_MIICMD_CLK);
                DELAY(1);
        }

fail:
        SIO_CLR(VR_MIICMD_CLK);
        DELAY(1);
        SIO_SET(VR_MIICMD_CLK);
        DELAY(1);

        splx(s);

        if (ack)
                return(1);
        return(0);
}
#else
{
        int s, i;

        s = splimp();

        /* Set the PHY address. */
        CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
            frame->mii_phyaddr);

        /* Set the register address. */
        CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
        VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

        for (i = 0; i < 10000; i++) {
                if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
                        break;
                DELAY(1);
        }
        frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);

        splx(s);

        return(0);
}
#endif


/*
 * Write to a PHY register through the MII.
 */
static int
vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame)
#ifdef VR_USESWSHIFT
{
        int s;

        s = splimp();

        CSR_WRITE_1(sc, VR_MIICMD, 0);
        VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

        /* Set up frame for TX. */
        frame->mii_stdelim = VR_MII_STARTDELIM;
        frame->mii_opcode = VR_MII_WRITEOP;
        frame->mii_turnaround = VR_MII_TURNAROUND;

        /* Turn on data output. */
        SIO_SET(VR_MIICMD_DIR);

        vr_mii_sync(sc);

        vr_mii_send(sc, frame->mii_stdelim, 2);
        vr_mii_send(sc, frame->mii_opcode, 2);
        vr_mii_send(sc, frame->mii_phyaddr, 5);
        vr_mii_send(sc, frame->mii_regaddr, 5);
        vr_mii_send(sc, frame->mii_turnaround, 2);
        vr_mii_send(sc, frame->mii_data, 16);

        /* Idle bit. */
        SIO_SET(VR_MIICMD_CLK);
        DELAY(1);
        SIO_CLR(VR_MIICMD_CLK);
        DELAY(1);

        /* Turn off xmit. */
        SIO_CLR(VR_MIICMD_DIR);

        splx(s);

        return(0);
}
#else
{
        int s, i;

        s = splimp();

        /* Set the PHY address. */
        CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
            frame->mii_phyaddr);

        /* Set the register address and data to write. */
        CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
        CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);

        VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

        for (i = 0; i < 10000; i++) {
                if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
                        break;
                DELAY(1);
        }

        splx(s);

        return(0);
}
#endif

static int
vr_miibus_readreg(device_t dev, int phy, int reg)
{
        struct vr_mii_frame frame;
        struct vr_softc *sc;

        sc = device_get_softc(dev);

        switch (sc->vr_revid) {
        case REV_ID_VT6102_APOLLO:
                /* On the VT6102, only PHY address 1 is used; ignore others. */
                if (phy != 1)
                        return(0);
                break;
        default:
                break;
        }

        bzero(&frame, sizeof(frame));

        frame.mii_phyaddr = phy;
        frame.mii_regaddr = reg;
        vr_mii_readreg(sc, &frame);

        return(frame.mii_data);
}

static int
vr_miibus_writereg(device_t dev, int phy, int reg, int data)
{
        struct vr_mii_frame frame;
        struct vr_softc *sc;

        sc = device_get_softc(dev);

        switch (sc->vr_revid) {
        case REV_ID_VT6102_APOLLO:
                if (phy != 1)
                        return 0;
                break;
        default:
                break;
        }

        bzero(&frame, sizeof(frame));

        frame.mii_phyaddr = phy;
        frame.mii_regaddr = reg;
        frame.mii_data = data;

        vr_mii_writereg(sc, &frame);

        return(0);
}

static void
vr_miibus_statchg(device_t dev)
{
        struct mii_data *mii;
        struct vr_softc *sc;

        sc = device_get_softc(dev);
        mii = device_get_softc(sc->vr_miibus);
        vr_setcfg(sc, mii->mii_media_active);
}

/*
 * Calculate CRC of a multicast group address, return the lower 6 bits.
 */
static uint8_t
vr_calchash(uint8_t *addr)
{
        uint32_t crc, carry;
        int i, j;
        uint8_t c;

        /* Compute CRC for the address value. */
        crc = 0xFFFFFFFF;       /* initial value */

        for (i = 0; i < 6; i++) {
                c = *(addr + i);
                for (j = 0; j < 8; j++) {
                        carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
                        crc <<= 1;
                        c >>= 1;
                        if (carry)
                                crc = (crc ^ 0x04c11db6) | carry;
                }
        }

        /* return the filter bit position */
        return((crc >> 26) & 0x0000003F);
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_setmulti(struct vr_softc *sc)
{
        struct ifnet *ifp;
        int h = 0;
        uint32_t hashes[2] = { 0, 0 };
        struct ifmultiaddr *ifma;
        uint8_t rxfilt;
        int mcnt = 0;

        ifp = &sc->arpcom.ac_if;

        rxfilt = CSR_READ_1(sc, VR_RXCFG);

        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                rxfilt |= VR_RXCFG_RX_MULTI;
                CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
                CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
                CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
                return;
        }

        /* First, zero out all the existing hash bits. */
        CSR_WRITE_4(sc, VR_MAR0, 0);
        CSR_WRITE_4(sc, VR_MAR1, 0);

        /* Now program new ones. */
        for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
            ifma = ifma->ifma_link.le_next) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                h = vr_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
                if (h < 32)
                        hashes[0] |= (1 << h);
                else
                        hashes[1] |= (1 << (h - 32));
                mcnt++;
        }

        if (mcnt)
                rxfilt |= VR_RXCFG_RX_MULTI;
        else
                rxfilt &= ~VR_RXCFG_RX_MULTI;

        CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
        CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
        CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
 * netconfig register, we first have to put the transmit and/or receive
 * logic in the idle state.
 */
static void
vr_setcfg(struct vr_softc *sc, int media)
{
        int restart = 0;

        if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
                restart = 1;
                VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
        }

        if ((media & IFM_GMASK) == IFM_FDX)
                VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
        else
                VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

        if (restart)
                VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
}

static void
vr_reset(struct vr_softc *sc)
{
        int i;

        VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

        for (i = 0; i < VR_TIMEOUT; i++) {
                DELAY(10);
                if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
                        break;
        }
        if (i == VR_TIMEOUT) {
                struct ifnet *ifp = &sc->arpcom.ac_if;

                if (sc->vr_revid < REV_ID_VT3065_A) {
                        if_printf(ifp, "reset never completed!\n");
                } else {
                        /* Use newer force reset command. */
                        if_printf(ifp, "Using force reset command.\n");
                        VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
                }
        }

        /* Wait a little while for the chip to get its brains in order. */
        DELAY(1000);
}

/*
 * Probe for a VIA Rhine chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vr_probe(device_t dev)
{
        struct vr_type *t;

        t = vr_devs;

        while(t->vr_name != NULL) {
                if ((pci_get_vendor(dev) == t->vr_vid) &&
                    (pci_get_device(dev) == t->vr_did)) {
                        device_set_desc(dev, t->vr_name);
                        return(0);
                }
                t++;
        }

        return(ENXIO);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vr_attach(device_t dev)
{
        int i, s;
        uint8_t eaddr[ETHER_ADDR_LEN];
        uint32_t command;
        struct vr_softc *sc;
        struct ifnet *ifp;
        int unit, error = 0, rid;

        s = splimp();

        sc = device_get_softc(dev);
        unit = device_get_unit(dev);
        callout_init(&sc->vr_stat_timer);

        /*
         * Handle power management nonsense.
         */
        command = pci_read_config(dev, VR_PCI_CAPID, 4) & 0x000000FF;
        if (command == 0x01) {
                command = pci_read_config(dev, VR_PCI_PWRMGMTCTRL, 4);
                if (command & VR_PSTATE_MASK) {
                        uint32_t iobase, membase, irq;

                        /* Save important PCI config data. */
                        iobase = pci_read_config(dev, VR_PCI_LOIO, 4);
                        membase = pci_read_config(dev, VR_PCI_LOMEM, 4);
                        irq = pci_read_config(dev, VR_PCI_INTLINE, 4);

                        /* Reset the power state. */
                        device_printf(dev, "chip is in D%d power mode "
                            "-- setting to D0\n", command & VR_PSTATE_MASK);
                        command &= 0xFFFFFFFC;
                        pci_write_config(dev, VR_PCI_PWRMGMTCTRL, command, 4);

                        /* Restore PCI config data. */
                        pci_write_config(dev, VR_PCI_LOIO, iobase, 4);
                        pci_write_config(dev, VR_PCI_LOMEM, membase, 4);
                        pci_write_config(dev, VR_PCI_INTLINE, irq, 4);
                }
        }

        /*
         * Map control/status registers.
         */
        command = pci_read_config(dev, PCIR_COMMAND, 4);
        command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
        pci_write_config(dev, PCIR_COMMAND, command, 4);
        command = pci_read_config(dev, PCIR_COMMAND, 4);
        sc->vr_revid = pci_read_config(dev, VR_PCI_REVID, 4) & 0x000000FF;

#ifdef VR_USEIOSPACE
        if (!(command & PCIM_CMD_PORTEN)) {
                device_printf(dev, "failed to enable I/O ports!\n");
                error = ENXIO;
                goto fail;
        }
#else
        if (!(command & PCIM_CMD_MEMEN)) {
                device_printf(dev, "failed to enable memory mapping!\n");
                error = ENXIO;
                goto fail;
        }
#endif

        rid = VR_RID;
        sc->vr_res = bus_alloc_resource_any(dev, VR_RES, &rid, RF_ACTIVE);

        if (sc->vr_res == NULL) {
                device_printf(dev, "couldn't map ports/memory\n");
                error = ENXIO;
                goto fail;
        }

        sc->vr_btag = rman_get_bustag(sc->vr_res);
        sc->vr_bhandle = rman_get_bushandle(sc->vr_res);

        /* Allocate interrupt. */
        rid = 0;
        sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);

        if (sc->vr_irq == NULL) {
                device_printf(dev, "couldn't map interrupt\n");
                bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
                error = ENXIO;
                goto fail;
        }

        error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET,
            vr_intr, sc, &sc->vr_intrhand);

        if (error) {
                bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
                bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
                device_printf(dev, "couldn't set up irq\n");
                goto fail;
        }

        /*
         * Windows may put the chip in suspend mode when it
         * shuts down.  Be sure to kick it in the head to wake it
         * up again.
         */
        VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

        /* Reset the adapter. */
        vr_reset(sc);

        /*
         * Turn on bit2 (MIION) in PCI configuration register 0x53 during
         * initialization and disable AUTOPOLL.
         */
        pci_write_config(dev, VR_PCI_MODE,
            pci_read_config(dev, VR_PCI_MODE, 4) | (VR_MODE3_MIION << 24), 4);
        VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);

        /*
         * Get station address.  The way the Rhine chips work,
         * you're not allowed to directly access the EEPROM once
         * they've been programmed a special way.  Consequently,
         * we need to read the node address from the PAR0 and PAR1
         * registers.
         */
        VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
        DELAY(200);
        for (i = 0; i < ETHER_ADDR_LEN; i++)
                eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

        sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF,
            M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

        if (sc->vr_ldata == NULL) {
                device_printf(dev, "no memory for list buffers!\n");
                bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
                bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
                bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
                error = ENXIO;
                goto fail;
        }

        bzero(sc->vr_ldata, sizeof(struct vr_list_data));

        ifp = &sc->arpcom.ac_if;
        ifp->if_softc = sc;
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_mtu = ETHERMTU;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = vr_ioctl;
        ifp->if_start = vr_start;
        ifp->if_watchdog = vr_watchdog;
        ifp->if_init = vr_init;
        ifp->if_baudrate = 10000000;
        ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;

        /*
         * Do MII setup.
         */
        if (mii_phy_probe(dev, &sc->vr_miibus,
            vr_ifmedia_upd, vr_ifmedia_sts)) {
                if_printf(ifp, "MII without any phy!\n");
                bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
                bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
                bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
                contigfree(sc->vr_ldata,
                    sizeof(struct vr_list_data), M_DEVBUF);
                error = ENXIO;
                goto fail;
        }

        /* Call MI attach routine. */
        ether_ifattach(ifp, eaddr);

fail:
        splx(s);
        return(error);
}

static int
vr_detach(device_t dev)
{
        struct vr_softc *sc;
        struct ifnet *ifp;
        int s;

        s = splimp();

        sc = device_get_softc(dev);
        ifp = &sc->arpcom.ac_if;

        vr_stop(sc);
        ether_ifdetach(ifp);

        bus_generic_detach(dev);
        device_delete_child(dev, sc->vr_miibus);

        bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
        bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
        bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);

        contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF);

        splx(s);

        return(0);
}

/*
 * Initialize the transmit descriptors.
 */
static int
vr_list_tx_init(struct vr_softc *sc)
{
        struct vr_chain_data *cd;
        struct vr_list_data *ld;
        int i, nexti;

        cd = &sc->vr_cdata;
        ld = sc->vr_ldata;
        for (i = 0; i < VR_TX_LIST_CNT; i++) {
                cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
                if (i == (VR_TX_LIST_CNT - 1))
                        nexti = 0;
                else
                        nexti = i + 1;
                cd->vr_tx_chain[i].vr_nextdesc = &cd->vr_tx_chain[nexti];
        }

        cd->vr_tx_free = &cd->vr_tx_chain[0];
        cd->vr_tx_tail = cd->vr_tx_head = NULL;

        return(0);
}
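
/*
 * Note that unlike the RX ring set up below, the TX descriptors are only
 * chained in software here (vr_nextdesc); the physical vr_next pointers
 * are filled in by vr_encap() as frames are queued.
 */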
926 */ 927 static int 928 vr_list_rx_init(struct vr_softc *sc) 929 { 930 struct vr_chain_data *cd; 931 struct vr_list_data *ld; 932 int i, nexti; 933 934 cd = &sc->vr_cdata; 935 ld = sc->vr_ldata; 936 937 for (i = 0; i < VR_RX_LIST_CNT; i++) { 938 cd->vr_rx_chain[i].vr_ptr = (struct vr_desc *)&ld->vr_rx_list[i]; 939 if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS) 940 return(ENOBUFS); 941 if (i == (VR_RX_LIST_CNT - 1)) 942 nexti = 0; 943 else 944 nexti = i + 1; 945 cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[nexti]; 946 ld->vr_rx_list[i].vr_next = vtophys(&ld->vr_rx_list[nexti]); 947 } 948 949 cd->vr_rx_head = &cd->vr_rx_chain[0]; 950 951 return(0); 952 } 953 954 /* 955 * Initialize an RX descriptor and attach an MBUF cluster. 956 * Note: the length fields are only 11 bits wide, which means the 957 * largest size we can specify is 2047. This is important because 958 * MCLBYTES is 2048, so we have to subtract one otherwise we'll 959 * overflow the field and make a mess. 960 */ 961 static int 962 vr_newbuf(struct vr_softc *sc, struct vr_chain_onefrag *c, struct mbuf *m) 963 { 964 struct mbuf *m_new = NULL; 965 966 if (m == NULL) { 967 MGETHDR(m_new, MB_DONTWAIT, MT_DATA); 968 if (m_new == NULL) 969 return(ENOBUFS); 970 971 MCLGET(m_new, MB_DONTWAIT); 972 if (!(m_new->m_flags & M_EXT)) { 973 m_freem(m_new); 974 return(ENOBUFS); 975 } 976 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 977 } else { 978 m_new = m; 979 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 980 m_new->m_data = m_new->m_ext.ext_buf; 981 } 982 983 m_adj(m_new, sizeof(uint64_t)); 984 985 c->vr_mbuf = m_new; 986 c->vr_ptr->vr_status = VR_RXSTAT; 987 c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t)); 988 c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN; 989 990 return(0); 991 } 992 993 /* 994 * A frame has been uploaded: pass the resulting mbuf chain up to 995 * the higher level protocols. 996 */ 997 static void 998 vr_rxeof(struct vr_softc *sc) 999 { 1000 struct mbuf *m; 1001 struct ifnet *ifp; 1002 struct vr_chain_onefrag *cur_rx; 1003 int total_len = 0; 1004 uint32_t rxstat; 1005 1006 ifp = &sc->arpcom.ac_if; 1007 1008 while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) & 1009 VR_RXSTAT_OWN)) { 1010 struct mbuf *m0 = NULL; 1011 1012 cur_rx = sc->vr_cdata.vr_rx_head; 1013 sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc; 1014 m = cur_rx->vr_mbuf; 1015 1016 /* 1017 * If an error occurs, update stats, clear the 1018 * status word and leave the mbuf cluster in place: 1019 * it should simply get re-used next time this descriptor 1020 * comes up in the ring. 1021 */ 1022 if (rxstat & VR_RXSTAT_RXERR) { 1023 ifp->if_ierrors++; 1024 if_printf(ifp, "rx error (%02x):", rxstat & 0x000000ff); 1025 if (rxstat & VR_RXSTAT_CRCERR) 1026 printf(" crc error"); 1027 if (rxstat & VR_RXSTAT_FRAMEALIGNERR) 1028 printf(" frame alignment error\n"); 1029 if (rxstat & VR_RXSTAT_FIFOOFLOW) 1030 printf(" FIFO overflow"); 1031 if (rxstat & VR_RXSTAT_GIANT) 1032 printf(" received giant packet"); 1033 if (rxstat & VR_RXSTAT_RUNT) 1034 printf(" received runt packet"); 1035 if (rxstat & VR_RXSTAT_BUSERR) 1036 printf(" system bus error"); 1037 if (rxstat & VR_RXSTAT_BUFFERR) 1038 printf("rx buffer error"); 1039 printf("\n"); 1040 vr_newbuf(sc, cur_rx, m); 1041 continue; 1042 } 1043 1044 /* No errors; receive the packet. 
                total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);

                /*
                 * XXX The VIA Rhine chip includes the CRC with every
                 * received frame, and there's no way to turn this
                 * behavior off (at least, I can't find anything in
                 * the manual that explains how to do it) so we have
                 * to trim off the CRC manually.
                 */
                total_len -= ETHER_CRC_LEN;

                m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
                    total_len + ETHER_ALIGN, 0, ifp, NULL);
                vr_newbuf(sc, cur_rx, m);
                if (m0 == NULL) {
                        ifp->if_ierrors++;
                        continue;
                }
                m_adj(m0, ETHER_ALIGN);
                m = m0;

                ifp->if_ipackets++;
                (*ifp->if_input)(ifp, m);
        }
}

static void
vr_rxeoc(struct vr_softc *sc)
{
        struct ifnet *ifp;
        int i;

        ifp = &sc->arpcom.ac_if;

        ifp->if_ierrors++;

        VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
        DELAY(10000);

        /* Wait for receiver to stop. */
        for (i = 0x400;
            i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
            i--)
                ;       /* Wait for receiver to stop */

        if (i == 0) {
                if_printf(ifp, "rx shutdown error!\n");
                sc->vr_flags |= VR_F_RESTART;
                return;
        }

        vr_rxeof(sc);

        CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
        VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
        VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(struct vr_softc *sc)
{
        struct vr_chain *cur_tx;
        struct ifnet *ifp;

        ifp = &sc->arpcom.ac_if;

        /* Reset the timeout timer; vr_txeoc() will clear it. */
        ifp->if_timer = 5;

        /* Sanity check. */
        if (sc->vr_cdata.vr_tx_head == NULL)
                return;

        /*
         * Go through our tx list and free mbufs for those
         * frames that have been transmitted.
         */
        while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
                uint32_t txstat;
                int i;

                cur_tx = sc->vr_cdata.vr_tx_head;
                txstat = cur_tx->vr_ptr->vr_status;

                if ((txstat & VR_TXSTAT_ABRT) ||
                    (txstat & VR_TXSTAT_UDF)) {
                        for (i = 0x400;
                            i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
                            i--)
                                ;       /* Wait for chip to shutdown */
                        if (i == 0) {
                                if_printf(ifp, "tx shutdown timeout\n");
                                sc->vr_flags |= VR_F_RESTART;
                                break;
                        }
                        VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
                        CSR_WRITE_4(sc, VR_TXADDR, vtophys(cur_tx->vr_ptr));
                        break;
                }

                if (txstat & VR_TXSTAT_OWN)
                        break;

                if (txstat & VR_TXSTAT_ERRSUM) {
                        ifp->if_oerrors++;
                        if (txstat & VR_TXSTAT_DEFER)
                                ifp->if_collisions++;
                        if (txstat & VR_TXSTAT_LATECOLL)
                                ifp->if_collisions++;
                }

                ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;

                ifp->if_opackets++;
                if (cur_tx->vr_mbuf != NULL) {
                        m_freem(cur_tx->vr_mbuf);
                        cur_tx->vr_mbuf = NULL;
                }

                if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
                        sc->vr_cdata.vr_tx_head = NULL;
                        sc->vr_cdata.vr_tx_tail = NULL;
                        break;
                }

                sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
        }
}

/*
 * TX 'end of channel' interrupt handler.
 */
static void
vr_txeoc(struct vr_softc *sc)
{
        struct ifnet *ifp;

        ifp = &sc->arpcom.ac_if;

        if (sc->vr_cdata.vr_tx_head == NULL) {
                ifp->if_flags &= ~IFF_OACTIVE;
                sc->vr_cdata.vr_tx_tail = NULL;
                ifp->if_timer = 0;
        }
}

static void
vr_tick(void *xsc)
{
        struct vr_softc *sc;
        struct mii_data *mii;
        int s;

        s = splimp();

        sc = xsc;
        if (sc->vr_flags & VR_F_RESTART) {
                if_printf(&sc->arpcom.ac_if, "restarting\n");
                vr_stop(sc);
                vr_reset(sc);
                vr_init(sc);
                sc->vr_flags &= ~VR_F_RESTART;
        }

        mii = device_get_softc(sc->vr_miibus);
        mii_tick(mii);

        callout_reset(&sc->vr_stat_timer, hz, vr_tick, sc);

        splx(s);
}

static void
vr_intr(void *arg)
{
        struct vr_softc *sc;
        struct ifnet *ifp;
        uint16_t status;

        sc = arg;
        ifp = &sc->arpcom.ac_if;

        /* Suppress unwanted interrupts. */
        if (!(ifp->if_flags & IFF_UP)) {
                vr_stop(sc);
                return;
        }

        /* Disable interrupts. */
        if ((ifp->if_flags & IFF_POLLING) == 0)
                CSR_WRITE_2(sc, VR_IMR, 0x0000);

        for (;;) {
                status = CSR_READ_2(sc, VR_ISR);
                if (status)
                        CSR_WRITE_2(sc, VR_ISR, status);

                if ((status & VR_INTRS) == 0)
                        break;

                if (status & VR_ISR_RX_OK)
                        vr_rxeof(sc);

                if (status & VR_ISR_RX_DROPPED) {
                        if_printf(ifp, "rx packet lost\n");
                        ifp->if_ierrors++;
                }

                if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
                    (status & VR_ISR_RX_OFLOW)) {
                        if_printf(ifp, "receive error (%04x)", status);
                        if (status & VR_ISR_RX_NOBUF)
                                printf(" no buffers");
                        if (status & VR_ISR_RX_OFLOW)
                                printf(" overflow");
                        if (status & VR_ISR_RX_DROPPED)
                                printf(" packet lost");
                        printf("\n");
                        vr_rxeoc(sc);
                }

                if ((status & VR_ISR_BUSERR) ||
                    (status & VR_ISR_TX_UNDERRUN)) {
                        vr_reset(sc);
                        vr_init(sc);
                        break;
                }

                if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
                    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
                        vr_txeof(sc);
                        if ((status & VR_ISR_UDFI) ||
                            (status & VR_ISR_TX_ABRT2) ||
                            (status & VR_ISR_TX_ABRT)) {
                                ifp->if_oerrors++;
                                if (sc->vr_cdata.vr_tx_head != NULL) {
                                        VR_SETBIT16(sc, VR_COMMAND,
                                            VR_CMD_TX_ON);
                                        VR_SETBIT16(sc, VR_COMMAND,
                                            VR_CMD_TX_GO);
                                }
                        } else {
                                vr_txeoc(sc);
                        }
                }
        }

        /* Re-enable interrupts. */
        if ((ifp->if_flags & IFF_POLLING) == 0)
                CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

        if (ifp->if_snd.ifq_head != NULL)
                vr_start(ifp);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
vr_encap(struct vr_softc *sc, struct vr_chain *c, struct mbuf *m_head)
{
        int frag = 0;
        struct vr_desc *f = NULL;
        int total_len;
        struct mbuf *m_new;

        total_len = 0;

        /*
         * The VIA Rhine wants packet buffers to be longword
         * aligned, but very often our mbufs aren't.  Rather than
         * waste time trying to decide when to copy and when not
         * to copy, just do it all the time.
         */
1321 */ 1322 MGETHDR(m_new, MB_DONTWAIT, MT_DATA); 1323 if (m_new == NULL) { 1324 if_printf(&sc->arpcom.ac_if, "no memory for tx list\n"); 1325 return(1); 1326 } 1327 if (m_head->m_pkthdr.len > MHLEN) { 1328 MCLGET(m_new, MB_DONTWAIT); 1329 if (!(m_new->m_flags & M_EXT)) { 1330 m_freem(m_new); 1331 if_printf(&sc->arpcom.ac_if, 1332 "no memory for tx list\n"); 1333 return(1); 1334 } 1335 } 1336 m_copydata(m_head, 0, m_head->m_pkthdr.len, 1337 mtod(m_new, caddr_t)); 1338 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; 1339 /* 1340 * The Rhine chip doesn't auto-pad, so we have to make 1341 * sure to pad short frames out to the minimum frame length 1342 * ourselves. 1343 */ 1344 if (m_new->m_len < VR_MIN_FRAMELEN) { 1345 m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len; 1346 m_new->m_len = m_new->m_pkthdr.len; 1347 } 1348 f = c->vr_ptr; 1349 f->vr_data = vtophys(mtod(m_new, caddr_t)); 1350 f->vr_ctl = total_len = m_new->m_len; 1351 f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG; 1352 f->vr_status = 0; 1353 frag = 1; 1354 1355 c->vr_mbuf = m_new; 1356 c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT; 1357 c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr); 1358 1359 return(0); 1360 } 1361 1362 /* 1363 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1364 * to the mbuf data regions directly in the transmit lists. We also save a 1365 * copy of the pointers since the transmit list fragment pointers are 1366 * physical addresses. 1367 */ 1368 static void 1369 vr_start(struct ifnet *ifp) 1370 { 1371 struct vr_softc *sc; 1372 struct mbuf *m_head = NULL; 1373 struct vr_chain *cur_tx = NULL, *start_tx; 1374 1375 sc = ifp->if_softc; 1376 1377 if (ifp->if_flags & IFF_OACTIVE) 1378 return; 1379 1380 /* Check for an available queue slot. If there are none, punt. */ 1381 if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) { 1382 ifp->if_flags |= IFF_OACTIVE; 1383 return; 1384 } 1385 1386 start_tx = sc->vr_cdata.vr_tx_free; 1387 1388 while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) { 1389 IF_DEQUEUE(&ifp->if_snd, m_head); 1390 if (m_head == NULL) 1391 break; 1392 1393 /* Pick a descriptor off the free list. */ 1394 cur_tx = sc->vr_cdata.vr_tx_free; 1395 sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc; 1396 1397 /* Pack the data into the descriptor. */ 1398 if (vr_encap(sc, cur_tx, m_head)) { 1399 IF_PREPEND(&ifp->if_snd, m_head); 1400 ifp->if_flags |= IFF_OACTIVE; 1401 cur_tx = NULL; 1402 break; 1403 } 1404 1405 if (cur_tx != start_tx) 1406 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; 1407 1408 BPF_MTAP(ifp, m_head); 1409 m_freem(m_head); 1410 1411 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; 1412 VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO); 1413 } 1414 1415 /* If there are no frames queued, bail. */ 1416 if (cur_tx == NULL) 1417 return; 1418 1419 sc->vr_cdata.vr_tx_tail = cur_tx; 1420 1421 if (sc->vr_cdata.vr_tx_head == NULL) 1422 sc->vr_cdata.vr_tx_head = start_tx; 1423 1424 /* 1425 * Set a timeout in case the chip goes out to lunch. 1426 */ 1427 ifp->if_timer = 5; 1428 } 1429 1430 static void 1431 vr_init(void *xsc) 1432 { 1433 struct vr_softc *sc = xsc; 1434 struct ifnet *ifp = &sc->arpcom.ac_if; 1435 struct mii_data *mii; 1436 int s, i; 1437 1438 s = splimp(); 1439 1440 mii = device_get_softc(sc->vr_miibus); 1441 1442 /* Cancel pending I/O and free all RX/TX buffers. */ 1443 vr_stop(sc); 1444 vr_reset(sc); 1445 1446 /* Set our station address. 
        for (i = 0; i < ETHER_ADDR_LEN; i++)
                CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);

        /* Set DMA size. */
        VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
        VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

        /*
         * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
         * so we must set both.
         */
        VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
        VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

        VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
        VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);

        VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
        VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

        VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
        VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

        /* Init circular RX list. */
        if (vr_list_rx_init(sc) == ENOBUFS) {
                if_printf(ifp,
                    "initialization failed: no memory for rx buffers\n");
                vr_stop(sc);
                splx(s);
                return;
        }

        /* Init tx descriptors. */
        vr_list_tx_init(sc);

        /* If we want promiscuous mode, set the allframes bit. */
        if (ifp->if_flags & IFF_PROMISC)
                VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
        else
                VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

        /* Set capture broadcast bit to capture broadcast frames. */
        if (ifp->if_flags & IFF_BROADCAST)
                VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
        else
                VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

        /*
         * Program the multicast filter, if necessary.
         */
        vr_setmulti(sc);

        /*
         * Load the address of the RX list.
         */
        CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));

        /* Enable receiver and transmitter. */
        CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
            VR_CMD_TX_ON|VR_CMD_RX_ON|
            VR_CMD_RX_GO);

        CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));

        /*
         * Enable interrupts, unless we are polling.
         */
        CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
        if ((ifp->if_flags & IFF_POLLING) == 0)
                CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

        mii_mediachg(mii);

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;

        splx(s);

        callout_reset(&sc->vr_stat_timer, hz, vr_tick, sc);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(struct ifnet *ifp)
{
        struct vr_softc *sc;

        sc = ifp->if_softc;

        if (ifp->if_flags & IFF_UP)
                vr_init(sc);

        return(0);
}
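
/*
 * Media changes take effect through vr_init() above, which resets and
 * reprograms the chip and then calls mii_mediachg() to apply the newly
 * selected media.
 */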
1545 */ 1546 static void 1547 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1548 { 1549 struct vr_softc *sc; 1550 struct mii_data *mii; 1551 1552 sc = ifp->if_softc; 1553 mii = device_get_softc(sc->vr_miibus); 1554 mii_pollstat(mii); 1555 ifmr->ifm_active = mii->mii_media_active; 1556 ifmr->ifm_status = mii->mii_media_status; 1557 } 1558 1559 static int 1560 vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 1561 { 1562 struct vr_softc *sc = ifp->if_softc; 1563 struct ifreq *ifr = (struct ifreq *) data; 1564 struct mii_data *mii; 1565 int s, error = 0; 1566 1567 s = splimp(); 1568 1569 switch(command) { 1570 case SIOCSIFADDR: 1571 case SIOCGIFADDR: 1572 case SIOCSIFMTU: 1573 error = ether_ioctl(ifp, command, data); 1574 break; 1575 case SIOCSIFFLAGS: 1576 if (ifp->if_flags & IFF_UP) { 1577 vr_init(sc); 1578 } else { 1579 if (ifp->if_flags & IFF_RUNNING) 1580 vr_stop(sc); 1581 } 1582 error = 0; 1583 break; 1584 case SIOCADDMULTI: 1585 case SIOCDELMULTI: 1586 vr_setmulti(sc); 1587 error = 0; 1588 break; 1589 case SIOCGIFMEDIA: 1590 case SIOCSIFMEDIA: 1591 mii = device_get_softc(sc->vr_miibus); 1592 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1593 break; 1594 default: 1595 error = EINVAL; 1596 break; 1597 } 1598 1599 splx(s); 1600 1601 return(error); 1602 } 1603 1604 #ifdef DEVICE_POLLING 1605 static void 1606 vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1607 { 1608 struct vr_softc *sc = ifp->if_softc; 1609 1610 if (cmd == POLL_DEREGISTER) 1611 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1612 else 1613 vr_intr(sc); 1614 } 1615 #endif 1616 1617 static void 1618 vr_watchdog(struct ifnet *ifp) 1619 { 1620 struct vr_softc *sc; 1621 1622 sc = ifp->if_softc; 1623 1624 ifp->if_oerrors++; 1625 if_printf(ifp, "watchdog timeout\n"); 1626 1627 #ifdef DEVICE_POLLING 1628 if (++sc->vr_wdogerrors == 1 && (ifp->if_flags & IFF_POLLING) == 0) { 1629 if_printf(ifp, "ints don't seem to be working, " 1630 "emergency switch to polling\n"); 1631 emergency_poll_enable("if_vr"); 1632 if (ether_poll_register(vr_poll, ifp)) 1633 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1634 } else 1635 #endif 1636 { 1637 vr_stop(sc); 1638 vr_reset(sc); 1639 vr_init(sc); 1640 } 1641 1642 if (ifp->if_snd.ifq_head != NULL) 1643 vr_start(ifp); 1644 } 1645 1646 /* 1647 * Stop the adapter and free any mbufs allocated to the 1648 * RX and TX lists. 1649 */ 1650 static void 1651 vr_stop(struct vr_softc *sc) 1652 { 1653 int i; 1654 struct ifnet *ifp; 1655 1656 ifp = &sc->arpcom.ac_if; 1657 ifp->if_timer = 0; 1658 1659 callout_stop(&sc->vr_stat_timer); 1660 1661 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP); 1662 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON)); 1663 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1664 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); 1665 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); 1666 1667 /* 1668 * Free data in the RX lists. 1669 */ 1670 for (i = 0; i < VR_RX_LIST_CNT; i++) { 1671 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) { 1672 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf); 1673 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL; 1674 } 1675 } 1676 bzero((char *)&sc->vr_ldata->vr_rx_list, 1677 sizeof(sc->vr_ldata->vr_rx_list)); 1678 1679 /* 1680 * Free the TX list buffers. 
1681 */ 1682 for (i = 0; i < VR_TX_LIST_CNT; i++) { 1683 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) { 1684 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf); 1685 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL; 1686 } 1687 } 1688 1689 bzero((char *)&sc->vr_ldata->vr_tx_list, 1690 sizeof(sc->vr_ldata->vr_tx_list)); 1691 1692 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1693 } 1694 1695 /* 1696 * Stop all chip I/O so that the kernel's probe routines don't 1697 * get confused by errant DMAs when rebooting. 1698 */ 1699 static void 1700 vr_shutdown(device_t dev) 1701 { 1702 struct vr_softc *sc; 1703 1704 sc = device_get_softc(dev); 1705 1706 vr_stop(sc); 1707 } 1708