/*
 * Copyright (c) 2004
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/vge/if_vge.c,v 1.24 2006/02/14 12:44:56 glebius Exp $
 * $DragonFly: src/sys/dev/netif/vge/if_vge.c,v 1.9 2008/05/16 13:19:12 sephe Exp $
 */

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32-bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you must make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */
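/*
 * Illustrative sketch (not part of the register spec): with a 64-bit
 * bus_addr_t, the split described above works out to roughly
 *
 *	lo32 = addr & 0xFFFFFFFF;	-> descriptor vge_addrlo
 *	hi16 = (addr >> 32) & 0xFFFF;	-> descriptor vge_addrhi
 *	seg  = (addr >> 48) & 0xFFFF;	-> shared I/O register
 *
 * which is why all buffers must fall within one 48-bit 'segment':
 * the chip applies a single 'seg' value to every descriptor.
 */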
#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/serialize.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/interrupt.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <net/bpf.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include "miibus_if.h"

#include <dev/netif/vge/if_vgereg.h>
#include <dev/netif/vge/if_vgevar.h>

#define VGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
 */
static const struct vge_type vge_devs[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT612X,
	  "VIA Networking Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int	vge_probe(device_t);
static int	vge_attach(device_t);
static int	vge_detach(device_t);

static int	vge_encap(struct vge_softc *, struct mbuf *, int);

static void	vge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static void	vge_dma_map_rx_desc(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static void	vge_dma_map_tx_desc(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static int	vge_dma_alloc(device_t);
static void	vge_dma_free(struct vge_softc *);
static int	vge_newbuf(struct vge_softc *, int, struct mbuf *);
static int	vge_rx_list_init(struct vge_softc *);
static int	vge_tx_list_init(struct vge_softc *);
#ifdef VGE_FIXUP_RX
static __inline void
		vge_fixup_rx(struct mbuf *);
#endif
static void	vge_rxeof(struct vge_softc *, int);
static void	vge_txeof(struct vge_softc *);
static void	vge_intr(void *);
static void	vge_tick(struct vge_softc *);
static void	vge_start(struct ifnet *);
static int	vge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	vge_init(void *);
static void	vge_stop(struct vge_softc *);
static void	vge_watchdog(struct ifnet *);
static int	vge_suspend(device_t);
static int	vge_resume(device_t);
static void	vge_shutdown(device_t);
static int	vge_ifmedia_upd(struct ifnet *);
static void	vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

#ifdef VGE_EEPROM
static void	vge_eeprom_getword(struct vge_softc *, int, uint16_t *);
#endif
static void	vge_read_eeprom(struct vge_softc *, uint8_t *, int, int, int);

static void	vge_miipoll_start(struct vge_softc *);
static void	vge_miipoll_stop(struct vge_softc *);
static int	vge_miibus_readreg(device_t, int, int);
static int	vge_miibus_writereg(device_t, int, int, int);
static void	vge_miibus_statchg(device_t);

static void	vge_cam_clear(struct vge_softc *);
static int	vge_cam_set(struct vge_softc *, uint8_t *);
static void	vge_setmulti(struct vge_softc *);
static void	vge_reset(struct vge_softc *);

#ifdef DEVICE_POLLING
static void	vge_poll(struct ifnet *, enum poll_cmd, int);
static void	vge_disable_intr(struct vge_softc *);
#endif
static void	vge_enable_intr(struct vge_softc *, uint32_t);

#define VGE_PCI_LOIO	0x10
#define VGE_PCI_LOMEM	0x14

static device_method_t vge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vge_probe),
	DEVMETHOD(device_attach,	vge_attach),
	DEVMETHOD(device_detach,	vge_detach),
	DEVMETHOD(device_suspend,	vge_suspend),
	DEVMETHOD(device_resume,	vge_resume),
	DEVMETHOD(device_shutdown,	vge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vge_miibus_statchg),

	{ 0, 0 }
};

static driver_t vge_driver = {
	"vge",
	vge_methods,
	sizeof(struct vge_softc)
};

static devclass_t vge_devclass;

DECLARE_DUMMY_MODULE(if_vge);
MODULE_DEPEND(if_vge, miibus, 1, 1, 1);
DRIVER_MODULE(if_vge, pci, vge_driver, vge_devclass, 0, 0);
DRIVER_MODULE(if_vge, cardbus, vge_driver, vge_devclass, 0, 0);
DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);

#ifdef VGE_EEPROM
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)
{
	uint16_t word = 0;
	int i;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}
	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "EEPROM read timed out\n");
		*dest = 0;
		return;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	*dest = word;
}
#endif
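/*
 * When VGE_EEPROM is not defined, vge_read_eeprom() below does not
 * touch the EEPROM at all: it simply reads the station address back
 * out of the PAR0 registers, which vge_reset() asks the chip to
 * reload from the EEPROM via VGE_EECSR_RELOAD. The 'off', 'cnt' and
 * 'swap' arguments are only honored in the VGE_EEPROM case.
 */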
/*
 * Read a sequence of words from the EEPROM.
 */
static void
vge_read_eeprom(struct vge_softc *sc, uint8_t *dest, int off, int cnt, int swap)
{
	int i;
#ifdef VGE_EEPROM
	uint16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		vge_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
#else
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
#endif
}

static void
vge_miipoll_stop(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}
	if (i == VGE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "failed to idle MII autopoll\n");
}

static void
vge_miipoll_start(struct vge_softc *sc)
{
	int i;

	/* First, make sure we're idle. */
	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "failed to idle MII autopoll\n");
		return;
	}

	/* Now enable auto poll mode. */
	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}
	if (i == VGE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "failed to start MII autopoll\n");
}

static int
vge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vge_softc *sc;
	int i;
	uint16_t rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return (0);

	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "MII read timed out\n");
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);

	return (rval);
}

static int
vge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vge_softc *sc;
	int i, rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return (0);

	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "MII write timed out\n");
		rval = EIO;
	}

	vge_miipoll_start(sc);

	return (rval);
}
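/*
 * A quick summary of the MII access dance used above: the chip's MII
 * autopoll state machine owns the management bus by default, so every
 * manual access must first idle it (vge_miipoll_stop()), then program
 * VGE_MIIADDR and issue a read/write command that self-clears on
 * completion, and finally restart autopolling (vge_miipoll_start()).
 */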
static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;
}

static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return (ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx / 8),
	    1 << (sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}

/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast addresses,
 * we use the hash filter instead.
 */
static void
vge_setmulti(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0;
	struct ifmultiaddr *ifma;
	uint32_t h, hashes[2] = { 0, 0 };

	/* First, zot all the multicast entries. */
	vge_cam_clear(sc);
	CSR_WRITE_4(sc, VGE_MAR0, 0);
	CSR_WRITE_4(sc, VGE_MAR1, 0);

	/*
	 * If the user wants allmulti or promisc mode, enable reception
	 * of all multicast frames.
	 */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* Now program new ones */
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		error = vge_cam_set(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (error)
			break;
	}

	/* If there were too many addresses, use the hash filter. */
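	/*
	 * The hash is the top 6 bits of the big-endian CRC32 of each
	 * address; bits 0-31 of the result land in VGE_MAR0 and bits
	 * 32-63 in VGE_MAR1.
	 */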
	if (error) {
		vge_cam_clear(sc);

		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}

		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	}
}

static void
vge_reset(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "soft reset timed out\n");
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);

	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "EEPROM reload timed out\n");
		return;
	}

	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vge_probe(device_t dev)
{
	const struct vge_type *t;
	uint16_t did, vid;

	did = pci_get_device(dev);
	vid = pci_get_vendor(dev);
	for (t = vge_devs; t->vge_name != NULL; ++t) {
		if (vid == t->vge_vid && did == t->vge_did) {
			device_set_desc(dev, t->vge_name);
			return 0;
		}
	}
	return (ENXIO);
}

static void
vge_dma_map_rx_desc(void *arg, bus_dma_segment_t *segs, int nseg,
		    bus_size_t mapsize, int error)
{
	struct vge_dmaload_arg *ctx;
	struct vge_rx_desc *d = NULL;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/*
	 * Map the segment array into descriptors.
	 */
	d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */
	if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
		if_printf(&ctx->sc->arpcom.ac_if,
		    "tried to map busy descriptor\n");
		ctx->vge_maxsegs = 0;
		return;
	}

	d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
	d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
	d->vge_sts = 0;
	d->vge_ctl = 0;

	ctx->vge_maxsegs = 1;
}
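/*
 * Note on the convention shared by vge_dma_map_rx_desc() above and
 * vge_dma_map_tx_desc() below: since a bus_dmamap_load() callback
 * cannot return an error, 'vge_maxsegs' doubles as the status channel.
 * It goes in holding the maximum segment count the caller can accept
 * and comes back as 0 on failure (or the actual mapping count on
 * success), which the callers check after the load returns.
 */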
static void
vge_dma_map_tx_desc(void *arg, bus_dma_segment_t *segs, int nseg,
		    bus_size_t mapsize, int error)
{
	struct vge_dmaload_arg *ctx;
	struct vge_tx_desc *d = NULL;
	struct vge_tx_frag *f;
	int i = 0;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/* Map the segment array into descriptors. */
	d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */
	if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
		ctx->vge_maxsegs = 0;
		return;
	}

	for (i = 0; i < nseg; i++) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
	}

	/* Argh. This chip does not autopad short frames */
	if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
		    ctx->vge_m0->m_pkthdr.len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
		ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN;
		i++;
	}

	/*
	 * When telling the chip how many segments there are, we
	 * must use nsegs + 1 instead of just nsegs. Darned if I
	 * know why.
	 */
	i++;

	d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16;
	d->vge_ctl = ctx->vge_flags | (i << 28) | VGE_TD_LS_NORM;

	if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
		d->vge_ctl |= VGE_TDCTL_JUMBO;

	ctx->vge_maxsegs = nseg;
}

/*
 * Map a single buffer address.
 */
static void
vge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	*((bus_addr_t *)arg) = segs->ds_addr;
}

static int
vge_dma_alloc(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	int error, nseg, i, tx_pos = 0, rx_pos = 0;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define VGE_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
			1, 0,			/* alignment, boundary */
			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			MAXBSIZE, VGE_NSEG_NEW,	/* maxsize, nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			BUS_DMA_ALLOCNOW,	/* flags */
			&sc->vge_parent_tag);
	if (error) {
		device_printf(dev, "can't create parent dma tag\n");
		return error;
	}

	/*
	 * Allocate map for RX mbufs.
	 */
	nseg = 32;
	error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES * nseg, nseg, MCLBYTES,
				   BUS_DMA_ALLOCNOW, &sc->vge_ldata.vge_mtag);
	if (error) {
		device_printf(dev, "could not allocate mbuf dma tag\n");
		return error;
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ,
				   BUS_DMA_ALLOCNOW,
				   &sc->vge_ldata.vge_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate tx list dma tag\n");
		return error;
	}

	/* Allocate DMA'able memory for the TX ring */
	error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag,
				 (void **)&sc->vge_ldata.vge_tx_list,
				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &sc->vge_ldata.vge_tx_list_map);
	if (error) {
		device_printf(dev, "could not allocate tx list dma memory\n");
		return error;
	}

	/* Load the map for the TX ring. */
	error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag,
				sc->vge_ldata.vge_tx_list_map,
				sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ,
				vge_dma_map_addr,
				&sc->vge_ldata.vge_tx_list_addr,
				BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "could not load tx list\n");
		bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
				sc->vge_ldata.vge_tx_list,
				sc->vge_ldata.vge_tx_list_map);
		sc->vge_ldata.vge_tx_list = NULL;
		return error;
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
					  &sc->vge_ldata.vge_tx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for TX\n");
			tx_pos = i;
			goto map_fail;
		}
	}
	tx_pos = VGE_TX_DESC_CNT;

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ,
				   BUS_DMA_ALLOCNOW,
				   &sc->vge_ldata.vge_rx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate rx list dma tag\n");
		return error;
	}

	/* Allocate DMA'able memory for the RX ring */
	error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag,
				 (void **)&sc->vge_ldata.vge_rx_list,
				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &sc->vge_ldata.vge_rx_list_map);
	if (error) {
		device_printf(dev, "could not allocate rx list dma memory\n");
		return error;
	}

	/* Load the map for the RX ring. */
	error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag,
				sc->vge_ldata.vge_rx_list_map,
				sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ,
				vge_dma_map_addr,
				&sc->vge_ldata.vge_rx_list_addr,
				BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "could not load rx list\n");
		bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
				sc->vge_ldata.vge_rx_list,
				sc->vge_ldata.vge_rx_list_map);
		sc->vge_ldata.vge_rx_list = NULL;
		return error;
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
					  &sc->vge_ldata.vge_rx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for RX\n");
			rx_pos = i;
			goto map_fail;
		}
	}
	return (0);

map_fail:
	for (i = 0; i < tx_pos; ++i) {
		error = bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_tx_dmamap[i]);
	}
	for (i = 0; i < rx_pos; ++i) {
		error = bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_rx_dmamap[i]);
	}
	bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
	sc->vge_ldata.vge_mtag = NULL;

	return error;
}

static void
vge_dma_free(struct vge_softc *sc)
{
	/* Unload and free the RX DMA ring memory and map */
	if (sc->vge_ldata.vge_rx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag,
				  sc->vge_ldata.vge_rx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
				sc->vge_ldata.vge_rx_list,
				sc->vge_ldata.vge_rx_list_map);
	}

	if (sc->vge_ldata.vge_rx_list_tag)
		bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag);

	/* Unload and free the TX DMA ring memory and map */
	if (sc->vge_ldata.vge_tx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag,
				  sc->vge_ldata.vge_tx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
				sc->vge_ldata.vge_tx_list,
				sc->vge_ldata.vge_tx_list_map);
	}
	}

	if (sc->vge_ldata.vge_tx_list_tag)
		bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag);

	/* Destroy all the RX and TX buffer maps */
	if (sc->vge_ldata.vge_mtag) {
		int i;

		for (i = 0; i < VGE_TX_DESC_CNT; i++) {
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_tx_dmamap[i]);
		}
		for (i = 0; i < VGE_RX_DESC_CNT; i++) {
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_rx_dmamap[i]);
		}
		bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
	}

	if (sc->vge_parent_tag)
		bus_dma_tag_destroy(sc->vge_parent_tag);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vge_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct vge_softc *sc;
	struct ifnet *ifp;
	int error = 0;

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	/* Initialize if_xname early, so if_printf() can be used */
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	sc->vge_res_rid = VGE_PCI_LOMEM;
	sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					     &sc->vge_res_rid, RF_ACTIVE);
	if (sc->vge_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		return ENXIO;
	}

	sc->vge_btag = rman_get_bustag(sc->vge_res);
	sc->vge_bhandle = rman_get_bushandle(sc->vge_res);

	/* Allocate interrupt */
	sc->vge_irq_rid = 0;
	sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->vge_irq_rid,
					     RF_SHAREABLE | RF_ACTIVE);
	if (sc->vge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	vge_read_eeprom(sc, eaddr, VGE_EE_EADDR, 3, 0);

	/* Allocate DMA-related resources */
	error = vge_dma_alloc(dev);
	if (error)
		goto fail;

	/* Do MII setup */
	error = mii_phy_probe(dev, &sc->vge_miibus, vge_ifmedia_upd,
			      vge_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy!\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vge_init;
	ifp->if_start = vge_start;
	ifp->if_watchdog = vge_watchdog;
	ifp->if_ioctl = vge_ioctl;
#ifdef DEVICE_POLLING
	ifp->if_poll = vge_poll;
#endif
	ifp->if_hwassist = VGE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_VLAN_MTU |
			       IFCAP_HWCSUM |
			       IFCAP_VLAN_HWTAGGING;
	ifp->if_capenable = ifp->if_capabilities;
	ifq_set_maxlen(&ifp->if_snd, VGE_IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr, NULL);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->vge_irq, INTR_MPSAFE, vge_intr, sc,
			       &sc->vge_intrhand, ifp->if_serializer);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->vge_irq));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return 0;
fail:
	vge_detach(dev);
	return error;
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vge_detach(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);

		vge_stop(sc);
		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
		/*
		 * Force off the IFF_UP flag here, in case someone
		 * still had a BPF descriptor attached to this
		 * interface. If they do, ether_ifdetach() will cause
		 * the BPF code to try and clear the promisc mode
		 * flag, which will bubble down to vge_ioctl(),
		 * which will try to call vge_init() again. This will
		 * turn the NIC back on and restart the MII ticker,
		 * which will panic the system when the kernel tries
		 * to invoke the vge_tick() function that isn't there
		 * anymore.
		 */
		ifp->if_flags &= ~IFF_UP;

		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->vge_miibus)
		device_delete_child(dev, sc->vge_miibus);
	bus_generic_detach(dev);

	if (sc->vge_irq) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->vge_irq_rid,
				     sc->vge_irq);
	}

	if (sc->vge_res) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->vge_res_rid,
				     sc->vge_res);
	}

	vge_dma_free(sc);
	return (0);
}
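/*
 * vge_newbuf() below serves double duty: with m == NULL it allocates
 * a fresh cluster mbuf for the given RX slot, while with a non-NULL
 * 'm' it recycles the caller's mbuf (resetting m_data to the start of
 * the cluster) -- the error paths in vge_rxeof() use the latter to put
 * a buffer back without allocating.
 */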
static int
vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
{
	struct vge_dmaload_arg arg;
	struct mbuf *n = NULL;
	int i, error;

	if (m == NULL) {
		n = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
		if (n == NULL)
			return (ENOBUFS);
		m = n;
	} else {
		m->m_data = m->m_ext.ext_buf;
	}

#ifdef VGE_FIXUP_RX
	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The VIA chip requires RX buffers to be aligned on 32-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back two bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
	m_adj(m, VGE_ETHER_ALIGN);
#else
	m->m_len = m->m_pkthdr.len = MCLBYTES;
#endif

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_maxsegs = 1;
	arg.vge_flags = 0;

	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag,
				     sc->vge_ldata.vge_rx_dmamap[idx], m,
				     vge_dma_map_rx_desc, &arg, BUS_DMA_NOWAIT);
	if (error || arg.vge_maxsegs != 1) {
		if (n != NULL)
			m_freem(n);
		return (ENOMEM);
	}

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
#define VGE_RXCHUNK 4
	sc->vge_rx_consumed++;
	if (sc->vge_rx_consumed == VGE_RXCHUNK) {
		for (i = idx; i != idx - sc->vge_rx_consumed; i--) {
			sc->vge_ldata.vge_rx_list[i].vge_sts |=
			    htole32(VGE_RDSTS_OWN);
		}
		sc->vge_rx_consumed = 0;
	}

	sc->vge_ldata.vge_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->vge_ldata.vge_mtag,
			sc->vge_ldata.vge_rx_dmamap[idx], BUS_DMASYNC_PREREAD);

	return (0);
}

static int
vge_tx_list_init(struct vge_softc *sc)
{
	bzero(sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
	bzero(&sc->vge_ldata.vge_tx_mbuf,
	      VGE_TX_DESC_CNT * sizeof(struct mbuf *));

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
			sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE);
	sc->vge_ldata.vge_tx_prodidx = 0;
	sc->vge_ldata.vge_tx_considx = 0;
	sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;

	return (0);
}

static int
vge_rx_list_init(struct vge_softc *sc)
{
	int i;

	bzero(sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
	bzero(&sc->vge_ldata.vge_rx_mbuf,
	      VGE_RX_DESC_CNT * sizeof(struct mbuf *));

	sc->vge_rx_consumed = 0;

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Flush the RX descriptors */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
			sc->vge_ldata.vge_rx_list_map,
			BUS_DMASYNC_PREWRITE);

	sc->vge_ldata.vge_rx_prodidx = 0;
	sc->vge_rx_consumed = 0;
	sc->vge_head = sc->vge_tail = NULL;
	return (0);
}

#ifdef VGE_FIXUP_RX
static __inline void
vge_fixup_rx(struct mbuf *m)
{
	uint16_t *src, *dst;
	int i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif
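/*
 * The copy loop in vge_fixup_rx() above slides the received frame back
 * two bytes (ETHER_ALIGN), one 16-bit word at a time, undoing the
 * offset added in vge_newbuf() so the IP header ends up 32-bit aligned
 * on strict-alignment machines; the '+ 1' copies one extra word to
 * cover a trailing odd byte.
 */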
/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static void
vge_rxeof(struct vge_softc *sc, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m;
	int i, total_len, lim = 0;
	struct vge_rx_desc *cur_rx;
	uint32_t rxstat, rxctl;

	ASSERT_SERIALIZED(ifp->if_serializer);

	i = sc->vge_ldata.vge_rx_prodidx;

	/* Invalidate the descriptor memory */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
			sc->vge_ldata.vge_rx_list_map, BUS_DMASYNC_POSTREAD);

	while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {
#ifdef DEVICE_POLLING
		if (count >= 0 && count-- == 0)
			break;
#endif

		cur_rx = &sc->vge_ldata.vge_rx_list[i];
		m = sc->vge_ldata.vge_rx_mbuf[i];
		total_len = VGE_RXBYTES(cur_rx);
		rxstat = le32toh(cur_rx->vge_sts);
		rxctl = le32toh(cur_rx->vge_ctl);

		/* Invalidate the RX mbuf and unload its map */
		bus_dmamap_sync(sc->vge_ldata.vge_mtag,
				sc->vge_ldata.vge_rx_dmamap[i],
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
				  sc->vge_ldata.vge_rx_dmamap[i]);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
			if (sc->vge_head == NULL) {
				sc->vge_head = sc->vge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
				sc->vge_tail = m;
			}
			vge_newbuf(sc, i, NULL);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM) &&
		    !(rxstat & VGE_RDSTS_CSUMERR)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (vge_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		VGE_RX_DESC_INC(i);

		if (sc->vge_head != NULL) {
			m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
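			 * (If the CRC straddles this buffer and the
			 * previous one, the previous buffer's tail is
			 * trimmed by the difference below.)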
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
			}
			m = sc->vge_head;
			sc->vge_head = sc->vge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else {
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
		}

#ifdef VGE_FIXUP_RX
		vge_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */
		if (ifp->if_capenable & IFCAP_RXCSUM) {
			/* Check IP header checksum */
			if (rxctl & VGE_RDCTL_IPPKT)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (rxctl & VGE_RDCTL_IPCSUMOK)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

			/* Check TCP/UDP checksum */
			if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) &&
			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR|
				    CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		if (rxstat & VGE_RDSTS_VTAG) {
			m->m_flags |= M_VLANTAG;
			m->m_pkthdr.ether_vlantag =
			    ntohs((rxctl & VGE_RDCTL_VLANID));
		}
		ifp->if_input(ifp, m);

		lim++;
		if (lim == VGE_RX_DESC_CNT)
			break;
	}

	/* Flush the RX DMA ring */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
			sc->vge_ldata.vge_rx_list_map,
			BUS_DMASYNC_PREWRITE);

	sc->vge_ldata.vge_rx_prodidx = i;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
}

static void
vge_txeof(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t txstat;
	int idx;

	idx = sc->vge_ldata.vge_tx_considx;

	/* Invalidate the TX descriptor list */
	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
			sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_POSTREAD);

	while (idx != sc->vge_ldata.vge_tx_prodidx) {
		txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
		if (txstat & VGE_TDSTS_OWN)
			break;

		m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
		sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
				  sc->vge_ldata.vge_tx_dmamap[idx]);
		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & VGE_TDSTS_TXERR)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;

		sc->vge_ldata.vge_tx_free++;
		VGE_TX_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */
	if (idx != sc->vge_ldata.vge_tx_considx) {
		sc->vge_ldata.vge_tx_considx = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT)
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
}

static void
vge_tick(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	mii = device_get_softc(sc->vge_miibus);

	mii_tick(mii);
	if (sc->vge_link) {
		if (!(mii->mii_media_status & IFM_ACTIVE))
			sc->vge_link = 0;
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->vge_link = 1;
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}
}

#ifdef DEVICE_POLLING
static void
vge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vge_softc *sc = ifp->if_softc;

	sc->rxcycles = count;

	switch (cmd) {
	case POLL_REGISTER:
		vge_disable_intr(sc);
		break;
	case POLL_DEREGISTER:
		vge_enable_intr(sc, 0xffffffff);
		break;
	case POLL_ONLY:
	case POLL_AND_CHECK_STATUS:
		vge_rxeof(sc, count);
		vge_txeof(sc);

		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);

		/* XXX copy & paste from vge_intr */
		if (cmd == POLL_AND_CHECK_STATUS) {
			uint32_t status = 0;

			status = CSR_READ_4(sc, VGE_ISR);
			if (status == 0xffffffff)
				break;

			if (status)
				CSR_WRITE_4(sc, VGE_ISR, status);

			if (status & (VGE_ISR_TXDMA_STALL |
				      VGE_ISR_RXDMA_STALL))
				vge_init(sc);

			if (status & (VGE_ISR_RXOFLOW | VGE_ISR_RXNODESC)) {
				ifp->if_ierrors++;
				CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
				CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
			}
		}
		break;
	}
}
#endif	/* DEVICE_POLLING */
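/*
 * Interrupt handling strategy, in brief: interrupts are gated globally
 * via the CR3 GMSK bit rather than by touching VGE_IMR, the ISR is
 * read and acknowledged by writing the status bits back to it, and the
 * loop repeats until no interesting (VGE_INTRS) bits remain set.
 */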
static void
vge_intr(void *arg)
{
	struct vge_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	if (sc->suspended || !(ifp->if_flags & IFF_UP))
		return;

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);

	for (;;) {
		status = CSR_READ_4(sc, VGE_ISR);
		/* If the card has gone away the read returns 0xffffffff. */
		if (status == 0xFFFFFFFF)
			break;

		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		if ((status & VGE_INTRS) == 0)
			break;

		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc, -1);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc, -1);
			ifp->if_ierrors++;
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
			vge_init(sc);

		if (status & VGE_ISR_LINKSTS)
			vge_tick(sc);
	}

	/* Re-enable interrupts */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static int
vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
{
	struct vge_dmaload_arg arg;
	bus_dmamap_t map;
	int error;

	arg.vge_flags = 0;

	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
		arg.vge_flags |= VGE_TDCTL_IPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
		arg.vge_flags |= VGE_TDCTL_TCPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
		arg.vge_flags |= VGE_TDCTL_UDPCSUM;

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_m0 = m_head;
	arg.vge_maxsegs = VGE_TX_FRAGS;

	map = sc->vge_ldata.vge_tx_dmamap[idx];
	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map, m_head,
				     vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "can't map mbuf (error %d)\n",
			  error);
		goto fail;
	}

	/* Too many segments to map, coalesce into a single mbuf */
	if (error || arg.vge_maxsegs == 0) {
		struct mbuf *m_new;

		m_new = m_defrag(m_head, MB_DONTWAIT);
		if (m_new == NULL) {
			error = ENOBUFS;
			goto fail;
		} else {
			m_head = m_new;
		}

		arg.sc = sc;
		arg.vge_m0 = m_head;
		arg.vge_idx = idx;
		arg.vge_maxsegs = 1;

		error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
					     m_head, vge_dma_map_tx_desc, &arg,
					     BUS_DMA_NOWAIT);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "can't map mbuf (error %d)\n", error);
			goto fail;
		}
	}

	sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
	sc->vge_ldata.vge_tx_free--;

	/*
	 * Set up hardware VLAN tagging.
	 */
	if (m_head->m_flags & M_VLANTAG) {
		sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
		    htole32(htons(m_head->m_pkthdr.ether_vlantag) |
		    VGE_TDCTL_VTAG);
	}

	sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);
	return (0);

fail:
	m_freem(m_head);
	return error;
}
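/*
 * Note on the fallback path above: if the mbuf chain needs more than
 * VGE_TX_FRAGS segments (bus_dmamap_load_mbuf() returns EFBIG, or the
 * callback zeroes vge_maxsegs), the chain is coalesced into a single
 * cluster with m_defrag() and loaded again with vge_maxsegs = 1.
 */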
/*
 * Main transmit routine.
 */
static void
vge_start(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int idx, pidx = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (!sc->vge_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	idx = sc->vge_ldata.vge_tx_prodidx;

	pidx = idx - 1;
	if (pidx < 0)
		pidx = VGE_TX_DESC_CNT - 1;

	while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
		if (sc->vge_ldata.vge_tx_free <= 2) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		if (vge_encap(sc, m_head, idx)) {
			/* If vge_encap() failed, it will free m_head for us */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
		    htole16(VGE_TXDESC_Q);

		pidx = idx;
		VGE_TX_DESC_INC(idx);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (idx == sc->vge_ldata.vge_tx_prodidx)
		return;

	/* Flush the TX descriptors */
	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
			sc->vge_ldata.vge_tx_list_map,
			BUS_DMASYNC_PREWRITE);

	/* Issue a transmit command. */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

	sc->vge_ldata.vge_tx_prodidx = idx;

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the SSTIMER register, and then trigger an
	 * interrupt. Each time we set the TIMER0_ENABLE bit, the
	 * timer count is reloaded. Only when the transmitter
	 * is idle will the timer hit 0 and an interrupt fire.
	 */
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

static void
vge_init(void *xsc)
{
	struct vge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc->vge_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);

	/*
	 * Initialize the RX and TX descriptors and mbufs.
	 */
	vge_rx_list_init(sc);
	vge_tx_list_init(sc);

	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(ifp)[i]);

	/*
	 * Set receive FIFO threshold. Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
1780 */ 1781 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT); 1782 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2); 1783 1784 /* Set DMA burst length */ 1785 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN); 1786 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128); 1787 1788 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK); 1789 1790 /* Set collision backoff algorithm */ 1791 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM| 1792 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT); 1793 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET); 1794 1795 /* Disable LPSEL field in priority resolution */ 1796 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS); 1797 1798 /* 1799 * Load the addresses of the DMA queues into the chip. 1800 * Note that we only use one transmit queue. 1801 */ 1802 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, 1803 VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr)); 1804 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1); 1805 1806 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 1807 VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr)); 1808 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1); 1809 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT); 1810 1811 /* Enable and wake up the RX descriptor queue */ 1812 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1813 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1814 1815 /* Enable the TX descriptor queue */ 1816 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0); 1817 1818 /* Set up the receive filter -- allow large frames for VLANs. */ 1819 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT); 1820 1821 /* If we want promiscuous mode, set the allframes bit. */ 1822 if (ifp->if_flags & IFF_PROMISC) 1823 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); 1824 1825 /* Set capture broadcast bit to capture broadcast frames. */ 1826 if (ifp->if_flags & IFF_BROADCAST) 1827 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST); 1828 1829 /* Set multicast bit to capture multicast frames. */ 1830 if (ifp->if_flags & IFF_MULTICAST) 1831 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST); 1832 1833 /* Init the cam filter. */ 1834 vge_cam_clear(sc); 1835 1836 /* Init the multicast filter. */ 1837 vge_setmulti(sc); 1838 1839 /* Enable flow control */ 1840 1841 CSR_WRITE_1(sc, VGE_CRS2, 0x8B); 1842 1843 /* Enable jumbo frame reception (if desired) */ 1844 1845 /* Start the MAC. */ 1846 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP); 1847 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL); 1848 CSR_WRITE_1(sc, VGE_CRS0, 1849 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START); 1850 1851 /* 1852 * Configure one-shot timer for microsecond 1853 * resulution and load it for 500 usecs. 1854 */ 1855 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES); 1856 CSR_WRITE_2(sc, VGE_SSTIMER, 400); 1857 1858 /* 1859 * Configure interrupt moderation for receive. Enable 1860 * the holdoff counter and load it, and set the RX 1861 * suppression count to the number of descriptors we 1862 * want to allow before triggering an interrupt. 1863 * The holdoff timer is in units of 20 usecs. 1864 */ 1865 1866 #ifdef notyet 1867 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE); 1868 /* Select the interrupt holdoff timer page. */ 1869 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 1870 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF); 1871 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */ 1872 1873 /* Enable use of the holdoff timer. 
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64);	/* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif

#ifdef DEVICE_POLLING
	/* Disable intr if polling(4) is enabled */
	if (ifp->if_flags & IFF_POLLING)
		vge_disable_intr(sc);
	else
#endif
	vge_enable_intr(sc, 0);

	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->vge_if_flags = 0;
	sc->vge_link = 0;
}

/*
 * Set media options.
 */
static int
vge_ifmedia_upd(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->vge_miibus);

	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
static void
vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->vge_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static void
vge_miibus_statchg(device_t dev)
{
	struct vge_softc *sc;
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vge_miibus);
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */
	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX)
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		else
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	default:
		device_printf(dev, "unknown media type: %x\n",
			      IFM_SUBTYPE(ife->ifm_media));
		break;
	}
}

static int
vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct vge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) &&
			    (ifp->if_flags & IFF_PROMISC) &&
			    !(sc->vge_if_flags & IFF_PROMISC)) {
				CSR_SETBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else if ((ifp->if_flags & IFF_RUNNING) &&
				   !(ifp->if_flags & IFF_PROMISC) &&
				   (sc->vge_if_flags & IFF_PROMISC)) {
				CSR_CLRBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else {
				vge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vge_stop(sc);
		}
		sc->vge_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		vge_setmulti(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
	    {
		uint32_t mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable |= ifr->ifr_reqcap & (IFCAP_HWCSUM);
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = VGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
			if (ifp->if_flags & IFF_RUNNING)
				vge_init(sc);
		}
	    }
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
vge_watchdog(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;

	vge_txeof(sc);
	vge_rxeof(sc, -1);

	vge_init(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
2063 */ 2064 static void 2065 vge_stop(struct vge_softc *sc) 2066 { 2067 struct ifnet *ifp = &sc->arpcom.ac_if; 2068 int i; 2069 2070 ASSERT_SERIALIZED(ifp->if_serializer); 2071 2072 ifp->if_timer = 0; 2073 2074 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2075 2076 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2077 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); 2078 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2079 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); 2080 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); 2081 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); 2082 2083 if (sc->vge_head != NULL) { 2084 m_freem(sc->vge_head); 2085 sc->vge_head = sc->vge_tail = NULL; 2086 } 2087 2088 /* Free the TX list buffers. */ 2089 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 2090 if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) { 2091 bus_dmamap_unload(sc->vge_ldata.vge_mtag, 2092 sc->vge_ldata.vge_tx_dmamap[i]); 2093 m_freem(sc->vge_ldata.vge_tx_mbuf[i]); 2094 sc->vge_ldata.vge_tx_mbuf[i] = NULL; 2095 } 2096 } 2097 2098 /* Free the RX list buffers. */ 2099 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 2100 if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) { 2101 bus_dmamap_unload(sc->vge_ldata.vge_mtag, 2102 sc->vge_ldata.vge_rx_dmamap[i]); 2103 m_freem(sc->vge_ldata.vge_rx_mbuf[i]); 2104 sc->vge_ldata.vge_rx_mbuf[i] = NULL; 2105 } 2106 } 2107 } 2108 2109 /* 2110 * Device suspend routine. Stop the interface and save some PCI 2111 * settings in case the BIOS doesn't restore them properly on 2112 * resume. 2113 */ 2114 static int 2115 vge_suspend(device_t dev) 2116 { 2117 struct vge_softc *sc = device_get_softc(dev); 2118 struct ifnet *ifp = &sc->arpcom.ac_if; 2119 2120 lwkt_serialize_enter(ifp->if_serializer); 2121 vge_stop(sc); 2122 sc->suspended = 1; 2123 lwkt_serialize_exit(ifp->if_serializer); 2124 2125 return (0); 2126 } 2127 2128 /* 2129 * Device resume routine. Restore some PCI settings in case the BIOS 2130 * doesn't, re-enable busmastering, and restart the interface if 2131 * appropriate. 2132 */ 2133 static int 2134 vge_resume(device_t dev) 2135 { 2136 struct vge_softc *sc = device_get_softc(dev); 2137 struct ifnet *ifp = &sc->arpcom.ac_if; 2138 2139 /* reenable busmastering */ 2140 pci_enable_busmaster(dev); 2141 pci_enable_io(dev, SYS_RES_MEMORY); 2142 2143 lwkt_serialize_enter(ifp->if_serializer); 2144 /* reinitialize interface if necessary */ 2145 if (ifp->if_flags & IFF_UP) 2146 vge_init(sc); 2147 2148 sc->suspended = 0; 2149 lwkt_serialize_exit(ifp->if_serializer); 2150 2151 return (0); 2152 } 2153 2154 /* 2155 * Stop all chip I/O so that the kernel's probe routines don't 2156 * get confused by errant DMAs when rebooting. 2157 */ 2158 static void 2159 vge_shutdown(device_t dev) 2160 { 2161 struct vge_softc *sc = device_get_softc(dev); 2162 struct ifnet *ifp = &sc->arpcom.ac_if; 2163 2164 lwkt_serialize_enter(ifp->if_serializer); 2165 vge_stop(sc); 2166 lwkt_serialize_exit(ifp->if_serializer); 2167 } 2168 2169 static void 2170 vge_enable_intr(struct vge_softc *sc, uint32_t isr) 2171 { 2172 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2173 CSR_WRITE_4(sc, VGE_ISR, isr); 2174 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2175 } 2176 2177 #ifdef DEVICE_POLLING 2178 static void 2179 vge_disable_intr(struct vge_softc *sc) 2180 { 2181 CSR_WRITE_4(sc, VGE_IMR, 0); 2182 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2183 } 2184 #endif 2185