/*
 * Copyright (c) 2004
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/vge/if_vge.c,v 1.24 2006/02/14 12:44:56 glebius Exp $
 * $DragonFly: src/sys/dev/netif/vge/if_vge.c,v 1.5 2007/08/14 13:30:35 sephe Exp $
 */

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/serialize.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>

#include <net/bpf.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include "miibus_if.h"

#include <dev/netif/vge/if_vgereg.h>
#include <dev/netif/vge/if_vgevar.h>

#define VGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
 */
static const struct vge_type vge_devs[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT612X,
	  "VIA Networking Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int	vge_probe(device_t);
static int	vge_attach(device_t);
static int	vge_detach(device_t);

static int	vge_encap(struct vge_softc *, struct mbuf *, int);

static void	vge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static void	vge_dma_map_rx_desc(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static void	vge_dma_map_tx_desc(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static int	vge_dma_alloc(device_t);
static void	vge_dma_free(struct vge_softc *);
static int	vge_newbuf(struct vge_softc *, int, struct mbuf *);
static int	vge_rx_list_init(struct vge_softc *);
static int	vge_tx_list_init(struct vge_softc *);
#ifdef VGE_FIXUP_RX
static __inline void
		vge_fixup_rx(struct mbuf *);
#endif
static void	vge_rxeof(struct vge_softc *, int);
static void	vge_txeof(struct vge_softc *);
static void	vge_intr(void *);
static void	vge_tick(struct vge_softc *);
static void	vge_start(struct ifnet *);
static int	vge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	vge_init(void *);
static void	vge_stop(struct vge_softc *);
static void	vge_watchdog(struct ifnet *);
static int	vge_suspend(device_t);
static int	vge_resume(device_t);
static void	vge_shutdown(device_t);
static int	vge_ifmedia_upd(struct ifnet *);
static void	vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

#ifdef VGE_EEPROM
static void	vge_eeprom_getword(struct vge_softc *, int, uint16_t *);
#endif
static void	vge_read_eeprom(struct vge_softc *, uint8_t *, int, int, int);

static void	vge_miipoll_start(struct vge_softc *);
static void	vge_miipoll_stop(struct vge_softc *);
static int	vge_miibus_readreg(device_t, int, int);
static int	vge_miibus_writereg(device_t, int, int, int);
static void	vge_miibus_statchg(device_t);

static void	vge_cam_clear(struct vge_softc *);
static int	vge_cam_set(struct vge_softc *, uint8_t *);
static void	vge_setmulti(struct vge_softc *);
static void	vge_reset(struct vge_softc *);

#ifdef DEVICE_POLLING
static void	vge_poll(struct ifnet *, enum poll_cmd, int);
static void	vge_disable_intr(struct vge_softc *);
#endif
static void	vge_enable_intr(struct vge_softc *, uint32_t);

#define VGE_PCI_LOIO	0x10
#define VGE_PCI_LOMEM	0x14

static device_method_t vge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vge_probe),
	DEVMETHOD(device_attach,	vge_attach),
	DEVMETHOD(device_detach,	vge_detach),
	DEVMETHOD(device_suspend,	vge_suspend),
	DEVMETHOD(device_resume,	vge_resume),
	DEVMETHOD(device_shutdown,	vge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vge_miibus_statchg),

	{ 0, 0 }
};

static driver_t vge_driver = {
	"vge",
	vge_methods,
	sizeof(struct vge_softc)
};

static devclass_t vge_devclass;

DECLARE_DUMMY_MODULE(if_vge);
MODULE_DEPEND(if_vge, miibus, 1, 1, 1);
DRIVER_MODULE(if_vge, pci, vge_driver, vge_devclass, 0, 0);
DRIVER_MODULE(if_vge, cardbus, vge_driver, vge_devclass, 0, 0);
DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);

#ifdef VGE_EEPROM
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)
{
	uint16_t word = 0;
	int i;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}
	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "EEPROM read timed out\n");
		*dest = 0;
		return;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	*dest = word;
}
#endif
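
/*
 * Note that in the non-VGE_EEPROM configuration, vge_read_eeprom() below
 * ignores 'off', 'cnt' and 'swap' and simply copies ETHER_ADDR_LEN bytes
 * out of the VGE_PAR0 registers, which the chip itself preloads from the
 * EEPROM at reset time. The only caller in this file uses it to fetch the
 * station address, so the two paths are equivalent for our purposes.
 */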
/*
 * Read a sequence of words from the EEPROM.
 */
static void
vge_read_eeprom(struct vge_softc *sc, uint8_t *dest, int off, int cnt, int swap)
{
	int i;
#ifdef VGE_EEPROM
	uint16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		vge_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
#else
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
#endif
}

static void
vge_miipoll_stop(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}
	if (i == VGE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "failed to idle MII autopoll\n");
}

static void
vge_miipoll_start(struct vge_softc *sc)
{
	int i;

	/* First, make sure we're idle. */
	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "failed to idle MII autopoll\n");
		return;
	}

	/* Now enable auto poll mode. */
	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}
	if (i == VGE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "failed to start MII autopoll\n");
}

static int
vge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vge_softc *sc;
	int i;
	uint16_t rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return(0);

	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "MII read timed out\n");
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);

	return (rval);
}

static int
vge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vge_softc *sc;
	int i, rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return(0);

	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "MII write timed out\n");
		rval = EIO;
	}

	vge_miipoll_start(sc);

	return (rval);
}
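
/*
 * A note on the CAM access pattern used below: the VGE_CAM0..VGE_CAM7
 * registers are a window that the page-select bits in VGE_CAMCTL switch
 * between the CAM data page, the CAM mask page and the multicast (MAR)
 * page. That is why every routine that touches the filter brackets its
 * accesses with VGE_CAMCTL_PAGESEL manipulation and restores the MAR
 * page before returning.
 */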
static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;
}

static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return(ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}
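
/*
 * The hash fallback in vge_setmulti() below uses the conventional BSD
 * scheme: the top six bits of the big-endian CRC-32 of each address
 * (crc >> 26) select one of the 64 bits spread across the two 32-bit
 * MAR registers. For example, a hash value of 40 sets bit 8 of VGE_MAR1.
 */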
/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast addresses,
 * we use the hash filter instead.
 */
static void
vge_setmulti(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0;
	struct ifmultiaddr *ifma;
	uint32_t h, hashes[2] = { 0, 0 };

	/* First, zot all the multicast entries. */
	vge_cam_clear(sc);
	CSR_WRITE_4(sc, VGE_MAR0, 0);
	CSR_WRITE_4(sc, VGE_MAR1, 0);

	/*
	 * If the user wants allmulti or promisc mode, enable reception
	 * of all multicast frames.
	 */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* Now program new ones */
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		error = vge_cam_set(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (error)
			break;
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}

		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	}
}

static void
vge_reset(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "soft reset timed out\n");
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);

	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "EEPROM reload timed out\n");
		return;
	}

	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vge_probe(device_t dev)
{
	const struct vge_type *t;
	uint16_t did, vid;

	did = pci_get_device(dev);
	vid = pci_get_vendor(dev);
	for (t = vge_devs; t->vge_name != NULL; ++t) {
		if (vid == t->vge_vid && did == t->vge_did) {
			device_set_desc(dev, t->vge_name);
			return 0;
		}
	}
	return (ENXIO);
}

static void
vge_dma_map_rx_desc(void *arg, bus_dma_segment_t *segs, int nseg,
		    bus_size_t mapsize, int error)
{
	struct vge_dmaload_arg *ctx;
	struct vge_rx_desc *d = NULL;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/*
	 * Map the segment array into descriptors.
	 */
	d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */
	if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
		if_printf(&ctx->sc->arpcom.ac_if,
			  "tried to map busy descriptor\n");
		ctx->vge_maxsegs = 0;
		return;
	}

	d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
	d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
	d->vge_sts = 0;
	d->vge_ctl = 0;

	ctx->vge_maxsegs = 1;
}

static void
vge_dma_map_tx_desc(void *arg, bus_dma_segment_t *segs, int nseg,
		    bus_size_t mapsize, int error)
{
	struct vge_dmaload_arg *ctx;
	struct vge_tx_desc *d = NULL;
	struct vge_tx_frag *f;
	int i = 0;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/* Map the segment array into descriptors. */
	d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */
	if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
		ctx->vge_maxsegs = 0;
		return;
	}

	for (i = 0; i < nseg; i++) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
	}

	/* Argh. This chip does not autopad short frames */
	if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
		    ctx->vge_m0->m_pkthdr.len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
		ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN;
		i++;
	}

	/*
	 * When telling the chip how many segments there are, we
	 * must use nseg + 1 instead of just nseg. Darned if I
	 * know why.
	 */
	i++;

	d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16;
	d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM;

	if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
		d->vge_ctl |= VGE_TDCTL_JUMBO;

	ctx->vge_maxsegs = nseg;
}

/*
 * Map a single buffer address.
 */
static void
vge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	*((bus_addr_t *)arg) = segs->ds_addr;
}
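
/*
 * A note on vge_dma_alloc() below: every tag hangs off a parent tag whose
 * lowaddr is BUS_SPACE_MAXADDR_32BIT, so all rings and buffers are placed
 * below 4GB. That conveniently sidesteps the 48-bit segment restriction
 * described at the top of this file, since the 'high' address bits that
 * go into the descriptors and the high address register are then always
 * zero.
 */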
static int
vge_dma_alloc(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	int error, nseg, i, tx_pos = 0, rx_pos = 0;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define VGE_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
			1, 0,			/* alignment, boundary */
			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			MAXBSIZE, VGE_NSEG_NEW,	/* maxsize, nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			BUS_DMA_ALLOCNOW,	/* flags */
			&sc->vge_parent_tag);
	if (error) {
		device_printf(dev, "can't create parent dma tag\n");
		return error;
	}

	/*
	 * Allocate map for RX mbufs.
	 */
	nseg = 32;
	error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES * nseg, nseg, MCLBYTES,
				   BUS_DMA_ALLOCNOW, &sc->vge_ldata.vge_mtag);
	if (error) {
		device_printf(dev, "could not allocate mbuf dma tag\n");
		return error;
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ,
				   BUS_DMA_ALLOCNOW,
				   &sc->vge_ldata.vge_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate tx list dma tag\n");
		return error;
	}

	/* Allocate DMA'able memory for the TX ring */
	error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag,
				 (void **)&sc->vge_ldata.vge_tx_list,
				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &sc->vge_ldata.vge_tx_list_map);
	if (error) {
		device_printf(dev, "could not allocate tx list dma memory\n");
		return error;
	}

	/* Load the map for the TX ring. */
	error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag,
				sc->vge_ldata.vge_tx_list_map,
				sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ,
				vge_dma_map_addr,
				&sc->vge_ldata.vge_tx_list_addr,
				BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "could not load tx list\n");
		bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
				sc->vge_ldata.vge_tx_list,
				sc->vge_ldata.vge_tx_list_map);
		sc->vge_ldata.vge_tx_list = NULL;
		return error;
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
					  &sc->vge_ldata.vge_tx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for TX\n");
			tx_pos = i;
			goto map_fail;
		}
	}
	tx_pos = VGE_TX_DESC_CNT;

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ,
				   BUS_DMA_ALLOCNOW,
				   &sc->vge_ldata.vge_rx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate rx list dma tag\n");
		return error;
	}

	/* Allocate DMA'able memory for the RX ring */
	error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag,
				 (void **)&sc->vge_ldata.vge_rx_list,
				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &sc->vge_ldata.vge_rx_list_map);
	if (error) {
		device_printf(dev, "could not allocate rx list dma memory\n");
		return error;
	}

	/* Load the map for the RX ring. */
	error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag,
				sc->vge_ldata.vge_rx_list_map,
				sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ,
				vge_dma_map_addr,
				&sc->vge_ldata.vge_rx_list_addr,
				BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "could not load rx list\n");
		bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
				sc->vge_ldata.vge_rx_list,
				sc->vge_ldata.vge_rx_list_map);
		sc->vge_ldata.vge_rx_list = NULL;
		return error;
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
					  &sc->vge_ldata.vge_rx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for RX\n");
			rx_pos = i;
			goto map_fail;
		}
	}
	return (0);

map_fail:
	for (i = 0; i < tx_pos; ++i) {
		error = bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_tx_dmamap[i]);
	}
	for (i = 0; i < rx_pos; ++i) {
		error = bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_rx_dmamap[i]);
	}
	bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
	sc->vge_ldata.vge_mtag = NULL;

	return error;
}

static void
vge_dma_free(struct vge_softc *sc)
{
	/* Unload and free the RX DMA ring memory and map */
	if (sc->vge_ldata.vge_rx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag,
				  sc->vge_ldata.vge_rx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
				sc->vge_ldata.vge_rx_list,
				sc->vge_ldata.vge_rx_list_map);
	}

	if (sc->vge_ldata.vge_rx_list_tag)
		bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag);

	/* Unload and free the TX DMA ring memory and map */
	if (sc->vge_ldata.vge_tx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag,
				  sc->vge_ldata.vge_tx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
				sc->vge_ldata.vge_tx_list,
				sc->vge_ldata.vge_tx_list_map);
	}

	if (sc->vge_ldata.vge_tx_list_tag)
		bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag);

	/* Destroy all the RX and TX buffer maps */
	if (sc->vge_ldata.vge_mtag) {
		int i;

		for (i = 0; i < VGE_TX_DESC_CNT; i++) {
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_tx_dmamap[i]);
		}
		for (i = 0; i < VGE_RX_DESC_CNT; i++) {
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_rx_dmamap[i]);
		}
		bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
	}

	if (sc->vge_parent_tag)
		bus_dma_tag_destroy(sc->vge_parent_tag);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vge_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct vge_softc *sc;
	struct ifnet *ifp;
	int error = 0;

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	/* Initialize if_xname early, so if_printf() can be used */
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	sc->vge_res_rid = VGE_PCI_LOMEM;
	sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					     &sc->vge_res_rid, RF_ACTIVE);
	if (sc->vge_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		return ENXIO;
	}

	sc->vge_btag = rman_get_bustag(sc->vge_res);
	sc->vge_bhandle = rman_get_bushandle(sc->vge_res);

	/* Allocate interrupt */
	sc->vge_irq_rid = 0;
	sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->vge_irq_rid,
					     RF_SHAREABLE | RF_ACTIVE);
	if (sc->vge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM (3 words, 6 bytes).
	 */
	vge_read_eeprom(sc, eaddr, VGE_EE_EADDR, 3, 0);

	/* Allocate DMA resources */
	error = vge_dma_alloc(dev);
	if (error)
		goto fail;

	/* Do MII setup */
	error = mii_phy_probe(dev, &sc->vge_miibus, vge_ifmedia_upd,
			      vge_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy!\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vge_init;
	ifp->if_start = vge_start;
	ifp->if_watchdog = vge_watchdog;
	ifp->if_ioctl = vge_ioctl;
#ifdef DEVICE_POLLING
	ifp->if_poll = vge_poll;
#endif
	ifp->if_hwassist = VGE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_VLAN_MTU |
			       IFCAP_HWCSUM |
			       IFCAP_VLAN_HWTAGGING;
	ifp->if_capenable = ifp->if_capabilities;
	ifq_set_maxlen(&ifp->if_snd, VGE_IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr, NULL);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->vge_irq, INTR_MPSAFE, vge_intr, sc,
			       &sc->vge_intrhand, ifp->if_serializer);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	vge_detach(dev);
	return error;
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized.
 * It is called in both the error case in attach and the normal detach
 * case so it needs to be careful about only freeing resources that have
 * actually been allocated.
 */
static int
vge_detach(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);

		vge_stop(sc);
		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
		/*
		 * Force off the IFF_UP flag here, in case someone
		 * still had a BPF descriptor attached to this
		 * interface. If they do, ether_ifdetach() will cause
		 * the BPF code to try and clear the promisc mode
		 * flag, which will bubble down to vge_ioctl(),
		 * which will try to call vge_init() again. This will
		 * turn the NIC back on and restart the MII ticker,
		 * which will panic the system when the kernel tries
		 * to invoke the vge_tick() function that isn't there
		 * anymore.
		 */
		ifp->if_flags &= ~IFF_UP;

		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->vge_miibus)
		device_delete_child(dev, sc->vge_miibus);
	bus_generic_detach(dev);

	if (sc->vge_irq) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->vge_irq_rid,
				     sc->vge_irq);
	}

	if (sc->vge_res) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->vge_res_rid,
				     sc->vge_res);
	}

	vge_dma_free(sc);
	return (0);
}
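
/*
 * vge_newbuf() below hands buffers back to the chip in chunks of
 * VGE_RXCHUNK (4). The backwards loop that sets the OWN bits relies on
 * the ring being refilled in order and on the ring size being a multiple
 * of the chunk size, so that a completed chunk always ends at idx >= 3
 * and the index arithmetic never reaches below slot zero.
 */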
static int
vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
{
	struct vge_dmaload_arg arg;
	struct mbuf *n = NULL;
	int i, error;

	if (m == NULL) {
		n = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
		if (n == NULL)
			return (ENOBUFS);
		m = n;
	} else {
		m->m_data = m->m_ext.ext_buf;
	}

#ifdef VGE_FIXUP_RX
	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The VIA chip requires RX buffers to be aligned on 32-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back two bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
	m_adj(m, VGE_ETHER_ALIGN);
#else
	m->m_len = m->m_pkthdr.len = MCLBYTES;
#endif

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_maxsegs = 1;
	arg.vge_flags = 0;

	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag,
				     sc->vge_ldata.vge_rx_dmamap[idx], m,
				     vge_dma_map_rx_desc, &arg, BUS_DMA_NOWAIT);
	if (error || arg.vge_maxsegs != 1) {
		if (n != NULL)
			m_freem(n);
		return (ENOMEM);
	}

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
#define VGE_RXCHUNK 4
	sc->vge_rx_consumed++;
	if (sc->vge_rx_consumed == VGE_RXCHUNK) {
		for (i = idx; i != idx - sc->vge_rx_consumed; i--) {
			sc->vge_ldata.vge_rx_list[i].vge_sts |=
			    htole32(VGE_RDSTS_OWN);
		}
		sc->vge_rx_consumed = 0;
	}

	sc->vge_ldata.vge_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->vge_ldata.vge_mtag,
			sc->vge_ldata.vge_rx_dmamap[idx], BUS_DMASYNC_PREREAD);

	return (0);
}

static int
vge_tx_list_init(struct vge_softc *sc)
{
	bzero(sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
	bzero(&sc->vge_ldata.vge_tx_mbuf,
	      VGE_TX_DESC_CNT * sizeof(struct mbuf *));

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
			sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE);
	sc->vge_ldata.vge_tx_prodidx = 0;
	sc->vge_ldata.vge_tx_considx = 0;
	sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;

	return (0);
}

static int
vge_rx_list_init(struct vge_softc *sc)
{
	int i;

	bzero(sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
	bzero(&sc->vge_ldata.vge_rx_mbuf,
	      VGE_RX_DESC_CNT * sizeof(struct mbuf *));

	sc->vge_rx_consumed = 0;

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Flush the RX descriptors */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
			sc->vge_ldata.vge_rx_list_map,
			BUS_DMASYNC_PREWRITE);

	sc->vge_ldata.vge_rx_prodidx = 0;
	sc->vge_rx_consumed = 0;
	sc->vge_head = sc->vge_tail = NULL;
	return (0);
}

#ifdef VGE_FIXUP_RX
static __inline void
vge_fixup_rx(struct mbuf *m)
{
	uint16_t *src, *dst;
	int i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif
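
/*
 * With VGE_FIXUP_RX, vge_fixup_rx() moves each received frame back
 * VGE_ETHER_ALIGN (2) bytes, one 16-bit word at a time, into the slack
 * that vge_newbuf() reserved at the head of the buffer. The two-byte
 * shift re-aligns the packet payload for strict-alignment CPUs, as
 * described in the comment in vge_newbuf() above.
 */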
/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static void
vge_rxeof(struct vge_softc *sc, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m;
	int i, total_len, lim = 0;
	struct vge_rx_desc *cur_rx;
	uint32_t rxstat, rxctl;

	ASSERT_SERIALIZED(ifp->if_serializer);

	i = sc->vge_ldata.vge_rx_prodidx;

	/* Invalidate the descriptor memory */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
			sc->vge_ldata.vge_rx_list_map, BUS_DMASYNC_POSTREAD);

	while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {
#ifdef DEVICE_POLLING
		if (count >= 0 && count-- == 0)
			break;
#endif

		cur_rx = &sc->vge_ldata.vge_rx_list[i];
		m = sc->vge_ldata.vge_rx_mbuf[i];
		total_len = VGE_RXBYTES(cur_rx);
		rxstat = le32toh(cur_rx->vge_sts);
		rxctl = le32toh(cur_rx->vge_ctl);

		/* Invalidate the RX mbuf and unload its map */
		bus_dmamap_sync(sc->vge_ldata.vge_mtag,
				sc->vge_ldata.vge_rx_dmamap[i],
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
				  sc->vge_ldata.vge_rx_dmamap[i]);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
			if (sc->vge_head == NULL) {
				sc->vge_head = sc->vge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
				sc->vge_tail = m;
			}
			vge_newbuf(sc, i, NULL);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM) &&
		    !(rxstat & VGE_RDSTS_CSUMERR)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (vge_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		VGE_RX_DESC_INC(i);

		if (sc->vge_head != NULL) {
			m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
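			/*
			 * (Every non-final fragment of the chain holds
			 * exactly MCLBYTES - VGE_ETHER_ALIGN bytes, which
			 * is why the tail length above can be computed
			 * with a simple modulo.)
			 */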
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
			}
			m = sc->vge_head;
			sc->vge_head = sc->vge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else {
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
		}

#ifdef VGE_FIXUP_RX
		vge_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */
		if (ifp->if_capenable & IFCAP_RXCSUM) {
			/* Check IP header checksum */
			if (rxctl & VGE_RDCTL_IPPKT)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (rxctl & VGE_RDCTL_IPCSUMOK)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

			/* Check TCP/UDP checksum */
			if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) &&
			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR|
				    CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		if (rxstat & VGE_RDSTS_VTAG)
			VLAN_INPUT_TAG(m, ntohs((rxctl & VGE_RDCTL_VLANID)));
		else
			ifp->if_input(ifp, m);

		lim++;
		if (lim == VGE_RX_DESC_CNT)
			break;
	}

	/* Flush the RX DMA ring */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
			sc->vge_ldata.vge_rx_list_map,
			BUS_DMASYNC_PREWRITE);

	sc->vge_ldata.vge_rx_prodidx = i;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
}

static void
vge_txeof(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t txstat;
	int idx;

	idx = sc->vge_ldata.vge_tx_considx;

	/* Invalidate the TX descriptor list */
	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
			sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_POSTREAD);

	while (idx != sc->vge_ldata.vge_tx_prodidx) {
		txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
		if (txstat & VGE_TDSTS_OWN)
			break;

		m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
		sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
				  sc->vge_ldata.vge_tx_dmamap[idx]);
		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & VGE_TDSTS_TXERR)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;

		sc->vge_ldata.vge_tx_free++;
		VGE_TX_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */
	if (idx != sc->vge_ldata.vge_tx_considx) {
		sc->vge_ldata.vge_tx_considx = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT)
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
}

static void
vge_tick(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	mii = device_get_softc(sc->vge_miibus);

	mii_tick(mii);
	if (sc->vge_link) {
		if (!(mii->mii_media_status & IFM_ACTIVE))
			sc->vge_link = 0;
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->vge_link = 1;
			if (!ifq_is_empty(&ifp->if_snd))
				ifp->if_start(ifp);
		}
	}
}

#ifdef DEVICE_POLLING
static void
vge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vge_softc *sc = ifp->if_softc;

	sc->rxcycles = count;

	switch (cmd) {
	case POLL_REGISTER:
		vge_disable_intr(sc);
		break;
	case POLL_DEREGISTER:
		vge_enable_intr(sc, 0xffffffff);
		break;
	case POLL_ONLY:
	case POLL_AND_CHECK_STATUS:
		vge_rxeof(sc, count);
		vge_txeof(sc);

		if (!ifq_is_empty(&ifp->if_snd))
			ifp->if_start(ifp);

		/* XXX copy & paste from vge_intr */
		if (cmd == POLL_AND_CHECK_STATUS) {
			uint32_t status = 0;

			status = CSR_READ_4(sc, VGE_ISR);
			if (status == 0xffffffff)
				break;

			if (status)
				CSR_WRITE_4(sc, VGE_ISR, status);

			if (status & (VGE_ISR_TXDMA_STALL |
				      VGE_ISR_RXDMA_STALL))
				vge_init(sc);

			if (status & (VGE_ISR_RXOFLOW | VGE_ISR_RXNODESC)) {
				ifp->if_ierrors++;
				CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
				CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
			}
		}
		break;
	}
}
#endif	/* DEVICE_POLLING */

static void
vge_intr(void *arg)
{
	struct vge_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	if (sc->suspended || !(ifp->if_flags & IFF_UP))
		return;

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);

	for (;;) {
		status = CSR_READ_4(sc, VGE_ISR);
		/* If the card has gone away the read returns 0xffffffff. */
		if (status == 0xFFFFFFFF)
			break;

		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		if ((status & VGE_INTRS) == 0)
			break;

		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc, -1);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc, -1);
			ifp->if_ierrors++;
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
			vge_init(sc);

		if (status & VGE_ISR_LINKSTS)
			vge_tick(sc);
	}

	/* Re-enable interrupts */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	if (!ifq_is_empty(&ifp->if_snd))
		ifp->if_start(ifp);
}

static int
vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
{
	struct vge_dmaload_arg arg;
	bus_dmamap_t map;
	int error;

	arg.vge_flags = 0;

	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
		arg.vge_flags |= VGE_TDCTL_IPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
		arg.vge_flags |= VGE_TDCTL_TCPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
		arg.vge_flags |= VGE_TDCTL_UDPCSUM;

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_m0 = m_head;
	arg.vge_maxsegs = VGE_TX_FRAGS;

	map = sc->vge_ldata.vge_tx_dmamap[idx];
	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map, m_head,
				     vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "can't map mbuf (error %d)\n",
			  error);
		goto fail;
	}

	/* Too many segments to map, coalesce into a single mbuf */
	if (error || arg.vge_maxsegs == 0) {
		struct mbuf *m_new;

		m_new = m_defrag(m_head, MB_DONTWAIT);
		if (m_new == NULL) {
			error = ENOBUFS;
			goto fail;
		} else {
			m_head = m_new;
		}

		arg.sc = sc;
		arg.vge_m0 = m_head;
		arg.vge_idx = idx;
		arg.vge_maxsegs = 1;

		error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
					     m_head, vge_dma_map_tx_desc, &arg,
					     BUS_DMA_NOWAIT);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "can't map mbuf (error %d)\n", error);
			goto fail;
		}
	}

	sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
	sc->vge_ldata.vge_tx_free--;

	/*
	 * Set up hardware VLAN tagging.
	 */
	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
	    m_head->m_pkthdr.rcvif != NULL &&
	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN) {
		struct ifvlan *ifv = m_head->m_pkthdr.rcvif->if_softc;

		if (ifv != NULL) {
			sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
			    htole32(htons(ifv->ifv_tag) | VGE_TDCTL_VTAG);
		}
	}

	sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);
	return (0);

fail:
	m_freem(m_head);
	return error;
}
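
/*
 * A note on the handoff protocol used by vge_start() below: setting the
 * OWN bit in vge_encap() is not by itself enough to hand a descriptor to
 * the chip. The VGE_TXDESC_Q bit in fragment 0 of the *previous*
 * descriptor marks it as having a successor, which is why vge_start()
 * tracks 'pidx' and flags the prior slot after each successful
 * vge_encap() call.
 */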
/*
 * Main transmit routine.
 */
static void
vge_start(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int idx, pidx = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (!sc->vge_link || (ifp->if_flags & IFF_OACTIVE))
		return;

	if (ifq_is_empty(&ifp->if_snd))
		return;

	idx = sc->vge_ldata.vge_tx_prodidx;

	pidx = idx - 1;
	if (pidx < 0)
		pidx = VGE_TX_DESC_CNT - 1;

	while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
		m_head = ifq_poll(&ifp->if_snd);
		if (m_head == NULL)
			break;

		if (sc->vge_ldata.vge_tx_free <= 2) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, m_head);

		if (vge_encap(sc, m_head, idx)) {
			/* If vge_encap() failed, it will free m_head for us */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
		    htole16(VGE_TXDESC_Q);

		pidx = idx;
		VGE_TX_DESC_INC(idx);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (idx == sc->vge_ldata.vge_tx_prodidx)
		return;

	/* Flush the TX descriptors */
	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
			sc->vge_ldata.vge_tx_list_map,
			BUS_DMASYNC_PREWRITE);

	/* Issue a transmit command. */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

	sc->vge_ldata.vge_tx_prodidx = idx;

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the SSTIMER register, and then trigger an
	 * interrupt. Each time we set the TIMER0_ENABLE bit, the
	 * timer count is reloaded. Only when the transmitter
	 * is idle will the timer hit 0 and an interrupt fire.
	 */
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

static void
vge_init(void *xsc)
{
	struct vge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc->vge_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);

	/*
	 * Initialize the RX and TX descriptors and mbufs.
	 */
	vge_rx_list_init(sc);
	vge_tx_list_init(sc);

	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(ifp)[i]);

	/*
	 * Set receive FIFO threshold. Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */
	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Set up the receive filter -- allow large frames for VLANs. */
	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);

	/* Set multicast bit to capture multicast frames. */
	if (ifp->if_flags & IFF_MULTICAST)
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);

	/* Init the cam filter. */
	vge_cam_clear(sc);

	/* Init the multicast filter. */
	vge_setmulti(sc);

	/* Enable flow control */
	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

	/*
	 * Configure one-shot timer for microsecond
	 * resolution and load it for 400 usecs.
	 */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
	CSR_WRITE_2(sc, VGE_SSTIMER, 400);

	/*
	 * Configure interrupt moderation for receive. Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */
#ifdef notyet
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */

	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif

#ifdef DEVICE_POLLING
	/* Disable intr if polling(4) is enabled */
	if (ifp->if_flags & IFF_POLLING)
		vge_disable_intr(sc);
	else
#endif
	vge_enable_intr(sc, 0);

	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->vge_if_flags = 0;
	sc->vge_link = 0;
}

/*
 * Set media options.
 */
static int
vge_ifmedia_upd(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->vge_miibus);

	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
static void
vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->vge_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static void
vge_miibus_statchg(device_t dev)
{
	struct vge_softc *sc;
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vge_miibus);
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */
	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX)
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		else
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	default:
		device_printf(dev, "unknown media type: %x\n",
			      IFM_SUBTYPE(ife->ifm_media));
		break;
	}
}

static int
vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct vge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) &&
			    (ifp->if_flags & IFF_PROMISC) &&
			    !(sc->vge_if_flags & IFF_PROMISC)) {
				CSR_SETBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else if ((ifp->if_flags & IFF_RUNNING) &&
				   !(ifp->if_flags & IFF_PROMISC) &&
				   (sc->vge_if_flags & IFF_PROMISC)) {
				CSR_CLRBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else {
				vge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vge_stop(sc);
		}
		sc->vge_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		vge_setmulti(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
	    {
		uint32_t mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable |= ifr->ifr_reqcap & (IFCAP_HWCSUM);
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = VGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
			if (ifp->if_flags & IFF_RUNNING)
				vge_init(sc);
		}
	    }
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
vge_watchdog(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;

	vge_txeof(sc);
	vge_rxeof(sc, -1);

	vge_init(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vge_stop(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	ifp->if_timer = 0;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	if (sc->vge_head != NULL) {
		m_freem(sc->vge_head);
		sc->vge_head = sc->vge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_ldata.vge_mtag,
					  sc->vge_ldata.vge_tx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
			sc->vge_ldata.vge_tx_mbuf[i] = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_ldata.vge_mtag,
					  sc->vge_ldata.vge_rx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
			sc->vge_ldata.vge_rx_mbuf[i] = NULL;
		}
	}
}

/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	vge_stop(sc);
	sc->suspended = 1;
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
vge_resume(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* Re-enable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_MEMORY);

	lwkt_serialize_enter(ifp->if_serializer);
	/* Reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		vge_init(sc);

	sc->suspended = 0;
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vge_shutdown(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	vge_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static void
vge_enable_intr(struct vge_softc *sc, uint32_t isr)
{
	CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
	CSR_WRITE_4(sc, VGE_ISR, isr);
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
}

#ifdef DEVICE_POLLING
static void
vge_disable_intr(struct vge_softc *sc)
{
	CSR_WRITE_4(sc, VGE_IMR, 0);
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
}
#endif