/*
 * Copyright (c) 2004
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/vge/if_vge.c,v 1.24 2006/02/14 12:44:56 glebius Exp $
 */

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */
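/*
 * A rough illustration of the 48-bit addressing split described above,
 * assuming the VGE_ADDR_LO()/VGE_ADDR_HI() macros used throughout this
 * file simply slice a bus address into its low 32 bits and the next 16
 * bits (a sketch, not the authoritative definitions from if_vgereg.h):
 *
 *	bus_addr_t pa = ...;
 *	desc->vge_addrlo = htole32(pa & 0xFFFFFFFF);	 (bits 0..31)
 *	desc->vge_addrhi = htole16((pa >> 32) & 0xFFFF); (bits 32..47)
 *
 * Bits 48..63 would have to match the chip-wide segment register, which
 * is why all buffers must share one 48-bit segment.
 */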
#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/serialize.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/interrupt.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <net/bpf.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include "miibus_if.h"

#include <dev/netif/vge/if_vgereg.h>
#include <dev/netif/vge/if_vgevar.h>

#define VGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
 */
static const struct vge_type vge_devs[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT612X,
	  "VIA Networking Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int	vge_probe	(device_t);
static int	vge_attach	(device_t);
static int	vge_detach	(device_t);

static int	vge_encap	(struct vge_softc *, struct mbuf *, int);

static void	vge_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
static void	vge_dma_map_rx_desc	(void *, bus_dma_segment_t *, int,
					 bus_size_t, int);
static void	vge_dma_map_tx_desc	(void *, bus_dma_segment_t *, int,
					 bus_size_t, int);
static int	vge_dma_alloc	(device_t);
static void	vge_dma_free	(struct vge_softc *);
static int	vge_newbuf	(struct vge_softc *, int, struct mbuf *);
static int	vge_rx_list_init(struct vge_softc *);
static int	vge_tx_list_init(struct vge_softc *);
#ifdef VGE_FIXUP_RX
static __inline void vge_fixup_rx(struct mbuf *);
#endif
static void	vge_rxeof	(struct vge_softc *, int);
static void	vge_txeof	(struct vge_softc *);
static void	vge_intr	(void *);
static void	vge_tick	(struct vge_softc *);
static void	vge_start	(struct ifnet *);
static int	vge_ioctl	(struct ifnet *, u_long, caddr_t,
				 struct ucred *);
static void	vge_init	(void *);
static void	vge_stop	(struct vge_softc *);
static void	vge_watchdog	(struct ifnet *);
static int	vge_suspend	(device_t);
static int	vge_resume	(device_t);
static void	vge_shutdown	(device_t);
static int	vge_ifmedia_upd	(struct ifnet *);
static void	vge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

#ifdef VGE_EEPROM
static void	vge_eeprom_getword	(struct vge_softc *, int, uint16_t *);
#endif
static void	vge_read_eeprom	(struct vge_softc *, uint8_t *, int, int, int);

static void	vge_miipoll_start	(struct vge_softc *);
static void	vge_miipoll_stop	(struct vge_softc *);
static int	vge_miibus_readreg	(device_t, int, int);
static int	vge_miibus_writereg	(device_t, int, int, int);
static void	vge_miibus_statchg	(device_t);

static void	vge_cam_clear	(struct vge_softc *);
static int	vge_cam_set	(struct vge_softc *, uint8_t *);
static void	vge_setmulti	(struct vge_softc *);
static void	vge_reset	(struct vge_softc *);

#ifdef DEVICE_POLLING
static void	vge_poll(struct ifnet *, enum poll_cmd, int);
static void	vge_disable_intr(struct vge_softc *);
#endif
static void	vge_enable_intr(struct vge_softc *, uint32_t);

#define VGE_PCI_LOIO	0x10
#define VGE_PCI_LOMEM	0x14

static device_method_t vge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vge_probe),
	DEVMETHOD(device_attach,	vge_attach),
	DEVMETHOD(device_detach,	vge_detach),
	DEVMETHOD(device_suspend,	vge_suspend),
	DEVMETHOD(device_resume,	vge_resume),
	DEVMETHOD(device_shutdown,	vge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vge_miibus_statchg),

	{ 0, 0 }
};

static driver_t vge_driver = {
	"vge",
	vge_methods,
	sizeof(struct vge_softc)
};

static devclass_t vge_devclass;

DECLARE_DUMMY_MODULE(if_vge);
MODULE_DEPEND(if_vge, miibus, 1, 1, 1);
DRIVER_MODULE(if_vge, pci, vge_driver, vge_devclass, NULL, NULL);
DRIVER_MODULE(if_vge, cardbus, vge_driver, vge_devclass, NULL, NULL);
DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, NULL, NULL);

#ifdef VGE_EEPROM
/*
 * Read a word of data stored in the EEPROM at address 'addr'.
 */
static void
vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)
{
	uint16_t word = 0;
	int i;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}
	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "EEPROM read timed out\n");
		*dest = 0;
		return;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	*dest = word;
}
#endif
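/*
 * Note that when VGE_EEPROM is not defined, the fallback below ignores
 * 'off', 'cnt' and 'swap' and simply copies the station address out of
 * the VGE_PAR0 registers, which the chip presumably loads from the
 * EEPROM at reset (vge_reset() forces an EEPROM reload). The only
 * caller, vge_attach(), reads exactly the three words of the station
 * address, so the two paths should be equivalent in practice.
 */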
/*
 * Read a sequence of words from the EEPROM.
 */
static void
vge_read_eeprom(struct vge_softc *sc, uint8_t *dest, int off, int cnt, int swap)
{
	int i;
#ifdef VGE_EEPROM
	uint16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		vge_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
#else
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
#endif
}

static void
vge_miipoll_stop(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}
	if (i == VGE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "failed to idle MII autopoll\n");
}

static void
vge_miipoll_start(struct vge_softc *sc)
{
	int i;

	/* First, make sure we're idle. */
	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "failed to idle MII autopoll\n");
		return;
	}

	/* Now enable auto poll mode. */
	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}
	if (i == VGE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "failed to start MII autopoll\n");
}

static int
vge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vge_softc *sc;
	int i;
	uint16_t rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return(0);

	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "MII read timed out\n");
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);

	return (rval);
}

static int
vge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vge_softc *sc;
	int i, rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return(0);

	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "MII write timed out\n");
		rval = EIO;
	}

	vge_miipoll_start(sc);

	return (rval);
}
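/*
 * The CAM filter accesses below all follow the same indirect pattern,
 * roughly (a sketch of the sequence, not a new API):
 *
 *	1. Select a register page (mask, data or MAR) via VGE_CAMCTL.
 *	2. Point VGE_CAMADDR at a CAM entry and set VGE_CAMADDR_ENABLE.
 *	3. Read or write the 8 bytes at VGE_CAM0 + 0..7.
 *	4. Restore VGE_CAMADDR and switch the page select back to the
 *	   multicast (MAR) page so normal hash filtering works again.
 */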
static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;
}

static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return(ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}
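/*
 * When vge_setmulti() below falls back to the hash filter, each address
 * is reduced to a bit index in the 64-bit VGE_MAR0/VGE_MAR1 pair by
 * taking the top 6 bits of the big-endian CRC32 of the address. As an
 * illustration of the computation (the real code is inline in
 * vge_setmulti()):
 *
 *	h = ether_crc32_be(lladdr, ETHER_ADDR_LEN) >> 26;  h is 0..63
 *	if (h < 32)
 *		hashes[0] |= 1 << h;		goes to VGE_MAR0
 *	else
 *		hashes[1] |= 1 << (h - 32);	goes to VGE_MAR1
 */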
/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast addresses,
 * we use the hash filter instead.
 */
static void
vge_setmulti(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0;
	struct ifmultiaddr *ifma;
	uint32_t h, hashes[2] = { 0, 0 };

	/* First, zot all the multicast entries. */
	vge_cam_clear(sc);
	CSR_WRITE_4(sc, VGE_MAR0, 0);
	CSR_WRITE_4(sc, VGE_MAR1, 0);

	/*
	 * If the user wants allmulti or promisc mode, enable reception
	 * of all multicast frames.
	 */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* Now program new ones */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		error = vge_cam_set(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (error)
			break;
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}

		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	}
}

static void
vge_reset(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "soft reset timed out\n");
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);

	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "EEPROM reload timed out\n");
		return;
	}

	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vge_probe(device_t dev)
{
	const struct vge_type *t;
	uint16_t did, vid;

	did = pci_get_device(dev);
	vid = pci_get_vendor(dev);
	for (t = vge_devs; t->vge_name != NULL; ++t) {
		if (vid == t->vge_vid && did == t->vge_did) {
			device_set_desc(dev, t->vge_name);
			return 0;
		}
	}
	return (ENXIO);
}
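/*
 * The two bus_dma callbacks below rely on the descriptor OWN bit as the
 * handoff between driver and NIC: the driver sets OWN in vge_sts to give
 * a descriptor to the chip, and the chip clears it when it is finished
 * with the descriptor. The "still owned" checks are therefore sanity
 * checks that we are not about to overwrite a descriptor the hardware
 * may still be DMA'ing to or from, roughly:
 *
 *	if (le32toh(d->vge_sts) & VGE_RDSTS_OWN)
 *		...back off, the hardware still owns this slot...
 */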
static void
vge_dma_map_rx_desc(void *arg, bus_dma_segment_t *segs, int nseg,
		    bus_size_t mapsize, int error)
{
	struct vge_dmaload_arg *ctx;
	struct vge_rx_desc *d = NULL;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/*
	 * Map the segment array into descriptors.
	 */
	d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */
	if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
		if_printf(&ctx->sc->arpcom.ac_if,
			  "tried to map busy descriptor\n");
		ctx->vge_maxsegs = 0;
		return;
	}

	d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
	d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
	d->vge_sts = 0;
	d->vge_ctl = 0;

	ctx->vge_maxsegs = 1;
}

static void
vge_dma_map_tx_desc(void *arg, bus_dma_segment_t *segs, int nseg,
		    bus_size_t mapsize, int error)
{
	struct vge_dmaload_arg *ctx;
	struct vge_tx_desc *d = NULL;
	struct vge_tx_frag *f;
	int i = 0;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/* Map the segment array into descriptors. */
	d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */
	if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
		ctx->vge_maxsegs = 0;
		return;
	}

	for (i = 0; i < nseg; i++) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
	}

	/* Argh. This chip does not autopad short frames */
	if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
		    ctx->vge_m0->m_pkthdr.len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
		ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN;
		i++;
	}

	/*
	 * When telling the chip how many segments there are, we
	 * must use nseg + 1 instead of just nseg. Darned if I
	 * know why.
	 */
	i++;

	d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16;
	d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM;

	if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
		d->vge_ctl |= VGE_TDCTL_JUMBO;

	ctx->vge_maxsegs = nseg;
}

/*
 * Map a single buffer address.
 */
static void
vge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	*((bus_addr_t *)arg) = segs->ds_addr;
}
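/*
 * All three callbacks above follow the busdma convention used throughout
 * this driver: bus_dmamap_load()/bus_dmamap_load_mbuf() invoke them with
 * the resolved physical segment list, and the callback communicates
 * results back through its opaque 'arg'. A minimal usage sketch (with a
 * hypothetical tag/map/buffer, mirroring what vge_dma_alloc() does
 * below):
 *
 *	bus_addr_t paddr;
 *
 *	error = bus_dmamap_load(tag, map, vaddr, len,
 *	    vge_dma_map_addr, &paddr, BUS_DMA_WAITOK);
 *	on success, paddr now holds the single segment's bus address
 */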
static int
vge_dma_alloc(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	int error, nseg, i, tx_pos = 0, rx_pos = 0;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define VGE_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
			1, 0,			/* alignment, boundary */
			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			MAXBSIZE, VGE_NSEG_NEW,	/* maxsize, nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			BUS_DMA_ALLOCNOW,	/* flags */
			&sc->vge_parent_tag);
	if (error) {
		device_printf(dev, "can't create parent dma tag\n");
		return error;
	}

	/*
	 * Allocate map for RX mbufs.
	 */
	nseg = 32;
	error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES * nseg, nseg, MCLBYTES,
				   BUS_DMA_ALLOCNOW, &sc->vge_ldata.vge_mtag);
	if (error) {
		device_printf(dev, "could not allocate mbuf dma tag\n");
		return error;
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ,
				   BUS_DMA_ALLOCNOW,
				   &sc->vge_ldata.vge_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate tx list dma tag\n");
		return error;
	}

	/* Allocate DMA'able memory for the TX ring */
	error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag,
				 (void **)&sc->vge_ldata.vge_tx_list,
				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &sc->vge_ldata.vge_tx_list_map);
	if (error) {
		device_printf(dev, "could not allocate tx list dma memory\n");
		return error;
	}

	/* Load the map for the TX ring. */
	error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag,
				sc->vge_ldata.vge_tx_list_map,
				sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ,
				vge_dma_map_addr,
				&sc->vge_ldata.vge_tx_list_addr,
				BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "could not load tx list\n");
		bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
				sc->vge_ldata.vge_tx_list,
				sc->vge_ldata.vge_tx_list_map);
		sc->vge_ldata.vge_tx_list = NULL;
		return error;
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
					  &sc->vge_ldata.vge_tx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for TX\n");
			tx_pos = i;
			goto map_fail;
		}
	}
	tx_pos = VGE_TX_DESC_CNT;

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ,
				   BUS_DMA_ALLOCNOW,
				   &sc->vge_ldata.vge_rx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate rx list dma tag\n");
		return error;
	}

	/* Allocate DMA'able memory for the RX ring */
	error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag,
				 (void **)&sc->vge_ldata.vge_rx_list,
				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &sc->vge_ldata.vge_rx_list_map);
	if (error) {
		device_printf(dev, "could not allocate rx list dma memory\n");
		return error;
	}

	/* Load the map for the RX ring. */
	error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag,
				sc->vge_ldata.vge_rx_list_map,
				sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ,
				vge_dma_map_addr,
				&sc->vge_ldata.vge_rx_list_addr,
				BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "could not load rx list\n");
		bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
				sc->vge_ldata.vge_rx_list,
				sc->vge_ldata.vge_rx_list_map);
		sc->vge_ldata.vge_rx_list = NULL;
		return error;
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
					  &sc->vge_ldata.vge_rx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for RX\n");
			rx_pos = i;
			goto map_fail;
		}
	}
	return (0);

map_fail:
	/*
	 * Don't clobber 'error' here; it holds the bus_dmamap_create()
	 * failure we are unwinding from.
	 */
	for (i = 0; i < tx_pos; ++i) {
		bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
				   sc->vge_ldata.vge_tx_dmamap[i]);
	}
	for (i = 0; i < rx_pos; ++i) {
		bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
				   sc->vge_ldata.vge_rx_dmamap[i]);
	}
	bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
	sc->vge_ldata.vge_mtag = NULL;

	return error;
}
static void
vge_dma_free(struct vge_softc *sc)
{
	/* Unload and free the RX DMA ring memory and map */
	if (sc->vge_ldata.vge_rx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag,
				  sc->vge_ldata.vge_rx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
				sc->vge_ldata.vge_rx_list,
				sc->vge_ldata.vge_rx_list_map);
	}

	if (sc->vge_ldata.vge_rx_list_tag)
		bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag);

	/* Unload and free the TX DMA ring memory and map */
	if (sc->vge_ldata.vge_tx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag,
				  sc->vge_ldata.vge_tx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
				sc->vge_ldata.vge_tx_list,
				sc->vge_ldata.vge_tx_list_map);
	}

	if (sc->vge_ldata.vge_tx_list_tag)
		bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag);

	/* Destroy all the RX and TX buffer maps */
	if (sc->vge_ldata.vge_mtag) {
		int i;

		for (i = 0; i < VGE_TX_DESC_CNT; i++) {
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_tx_dmamap[i]);
		}
		for (i = 0; i < VGE_RX_DESC_CNT; i++) {
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_rx_dmamap[i]);
		}
		bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
	}

	if (sc->vge_parent_tag)
		bus_dma_tag_destroy(sc->vge_parent_tag);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vge_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct vge_softc *sc;
	struct ifnet *ifp;
	int error = 0;

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	/* Initialize if_xname early, so if_printf() can be used */
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	sc->vge_res_rid = VGE_PCI_LOMEM;
	sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					     &sc->vge_res_rid, RF_ACTIVE);
	if (sc->vge_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		return ENXIO;
	}

	sc->vge_btag = rman_get_bustag(sc->vge_res);
	sc->vge_bhandle = rman_get_bushandle(sc->vge_res);

	/* Allocate interrupt */
	sc->vge_irq_rid = 0;
	sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->vge_irq_rid,
					     RF_SHAREABLE | RF_ACTIVE);
	if (sc->vge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	vge_read_eeprom(sc, eaddr, VGE_EE_EADDR, 3, 0);

	/* Allocate DMA related stuff */
	error = vge_dma_alloc(dev);
	if (error)
		goto fail;

	/* Do MII setup */
	error = mii_phy_probe(dev, &sc->vge_miibus, vge_ifmedia_upd,
			      vge_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy!\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vge_init;
	ifp->if_start = vge_start;
	ifp->if_watchdog = vge_watchdog;
	ifp->if_ioctl = vge_ioctl;
#ifdef DEVICE_POLLING
	ifp->if_poll = vge_poll;
#endif
	ifp->if_hwassist = VGE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_VLAN_MTU |
			       IFCAP_HWCSUM |
			       IFCAP_VLAN_HWTAGGING;
	ifp->if_capenable = ifp->if_capabilities;
	ifq_set_maxlen(&ifp->if_snd, VGE_IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr, NULL);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->vge_irq, INTR_MPSAFE, vge_intr, sc,
			       &sc->vge_intrhand, ifp->if_serializer);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = rman_get_cpuid(sc->vge_irq);
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return 0;
fail:
	vge_detach(dev);
	return error;
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vge_detach(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);

		vge_stop(sc);
		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
		/*
		 * Force off the IFF_UP flag here, in case someone
		 * still had a BPF descriptor attached to this
		 * interface. If they do, ether_ifdetach() will cause
		 * the BPF code to try and clear the promisc mode
		 * flag, which will bubble down to vge_ioctl(),
		 * which will try to call vge_init() again. This will
		 * turn the NIC back on and restart the MII ticker,
		 * which will panic the system when the kernel tries
		 * to invoke the vge_tick() function that isn't there
		 * anymore.
		 */
		ifp->if_flags &= ~IFF_UP;

		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->vge_miibus)
		device_delete_child(dev, sc->vge_miibus);
	bus_generic_detach(dev);

	if (sc->vge_irq) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->vge_irq_rid,
				     sc->vge_irq);
	}

	if (sc->vge_res) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->vge_res_rid,
				     sc->vge_res);
	}

	vge_dma_free(sc);
	return (0);
}

static int
vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
{
	struct vge_dmaload_arg arg;
	struct mbuf *n = NULL;
	int i, error;

	if (m == NULL) {
		n = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
		if (n == NULL)
			return (ENOBUFS);
		m = n;
	} else {
		m->m_data = m->m_ext.ext_buf;
	}

#ifdef VGE_FIXUP_RX
	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The VIA chip requires RX buffers to be aligned on 32-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back two bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
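	/*
	 * In other words (a sketch of the layout; VGE_ETHER_ALIGN is
	 * presumably 4, from if_vgevar.h): m_adj() below offsets m_data
	 * by VGE_ETHER_ALIGN, which keeps the buffer handed to the chip
	 * 32-bit aligned, and vge_fixup_rx() later copies the received
	 * data back down ETHER_ALIGN (2) bytes, so the IP header that
	 * follows the 14-byte Ethernet header ends up 32-bit aligned.
	 */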
	m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
	m_adj(m, VGE_ETHER_ALIGN);
#else
	m->m_len = m->m_pkthdr.len = MCLBYTES;
#endif

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_maxsegs = 1;
	arg.vge_flags = 0;

	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag,
				     sc->vge_ldata.vge_rx_dmamap[idx], m,
				     vge_dma_map_rx_desc, &arg, BUS_DMA_NOWAIT);
	if (error || arg.vge_maxsegs != 1) {
		if (n != NULL)
			m_freem(n);
		return (ENOMEM);
	}

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
#define VGE_RXCHUNK 4
	sc->vge_rx_consumed++;
	if (sc->vge_rx_consumed == VGE_RXCHUNK) {
		for (i = idx; i != idx - sc->vge_rx_consumed; i--) {
			sc->vge_ldata.vge_rx_list[i].vge_sts |=
			    htole32(VGE_RDSTS_OWN);
		}
		sc->vge_rx_consumed = 0;
	}

	sc->vge_ldata.vge_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->vge_ldata.vge_mtag,
			sc->vge_ldata.vge_rx_dmamap[idx], BUS_DMASYNC_PREREAD);

	return (0);
}

static int
vge_tx_list_init(struct vge_softc *sc)
{
	bzero((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
	bzero((char *)&sc->vge_ldata.vge_tx_mbuf,
	      (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
			sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE);
	sc->vge_ldata.vge_tx_prodidx = 0;
	sc->vge_ldata.vge_tx_considx = 0;
	sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;

	return (0);
}

static int
vge_rx_list_init(struct vge_softc *sc)
{
	int i;

	bzero(sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
	bzero(&sc->vge_ldata.vge_rx_mbuf,
	      VGE_RX_DESC_CNT * sizeof(struct mbuf *));

	sc->vge_rx_consumed = 0;

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Flush the RX descriptors */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
			sc->vge_ldata.vge_rx_list_map,
			BUS_DMASYNC_PREWRITE);

	sc->vge_ldata.vge_rx_prodidx = 0;
	sc->vge_rx_consumed = 0;
	sc->vge_head = sc->vge_tail = NULL;
	return (0);
}

#ifdef VGE_FIXUP_RX
static __inline void
vge_fixup_rx(struct mbuf *m)
{
	uint16_t *src, *dst;
	int i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif
/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static void
vge_rxeof(struct vge_softc *sc, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m;
	int i, total_len, lim = 0;
	struct vge_rx_desc *cur_rx;
	uint32_t rxstat, rxctl;

	ASSERT_SERIALIZED(ifp->if_serializer);

	i = sc->vge_ldata.vge_rx_prodidx;

	/* Invalidate the descriptor memory */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
			sc->vge_ldata.vge_rx_list_map, BUS_DMASYNC_POSTREAD);

	while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {
#ifdef DEVICE_POLLING
		if (count >= 0 && count-- == 0)
			break;
#endif

		cur_rx = &sc->vge_ldata.vge_rx_list[i];
		m = sc->vge_ldata.vge_rx_mbuf[i];
		total_len = VGE_RXBYTES(cur_rx);
		rxstat = le32toh(cur_rx->vge_sts);
		rxctl = le32toh(cur_rx->vge_ctl);

		/* Invalidate the RX mbuf and unload its map */
		bus_dmamap_sync(sc->vge_ldata.vge_mtag,
				sc->vge_ldata.vge_rx_dmamap[i],
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
				  sc->vge_ldata.vge_rx_dmamap[i]);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
			if (sc->vge_head == NULL) {
				sc->vge_head = sc->vge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
				sc->vge_tail = m;
			}
			vge_newbuf(sc, i, NULL);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM) &&
		    !(rxstat & VGE_RDSTS_CSUMERR)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (vge_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		VGE_RX_DESC_INC(i);

		if (sc->vge_head != NULL) {
			m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
			/*
			 * Special case: if there are 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
			}
			m = sc->vge_head;
			sc->vge_head = sc->vge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else {
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
		}

#ifdef VGE_FIXUP_RX
		vge_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */
		if (ifp->if_capenable & IFCAP_RXCSUM) {
			/* Check IP header checksum */
			if (rxctl & VGE_RDCTL_IPPKT)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (rxctl & VGE_RDCTL_IPCSUMOK)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

			/* Check TCP/UDP checksum */
			if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) &&
			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR|
				    CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		if (rxstat & VGE_RDSTS_VTAG) {
			m->m_flags |= M_VLANTAG;
			m->m_pkthdr.ether_vlantag =
			    ntohs((rxctl & VGE_RDCTL_VLANID));
		}
		ifp->if_input(ifp, m);

		lim++;
		if (lim == VGE_RX_DESC_CNT)
			break;
	}

	/* Flush the RX DMA ring */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
			sc->vge_ldata.vge_rx_list_map,
			BUS_DMASYNC_PREWRITE);

	sc->vge_ldata.vge_rx_prodidx = i;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
}

static void
vge_txeof(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t txstat;
	int idx;

	idx = sc->vge_ldata.vge_tx_considx;

	/* Invalidate the TX descriptor list */
	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
			sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_POSTREAD);

	while (idx != sc->vge_ldata.vge_tx_prodidx) {
		txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
		if (txstat & VGE_TDSTS_OWN)
			break;

		m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
		sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
				  sc->vge_ldata.vge_tx_dmamap[idx]);
		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & VGE_TDSTS_TXERR)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;

		sc->vge_ldata.vge_tx_free++;
		VGE_TX_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */
	if (idx != sc->vge_ldata.vge_tx_considx) {
		sc->vge_ldata.vge_tx_considx = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT)
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
}
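/*
 * Despite its name, vge_tick() is not driven by a periodic callout in
 * this driver: link state is tracked by the chip's MII autopoll logic
 * (see vge_miipoll_start()), and vge_tick() is invoked from vge_intr()
 * when a VGE_ISR_LINKSTS interrupt reports a link change.
 */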
static void
vge_tick(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	mii = device_get_softc(sc->vge_miibus);

	mii_tick(mii);
	if (sc->vge_link) {
		if (!(mii->mii_media_status & IFM_ACTIVE))
			sc->vge_link = 0;
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->vge_link = 1;
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}
}

#ifdef DEVICE_POLLING
static void
vge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vge_softc *sc = ifp->if_softc;

	sc->rxcycles = count;

	switch (cmd) {
	case POLL_REGISTER:
		vge_disable_intr(sc);
		break;
	case POLL_DEREGISTER:
		vge_enable_intr(sc, 0xffffffff);
		break;
	case POLL_ONLY:
	case POLL_AND_CHECK_STATUS:
		vge_rxeof(sc, count);
		vge_txeof(sc);

		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);

		/* XXX copy & paste from vge_intr */
		if (cmd == POLL_AND_CHECK_STATUS) {
			uint32_t status = 0;

			status = CSR_READ_4(sc, VGE_ISR);
			if (status == 0xffffffff)
				break;

			if (status)
				CSR_WRITE_4(sc, VGE_ISR, status);

			if (status & (VGE_ISR_TXDMA_STALL |
				      VGE_ISR_RXDMA_STALL))
				vge_init(sc);

			if (status & (VGE_ISR_RXOFLOW | VGE_ISR_RXNODESC)) {
				ifp->if_ierrors++;
				CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
				CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
			}
		}
		break;
	}
}
#endif	/* DEVICE_POLLING */
static void
vge_intr(void *arg)
{
	struct vge_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	if (sc->suspended || !(ifp->if_flags & IFF_UP))
		return;

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);

	for (;;) {
		status = CSR_READ_4(sc, VGE_ISR);
		/* If the card has gone away the read returns 0xffffffff. */
		if (status == 0xFFFFFFFF)
			break;

		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		if ((status & VGE_INTRS) == 0)
			break;

		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc, -1);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc, -1);
			ifp->if_ierrors++;
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
			vge_init(sc);

		if (status & VGE_ISR_LINKSTS)
			vge_tick(sc);
	}

	/* Re-enable interrupts */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static int
vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
{
	struct vge_dmaload_arg arg;
	bus_dmamap_t map;
	int error;

	arg.vge_flags = 0;

	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
		arg.vge_flags |= VGE_TDCTL_IPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
		arg.vge_flags |= VGE_TDCTL_TCPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
		arg.vge_flags |= VGE_TDCTL_UDPCSUM;

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_m0 = m_head;
	arg.vge_maxsegs = VGE_TX_FRAGS;

	map = sc->vge_ldata.vge_tx_dmamap[idx];
	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map, m_head,
				     vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "can't map mbuf (error %d)\n",
			  error);
		goto fail;
	}

	/* Too many segments to map, coalesce into a single mbuf */
	if (error || arg.vge_maxsegs == 0) {
		struct mbuf *m_new;

		m_new = m_defrag(m_head, MB_DONTWAIT);
		if (m_new == NULL) {
			error = ENOBUFS;
			goto fail;
		} else {
			m_head = m_new;
		}

		arg.sc = sc;
		arg.vge_m0 = m_head;
		arg.vge_idx = idx;
		arg.vge_maxsegs = 1;

		error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
					     m_head, vge_dma_map_tx_desc, &arg,
					     BUS_DMA_NOWAIT);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "can't map mbuf (error %d)\n", error);
			goto fail;
		}
	}

	sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
	sc->vge_ldata.vge_tx_free--;

	/*
	 * Set up hardware VLAN tagging.
	 */
	if (m_head->m_flags & M_VLANTAG) {
		sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
		    htole32(htons(m_head->m_pkthdr.ether_vlantag) |
			    VGE_TDCTL_VTAG);
	}

	sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);
	return (0);

fail:
	m_freem(m_head);
	return error;
}
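/*
 * A note on the fallback above: when the mbuf chain needs more than
 * VGE_TX_FRAGS segments (the callback reports this by zeroing
 * vge_maxsegs, and busdma may also return EFBIG), vge_encap()
 * linearizes the chain with m_defrag() and retries the load with a
 * single-segment budget. As the header comment notes, the stack rarely
 * builds chains that long, so this path should be cold.
 */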
/*
 * Main transmit routine.
 */
static void
vge_start(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int idx, pidx = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (!sc->vge_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	idx = sc->vge_ldata.vge_tx_prodidx;

	pidx = idx - 1;
	if (pidx < 0)
		pidx = VGE_TX_DESC_CNT - 1;

	while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
		if (sc->vge_ldata.vge_tx_free <= 2) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		if (vge_encap(sc, m_head, idx)) {
			/* If vge_encap() failed, it will free m_head for us */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
		    htole16(VGE_TXDESC_Q);

		pidx = idx;
		VGE_TX_DESC_INC(idx);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (idx == sc->vge_ldata.vge_tx_prodidx)
		return;

	/* Flush the TX descriptors */
	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
			sc->vge_ldata.vge_tx_list_map,
			BUS_DMASYNC_PREWRITE);

	/* Issue a transmit command. */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

	sc->vge_ldata.vge_tx_prodidx = idx;

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the SSTIMER register, and then trigger an
	 * interrupt. Each time we set the TIMER0_ENABLE bit, the
	 * timer count is reloaded. Only when the transmitter
	 * is idle will the timer hit 0 and an interrupt fire.
	 */
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
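/*
 * vge_init() below brings the interface up in roughly this order:
 * stop and reset the chip, rebuild the RX/TX rings, program the
 * station address and FIFO/DMA parameters, point the chip at the two
 * descriptor rings, program the receive filter and CAM, start the MAC,
 * and finally arm (or, under polling, mask) interrupts before kicking
 * the PHY with mii_mediachg().
 */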
static void
vge_init(void *xsc)
{
	struct vge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc->vge_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);

	/*
	 * Initialize the RX and TX descriptors and mbufs.
	 */
	vge_rx_list_init(sc);
	vge_tx_list_init(sc);

	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(ifp)[i]);

	/*
	 * Set receive FIFO threshold. Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */
	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Set up the receive filter -- allow large frames for VLANs. */
	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);

	/* Set multicast bit to capture multicast frames. */
	if (ifp->if_flags & IFF_MULTICAST)
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);

	/* Init the CAM filter. */
	vge_cam_clear(sc);

	/* Init the multicast filter. */
	vge_setmulti(sc);

	/* Enable flow control */
	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

	/*
	 * Configure one-shot timer for microsecond
	 * resolution and load it for 400 usecs.
	 */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
	CSR_WRITE_2(sc, VGE_SSTIMER, 400);

	/*
	 * Configure interrupt moderation for receive. Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */
#ifdef notyet
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */

	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif

#ifdef DEVICE_POLLING
	/* Disable intr if polling(4) is enabled */
	if (ifp->if_flags & IFF_POLLING)
		vge_disable_intr(sc);
	else
#endif
	vge_enable_intr(sc, 0);

	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->vge_if_flags = 0;
	sc->vge_link = 0;
}

/*
 * Set media options.
 */
static int
vge_ifmedia_upd(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->vge_miibus);

	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
static void
vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->vge_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static void
vge_miibus_statchg(device_t dev)
{
	struct vge_softc *sc;
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vge_miibus);
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */
	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX)
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		else
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	default:
		device_printf(dev, "unknown media type: %x\n",
			      IFM_SUBTYPE(ife->ifm_media));
		break;
	}
}

static int
vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct vge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) &&
			    (ifp->if_flags & IFF_PROMISC) &&
			    !(sc->vge_if_flags & IFF_PROMISC)) {
				CSR_SETBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else if ((ifp->if_flags & IFF_RUNNING) &&
				   !(ifp->if_flags & IFF_PROMISC) &&
				   (sc->vge_if_flags & IFF_PROMISC)) {
				CSR_CLRBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else {
				vge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vge_stop(sc);
		}
		sc->vge_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		vge_setmulti(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
	    {
		uint32_t mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable |= ifr->ifr_reqcap & (IFCAP_HWCSUM);
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = VGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
			if (ifp->if_flags & IFF_RUNNING)
				vge_init(sc);
		}
	    }
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
vge_watchdog(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;

	vge_txeof(sc);
	vge_rxeof(sc, -1);

	vge_init(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vge_stop(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	ifp->if_timer = 0;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	if (sc->vge_head != NULL) {
		m_freem(sc->vge_head);
		sc->vge_head = sc->vge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_ldata.vge_mtag,
					  sc->vge_ldata.vge_tx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
			sc->vge_ldata.vge_tx_mbuf[i] = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_ldata.vge_mtag,
					  sc->vge_ldata.vge_rx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
			sc->vge_ldata.vge_rx_mbuf[i] = NULL;
		}
	}
}

/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	vge_stop(sc);
	sc->suspended = 1;
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
vge_resume(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_MEMORY);

	lwkt_serialize_enter(ifp->if_serializer);
	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		vge_init(sc);

	sc->suspended = 0;
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vge_shutdown(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	vge_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static void
vge_enable_intr(struct vge_softc *sc, uint32_t isr)
{
	CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
	CSR_WRITE_4(sc, VGE_ISR, isr);
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
}

#ifdef DEVICE_POLLING
static void
vge_disable_intr(struct vge_softc *sc)
{
	CSR_WRITE_4(sc, VGE_IMR, 0);
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
}
#endif